├── .dockerignore ├── server ├── test_requirements.txt ├── alembic │ ├── README │ ├── versions │ │ ├── 5cebc0f48f6e_add_regions_to_orders_table.py │ │ ├── 3ec897840ea4_add_channel_to_order_table.py │ │ ├── 0704901102eb_add_tx_retries_table.py │ │ └── c7b63286fd71_baseline.py │ ├── script.py.mako │ └── env.py ├── workers.sh ├── database.py ├── Dockerfile ├── requirements.txt ├── tests │ ├── conftest.py │ ├── test_queues.py │ ├── test_worker.py │ ├── test_regions.py │ ├── test_info.py │ ├── test_worker_manager.py │ ├── common.py │ ├── test_invoices.py │ └── test_order_helpers.py ├── utils.py ├── queues.py ├── config │ └── database.yml ├── server.sh ├── info.py ├── invoices.py ├── worker.py ├── bidding.py ├── alembic.ini ├── server.py ├── worker_manager.py ├── regions.py ├── models.py ├── error.py ├── schemas.py ├── transmitter.py ├── constants.py ├── invoice_helpers.py └── templates │ └── queue.html ├── terraform ├── modules │ ├── tor │ │ ├── v3.pk │ │ ├── v3.pubk │ │ ├── kms.tf │ │ ├── iam.tf │ │ ├── variables.tf │ │ ├── firewall.tf │ │ ├── data.tf │ │ ├── main.tf │ │ └── cloud-init │ │ │ └── tor.yaml │ ├── prometheus │ │ ├── outputs.tf │ │ ├── iam.tf │ │ ├── firewall.tf │ │ ├── data.tf │ │ ├── variables.tf │ │ ├── main.tf │ │ └── cloud-init │ │ │ └── prometheus.yml │ ├── blc │ │ ├── outputs.tf │ │ ├── iam.tf │ │ ├── network.tf │ │ ├── firewall.tf │ │ ├── data.tf │ │ ├── variables.tf │ │ ├── main.tf │ │ └── cloud-init │ │ │ └── blc.yaml │ ├── dns │ │ ├── variables.tf │ │ └── blockstream-space.tf │ └── lb │ │ ├── outputs.tf │ │ ├── firewall.tf │ │ ├── network.tf │ │ ├── gcs.tf │ │ ├── data.tf │ │ ├── variables.tf │ │ ├── iam.tf │ │ └── main.tf ├── data.tf ├── outputs.tf ├── iam.tf ├── network.tf ├── network-tor.tf ├── variables.tf └── main.tf ├── .gitignore ├── nginx ├── Dockerfile └── nginx.conf ├── sse ├── test-producer.js ├── Dockerfile ├── package.json ├── README.md ├── LICENSE └── server.js ├── .github └── workflows │ └── test.yml ├── LICENSE ├── 
docker-compose.yml ├── README.md └── .gitlab-ci.yml /.dockerignore: -------------------------------------------------------------------------------- 1 | terraform 2 | *~ 3 | *.pyc -------------------------------------------------------------------------------- /server/test_requirements.txt: -------------------------------------------------------------------------------- 1 | pytest-mock==3.6.1 -------------------------------------------------------------------------------- /terraform/modules/tor/v3.pk: -------------------------------------------------------------------------------- 1 | overwritten_by_ci -------------------------------------------------------------------------------- /terraform/modules/tor/v3.pubk: -------------------------------------------------------------------------------- 1 | overwritten_by_ci -------------------------------------------------------------------------------- /server/alembic/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. 
-------------------------------------------------------------------------------- /server/workers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | python3 worker_manager.py 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terraform* 2 | **/__pycache__/ 3 | *~ 4 | htmlcov/ 5 | .coverage 6 | -------------------------------------------------------------------------------- /nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx 2 | COPY nginx.conf /etc/nginx/conf.d/default.conf 3 | -------------------------------------------------------------------------------- /server/database.py: -------------------------------------------------------------------------------- 1 | from flask_sqlalchemy import SQLAlchemy 2 | 3 | db = SQLAlchemy() 4 | -------------------------------------------------------------------------------- /terraform/modules/prometheus/outputs.tf: -------------------------------------------------------------------------------- 1 | output "prom_svc_acct" { 2 | value = element(concat(google_service_account.prometheus.*.email, [""]), 0) 3 | } 4 | 5 | -------------------------------------------------------------------------------- /server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9 2 | 3 | WORKDIR /app 4 | COPY requirements.txt . 5 | RUN pip install -r requirements.txt 6 | COPY . . 
7 | 8 | ENTRYPOINT ["bash"] 9 | CMD ["server.sh"] 10 | -------------------------------------------------------------------------------- /terraform/modules/blc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "internal_ip" { 2 | #value = google_compute_address.blc-internal[0].address 3 | value = length(google_compute_address.blc-internal) > 0 ? google_compute_address.blc-internal[0].address : "" 4 | } 5 | -------------------------------------------------------------------------------- /sse/test-producer.js: -------------------------------------------------------------------------------- 1 | const redis = require('redis').createClient(process.env.REDIS_URI) 2 | 3 | const chan = process.env.PUB_CHANNEL 4 | 5 | let i = 0 6 | setInterval(_ => redis.publish(chan, JSON.stringify({ foo: 'bar', i: ++i })), 1000) 7 | -------------------------------------------------------------------------------- /sse/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-slim 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package*.json ./ 8 | 9 | RUN npm install 10 | 11 | # Bundle app source 12 | COPY . . 
13 | 14 | CMD [ "npm", "start" ] 15 | -------------------------------------------------------------------------------- /server/requirements.txt: -------------------------------------------------------------------------------- 1 | alembic==1.5.5 2 | Flask==1.1.4 3 | Flask-RESTful==0.3.8 4 | Flask-SQLAlchemy==2.4.4 5 | gunicorn[gevent]==20.0.4 6 | markupsafe==2.0.1 7 | marshmallow==3.10.0 8 | pysqlite3==0.4.5 9 | PyYAML==5.4.1 10 | redis==3.5.3 11 | requests===2.25.1 12 | SQLAlchemy==1.3.23 13 | -------------------------------------------------------------------------------- /server/tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.fixture 5 | def mockredis(mocker): 6 | _mr = mocker.Mock(name="mockredis") 7 | mocker.patch("transmitter.redis", return_value=_mr) 8 | mocker.patch("transmitter.redis.from_url", return_value=_mr) 9 | return _mr 10 | -------------------------------------------------------------------------------- /server/utils.py: -------------------------------------------------------------------------------- 1 | import hmac 2 | import hashlib 3 | 4 | 5 | def hmac_sha256_digest(key, data): 6 | assert (isinstance(key, str)) 7 | assert (isinstance(data, str)) 8 | return hmac.new(key.encode(), msg=data.encode(), 9 | digestmod=hashlib.sha256).hexdigest() 10 | -------------------------------------------------------------------------------- /server/queues.py: -------------------------------------------------------------------------------- 1 | from flask_restful import Resource 2 | from flask import render_template, make_response 3 | import constants 4 | 5 | 6 | class QueueResource(Resource): 7 | 8 | def get(self): 9 | return make_response(render_template('queue.html', env=constants.env), 10 | 200, {'Content-Type': 'text/html'}) 11 | -------------------------------------------------------------------------------- /sse/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "redis-to-sse", 3 | "version": "0.1.0", 4 | "description": "Redis to SSE", 5 | "scripts": { 6 | "start": "node server.js" 7 | }, 8 | "author": "Nadav Ivgi", 9 | "license": "MIT", 10 | "dependencies": { 11 | "express": "^4.17.1", 12 | "morgan": "^1.10.0", 13 | "redis": "^3.1.2" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /server/config/database.yml: -------------------------------------------------------------------------------- 1 | development: 2 | adapter: sqlite3 3 | database: /data/satellite-api/db_development.sqlite3 4 | pool: 5 5 | timeout: 5000 6 | 7 | test: 8 | adapter: sqlite3 9 | database: /tmp/satellite_api_test_db.sqlite3 10 | pool: 5 11 | timeout: 5000 12 | 13 | production: 14 | adapter: sqlite3 15 | database: /data/satellite-api/db_production.sqlite3 16 | pool: 5 17 | timeout: 5000 18 | -------------------------------------------------------------------------------- /sse/README.md: -------------------------------------------------------------------------------- 1 | # Redis to SSE 2 | 3 | Subscribes to a redis pub/sub channel and broadcasts messages 4 | over HTTP server-sent events. 5 | 6 | To start the server: 7 | 8 | ```bash 9 | $ git clone git@github.com:shesek/redis-to-sse && cd redis-to-sse 10 | $ npm install 11 | $ REDIS_URI=redis://127.0.0.1:6379 SUB_TOPIC=foobar PORT=4500 npm start 12 | ``` 13 | 14 | To subscribe to events, send a GET request to `/stream`. 
15 | -------------------------------------------------------------------------------- /terraform/modules/tor/kms.tf: -------------------------------------------------------------------------------- 1 | resource "google_kms_key_ring" "tor-key-ring" { 2 | project = var.project 3 | name = "${var.name}-keyring" 4 | location = var.region 5 | count = var.create_resources 6 | } 7 | 8 | resource "google_kms_crypto_key" "tor-crypto-key" { 9 | name = "${var.name}-crypto-key" 10 | key_ring = google_kms_key_ring.tor-key-ring[0].id 11 | count = var.create_resources 12 | } 13 | -------------------------------------------------------------------------------- /terraform/modules/dns/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | } 4 | 5 | variable "satellite_lb" { 6 | type = string 7 | } 8 | 9 | variable "satellite_api_lb" { 10 | type = string 11 | } 12 | 13 | variable "satellite_api_lb_staging" { 14 | type = string 15 | } 16 | 17 | variable "blocksat_monitoring" { 18 | type = string 19 | } 20 | 21 | variable "create_resources" { 22 | type = string 23 | } 24 | 25 | -------------------------------------------------------------------------------- /terraform/modules/prometheus/iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "prometheus" { 2 | account_id = var.name 3 | display_name = var.name 4 | 5 | count = var.create_resources 6 | } 7 | 8 | resource "google_project_iam_member" "prometheus" { 9 | project = var.project 10 | role = "roles/editor" 11 | member = "serviceAccount:${google_service_account.prometheus[0].email}" 12 | 13 | count = var.create_resources 14 | } 15 | 16 | -------------------------------------------------------------------------------- /terraform/data.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "blc-mainnet" { 2 | backend 
= "gcs" 3 | workspace = local.env 4 | 5 | config = { 6 | bucket = "terraform-bs-source" 7 | prefix = "satellite-api" 8 | } 9 | } 10 | 11 | data "terraform_remote_state" "blc-testnet" { 12 | backend = "gcs" 13 | workspace = "testnet-${local.env}" 14 | 15 | config = { 16 | bucket = "terraform-bs-source" 17 | prefix = "satellite-api" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /terraform/modules/blc/iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "blc" { 2 | account_id = "${var.name}-${var.net}-${var.env}" 3 | display_name = "${var.name}-${var.net}-${var.env}" 4 | count = var.create_resources 5 | } 6 | 7 | resource "google_project_iam_member" "blc" { 8 | project = var.project 9 | role = "roles/editor" 10 | member = "serviceAccount:${google_service_account.blc[0].email}" 11 | count = var.create_resources 12 | } 13 | 14 | -------------------------------------------------------------------------------- /terraform/modules/prometheus/firewall.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "all-traffic" { 2 | name = "prometheus-${var.name}-all-traffic-access" 3 | network = data.google_compute_network.default[0].self_link 4 | 5 | count = var.create_resources 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["80"] 10 | } 11 | 12 | source_ranges = var.prom_allowed_source_ip 13 | 14 | target_service_accounts = [ 15 | google_service_account.prometheus[0].email, 16 | ] 17 | } 18 | 19 | -------------------------------------------------------------------------------- /server/server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Run database migrations 5 | alembic upgrade head 6 | 7 | # Start the server 8 | # set number of worker based on suggestion in: 9 | # 
https://docs.gunicorn.org/en/stable/design.html#how-many-workers 10 | n_cores=$(nproc --all) 11 | n_workers=$(expr $n_cores \* 2 + 1) 12 | gunicorn \ 13 | --bind 0.0.0.0:9292 \ 14 | --workers=$n_workers \ 15 | --worker-class=gevent \ 16 | --access-logfile=- \ 17 | --access-logformat='%(t)s "%(r)s" %(s)s' \ 18 | "server:create_app()" 19 | -------------------------------------------------------------------------------- /server/tests/test_queues.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from http import HTTPStatus 3 | 4 | import server 5 | 6 | 7 | @pytest.fixture 8 | def client(): 9 | app = server.create_app(from_test=True) 10 | app.app_context().push() 11 | with app.test_client() as client: 12 | yield client 13 | server.teardown_app(app) 14 | 15 | 16 | def test_get_info_successfuly(client): 17 | get_queues_rv = client.get('/queue.html') 18 | assert get_queues_rv.status_code == HTTPStatus.OK 19 | assert get_queues_rv.content_type == 'text/html' 20 | -------------------------------------------------------------------------------- /terraform/modules/blc/network.tf: -------------------------------------------------------------------------------- 1 | # External and internal static IPs 2 | resource "google_compute_address" "blc" { 3 | name = "${var.name}-${var.net}-external-ip-${var.env}" 4 | project = var.project 5 | region = var.region 6 | count = var.create_resources 7 | } 8 | 9 | resource "google_compute_address" "blc-internal" { 10 | name = "${var.name}-${var.net}-internal-ip-${var.env}" 11 | address_type = "INTERNAL" 12 | project = var.project 13 | region = var.region 14 | count = var.create_resources 15 | } 16 | -------------------------------------------------------------------------------- /server/tests/test_worker.py: -------------------------------------------------------------------------------- 1 | import queue 2 | import time 3 | 4 | import worker 5 | 6 | 7 | def test_worker(): 8 | product_queue 
= queue.Queue() 9 | 10 | def multiply(a, b, q): 11 | return q.put(a * b) 12 | 13 | period = 1.0 14 | w = worker.Worker(period, fcn=multiply, args=(2, 3, product_queue)) 15 | # Sleep 1/3 of the period and stop the worker before the subsequent 16 | # period 17 | time.sleep(period / 3) 18 | w.stop() 19 | product = product_queue.get() 20 | product_queue.task_done() 21 | assert (product == (2 * 3)) 22 | assert (product_queue.empty()) 23 | -------------------------------------------------------------------------------- /server/alembic/versions/5cebc0f48f6e_add_regions_to_orders_table.py: -------------------------------------------------------------------------------- 1 | """Add regions to orders table 2 | 3 | Revision ID: 5cebc0f48f6e 4 | Revises: c7b63286fd71 5 | Create Date: 2021-07-26 22:38:41.193023 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '5cebc0f48f6e' 13 | down_revision = 'c7b63286fd71' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.add_column('orders', sa.Column('region_code', sa.Integer)) 20 | 21 | 22 | def downgrade(): 23 | op.drop_column('orders', 'region_code') 24 | -------------------------------------------------------------------------------- /server/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | # Internal IP used for proxy_pass-ing to correct instance (mainnet vs testnet) 2 | output "blc_internal_ip_testnet" { 3 | value = module.blc-testnet.internal_ip 4 | } 5 | 6 | # Remote service accounts used for firewall rules 7 | output "prom_svc_acct" { 8 | value = module.prometheus.prom_svc_acct 9 | } 10 | 11 | output "lb_svc_acct" { 12 | value = module.lb.lb_svc_acct 13 | } 14 | 15 | output "lb_backend_service" { 16 | value = module.lb.backend_service 17 | } 18 | 19 | output "lb_backend_service_tor" { 20 | value = module.lb.backend_service_tor 21 | } 22 | 23 | output "lb_internal_ip" { 24 | value = module.lb.internal_ip 25 | } -------------------------------------------------------------------------------- /server/alembic/versions/3ec897840ea4_add_channel_to_order_table.py: -------------------------------------------------------------------------------- 1 | """Add channel to order table 2 | 3 | Revision ID: 3ec897840ea4 4 | Revises: 0704901102eb 5 | Create Date: 2022-04-05 21:57:27.398804 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 
12 | revision = '3ec897840ea4' 13 | down_revision = '0704901102eb' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.add_column( 20 | 'orders', 21 | sa.Column('channel', sa.Integer, default=1, server_default='1')) 22 | 23 | 24 | def downgrade(): 25 | op.drop_column('orders', 'channel') 26 | -------------------------------------------------------------------------------- /terraform/modules/lb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "lb_svc_acct" { 2 | value = length(google_service_account.satapi-lb) > 0 ? google_service_account.satapi-lb[0].email : "" 3 | } 4 | 5 | output "backend_service" { 6 | value = length(google_compute_backend_service.satapi-lb) > 0 ? google_compute_backend_service.satapi-lb[0].self_link : "" 7 | } 8 | 9 | output "backend_service_tor" { 10 | value = length(google_compute_backend_service.satapi-lb-tor) > 0 ? google_compute_backend_service.satapi-lb-tor[0].self_link : "" 11 | } 12 | 13 | output "internal_ip" { 14 | value = length(google_compute_address.satapi-lb-internal) > 0 ? 
google_compute_address.satapi-lb-internal[0].address : "" 15 | } 16 | -------------------------------------------------------------------------------- /terraform/iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "api_server_ci" { 2 | project = var.project 3 | account_id = "satellite-api-tf-ci" 4 | display_name = "satellite-api-tf-ci" 5 | description = "Terraform/CI" 6 | count = local.create_misc 7 | } 8 | 9 | resource "google_project_iam_member" "api_server_ci" { 10 | project = var.project 11 | role = "roles/editor" 12 | member = "serviceAccount:${google_service_account.api_server_ci[0].email}" 13 | count = local.create_misc 14 | } 15 | 16 | resource "google_project_iam_member" "api_server_ci_storageadm" { 17 | project = var.project 18 | role = "roles/storage.admin" 19 | member = "serviceAccount:${google_service_account.api_server_ci[0].email}" 20 | count = local.create_misc 21 | } 22 | -------------------------------------------------------------------------------- /terraform/modules/tor/iam.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "tor" { 2 | account_id = var.name 3 | display_name = var.name 4 | project = var.project 5 | count = var.create_resources 6 | } 7 | 8 | resource "google_project_iam_member" "tor" { 9 | project = var.project 10 | role = "roles/editor" 11 | member = "serviceAccount:${google_service_account.tor[0].email}" 12 | count = var.create_resources 13 | } 14 | 15 | resource "google_kms_crypto_key_iam_binding" "crypto-key" { 16 | crypto_key_id = google_kms_crypto_key.tor-crypto-key[0].id 17 | role = "roles/cloudkms.cryptoKeyDecrypter" 18 | count = var.create_resources 19 | 20 | members = [ 21 | "serviceAccount:${google_service_account.tor[0].email}", 22 | ] 23 | } 24 | 25 | -------------------------------------------------------------------------------- /terraform/modules/prometheus/data.tf: 
-------------------------------------------------------------------------------- 1 | data "google_compute_network" "default" { 2 | name = "default" 3 | project = var.project 4 | count = var.create_resources 5 | } 6 | 7 | data "template_file" "prometheus" { 8 | template = file("${path.module}/cloud-init/prometheus.yml") 9 | count = var.create_resources 10 | 11 | vars = { 12 | prom_docker = var.prom_docker 13 | node_exporter_docker = var.node_exporter_docker 14 | retention = var.retention 15 | } 16 | } 17 | 18 | data "template_cloudinit_config" "prometheus" { 19 | gzip = false 20 | base64_encode = false 21 | count = var.create_resources 22 | 23 | part { 24 | content_type = "text/cloud-config" 25 | content = data.template_file.prometheus[0].rendered 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /server/info.py: -------------------------------------------------------------------------------- 1 | from http import HTTPStatus 2 | import requests 3 | 4 | from error import get_http_error_resp 5 | from flask_restful import Resource 6 | import constants 7 | 8 | 9 | class InfoResource(Resource): 10 | 11 | def get(self): 12 | try: 13 | info_response = requests.get(f"{constants.CHARGE_ROOT}/info", 14 | timeout=(constants.CONNECTION_TIMEOUT, 15 | constants.RESPONSE_TIMEOUT)) 16 | if info_response.status_code != HTTPStatus.OK: 17 | return get_http_error_resp('LIGHTNING_CHARGE_INFO_FAILED') 18 | return info_response.json(), HTTPStatus.OK 19 | except requests.exceptions.RequestException: 20 | return get_http_error_resp('LIGHTNING_CHARGE_INFO_FAILED') 21 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | - name: Set up Python 11 | uses: 
actions/setup-python@v2 12 | with: 13 | python-version: '3.9' 14 | - name: Install dependencies 15 | run: | 16 | python -m pip install --upgrade pip 17 | pip install flake8 "yapf>=0.33" pytest pytest-cov 18 | pip install -r server/requirements.txt 19 | pip install -r server/test_requirements.txt 20 | - name: Lint with flake8 21 | run: | 22 | flake8 . 23 | - name: Check formatting 24 | run: | 25 | yapf --diff --recursive --verbose server/ 26 | - name: Test with pytest 27 | env: 28 | ENV: test 29 | run: | 30 | cd server/ && python -m pytest --cov=. 31 | -------------------------------------------------------------------------------- /nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name proxy_server; 4 | location / { 5 | # Based on https://docs.gunicorn.org/en/stable/deploy.html 6 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 7 | proxy_set_header X-Forwarded-Proto $scheme; 8 | proxy_set_header Host $http_host; 9 | proxy_redirect off; 10 | proxy_pass http://api-server:9292/; 11 | } 12 | location /subscribe/ { 13 | proxy_buffering off; 14 | proxy_request_buffering off; 15 | proxy_cache off; 16 | proxy_http_version 1.1; 17 | proxy_pass http://sse-server:4500/stream?channels=; 18 | } 19 | location /admin/subscribe/ { 20 | proxy_buffering off; 21 | proxy_request_buffering off; 22 | proxy_cache off; 23 | proxy_http_version 1.1; 24 | proxy_pass http://sse-server:4500/admin/stream?channels=; 25 | } 26 | } -------------------------------------------------------------------------------- /terraform/modules/prometheus/variables.tf: -------------------------------------------------------------------------------- 1 | variable "boot_image" { 2 | type = string 3 | default = "cos-cloud/cos-stable" 4 | } 5 | 6 | variable "network" { 7 | type = string 8 | default = "default" 9 | } 10 | 11 | variable "retention" { 12 | type = string 13 | default = "31d" 14 | } 15 | 16 | variable 
"project" { 17 | type = string 18 | } 19 | 20 | variable "name" { 21 | type = string 22 | } 23 | 24 | variable "region" { 25 | type = string 26 | } 27 | 28 | variable "zone" { 29 | type = string 30 | } 31 | 32 | variable "instance_type" { 33 | type = string 34 | } 35 | 36 | variable "create_resources" { 37 | type = string 38 | } 39 | 40 | variable "prom_service_acct" { 41 | type = string 42 | } 43 | 44 | variable "prom_allowed_source_ip" { 45 | type = list(any) 46 | } 47 | 48 | variable "prom_docker" { 49 | type = string 50 | } 51 | 52 | variable "node_exporter_docker" { 53 | type = string 54 | } 55 | -------------------------------------------------------------------------------- /terraform/modules/tor/variables.tf: -------------------------------------------------------------------------------- 1 | variable "boot_image" { 2 | type = string 3 | default = "cos-cloud/cos-stable" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | } 9 | 10 | variable "project" { 11 | type = string 12 | } 13 | 14 | variable "name" { 15 | type = string 16 | } 17 | 18 | variable "network" { 19 | type = string 20 | default = "default" 21 | } 22 | 23 | variable "zone" { 24 | type = string 25 | } 26 | 27 | variable "instance_type" { 28 | type = string 29 | } 30 | 31 | variable "tor_lb" { 32 | type = string 33 | } 34 | 35 | variable "onion_host" { 36 | type = string 37 | } 38 | 39 | variable "create_resources" { 40 | type = string 41 | } 42 | 43 | variable "prom_service_acct" { 44 | type = string 45 | } 46 | 47 | variable "tor_docker" { 48 | type = string 49 | } 50 | 51 | variable "node_exporter_docker" { 52 | type = string 53 | } 54 | 55 | variable "gcloud_docker" { 56 | type = string 57 | } 58 | 59 | -------------------------------------------------------------------------------- /terraform/modules/lb/firewall.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "satapi-lb" { 2 | name = 
"${var.name}-fw-rule-${var.env}" 3 | network = data.google_compute_network.satapi-lb.self_link 4 | project = var.project 5 | count = var.create_resources 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["80", "81", "443"] 10 | } 11 | 12 | target_service_accounts = [ 13 | google_service_account.satapi-lb[0].email, 14 | ] 15 | } 16 | 17 | resource "google_compute_firewall" "satapi-lb-prom" { 18 | name = "${var.name}-prometheus-access-${var.env}" 19 | network = data.google_compute_network.satapi-lb.self_link 20 | project = var.project 21 | count = var.create_resources 22 | 23 | allow { 24 | protocol = "tcp" 25 | ports = ["9100"] 26 | } 27 | 28 | source_service_accounts = [ 29 | var.prom_service_acct, 30 | ] 31 | 32 | target_service_accounts = [ 33 | google_service_account.satapi-lb[0].email, 34 | ] 35 | } 36 | 37 | -------------------------------------------------------------------------------- /server/alembic/versions/0704901102eb_add_tx_retries_table.py: -------------------------------------------------------------------------------- 1 | """Add tx_retries table 2 | 3 | Revision ID: 0704901102eb 4 | Revises: 5cebc0f48f6e 5 | Create Date: 2021-08-02 21:46:46.095406 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 
12 | revision = '0704901102eb' 13 | down_revision = '5cebc0f48f6e' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.create_table( 20 | 'tx_retries', sa.Column('id', sa.Integer, primary_key=True), 21 | sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')), 22 | sa.Column('last_attempt', sa.DateTime), 23 | sa.Column('retry_count', sa.Integer, default=0), 24 | sa.Column('region_code', sa.Integer), 25 | sa.Column('pending', sa.Boolean, default=True), 26 | sa.Column('created_at', sa.DateTime, default=sa.func.now())) 27 | 28 | 29 | def downgrade(): 30 | op.drop_table('tx_retries') 31 | -------------------------------------------------------------------------------- /terraform/modules/tor/firewall.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "tor-healthcheck" { 2 | name = "${var.name}-healthcheck" 3 | network = data.google_compute_network.default[0].self_link 4 | project = var.project 5 | count = var.create_resources 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["9050"] 10 | } 11 | 12 | source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "10.0.0.0/8"] 13 | 14 | target_service_accounts = [ 15 | google_service_account.tor[0].email, 16 | ] 17 | } 18 | 19 | resource "google_compute_firewall" "prom-traffic" { 20 | name = "${var.name}-prometheus-access" 21 | network = data.google_compute_network.default[0].self_link 22 | project = var.project 23 | count = var.create_resources 24 | 25 | allow { 26 | protocol = "tcp" 27 | ports = ["9100"] 28 | } 29 | 30 | source_service_accounts = [ 31 | var.prom_service_acct, 32 | ] 33 | 34 | target_service_accounts = [ 35 | google_service_account.tor[0].email, 36 | ] 37 | } 38 | 39 | -------------------------------------------------------------------------------- /server/invoices.py: -------------------------------------------------------------------------------- 1 | from constants import OrderStatus 2 | from error 
class InvoiceResource(Resource):
    """Endpoint hit by the charged daemon when a Lightning invoice settles."""

    # invoice paid callback from charged
    def post(self, lid, charged_auth_token):
        """Handle the invoice-paid callback.

        Args:
            lid : Lightning invoice id.
            charged_auth_token : Token authenticating the charged daemon.

        Returns:
            A confirmation message dict on success, or a prepared HTTP
            error response on failure.
        """
        success, invoice_or_error = get_and_authenticate_invoice(
            lid, charged_auth_token)
        if not success:
            # On failure, invoice_or_error carries the HTTP error response
            return invoice_or_error
        invoice = invoice_or_error
        if not invoice.order_id:
            # Every invoice must be tied to an order
            return get_http_error_resp('ORPHANED_INVOICE')

        error_msg = pay_invoice(invoice)
        if error_msg:
            return error_msg

        order = Order.query.filter_by(id=invoice.order_id).first()
        # Kick off transmission once the order becomes fully paid
        if order.status == OrderStatus.paid.value:
            transmitter.tx_start(order.channel)

        return {'message': f'invoice {invoice.lid} paid'}
IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /sse/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | -------------------------------------------------------------------------------- /terraform/modules/tor/data.tf: -------------------------------------------------------------------------------- 1 | data "google_compute_network" "default" { 2 | name = "default" 3 | project = var.project 4 | count = var.create_resources 5 | } 6 | 7 | data "template_file" "tor" { 8 | template = file("${path.module}/cloud-init/tor.yaml") 9 | count = var.create_resources 10 | 11 | vars = { 12 | tor_lb = var.tor_lb 13 | v3_host = var.onion_host 14 | v3_pk = file("${path.module}/v3.pk") 15 | v3_pubk = file("${path.module}/v3.pubk") 16 | tor_docker = var.tor_docker 17 | gcloud_docker = var.gcloud_docker 18 | node_exporter_docker = var.node_exporter_docker 19 | kms_key = google_kms_crypto_key.tor-crypto-key[0].name 20 | kms_key_ring = google_kms_key_ring.tor-key-ring[0].id 21 | kms_location = var.region 22 | } 23 | } 24 | 25 | data "template_cloudinit_config" "tor" { 26 | gzip = false 27 | base64_encode = false 28 | count = var.create_resources 29 | 30 | part { 31 | content_type = "text/cloud-config" 32 | content = data.template_file.tor[0].rendered 33 | } 34 | } 35 | 36 | -------------------------------------------------------------------------------- /terraform/modules/lb/network.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_address" "satapi-lb-internal" { 2 | name = "${var.name}-internal-ip-${var.env}" 3 | address_type = "INTERNAL" 4 | project = var.project 5 | region = var.region 6 | count = var.create_resources 7 | } 8 | 9 | resource "google_compute_backend_service" "satapi-lb" { 10 | name = "${var.name}-backend-service-${var.env}" 11 | description = "Satellite API" 12 | protocol = "HTTP" 13 | port_name = "http" 14 | project = var.project 15 | count = var.create_resources 16 | 17 | backend { 18 | group = google_compute_region_instance_group_manager.satapi-lb[0].instance_group 19 | } 20 | 21 | health_checks = 
# Public bucket (certbot acme-challenge)
resource "google_storage_bucket" "satapi-lb-public" {
  name          = "${var.name}-certbot-${var.env}"
  location      = "US"
  storage_class = "MULTI_REGIONAL"
  project       = var.project
  count         = var.create_resources

  lifecycle {
    # Bucket names are immutable in GCS; ignore drift to avoid recreation
    ignore_changes = [name]
  }
}

# Publicly readable so Let's Encrypt can fetch the HTTP-01 challenge files
resource "google_storage_bucket_acl" "satapi-lb-public-acl" {
  bucket         = google_storage_bucket.satapi-lb-public[count.index].name
  predefined_acl = "publicread"
  count          = var.create_resources
}

# Private bucket (server certs, ssh keys)
resource "google_storage_bucket" "satapi-lb-private" {
  name          = "${var.name}-certs-${var.env}"
  location      = "US"
  storage_class = "MULTI_REGIONAL"
  project       = var.project
  count         = var.create_resources

  lifecycle {
    # Bucket names are immutable in GCS; ignore drift to avoid recreation
    ignore_changes = [name]
  }
}

# Project-private: only project members may read certs/keys
resource "google_storage_bucket_acl" "satapi-lb-private-acl" {
  bucket         = google_storage_bucket.satapi-lb-private[count.index].name
  predefined_acl = "projectprivate"
  count          = var.create_resources
}
class Worker:
    """Background runner that invokes a callback at a fixed period.

    The callback runs on a daemon thread, started immediately on
    construction, and keeps firing until stop() is called.

    Args:
        period : Interval between invocations, in seconds.
        fcn : Callable invoked on every cycle.
        args : Tuple of positional arguments passed to fcn.
        name : Optional human-readable name used in log messages.

    """

    def __init__(self, period, fcn, args, name=""):
        assert (isinstance(args, tuple))
        self.name = name
        self.period = period
        self.fcn = fcn
        self.args = args
        self.enable = True
        logging.info(f"Starting worker: {self.name}")
        self.thread = threading.Thread(target=self.loop, daemon=True)
        self.thread.start()

    def loop(self):
        """Run the periodic loop until the enable flag is cleared."""
        deadline = time.time()
        while self.enable:
            self.fcn(*self.args)
            # Schedule relative to the previous deadline (not "now") so the
            # cadence does not drift by the callback's execution time.
            deadline += self.period
            remaining = deadline - time.time()
            if remaining > 0:
                time.sleep(remaining)

    def stop(self):
        """Request termination; takes effect after the current cycle."""
        logging.info(f"Stopping worker: {self.name}")
        self.enable = False
var.station1 21 | station2 = var.station2 22 | station3 = var.station3 23 | } 24 | } 25 | 26 | data "template_cloudinit_config" "satapi-lb" { 27 | gzip = false 28 | base64_encode = false 29 | count = var.create_resources 30 | 31 | part { 32 | content_type = "text/cloud-config" 33 | content = data.template_file.satapi-lb[0].rendered 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /terraform/modules/lb/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | default = "satellite-api" 4 | } 5 | 6 | variable "create_resources" { 7 | type = string 8 | } 9 | 10 | variable "env" { 11 | type = string 12 | } 13 | 14 | variable "name" { 15 | type = string 16 | } 17 | 18 | variable "network" { 19 | type = string 20 | } 21 | 22 | variable "region" { 23 | type = string 24 | } 25 | 26 | variable "zone" { 27 | type = string 28 | } 29 | 30 | variable "instance_type" { 31 | type = string 32 | } 33 | 34 | variable "host" { 35 | type = string 36 | } 37 | 38 | variable "timeout" { 39 | type = string 40 | } 41 | 42 | variable "public_bucket_url" { 43 | type = string 44 | } 45 | 46 | variable "letsencrypt_email" { 47 | type = string 48 | } 49 | 50 | variable "internal_ip_mainnet" { 51 | type = string 52 | } 53 | 54 | variable "internal_ip_testnet" { 55 | type = string 56 | } 57 | 58 | variable "health_check" { 59 | type = string 60 | } 61 | 62 | variable "prom_service_acct" { 63 | type = string 64 | } 65 | 66 | variable "target_pool" { 67 | type = string 68 | } 69 | 70 | variable "station1" { 71 | type = string 72 | } 73 | 74 | variable "station2" { 75 | type = string 76 | } 77 | 78 | variable "station3" { 79 | type = string 80 | } 81 | 82 | variable "node_exporter_docker" { 83 | type = string 84 | } 85 | 86 | variable "certbot_docker" { 87 | type = string 88 | } -------------------------------------------------------------------------------- 
/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | blc: 4 | image: "shesek/lightning-charge:0.4.23" 5 | environment: 6 | - API_TOKEN=mySecretToken 7 | volumes: 8 | - blc:/data 9 | api-server: 10 | build: server 11 | depends_on: 12 | - blc 13 | links: 14 | - blc 15 | environment: 16 | - CHARGE_ROOT=http://api-token:mySecretToken@blc:9112 17 | - CALLBACK_URI_ROOT=http://127.0.0.1:9292 18 | - CHARGE_API_TOKEN=mySecretToken 19 | - ENV=development 20 | - REDIS_URI=redis://redis:6379 21 | volumes: 22 | - data:/data 23 | workers: 24 | build: server 25 | depends_on: 26 | - api-server 27 | - redis 28 | links: 29 | - redis 30 | environment: 31 | - REDIS_URI=redis://redis:6379 32 | - ENV=development 33 | volumes: 34 | - data:/data 35 | command: workers.sh 36 | sse-server: 37 | build: 38 | context: sse/ 39 | depends_on: 40 | - api-server 41 | - redis 42 | links: 43 | - redis 44 | environment: 45 | - SUB_CHANNELS=transmissions,gossip,btc-src,auth 46 | - REDIS_URI=redis://redis:6379 47 | redis: 48 | image: "redis:latest" 49 | proxy_server: 50 | image: proxy_server 51 | build: nginx 52 | depends_on: 53 | - api-server 54 | - sse-server 55 | ports: 56 | - 8080:80 57 | 58 | volumes: 59 | blc: 60 | data: 61 | -------------------------------------------------------------------------------- /terraform/modules/blc/firewall.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "blc" { 2 | name = "${var.name}-${var.net}-fw-rule-${var.env}" 3 | network = data.google_compute_network.blc[0].self_link 4 | project = var.project 5 | count = var.create_resources 6 | 7 | allow { 8 | protocol = "tcp" 9 | ports = ["18333", "8333", "9735"] 10 | } 11 | 12 | target_service_accounts = [ 13 | google_service_account.blc[0].email, 14 | ] 15 | } 16 | 17 | resource "google_compute_firewall" "api-internal" { 18 | name = 
from math import ceil
from constants import MIN_BID, MIN_PER_BYTE_BID

ETH_MTU = 1500
UDP_IP_HEADER = 20 + 8
BLOCKSAT_HEADER = 8
MPE_HEADER = 16
MAX_BLOCKSAT_PAYLOAD = ETH_MTU - (UDP_IP_HEADER + BLOCKSAT_HEADER)


def calc_ota_msg_len(msg_len):
    """Compute the number of bytes sent over-the-air (OTA) for an API message

    Each API message is split into Blocksat Packets carried in UDP/IPv4
    datagrams over MPE with a 1500-byte layer-2 MTU. Fragmentation happens at
    the Blocksat Packet layer (not at the IP layer), so every fragment pays
    the full MPE + UDP/IPv4 + Blocksat header overhead.

    Args:
        msg_len : Length of the API message to be transmitted

    """
    # Number of Blocksat Packet fragments required for this message
    n_frags = ceil(msg_len / MAX_BLOCKSAT_PAYLOAD)

    # Per-fragment protocol overhead across all encapsulation layers
    per_frag_overhead = MPE_HEADER + UDP_IP_HEADER + BLOCKSAT_HEADER

    return msg_len + (per_frag_overhead * n_frags)


def get_min_bid(data_len):
    """Return the minimum acceptable bid (msat) for a message of data_len."""
    per_byte_bid = ceil(calc_ota_msg_len(data_len) * MIN_PER_BYTE_BID)
    return max(per_byte_bid, MIN_BID)


def validate_bid(data_len, bid):
    """Check whether the given bid meets the minimum for this message size."""
    return bid >= get_min_bid(data_len)
{ 18 | public = google_storage_bucket.satapi-lb-public[0].name 19 | private = google_storage_bucket.satapi-lb-private[0].name 20 | } : {} 21 | 22 | roles = { 23 | objectCreator = "roles/storage.objectCreator", 24 | objectViewer = "roles/storage.objectViewer", 25 | legacyBucketWriter = "roles/storage.legacyBucketWriter" 26 | } 27 | 28 | bucket_role_pairs = flatten([ 29 | for b_key, b_name in local.buckets : [ 30 | for r_key, r_value in local.roles : { 31 | bucket_key = b_key 32 | bucket = b_name 33 | role_key = r_key 34 | role = r_value 35 | } 36 | ] 37 | ]) 38 | 39 | bucket_roles = { for br in local.bucket_role_pairs : "${br.bucket_key}_${br.role_key}" => br } 40 | } 41 | 42 | resource "google_storage_bucket_iam_member" "satapi_lb_roles" { 43 | # for_each = local.bucket_roles 44 | for_each = var.create_resources == "1" ? local.bucket_roles : {} 45 | 46 | bucket = each.value.bucket 47 | role = each.value.role 48 | member = "serviceAccount:${google_service_account.satapi-lb[0].email}" 49 | } 50 | -------------------------------------------------------------------------------- /terraform/modules/blc/data.tf: -------------------------------------------------------------------------------- 1 | data "google_compute_network" "blc" { 2 | name = "default" 3 | project = var.project 4 | count = var.create_resources 5 | } 6 | 7 | data "template_file" "blc" { 8 | template = file("${path.module}/cloud-init/blc.yaml") 9 | count = var.create_resources 10 | 11 | vars = { 12 | charge_token = var.charge_token 13 | net = var.net 14 | lightning_cmd = var.lightning_cmd 15 | charge_cmd = "charged -d /data/charge.db -l /root/.lightning --host 0.0.0.0" 16 | announce_addr = google_compute_address.blc[0].address 17 | lightning_port = 9735 18 | lightning_docker = var.lightning_docker 19 | charge_docker = var.charge_docker 20 | redis_port = 6379 21 | sat_api_docker = var.sat_api_docker 22 | sat_api_sse_docker = var.sat_api_sse_docker 23 | node_exporter_docker = var.node_exporter_docker 24 | 
autossh_docker = var.autossh_docker 25 | certbot_docker = var.certbot_docker 26 | k8s_autossh_lb = var.k8s_autossh_lb 27 | rpcpass = var.rpcpass 28 | k8s_autossh_ssh_port = var.net == "testnet" ? "2222" : "2223" 29 | k8s_autossh_btc_port = var.net == "testnet" ? "18332" : "8332" 30 | private_bucket = "${var.private_bucket}-${var.env}" 31 | ssh_key_net = var.ssh_key_net 32 | network_dir = var.net == "testnet" ? "/testnet" : "/bitcoin" 33 | } 34 | } 35 | 36 | data "template_cloudinit_config" "blc" { 37 | gzip = false 38 | base64_encode = false 39 | count = var.create_resources 40 | 41 | part { 42 | content_type = "text/cloud-config" 43 | content = data.template_file.blc[0].rendered 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /terraform/modules/blc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | type = string 3 | default = "satellite-api" 4 | } 5 | 6 | variable "boot_image" { 7 | type = string 8 | default = "cos-cloud/cos-stable" 9 | } 10 | 11 | variable "create_resources" { 12 | type = string 13 | } 14 | 15 | variable "charge_token" { 16 | type = string 17 | } 18 | 19 | variable "env" { 20 | type = string 21 | } 22 | 23 | variable "name" { 24 | type = string 25 | } 26 | 27 | variable "network" { 28 | type = string 29 | } 30 | 31 | variable "region" { 32 | type = string 33 | } 34 | 35 | variable "zone" { 36 | type = string 37 | } 38 | 39 | variable "instance_type" { 40 | type = string 41 | } 42 | 43 | variable "net" { 44 | type = string 45 | } 46 | 47 | variable "timeout" { 48 | type = string 49 | } 50 | 51 | variable "prom_service_acct" { 52 | type = string 53 | } 54 | 55 | variable "lb_svc_acct" { 56 | type = string 57 | } 58 | 59 | variable "k8s_autossh_lb" { 60 | type = string 61 | } 62 | 63 | variable "rpcpass" { 64 | type = string 65 | } 66 | 67 | variable "private_bucket" { 68 | type = string 69 | } 70 | 71 | variable "ssh_key_net" 
{ 72 | type = string 73 | } 74 | 75 | variable "lightning_cmd" { 76 | type = string 77 | } 78 | 79 | variable "charge_docker" { 80 | type = string 81 | } 82 | 83 | variable "lightning_docker" { 84 | type = string 85 | } 86 | 87 | variable "sat_api_docker" { 88 | type = string 89 | } 90 | 91 | variable "sat_api_sse_docker" { 92 | type = string 93 | } 94 | 95 | variable "node_exporter_docker" { 96 | type = string 97 | } 98 | 99 | variable "autossh_docker" { 100 | type = string 101 | } 102 | 103 | variable "certbot_docker" { 104 | type = string 105 | } 106 | -------------------------------------------------------------------------------- /terraform/network.tf: -------------------------------------------------------------------------------- 1 | # IP address 2 | resource "google_compute_address" "lb" { 3 | name = "satellite-api-client-lb-${local.env}" 4 | region = var.region 5 | project = var.project 6 | count = local.create_mainnet 7 | } 8 | 9 | # Forwarding rules 10 | resource "google_compute_forwarding_rule" "rule-https" { 11 | name = "satellite-api-https-forwarding-rule-${local.env}" 12 | target = google_compute_target_pool.lb-pool[0].self_link 13 | port_range = "443" 14 | ip_protocol = "TCP" 15 | ip_address = google_compute_address.lb[0].address 16 | region = var.region 17 | project = var.project 18 | count = local.create_mainnet 19 | } 20 | 21 | resource "google_compute_forwarding_rule" "rule-http" { 22 | name = "satellite-api-http-forwarding-rule-${local.env}" 23 | target = google_compute_target_pool.lb-pool[0].self_link 24 | port_range = "80" 25 | ip_protocol = "TCP" 26 | ip_address = google_compute_address.lb[0].address 27 | region = var.region 28 | project = var.project 29 | count = local.create_mainnet 30 | } 31 | 32 | resource "google_compute_target_pool" "lb-pool" { 33 | name = "satellite-api-lb-target-pool-${local.env}" 34 | region = var.region 35 | project = var.project 36 | count = local.create_mainnet 37 | 38 | health_checks = [ 39 | 
google_compute_http_health_check.lb-health[0].self_link 40 | ] 41 | } 42 | 43 | resource "google_compute_http_health_check" "lb-health" { 44 | name = "satellite-api-lb-http-health-${local.env}" 45 | project = var.project 46 | count = local.create_mainnet 47 | 48 | timeout_sec = 5 49 | check_interval_sec = 10 50 | 51 | host = "${local.env == "staging" ? "staging-" : ""}api.blockstream.space" 52 | port = "80" 53 | request_path = "/healthz" 54 | } 55 | -------------------------------------------------------------------------------- /terraform/modules/dns/blockstream-space.tf: -------------------------------------------------------------------------------- 1 | resource "google_dns_managed_zone" "blockstream-space" { 2 | name = "blockstream-space" 3 | dns_name = "blockstream.space." 4 | description = "A long time ago, in a galaxy far, far away... P.S. Don't edit directly in Gcloud, but rather in the satellite/ionosphere repo (Otherwise, things break and Chase gets really mad)." 5 | project = var.project 6 | count = var.create_resources 7 | 8 | labels = { 9 | managed-by = "terraform" 10 | } 11 | } 12 | 13 | resource "google_dns_record_set" "a-satellite" { 14 | name = google_dns_managed_zone.blockstream-space[0].dns_name 15 | managed_zone = google_dns_managed_zone.blockstream-space[0].name 16 | type = "A" 17 | ttl = 300 18 | count = var.create_resources 19 | 20 | rrdatas = [var.satellite_lb] 21 | } 22 | 23 | resource "google_dns_record_set" "a-satellite-api" { 24 | name = "api.${google_dns_managed_zone.blockstream-space[0].dns_name}" 25 | managed_zone = google_dns_managed_zone.blockstream-space[0].name 26 | type = "A" 27 | ttl = 300 28 | count = var.create_resources 29 | 30 | rrdatas = [var.satellite_api_lb] 31 | } 32 | 33 | resource "google_dns_record_set" "a-satellite-api-staging" { 34 | name = "staging-api.${google_dns_managed_zone.blockstream-space[0].dns_name}" 35 | managed_zone = google_dns_managed_zone.blockstream-space[0].name 36 | type = "A" 37 | ttl = 300 38 | 
import pytest
import regions


def test_region_number_list_to_code():
    """Region-number lists map to bitmask codes (bit i set for region i)."""
    assert regions.region_number_list_to_code([]) == 0x0
    assert regions.region_number_list_to_code(
        [0, 1, 2, 3, 4, 5]) == regions.REGION_MASK_ALL_REGIONS
    # 0x15 = 0b010101 -> even-numbered regions 0, 2, 4
    assert regions.region_number_list_to_code([0, 2, 4]) == 0x15
    # 0x2A = 0b101010 -> odd-numbered regions 1, 3, 5
    assert regions.region_number_list_to_code([1, 3, 5]) == 0x2A
    # Region numbers above 5 are rejected
    with pytest.raises(AssertionError):
        regions.region_number_list_to_code([1, 6])


def test_region_code_to_id_list():
    """Bitmask codes map back to region id lists; 0/None mean all regions."""
    assert regions.region_code_to_id_list(0x0) == regions.all_region_ids
    assert regions.region_code_to_id_list(None) == regions.all_region_ids
    assert regions.region_code_to_id_list(
        regions.REGION_MASK_ALL_REGIONS) == regions.all_region_ids
    assert regions.region_code_to_id_list(0x15) == [
        regions.SATELLITE_REGIONS[regions.Regions.g18]['id'],
        regions.SATELLITE_REGIONS[regions.Regions.t11n_afr]['id'],
        regions.SATELLITE_REGIONS[regions.Regions.t18v_c]['id']
    ]
    assert regions.region_code_to_id_list(0x2A) == [
        regions.SATELLITE_REGIONS[regions.Regions.e113]['id'],
        regions.SATELLITE_REGIONS[regions.Regions.t11n_eu]['id'],
        regions.SATELLITE_REGIONS[regions.Regions.t18v_ku]['id']
    ]


def test_region_code_to_number_list():
    """Bitmask codes map back to region numbers; 0/None mean all regions."""
    assert regions.region_code_to_number_list(
        0x0) == regions.all_region_numbers
    assert regions.region_code_to_number_list(
        None) == regions.all_region_numbers
    assert regions.region_code_to_number_list(
        regions.REGION_MASK_ALL_REGIONS) == regions.all_region_numbers
    assert regions.region_code_to_number_list(0x15) == [
        regions.Regions.g18.value, regions.Regions.t11n_afr.value,
        regions.Regions.t18v_c.value
    ]
    assert regions.region_code_to_number_list(0x2A) == [
        regions.Regions.e113.value, regions.Regions.t11n_eu.value,
        regions.Regions.t18v_ku.value
    ]
40 | machine_type = var.instance_type 41 | project = var.project 42 | count = var.create_resources 43 | 44 | labels = { 45 | type = "tor" 46 | name = var.name 47 | network = var.network 48 | } 49 | 50 | disk { 51 | source_image = var.boot_image 52 | boot = true 53 | auto_delete = true 54 | disk_type = "pd-standard" 55 | device_name = "boot" 56 | disk_size_gb = "20" 57 | } 58 | 59 | network_interface { 60 | network = data.google_compute_network.default[0].self_link 61 | 62 | access_config { 63 | } 64 | } 65 | 66 | metadata = { 67 | user-data = data.template_cloudinit_config.tor[0].rendered 68 | } 69 | 70 | service_account { 71 | email = google_service_account.tor[0].email 72 | 73 | scopes = [ 74 | "https://www.googleapis.com/auth/cloudkms", 75 | "compute-ro", 76 | "storage-ro", 77 | ] 78 | } 79 | 80 | lifecycle { 81 | create_before_destroy = true 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /terraform/modules/lb/main.tf: -------------------------------------------------------------------------------- 1 | # Instance group & template 2 | resource "google_compute_region_instance_group_manager" "satapi-lb" { 3 | name = "${var.name}-ig-${var.env}" 4 | target_pools = [var.target_pool] 5 | project = var.project 6 | provider = google-beta 7 | count = var.create_resources 8 | 9 | base_instance_name = "${var.name}-ig-${var.env}" 10 | region = var.region 11 | target_size = 1 12 | 13 | version { 14 | name = "original" 15 | instance_template = google_compute_instance_template.satapi-lb[0].self_link 16 | } 17 | 18 | update_policy { 19 | type = var.env == "staging" ? 
// Setup redis
const redis = require('redis').createClient(process.env.REDIS_URI),
      channels = process.env.SUB_CHANNELS.split(',')

console.log(`Subscribing to Redis on ${channels.join(',')}`)
channels.forEach(chan => redis.subscribe(chan))

// Log messages and number of SSE subscribers
redis.on('message', (chan, msg) => console.log(`Broadcasting ${chan}: ${msg}`))
setInterval(_ => console.log(`Total subscribers: ${redis.listenerCount('message') - 1}`), 60000)

// Setup express server
const app = require('express')()
app.set('trust proxy', process.env.PROXIED || 'loopback')
app.use(require('morgan')('dev'))

// Stream redis pub/sub messages to the client as server-sent events.
// `subscriptions` is an optional channel whitelist; when falsy, all
// subscribed channels are forwarded.
function configureStream(req, res, subscriptions) {
  res.set({
    'X-Accel-Buffering': 'no',
    'Cache-Control': 'no-cache',
    'Content-Type': 'text/event-stream',
    'Connection': 'keep-alive'
  }).flushHeaders()

  function onMsg(chan, msg) {
    if (!subscriptions || subscriptions.includes(chan)) {
      res.write(`event:${chan}\ndata:${msg}\n\n`)
    }
  }
  redis.on('message', onMsg)

  const keepAlive = setInterval(_ => res.write(': keepalive\n\n'), 25000)

  req.once('close', _ => (redis.removeListener('message', onMsg),
                          clearInterval(keepAlive),
                          console.log('Subscriber disconnected')))
}

app.get('/stream', (req, res) => {
  const subscriptions = req.query.channels && req.query.channels.split(',')
  // Filter out the channels that can only be monitored by the admin.
  // NOTE: `subscriptions` is undefined when no ?channels= param is given;
  // guard before calling includes() to avoid a TypeError crash.
  if (subscriptions && subscriptions.includes('auth')) {
    res.status(401).send("Operation not supported on the auth channel");
    return;
  }
  console.log(`New subscriber for ${subscriptions ? subscriptions.join(',') : 'all channels'}`)
  configureStream(req, res, subscriptions);
})

app.get('/admin/stream', (req, res) => {
  const subscriptions = req.query.channels && req.query.channels.split(',')
  console.log(`New admin subscriber for ${subscriptions ? subscriptions.join(',') : 'all channels'}`)
  configureStream(req, res, subscriptions);
})

app.listen(
  process.env.PORT || 4500,
  function () { console.log(`HTTP server running on ${this.address().address}:${this.address().port}`) }
)
When using multiple version 35 | # directories, initial revisions must be specified with --version-path 36 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 37 | 38 | # the output encoding used when revision files 39 | # are written from script.py.mako 40 | # output_encoding = utf-8 41 | 42 | 43 | [post_write_hooks] 44 | # post_write_hooks defines scripts or Python functions that are run 45 | # on newly generated revision scripts. See the documentation for further 46 | # detail and examples 47 | 48 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 49 | # hooks=black 50 | # black.type=console_scripts 51 | # black.entrypoint=black 52 | # black.options=-l 79 53 | 54 | # Logging configuration 55 | [loggers] 56 | keys = root,sqlalchemy,alembic 57 | 58 | [handlers] 59 | keys = console 60 | 61 | [formatters] 62 | keys = generic 63 | 64 | [logger_root] 65 | level = WARN 66 | handlers = console 67 | qualname = 68 | 69 | [logger_sqlalchemy] 70 | level = WARN 71 | handlers = 72 | qualname = sqlalchemy.engine 73 | 74 | [logger_alembic] 75 | level = INFO 76 | handlers = 77 | qualname = alembic 78 | 79 | [handler_console] 80 | class = StreamHandler 81 | args = (sys.stderr,) 82 | level = NOTSET 83 | formatter = generic 84 | 85 | [formatter_generic] 86 | format = %(levelname)-5.5s [%(name)s] %(message)s 87 | datefmt = %H:%M:%S 88 | -------------------------------------------------------------------------------- /terraform/modules/prometheus/main.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_disk" "prometheus-data" { 2 | name = "${var.name}-data-disk" 3 | project = var.project 4 | type = "pd-standard" 5 | zone = var.zone 6 | size = "50" 7 | count = var.create_resources 8 | } 9 | 10 | resource "google_compute_address" "prometheus-address" { 11 | name = "${var.name}-address" 12 | project = var.project 13 | region = var.region 14 | count = var.create_resources 
15 | } 16 | 17 | resource "google_compute_address" "prometheus-internal-address" { 18 | name = "${var.name}-internal-address" 19 | project = var.project 20 | region = var.region 21 | address_type = "INTERNAL" 22 | count = var.create_resources 23 | } 24 | 25 | locals { 26 | service_account = terraform.workspace == "misc" ? element(concat(google_service_account.prometheus.*.email, [""]), 0) : var.prom_service_acct 27 | } 28 | 29 | resource "google_compute_instance" "prometheus-server" { 30 | name = var.name 31 | machine_type = var.instance_type 32 | zone = var.zone 33 | project = var.project 34 | allow_stopping_for_update = true 35 | count = var.create_resources 36 | 37 | labels = { 38 | type = "prometheus" 39 | name = var.name 40 | network = var.network 41 | } 42 | 43 | service_account { 44 | email = local.service_account 45 | 46 | scopes = [ 47 | "https://www.googleapis.com/auth/compute.readonly", 48 | "https://www.googleapis.com/auth/devstorage.read_only", 49 | "https://www.googleapis.com/auth/pubsub", 50 | ] 51 | } 52 | 53 | boot_disk { 54 | initialize_params { 55 | size = "20" 56 | image = var.boot_image 57 | } 58 | } 59 | 60 | attached_disk { 61 | source = element(google_compute_disk.prometheus-data.*.name, count.index) 62 | device_name = "data" 63 | } 64 | 65 | network_interface { 66 | network = data.google_compute_network.default[0].self_link 67 | network_ip = element( 68 | google_compute_address.prometheus-internal-address.*.address, 69 | count.index, 70 | ) 71 | 72 | access_config { 73 | nat_ip = element( 74 | google_compute_address.prometheus-address.*.address, 75 | count.index, 76 | ) 77 | } 78 | } 79 | 80 | metadata = { 81 | user-data = data.template_cloudinit_config.prometheus[0].rendered 82 | } 83 | } 84 | 85 | -------------------------------------------------------------------------------- /server/server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import logging 4 | 5 | from 
flask import Flask 6 | from flask_restful import Api 7 | import redis 8 | 9 | import constants 10 | from database import db 11 | from info import InfoResource 12 | from invoices import InvoiceResource 13 | from orders import \ 14 | BumpOrderResource, \ 15 | GetMessageBySeqNumResource, \ 16 | GetMessageResource, \ 17 | OrderResource, \ 18 | OrdersResource, \ 19 | OrderUploadResource, \ 20 | RxConfirmationResource, \ 21 | TxConfirmationResource 22 | from queues import QueueResource 23 | 24 | 25 | def create_app(from_test=False): 26 | if not os.path.isdir(constants.MSG_STORE_PATH): 27 | os.makedirs(constants.MSG_STORE_PATH) 28 | 29 | app = Flask(__name__) 30 | app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{constants.DB_FILE}' 31 | app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 32 | app.config['TESTING'] = from_test 33 | app.config["REDIS_INSTANCE"] = redis.from_url(constants.REDIS_URI) 34 | 35 | db.init_app(app) 36 | with app.app_context(): 37 | db.create_all() 38 | api = Api(app) 39 | api.add_resource(OrderUploadResource, '/order', '/admin/order') 40 | api.add_resource(OrdersResource, '/orders/', 41 | '/admin/orders/') 42 | api.add_resource(OrderResource, '/order/', '/admin/order/') 43 | api.add_resource(BumpOrderResource, '/order//bump') 44 | api.add_resource(TxConfirmationResource, '/order/tx/') 45 | api.add_resource(RxConfirmationResource, '/order/rx/') 46 | api.add_resource(InfoResource, '/info') 47 | api.add_resource(InvoiceResource, '/callback//') 48 | api.add_resource(GetMessageBySeqNumResource, '/message/', 49 | '/admin/message/') 50 | api.add_resource(QueueResource, '/queue.html') 51 | 52 | if constants.env == 'development' or constants.env == 'test': 53 | api.add_resource(GetMessageResource, '/order//sent_message') 54 | 55 | logging.basicConfig(level=logging.DEBUG, format=constants.LOGGING_FORMAT) 56 | return app 57 | 58 | 59 | def teardown_app(app): 60 | if (app.config['TESTING']): 61 | shutil.rmtree(constants.MSG_STORE_PATH) 62 | 
os.remove(constants.DB_FILE) 63 | 64 | 65 | if __name__ == '__main__': 66 | app = create_app() 67 | app.run(debug=True, host='0.0.0.0', port=constants.SERVER_PORT) 68 | -------------------------------------------------------------------------------- /terraform/modules/blc/main.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_disk" "blc" { 2 | name = "${var.name}-data-${var.net}-${var.env}" 3 | type = "pd-standard" 4 | zone = var.zone 5 | count = var.create_resources 6 | 7 | lifecycle { 8 | prevent_destroy = true 9 | } 10 | } 11 | 12 | # Instance group & template 13 | resource "google_compute_instance_group_manager" "blc" { 14 | name = "${var.name}-ig-${var.net}-${var.env}" 15 | project = var.project 16 | provider = google-beta 17 | count = var.create_resources 18 | 19 | base_instance_name = "${var.name}-ig-${var.net}-${var.env}" 20 | zone = var.zone 21 | target_size = 1 22 | 23 | version { 24 | name = "original" 25 | instance_template = google_compute_instance_template.blc[0].self_link 26 | } 27 | 28 | update_policy { 29 | type = var.env == "staging" ? "PROACTIVE" : "OPPORTUNISTIC" 30 | minimal_action = "RESTART" 31 | replacement_method = "RECREATE" 32 | max_surge_fixed = 0 33 | max_unavailable_fixed = 3 34 | min_ready_sec = 60 35 | } 36 | } 37 | 38 | resource "google_compute_instance_template" "blc" { 39 | name_prefix = "${var.name}-${var.net}-${var.env}-tmpl-" 40 | description = "This template is used to create ${var.name} ${var.net} ${var.env} instances." 
41 | machine_type = var.instance_type 42 | region = var.region 43 | count = var.create_resources 44 | project = var.project 45 | 46 | labels = { 47 | type = "lightning-app-blc" 48 | name = var.name 49 | net = var.net 50 | } 51 | 52 | scheduling { 53 | automatic_restart = true 54 | on_host_maintenance = "MIGRATE" 55 | } 56 | 57 | disk { 58 | source_image = var.boot_image 59 | disk_type = "pd-balanced" 60 | auto_delete = true 61 | boot = true 62 | disk_size_gb = 20 63 | } 64 | 65 | disk { 66 | source = google_compute_disk.blc[0].name 67 | auto_delete = false 68 | device_name = "data" 69 | } 70 | 71 | network_interface { 72 | network = data.google_compute_network.blc[0].self_link 73 | network_ip = google_compute_address.blc-internal[0].address 74 | 75 | access_config { 76 | nat_ip = google_compute_address.blc[0].address 77 | } 78 | } 79 | 80 | metadata = { 81 | user-data = data.template_cloudinit_config.blc[0].rendered 82 | } 83 | 84 | service_account { 85 | email = google_service_account.blc[0].email 86 | scopes = ["compute-ro", "storage-rw"] 87 | } 88 | 89 | lifecycle { 90 | create_before_destroy = true 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /terraform/network-tor.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_global_address" "tor-lb" { 2 | name = "${var.name}-lb" 3 | project = var.project 4 | count = local.create_misc 5 | } 6 | 7 | resource "google_compute_global_forwarding_rule" "tor-rule" { 8 | name = "${var.name}-forwarding-rule" 9 | target = google_compute_target_http_proxy.tor-proxy[0].self_link 10 | port_range = "80" 11 | ip_protocol = "TCP" 12 | ip_address = google_compute_global_address.tor-lb[0].address 13 | 14 | count = local.create_misc 15 | } 16 | 17 | resource "google_compute_target_http_proxy" "tor-proxy" { 18 | name = "${var.name}-http-proxy" 19 | url_map = google_compute_url_map.tor-proxy[0].self_link 20 | 21 | count = 
local.create_misc 22 | } 23 | 24 | resource "google_compute_url_map" "tor-proxy" { 25 | name = "${var.name}-urlmap" 26 | default_service = google_compute_backend_bucket.tor_deadhole_backend[0].self_link 27 | 28 | count = local.create_misc 29 | 30 | host_rule { 31 | hosts = ["*"] 32 | path_matcher = "deadpaths" 33 | } 34 | 35 | path_matcher { 36 | name = "deadpaths" 37 | default_service = google_compute_backend_bucket.tor_deadhole_backend[0].self_link 38 | 39 | path_rule { 40 | paths = ["/*"] 41 | service = google_compute_backend_bucket.tor_deadhole_backend[0].self_link 42 | } 43 | } 44 | 45 | host_rule { 46 | hosts = [var.onion_host] 47 | path_matcher = "allpaths" 48 | } 49 | 50 | path_matcher { 51 | name = "allpaths" 52 | default_service = data.terraform_remote_state.blc-mainnet.outputs.lb_backend_service_tor 53 | 54 | path_rule { 55 | paths = ["/*"] 56 | service = data.terraform_remote_state.blc-mainnet.outputs.lb_backend_service_tor 57 | } 58 | } 59 | 60 | test { 61 | service = data.terraform_remote_state.blc-mainnet.outputs.lb_backend_service_tor 62 | host = var.onion_host 63 | path = "/queue.html" 64 | } 65 | 66 | test { 67 | service = google_compute_backend_bucket.tor_deadhole_backend[0].self_link 68 | host = google_compute_global_address.tor-lb[0].address 69 | path = "/*" 70 | } 71 | } 72 | 73 | resource "google_compute_backend_bucket" "tor_deadhole_backend" { 74 | name = "${var.name}-deadhole-backend-bucket" 75 | description = "Unmatched hosts end up in this deadhole" 76 | bucket_name = google_storage_bucket.tor_deadhole[0].name 77 | enable_cdn = false 78 | 79 | count = local.create_misc 80 | } 81 | 82 | resource "google_storage_bucket" "tor_deadhole" { 83 | name = "${var.name}-deadhole-bucket" 84 | location = "US" 85 | 86 | count = local.create_misc 87 | } 88 | 89 | -------------------------------------------------------------------------------- /server/tests/test_info.py: -------------------------------------------------------------------------------- 1 | 
import pytest
import requests
from http import HTTPStatus
from unittest.mock import Mock, patch

import server
from error import assert_error


@pytest.fixture
def client():
    """Yield a Flask test client backed by a fresh app instance.

    teardown_app() removes the test message store and DB file afterwards.
    """
    app = server.create_app(from_test=True)
    app.app_context().push()
    with app.test_client() as client:
        yield client
    server.teardown_app(app)


# Patches the HTTP GET that info.py issues to the Lightning Charge daemon,
# so no real lightning node is needed.
@patch('info.requests.get')
def test_get_info_successfuly(mock_get_info, client):
    mock_get_info.return_value = Mock()
    mock_get_info.return_value.status_code = HTTPStatus.OK

    # Representative getinfo payload; /info is expected to relay it verbatim.
    SAMPLE_INFO = {
        "id": "0317109ca2848f061e27dbf497ec47243d7aea6",
        "alias": "VIOLETSCAN",
        "color": "031710",
        "num_peers": 0,
        "num_pending_channels": 0,
        "num_active_channels": 0,
        "num_inactive_channels": 0,
        "address": [],
        "binding": [{
            "type": "ipv6",
            "address": "::",
            "port": 9735
        }, {
            "type": "ipv4",
            "address": "0.0.0.0",
            "port": 9735
        }],
        "version": "v0.9.3",
        "blockheight": 0,
        "network": "testnet",
        "msatoshi_fees_collected": 0,
        "fees_collected_msat": "0msat",
        "lightning-dir": "/data/lightning/testnet",
        "warning_bitcoind_sync": "Bitcoind is not up-to-date with network."
63 | } 64 | mock_get_info.return_value.json = lambda: SAMPLE_INFO 65 | 66 | get_info_rv = client.get('/info') 67 | get_json_resp = get_info_rv.get_json() 68 | assert get_info_rv.status_code == HTTPStatus.OK 69 | assert get_json_resp == SAMPLE_INFO 70 | 71 | 72 | @patch('info.requests.get') 73 | def test_get_info_failure(mock_get_info, client): 74 | mock_get_info.return_value = Mock() 75 | mock_get_info.return_value.status_code = HTTPStatus.UNAUTHORIZED 76 | get_info_rv = client.get('/info') 77 | assert get_info_rv.status_code == HTTPStatus.INTERNAL_SERVER_ERROR 78 | assert_error(get_info_rv.get_json(), 'LIGHTNING_CHARGE_INFO_FAILED') 79 | 80 | 81 | @patch('info.requests.get') 82 | def test_get_info_exception(mock_get_info, client): 83 | mock_get_info.side_effect = requests.exceptions.RequestException 84 | get_info_rv = client.get('/info') 85 | assert get_info_rv.status_code == HTTPStatus.INTERNAL_SERVER_ERROR 86 | assert_error(get_info_rv.get_json(), 'LIGHTNING_CHARGE_INFO_FAILED') 87 | -------------------------------------------------------------------------------- /server/worker_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | from flask import Flask 5 | import redis 6 | 7 | import constants 8 | import invoice_helpers 9 | import order_helpers 10 | import transmitter 11 | from database import db 12 | from models import TxRetry 13 | from worker import Worker 14 | 15 | ONE_MINUTE = 60 16 | CLEANUP_DUTY_CYCLE = 5 * ONE_MINUTE # five minutes 17 | ORDER_RETRANSMIT_CYCLE_SECONDS = 10 18 | 19 | 20 | def cleanup_database(app): 21 | with app.app_context(): 22 | (expired_invoices, 23 | expired_orders) = invoice_helpers.expire_unpaid_invoices() 24 | expired_orders.extend(order_helpers.expire_old_pending_orders()) 25 | cleaned_up_orders = order_helpers.cleanup_old_message_files() 26 | 27 | work = [ 28 | len(x) 29 | for x in [expired_invoices, expired_orders, cleaned_up_orders] 30 | ] 31 | 
        if (any(work)):
            # Log only when at least one cleanup category did real work.
            logging.info("Database cleanup: expired {} invoices, "
                         "{} orders, and removed {} files".format(*work))


def retry_transmission(app):
    """Restart transmission whenever pending retry records exist.

    Runs periodically (every ORDER_RETRANSMIT_CYCLE_SECONDS); tx_start()
    is only invoked if the TxRetry table is non-empty.
    """
    with app.app_context():
        order_helpers.refresh_retransmission_table()
        any_retry_record = TxRetry.query.first()
        if any_retry_record:
            transmitter.tx_start()


def start_workers(app):
    """Spawn the periodic background workers and block on them forever."""
    cleanup_worker = Worker(period=CLEANUP_DUTY_CYCLE,
                            fcn=cleanup_database,
                            args=(app, ),
                            name="database cleaner")

    retry_worker = Worker(period=ORDER_RETRANSMIT_CYCLE_SECONDS,
                          fcn=retry_transmission,
                          args=(app, ),
                          name="order retransmission")

    # The worker threads loop forever; join() keeps this process alive.
    cleanup_worker.thread.join()
    retry_worker.thread.join()


def create_app():
    """Build a minimal Flask app for DB/Redis access (no HTTP routes).

    Mirrors server.create_app()'s configuration so both processes share
    the same SQLite file and Redis instance.
    """
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{constants.DB_FILE}'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config["REDIS_INSTANCE"] = redis.from_url(constants.REDIS_URI)
    db.init_app(app)
    return app


def main():
    """Entry point: start transmission once, then run the workers."""
    logging.basicConfig(level=logging.DEBUG, format=constants.LOGGING_FORMAT)
    app = create_app()

    with app.app_context():
        db.create_all()
        # To avoid calling tx_start on each gunicorn worker, call it here once
        # instead. Also, wait a bit before calling tx_start so that clients
        # have enough time to reconnect to the SSE server.
77 | time.sleep(3) 78 | transmitter.tx_start() 79 | start_workers(app) 80 | 81 | 82 | if __name__ == '__main__': 83 | main() 84 | -------------------------------------------------------------------------------- /server/alembic/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | from logging.config import fileConfig 4 | 5 | from sqlalchemy import engine_from_config 6 | from sqlalchemy import pool 7 | 8 | from alembic import context 9 | 10 | # this is the Alembic Config object, which provides 11 | # access to the values within the .ini file in use. 12 | config = context.config 13 | 14 | # Interpret the config file for Python logging. 15 | # This line sets up loggers basically. 16 | fileConfig(config.config_file_name) 17 | 18 | # add your model's MetaData object here 19 | # for 'autogenerate' support 20 | # from myapp import mymodel 21 | # target_metadata = mymodel.Base.metadata 22 | target_metadata = None 23 | 24 | # other values from the config, defined by the needs of env.py, 25 | # can be acquired: 26 | # my_important_option = config.get_main_option("my_important_option") 27 | # ... etc. 28 | db_yaml_path = os.path.join("config", 'database.yml') 29 | with open(db_yaml_path, 'r') as fd: 30 | db_conf = yaml.safe_load(fd) 31 | env = os.getenv('ENV', 'development') 32 | db_file = db_conf[env]['database'] 33 | db_dir = os.path.dirname(db_file) 34 | if not os.path.isdir(db_dir): 35 | os.makedirs(db_dir) 36 | config.set_main_option('sqlalchemy.url', f'sqlite:///{db_file}') 37 | 38 | 39 | def run_migrations_offline(): 40 | """Run migrations in 'offline' mode. 41 | 42 | This configures the context with just a URL 43 | and not an Engine, though an Engine is acceptable 44 | here as well. By skipping the Engine creation 45 | we don't even need a DBAPI to be available. 46 | 47 | Calls to context.execute() here emit the given string to the 48 | script output. 
49 | 50 | """ 51 | url = config.get_main_option("sqlalchemy.url") 52 | context.configure( 53 | url=url, 54 | target_metadata=target_metadata, 55 | literal_binds=True, 56 | dialect_opts={"paramstyle": "named"}, 57 | ) 58 | 59 | with context.begin_transaction(): 60 | context.run_migrations() 61 | 62 | 63 | def run_migrations_online(): 64 | """Run migrations in 'online' mode. 65 | 66 | In this scenario we need to create an Engine 67 | and associate a connection with the context. 68 | 69 | """ 70 | connectable = engine_from_config( 71 | config.get_section(config.config_ini_section), 72 | prefix="sqlalchemy.", 73 | poolclass=pool.NullPool, 74 | ) 75 | 76 | with connectable.connect() as connection: 77 | context.configure(connection=connection, 78 | target_metadata=target_metadata) 79 | 80 | with context.begin_transaction(): 81 | context.run_migrations() 82 | 83 | 84 | if context.is_offline_mode(): 85 | run_migrations_offline() 86 | else: 87 | run_migrations_online() 88 | -------------------------------------------------------------------------------- /server/tests/test_worker_manager.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | import os 3 | import pytest 4 | from unittest.mock import patch 5 | 6 | from common import generate_test_order 7 | from constants import InvoiceStatus, OrderStatus 8 | from database import db 9 | from models import Order, Invoice 10 | import constants 11 | import server 12 | from worker_manager import cleanup_database 13 | 14 | 15 | @pytest.fixture 16 | def app(): 17 | app = server.create_app(from_test=True) 18 | app.app_context().push() 19 | yield app 20 | server.teardown_app(app) 21 | 22 | 23 | @pytest.fixture 24 | def client(app): 25 | with app.test_client() as client: 26 | yield client 27 | 28 | 29 | @patch('orders.new_invoice') 30 | def test_cleanup_database(mock_new_invoice, client, app): 31 | # prepare test env 32 | 33 | # create an invoice that must 
get expired 34 | pending_invoice_lid = generate_test_order( 35 | mock_new_invoice, client, order_id=2)['lightning_invoice']['id'] 36 | pending_db_invoice = \ 37 | Invoice.query.filter_by(lid=pending_invoice_lid).first() 38 | pending_db_invoice.expires_at = datetime.utcnow() - timedelta(days=1) 39 | db.session.commit() 40 | 41 | # create an order that must get expired 42 | pending_order_uuid = generate_test_order(mock_new_invoice, 43 | client, 44 | order_id=3)['uuid'] 45 | pending_db_order = Order.query.filter_by(uuid=pending_order_uuid).first() 46 | pending_db_order.created_at = datetime.utcnow() - \ 47 | timedelta(days=constants.EXPIRE_PENDING_ORDERS_AFTER_DAYS 48 | + 1) 49 | db.session.commit() 50 | 51 | # Create an order whose transmission ended a long time ago. The 52 | # corresponding message file should be deleted. 53 | sent_order_uuid = generate_test_order( 54 | mock_new_invoice, 55 | client, 56 | order_id=4, 57 | invoice_status=InvoiceStatus.paid)['uuid'] 58 | sent_db_order = Order.query.filter_by(uuid=sent_order_uuid).first() 59 | sent_db_order.ended_transmission_at = datetime.utcnow() -\ 60 | timedelta(days=constants.MESSAGE_FILE_RETENTION_TIME_DAYS 61 | + 1) 62 | db.session.commit() 63 | 64 | cleanup_database(app) 65 | 66 | pending_db_invoice = \ 67 | Invoice.query.filter_by(lid=pending_invoice_lid).first() 68 | assert pending_db_invoice.status == InvoiceStatus.expired.value 69 | 70 | pending_db_order = Order.query.filter_by(uuid=pending_order_uuid).first() 71 | assert pending_db_order.status == OrderStatus.expired.value 72 | 73 | message_path = os.path.join(constants.MSG_STORE_PATH, pending_order_uuid) 74 | assert not os.path.exists(message_path) 75 | 76 | message_path = os.path.join(constants.MSG_STORE_PATH, sent_order_uuid) 77 | assert not os.path.exists(message_path) 78 | -------------------------------------------------------------------------------- /server/alembic/versions/c7b63286fd71_baseline.py: 
--------------------------------------------------------------------------------
"""baseline

Revision ID: c7b63286fd71
Revises:
Create Date: 2021-05-27 21:56:12.258456

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector

# revision identifiers, used by Alembic.
revision = 'c7b63286fd71'
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    """Create the baseline schema.

    Each table is guarded with an existence check so this migration can be
    stamped onto databases previously created by db.create_all() without
    Alembic, as well as applied to fresh databases.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()

    if 'orders' not in tables:
        op.create_table(
            'orders', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('bid', sa.Integer, default=0),
            sa.Column('message_size', sa.Integer, nullable=False),
            sa.Column('bid_per_byte', sa.Float, default=0),
            sa.Column('message_digest', sa.String(64), nullable=False),
            sa.Column('status', sa.Integer),
            sa.Column('uuid', sa.String(36), nullable=False),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('cancelled_at', sa.DateTime),
            sa.Column('started_transmission_at', sa.DateTime),
            sa.Column('ended_transmission_at', sa.DateTime),
            sa.Column('tx_seq_num', sa.Integer, unique=True),
            sa.Column('unpaid_bid', sa.Integer, nullable=False))

    if 'invoices' not in tables:
        op.create_table(
            'invoices', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('lid', sa.String(100), nullable=False),
            sa.Column('invoice', sa.String(1024), nullable=False),
            sa.Column('paid_at', sa.DateTime),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('status', sa.Integer), sa.Column('amount', sa.Integer),
            sa.Column('expires_at', sa.DateTime, nullable=False))

    if 'tx_confirmations' not in tables:
        op.create_table(
            'tx_confirmations', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('region_id', sa.Integer),
            sa.Column('presumed', sa.Boolean, default=False))

    if 'rx_confirmations' not in tables:
        op.create_table(
            'rx_confirmations', sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('created_at', sa.DateTime, default=sa.func.now()),
            sa.Column('order_id', sa.Integer, sa.ForeignKey('orders.id')),
            sa.Column('region_id', sa.Integer),
            sa.Column('presumed', sa.Boolean, default=False))


def downgrade():
    # NOTE(review): the child tables reference orders.id via foreign keys;
    # dropping 'orders' first relies on SQLite's lax FK enforcement —
    # confirm, or drop children before 'orders'.
    op.drop_table('orders')
    op.drop_table('invoices')
    op.drop_table('tx_confirmations')
    op.drop_table('rx_confirmations')
--------------------------------------------------------------------------------
/server/regions.py:
--------------------------------------------------------------------------------
from enum import Enum


class Regions(Enum):
    """Satellite beam regions; each value is the region's bit position in
    the transmission region bitmask (see region_number_list_to_code)."""
    g18 = 0
    e113 = 1
    t11n_afr = 2
    t11n_eu = 3
    t18v_c = 4
    t18v_ku = 5


# NOTE: an id field equals to "region.value + 1" is required for
# backward compatibility with the previous Ruby-based implementation.
15 | SATELLITE_REGIONS = { 16 | Regions.g18: { 17 | 'id': Regions.g18.value + 1, 18 | 'satellite_name': 'Galaxy 18', 19 | 'coverage': 'North America', 20 | 'has_receiver': True 21 | }, 22 | Regions.e113: { 23 | 'id': Regions.e113.value + 1, 24 | 'satellite_name': 'Eutelsat 113', 25 | 'coverage': 'South America', 26 | 'has_receiver': True 27 | }, 28 | Regions.t11n_afr: { 29 | 'id': Regions.t11n_afr.value + 1, 30 | 'satellite_name': 'Telstar 11N', 31 | 'coverage': 'Africa', 32 | 'has_receiver': False 33 | }, 34 | Regions.t11n_eu: { 35 | 'id': Regions.t11n_eu.value + 1, 36 | 'satellite_name': 'Telstar 11N', 37 | 'coverage': 'Europe', 38 | 'has_receiver': False 39 | }, 40 | Regions.t18v_c: { 41 | 'id': Regions.t18v_c.value + 1, 42 | 'satellite_name': 'Telstar 18V C', 43 | 'coverage': 'Asia Pacific', 44 | 'has_receiver': True 45 | }, 46 | Regions.t18v_ku: { 47 | 'id': Regions.t18v_ku.value + 1, 48 | 'satellite_name': 'Telstar 18V Ku', 49 | 'coverage': 'Asia Pacific', 50 | 'has_receiver': True 51 | }, 52 | } 53 | 54 | all_region_ids = list(region['id'] for region in SATELLITE_REGIONS.values()) 55 | all_region_numbers = list(item.value for item in Regions) 56 | 57 | # Subset of regions that should confirm rx 58 | monitored_rx_regions = set([ 59 | info['id'] for info in SATELLITE_REGIONS.values() if info['has_receiver'] 60 | ]) 61 | 62 | REGION_MASK_ALL_REGIONS = 2**len(SATELLITE_REGIONS) - 1 63 | 64 | 65 | def region_number_to_id(region_number): 66 | return SATELLITE_REGIONS[Regions(region_number)]['id'] 67 | 68 | 69 | def region_id_to_number(region_id): 70 | for region_number, region_detail in SATELLITE_REGIONS.items(): 71 | if region_detail['id'] == region_id: 72 | return region_number.value 73 | 74 | 75 | def region_number_list_to_code(order_region_numbers): 76 | assert (all([x in all_region_numbers for x in order_region_numbers])) 77 | code = 0 78 | for region_number in order_region_numbers: 79 | code |= 1 << region_number 80 | return code 81 | 82 | 83 | def 
region_id_list_to_code(order_region_ids): 84 | order_region_numbers = [region_id_to_number(x) for x in order_region_ids] 85 | return region_number_list_to_code(order_region_numbers) 86 | 87 | 88 | def region_code_to_number_list(code): 89 | if not code: 90 | return all_region_numbers 91 | order_region_numbers = [] 92 | for region in all_region_numbers: 93 | mask = 1 << region 94 | if mask & code: 95 | order_region_numbers.append(region) 96 | return order_region_numbers 97 | 98 | 99 | def region_code_to_id_list(code): 100 | return [region_number_to_id(x) for x in region_code_to_number_list(code)] 101 | -------------------------------------------------------------------------------- /server/models.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.sql import func 2 | 3 | from database import db 4 | 5 | 6 | class Order(db.Model): 7 | __tablename__ = 'orders' 8 | id = db.Column(db.Integer, primary_key=True) 9 | bid = db.Column(db.Integer, default=0) 10 | message_size = db.Column(db.Integer, nullable=False) 11 | bid_per_byte = db.Column(db.Float, default=0) # TODO: remove (redundant) 12 | message_digest = db.Column(db.String(64), nullable=False) 13 | status = db.Column(db.Integer) 14 | uuid = db.Column(db.String(36), nullable=False) 15 | created_at = db.Column(db.DateTime, default=func.now()) 16 | cancelled_at = db.Column(db.DateTime) 17 | started_transmission_at = db.Column(db.DateTime) 18 | ended_transmission_at = db.Column(db.DateTime) 19 | tx_seq_num = db.Column(db.Integer, unique=True) 20 | unpaid_bid = db.Column(db.Integer, nullable=False) 21 | region_code = db.Column(db.Integer) 22 | channel = db.Column(db.Integer, default=1) 23 | invoices = db.relationship('Invoice', backref='order', lazy=True) 24 | tx_confirmations = db.relationship('TxConfirmation', 25 | backref='order', 26 | lazy=True) 27 | rx_confirmations = db.relationship('RxConfirmation', 28 | backref='order', 29 | lazy=True) 30 | retransmission = 
db.relationship("TxRetry", 31 | uselist=False, 32 | backref="order", 33 | lazy=True) 34 | 35 | 36 | class Invoice(db.Model): 37 | __tablename__ = 'invoices' 38 | id = db.Column(db.Integer, primary_key=True) 39 | lid = db.Column(db.String(100), nullable=False) 40 | invoice = db.Column(db.String(1024), nullable=False) 41 | paid_at = db.Column(db.DateTime) 42 | created_at = db.Column(db.DateTime, default=func.now()) 43 | order_id = db.Column(db.Integer, db.ForeignKey('orders.id')) 44 | status = db.Column(db.Integer) 45 | amount = db.Column(db.Integer) 46 | expires_at = db.Column(db.DateTime, nullable=False) 47 | 48 | 49 | class TxConfirmation(db.Model): 50 | __tablename__ = 'tx_confirmations' 51 | id = db.Column(db.Integer, primary_key=True) 52 | created_at = db.Column(db.DateTime, default=func.now()) 53 | order_id = db.Column(db.Integer, 54 | db.ForeignKey('orders.id'), 55 | nullable=False) 56 | region_id = db.Column(db.Integer, nullable=False) 57 | presumed = db.Column(db.Boolean, default=False) 58 | 59 | 60 | class RxConfirmation(db.Model): 61 | __tablename__ = 'rx_confirmations' 62 | id = db.Column(db.Integer, primary_key=True) 63 | created_at = db.Column(db.DateTime, default=func.now()) 64 | order_id = db.Column(db.Integer, 65 | db.ForeignKey('orders.id'), 66 | nullable=False) 67 | region_id = db.Column(db.Integer, nullable=False) 68 | presumed = db.Column(db.Boolean, default=False) 69 | 70 | 71 | class TxRetry(db.Model): 72 | __tablename__ = 'tx_retries' 73 | id = db.Column(db.Integer, primary_key=True) 74 | order_id = db.Column(db.Integer, db.ForeignKey('orders.id')) 75 | last_attempt = db.Column(db.DateTime) 76 | retry_count = db.Column(db.Integer, default=0) 77 | region_code = db.Column(db.Integer) 78 | pending = db.Column(db.Boolean, default=True) 79 | created_at = db.Column(db.DateTime, default=func.now()) 80 | -------------------------------------------------------------------------------- /server/error.py: 
from http import HTTPStatus

# Satellite-API error catalog. Each entry maps an error key to a tuple of
# (api error code, title, detail format string, HTTP status).
errors = {
    'PARAM_COERCION':
    (2, "type coercion error", "{} does not have the expected type",
     HTTPStatus.INTERNAL_SERVER_ERROR),
    'BID_TOO_SMALL': (102, "Bid too low",
                      "The minimum bid for this message is {} millisatoshis.",
                      HTTPStatus.BAD_REQUEST),
    'ORDER_NOT_FOUND': (104, "Order not found", "UUID {} not found",
                        HTTPStatus.NOT_FOUND),
    'INVALID_AUTH_TOKEN': (109, "Unauthorized", "Invalid authentication token",
                           HTTPStatus.UNAUTHORIZED),
    'LIGHTNING_CHARGE_INVOICE_ERROR':
    (110, "Invoice Creation Error", "Lightning Charge invoice creation error",
     HTTPStatus.BAD_REQUEST),
    'LIGHTNING_CHARGE_WEBHOOK_REGISTRATION_ERROR':
    (111, "Invoice Creation Error",
     "Lightning Charge webhook registration error", HTTPStatus.BAD_REQUEST),
    'INVOICE_ID_NOT_FOUND_ERROR': (112, "Not found", "Invoice id {} not found",
                                   HTTPStatus.NOT_FOUND),
    'SEQUENCE_NUMBER_NOT_FOUND':
    (114, "Sequence number not found",
     "Sent order with sequence number {} not found", HTTPStatus.NOT_FOUND),
    'MESSAGE_FILE_TOO_SMALL': (117, "Message too small",
                               "Minimum message size is {} byte",
                               HTTPStatus.BAD_REQUEST),
    'MESSAGE_FILE_TOO_LARGE': (118, "Message too large",
                               "Message size exceeds max size of {:.2f} MB",
                               HTTPStatus.REQUEST_ENTITY_TOO_LARGE),
    'ORDER_CANCELLATION_ERROR': (120, "Cannot cancel order",
                                 "Order already {}", HTTPStatus.BAD_REQUEST),
    'ORDER_BUMP_ERROR': (121, "Cannot bump order", "Order already {}",
                         HTTPStatus.BAD_REQUEST),
    'ORPHANED_INVOICE': (122, "Payment problem", "Orphaned invoice",
                         HTTPStatus.NOT_FOUND),
    'INVOICE_ALREADY_PAID': (123, "Payment problem", "Invoice already paid",
                             HTTPStatus.BAD_REQUEST),
    'MESSAGE_MISSING':
    (126, "Message upload problem",
     "Either a message file or a message parameter is required",
     HTTPStatus.BAD_REQUEST),
    'LIGHTNING_CHARGE_INFO_FAILED':
    (128, "Lightning Charge communication error",
     "Failed to fetch information about the Lightning node",
     HTTPStatus.INTERNAL_SERVER_ERROR),
    'INVOICE_ALREADY_EXPIRED': (129, "Payment problem",
                                "Invoice already expired",
                                HTTPStatus.BAD_REQUEST),
    'ORDER_CHANNEL_UNAUTHORIZED_OP': (130, "Unauthorized channel operation",
                                      "Operation not supported on channel {}",
                                      HTTPStatus.UNAUTHORIZED),
}


def _err_to_dict(key, *args):
    """Build the JSON-serializable error payload for the given error key.

    Args:
        key: Error key into the `errors` catalog.
        *args: Values substituted into the detail format string.
    """
    assert (key in errors)
    code, title, detail_fmt, _ = errors[key]
    return {
        'message': title,
        'errors': [{
            'title': title,
            'detail': detail_fmt.format(*args),
            'code': code
        }]
    }


def get_http_error_resp(key, *args):
    """Return the HTTP error response for the given key.

    Returns:
        Pair with the error payload dictionary (including the
        satellite-specific error code and details) and the HTTP status.
    """
    return _err_to_dict(key, *args), errors[key][3]


def assert_error(err_data, key):
    """Verify that the error response is as expected for the given key."""
    assert 'message' in err_data
    assert 'errors' in err_data
    first_error = err_data['errors'][0]
    # Title and code are static per key; detail is formatted dynamically and
    # therefore not checked.
    assert first_error['title'] == errors[key][1]
    assert first_error['code'] == errors[key][0]
obj.rx_confirmations]) 43 | retransmission = fields.Nested(TxRetrySchema) 44 | 45 | 46 | def must_be_region_number(input): 47 | if input not in all_region_numbers: 48 | raise ValidationError( 49 | "Region number not found. The number should be one of " 50 | f"{all_region_numbers}.") 51 | 52 | 53 | def must_be_region_number_list(data): 54 | try: 55 | regions_list = json.loads(data) 56 | if not isinstance(regions_list, list) or len(regions_list) < 1: 57 | raise ValidationError("Invalid json array.") 58 | for region_number in regions_list: 59 | must_be_region_number(region_number) 60 | except json.JSONDecodeError: 61 | raise ValidationError("Invalid json array.") 62 | 63 | 64 | class OrderUploadReqSchema(Schema): 65 | bid = fields.Int(missing=0, validate=validate.Range(min=0)) 66 | message = fields.Str(validate=validate.Length( 67 | max=constants.MAX_TEXT_MSG_LEN)) 68 | regions = fields.String(required=False, 69 | validate=must_be_region_number_list) 70 | channel = fields.Int(missing=constants.USER_CHANNEL, 71 | validate=validate.OneOf(constants.CHANNELS)) 72 | 73 | 74 | class OrderBumpSchema(Schema): 75 | uuid = fields.String() 76 | bid_increase = fields.Int(required=True, validate=validate.Range(min=0)) 77 | auth_token = fields.Str() 78 | 79 | 80 | class OrdersSchema(Schema): 81 | before = fields.DateTime(format='iso') 82 | before_delta = fields.TimeDelta('seconds') 83 | after = fields.DateTime(format='iso') 84 | after_delta = fields.TimeDelta('seconds') 85 | limit = fields.Int(missing=lambda: constants.PAGE_SIZE, 86 | validate=validate.Range(min=1, 87 | max=constants.MAX_PAGE_SIZE)) 88 | channel = fields.Int(missing=constants.USER_CHANNEL, 89 | validate=validate.OneOf(constants.CHANNELS)) 90 | 91 | @validates_schema 92 | def validate_numbers(self, data, **kwargs): 93 | if ('before' in data and 'before_delta' in data): 94 | raise ValidationError( 95 | "Only one of before or before_delta is allowed") 96 | if ('after' in data and 'after_delta' in data): 97 | raise 
def assign_tx_seq_num(order):
    """Assign the next Tx sequence number to the given order.

    Sequence numbers start at 1 and increase monotonically across orders.

    Fix: the original code dereferenced the query result unconditionally,
    which raises AttributeError when the orders table is empty (``first()``
    returns None). Guard against None and fall back to 1.
    """
    last_tx_order = Order.query.order_by(Order.tx_seq_num.desc()).first()

    if last_tx_order is not None and last_tx_order.tx_seq_num:
        order.tx_seq_num = last_tx_order.tx_seq_num + 1
    else:
        order.tx_seq_num = 1
    db.session.commit()


def redis():
    """Return the Redis instance stored in the Flask app config."""
    return current_app.config.get("REDIS_INSTANCE")


def publish_to_sse_server(order, retransmit_info=None):
    """Publish the order state to the SSE server via Redis pub/sub.

    Args:
        order: Order whose state is being announced.
        retransmit_info (TxRetry, optional): When given, the published
            regions list is taken from the tx_retries entry instead of the
            orders table.
    """
    msg = order_schema.dump(order)
    # If it's a retransmission, take the regions list from the tx_retries
    # table instead of the orders table.
    if retransmit_info:
        msg['regions'] = region_code_to_number_list(
            retransmit_info.region_code)
    msg = json.dumps(msg)
    # Publish on the channel named after the order's logical channel
    redis().publish(channel=constants.CHANNEL_INFO[order.channel].name,
                    message=msg)
    return


def tx_start(channel=None):
    """Look for pending transmissions and serve them

    An order is ready for transmission when already paid or when being
    retransmitted. Also, a pending transmission can only be served if there
    is no other ongoing transmission on the logical channel. Each channel
    can only serve one transmission at a time.

    This function works both for a single defined channel or for all
    channels. When the channel parameter is undefined, it looks for pending
    transmissions in all channels. Otherwise, it processes the specified
    channel only.

    Args:
        channel (int, optional): Logical transmission channel to serve.
            Defaults to None.

    """
    # Call itself recursively if the channel is not defined
    if (channel is None):
        for channel in constants.CHANNELS:
            tx_start(channel)
        return

    transmitting_orders = Order.query.filter(
        and_(Order.status == constants.OrderStatus.transmitting.value,
             Order.channel == channel)).all()

    # Do not start a new transmission if another order is being transmitted
    # right now
    if len(transmitting_orders) > 0:
        return

    # First, try to find a paid order with the highest bid in the orders
    # table and start its transmission. If no paid orders are found there,
    # look into the tx_retries table and retransmit one of the orders from
    # there if it meets the retransmission criteria
    order = Order.query.filter(
        and_(Order.status == constants.OrderStatus.paid.value,
             Order.channel == channel)).order_by(
                 Order.bid_per_byte.desc()).first()

    if order:
        logging.info(f'transmission start {order.uuid}')
        assign_tx_seq_num(order)
        order.status = constants.OrderStatus.transmitting.value
        order.started_transmission_at = datetime.utcnow()
        db.session.commit()
        publish_to_sse_server(order)
    else:
        # No order found for the first transmission.
        # Check if any order requires retransmission.
        order, retransmit_info = order_helpers.get_next_retransmission(channel)
        if order and retransmit_info:
            logging.info(f'retransmission start {order.uuid}')
            order.status = constants.OrderStatus.transmitting.value
            retransmit_info.retry_count += 1
            retransmit_info.last_attempt = datetime.utcnow()
            retransmit_info.pending = False
            db.session.commit()
            publish_to_sse_server(order, retransmit_info)


def tx_end(order):
    """End transmission

    Marks the end-of-transmission timestamp, removes any retransmission
    entry for the order, notifies the SSE server, and immediately tries to
    start the next queued transmission on the same channel.
    """
    if order.ended_transmission_at is None:
        logging.info(f'transmission end {order.uuid}')
        order.ended_transmission_at = datetime.utcnow()
        retransmit_info = TxRetry.query.filter_by(order_id=order.id).first()
        # Cleanup the TxRetry
        # NOTE(review): retransmit_info is read again by
        # publish_to_sse_server after the delete is committed; verify the
        # session's expire_on_commit behavior keeps its attributes readable.
        TxRetry.query.filter_by(order_id=order.id).delete()
        db.session.commit()
        publish_to_sse_server(order, retransmit_info)
        # Start the next queued order as soon as the current order finishes
        tx_start(order.channel)
from utils import hmac_sha256_digest 7 | 8 | 9 | class OrderStatus(Enum): 10 | pending = 0 11 | paid = 1 12 | transmitting = 2 13 | sent = 3 14 | received = 4 15 | cancelled = 5 16 | expired = 6 17 | confirming = 7 # confirming Tx (between transmitting and sent) 18 | 19 | 20 | ORDER_FETCH_STATES = [ 21 | 'pending', 'paid', 'transmitting', 'confirming', 'queued', 'sent', 22 | 'rx-pending', 'received', 'retransmitting' 23 | ] 24 | 25 | 26 | class InvoiceStatus(Enum): 27 | pending = 0 28 | paid = 1 29 | expired = 2 30 | 31 | 32 | db_yaml_path = os.path.join("config", 'database.yml') 33 | with open(db_yaml_path, 'r') as fd: 34 | db_conf = yaml.safe_load(fd) 35 | 36 | env = os.getenv('ENV', 'development') 37 | 38 | EXPIRE_PENDING_ORDERS_AFTER_DAYS = 1 39 | MESSAGE_FILE_RETENTION_TIME_DAYS = 31 40 | DEFAULT_TX_CONFIRM_TIMEOUT_SECS = 60 41 | 42 | SERVER_PORT = 9292 43 | CALLBACK_URI_ROOT = os.getenv('CALLBACK_URI_ROOT', 44 | "http://127.0.0.1:{}".format(SERVER_PORT)) 45 | CHARGE_API_TOKEN = os.getenv('CHARGE_API_TOKEN', str(uuid.uuid4())) 46 | LIGHTNING_WEBHOOK_KEY = hmac_sha256_digest('charged-token', CHARGE_API_TOKEN) 47 | 48 | CHARGE_ROOT = os.getenv('CHARGE_ROOT', 49 | f'http://api-token:{CHARGE_API_TOKEN}@127.0.0.1:9112') 50 | CONNECTION_TIMEOUT = 2 51 | DB_FILE = db_conf[env]['database'] 52 | DB_ROOT = os.path.dirname(DB_FILE) 53 | LN_INVOICE_EXPIRY = 60 * 60 # one hour 54 | LN_INVOICE_DESCRIPTION = 'Blockstream Satellite Transmission' if os.getenv( 55 | 'ENV') == 'production' else 'BSS Test' 56 | 57 | DEFAULT_MAX_MESSAGE_SIZE = 2**20 58 | MAX_TEXT_MSG_LEN = 1024 # valid for message (not file) 59 | 60 | MIN_BID = int(os.getenv('MIN_BID', 1000)) 61 | MIN_MESSAGE_SIZE = 1 62 | MIN_PER_BYTE_BID = float(os.getenv('MIN_PER_BYTE_BID', 1)) 63 | MSG_STORE_PATH = os.path.join(DB_ROOT, 'messages') 64 | PAGE_SIZE = 20 65 | MAX_PAGE_SIZE = 100 66 | RESPONSE_TIMEOUT = 2 67 | 68 | FORCE_PAYMENT = os.getenv('FORCE_PAYMENT', False) 69 | 70 | LOGGING_FORMAT = '%(asctime)s 
# Logical channel identifiers
USER_CHANNEL = 1
AUTH_CHANNEL = 3
GOSSIP_CHANNEL = 4
BTC_SRC_CHANNEL = 5


class ChannelInfo:
    """Static description of a logical transmission channel."""

    def __init__(self,
                 name,
                 user_permissions,
                 tx_rate,
                 max_msg_size,
                 tx_confirm_timeout_secs=DEFAULT_TX_CONFIRM_TIMEOUT_SECS):
        """Construct channel information

        Args:
            name (str): Channel name.
            user_permissions (list): User permissions, drawn from 'get',
                'post', and 'delete'. An empty list means the channel
                messages are only sent over satellite. A list with 'get'
                permission only means the users can only fetch messages but
                not post them, and only the admin can post messages.
            tx_rate (float): Transmit rate in bytes/sec. Used to handle the
                retransmission timeout intervals independently on each
                channel.
            max_msg_size (int): Maximum message size on this channel.
            tx_confirm_timeout_secs (int): Tx confirmation timeout in
                seconds leading to retransmission decisions.
        """
        assert isinstance(user_permissions, list)
        # Fix: the original check evaluated a bare list comprehension
        # (`[x in [...] for x in ...]`), which is truthy for any non-empty
        # list, so invalid permissions were never rejected. Validate every
        # entry explicitly; 'delete' is included since the user channel
        # below declares it.
        assert all(x in ('get', 'post', 'delete') for x in user_permissions)
        self.name = name
        self.user_permissions = user_permissions
        # Whether the channel messages must be paid by the user. The
        # channels on which users can post messages necessarily require
        # payment. The other channels can only have messages posted by the
        # admin, and these messages are not paid.
        self.requires_payment = 'post' in user_permissions
        self.tx_rate = tx_rate
        self.max_msg_size = max_msg_size
        self.tx_confirm_timeout_secs = tx_confirm_timeout_secs


CHANNEL_INFO = {
    USER_CHANNEL:
    ChannelInfo('transmissions', ['get', 'post', 'delete'], 1000,
                DEFAULT_MAX_MESSAGE_SIZE),
    AUTH_CHANNEL:
    ChannelInfo('auth', [], 125, DEFAULT_MAX_MESSAGE_SIZE),
    GOSSIP_CHANNEL:
    ChannelInfo(
        'gossip',
        ['get'],
        500,
        1800000,  # tx over 1h at 500 bytes/sec
        300  # Tx confirmation timeout = 5 min
    ),
    BTC_SRC_CHANNEL:
    ChannelInfo(
        'btc-src',
        ['get'],
        500,
        16200000,  # tx over 9h at 500 bytes/sec
        300  # Tx confirmation timeout = 5 min
    ),
}

CHANNELS = list(CHANNEL_INFO.keys())
default = "satapi-tor" 43 | } 44 | 45 | variable "create_resources" { 46 | type = string 47 | default = "" 48 | } 49 | 50 | variable "target_pool" { 51 | type = string 52 | default = "" 53 | } 54 | 55 | variable "charge_token" { 56 | type = string 57 | default = "" 58 | } 59 | 60 | variable "host" { 61 | type = string 62 | default = "" 63 | } 64 | 65 | variable "onion_host" { 66 | type = string 67 | default = "" 68 | } 69 | 70 | variable "region" { 71 | type = string 72 | default = "" 73 | } 74 | 75 | variable "zone" { 76 | type = string 77 | default = "" 78 | } 79 | 80 | variable "instance_type" { 81 | type = list(string) 82 | default = ["", "", ""] 83 | } 84 | 85 | variable "timeout" { 86 | type = string 87 | default = 7200 88 | } 89 | 90 | variable "prom_service_acct" { 91 | type = string 92 | default = "" 93 | } 94 | 95 | variable "lb_svc_acct" { 96 | type = string 97 | default = "" 98 | } 99 | 100 | variable "prom_allowed_source_ip" { 101 | type = list(any) 102 | default = [] 103 | } 104 | 105 | 106 | variable "satellite_lb" { 107 | type = string 108 | default = "" 109 | } 110 | 111 | variable "satellite_api_lb" { 112 | type = string 113 | default = "" 114 | } 115 | 116 | variable "satellite_api_lb_staging" { 117 | type = string 118 | default = "" 119 | } 120 | 121 | variable "blocksat_monitoring" { 122 | type = string 123 | default = "" 124 | } 125 | 126 | variable "internal_ip_mainnet" { 127 | type = string 128 | default = "" 129 | } 130 | 131 | variable "internal_ip_testnet" { 132 | type = string 133 | default = "" 134 | } 135 | 136 | variable "health_check" { 137 | type = string 138 | default = "" 139 | } 140 | 141 | variable "k8s_autossh_lb" { 142 | type = string 143 | default = "" 144 | } 145 | 146 | variable "rpcpass" { 147 | type = string 148 | default = "" 149 | } 150 | 151 | variable "ssh_key_net" { 152 | type = string 153 | default = "" 154 | } 155 | 156 | variable "lightning_cmd" { 157 | type = string 158 | default = "" 159 | } 160 | 161 | # 
Overwritten by CI 162 | variable "public_bucket_url" { 163 | type = string 164 | default = "" 165 | } 166 | 167 | variable "private_bucket" { 168 | type = string 169 | default = "" 170 | } 171 | 172 | variable "letsencrypt_email" { 173 | type = string 174 | default = "" 175 | } 176 | 177 | variable "station1" { 178 | type = string 179 | default = "" 180 | } 181 | 182 | variable "station2" { 183 | type = string 184 | default = "" 185 | } 186 | 187 | variable "station3" { 188 | type = string 189 | default = "" 190 | } 191 | 192 | variable "sat_api_docker" { 193 | type = string 194 | default = "" 195 | } 196 | 197 | variable "sat_api_sse_docker" { 198 | type = string 199 | default = "" 200 | } 201 | 202 | # Less frequently updated images 203 | variable "lightning_docker" { 204 | type = string 205 | default = "blockstream/lightningd:v25.05" 206 | } 207 | 208 | variable "charge_docker" { 209 | type = string 210 | default = "blockstream/charged:v0.4.24" 211 | } 212 | 213 | variable "tor_docker" { 214 | type = string 215 | default = "blockstream/tor:0.4.6.8" 216 | } 217 | 218 | variable "node_exporter_docker" { 219 | type = string 220 | default = "prom/node-exporter:v1.1.2" 221 | } 222 | 223 | variable "prom_docker" { 224 | type = string 225 | default = "prom/prometheus:v2.29.1" 226 | } 227 | 228 | variable "gcloud_docker" { 229 | type = string 230 | default = "google/cloud-sdk@sha256:ce81a5731934dabf2a402412a6cd4ef5733581302053007ba7de261513bff9bd" 231 | } 232 | 233 | variable "certbot_docker" { 234 | type = string 235 | default = "blockstream/certbot-gcs@sha256:fc5d7cb31bcf04169f37cbebd74c3bde49651f79e54e1ff3c3eaf6ec47b9f6d0" 236 | } 237 | 238 | variable "autossh_docker" { 239 | type = string 240 | default = "blockstream/autossh@sha256:5e30a60d6ef17aeafdde63bb859238e132fadef174af4092a435bc7325430ebd" 241 | } 242 | -------------------------------------------------------------------------------- /terraform/main.tf: 
terraform {
  required_providers {
    google = {
      source = "hashicorp/google"
    }
    google-beta = {
      source = "hashicorp/google-beta"
    }
  }
  required_version = ">= 0.15"

  # Remote state in GCS
  backend "gcs" {
    bucket = "terraform-bs-source"
    prefix = "satellite-api"
  }
}

provider "google" {
  project = var.project
}

provider "google-beta" {
  project = var.project
}

# Mainnet API + Lightning node stack
module "blc-mainnet" {
  source = "./modules/blc"

  project              = var.project
  name                 = "satellite-api"
  network              = "default"
  lightning_docker     = var.lightning_docker
  charge_docker        = var.charge_docker
  sat_api_docker       = var.sat_api_docker
  sat_api_sse_docker   = var.sat_api_sse_docker
  node_exporter_docker = var.node_exporter_docker
  autossh_docker       = var.autossh_docker
  certbot_docker       = var.certbot_docker
  net                  = "mainnet"
  env                  = local.env
  lb_svc_acct          = module.lb.lb_svc_acct
  ssh_key_net          = ""
  lightning_cmd        = "--mainnet --conf=/root/.lightning/bitcoin/lightning.conf"

  create_resources = local.create_mainnet

  # CI vars
  region            = var.region
  zone              = var.zone
  instance_type     = var.instance_type[1]
  timeout           = var.timeout
  prom_service_acct = var.prom_service_acct
  rpcpass           = var.rpcpass
  charge_token      = var.charge_token
  k8s_autossh_lb    = var.k8s_autossh_lb
  private_bucket    = var.private_bucket
}

# Testnet stack; reads the mainnet workspace's remote state for the LB
# service account (falls back to "" when that state is absent).
module "blc-testnet" {
  source = "./modules/blc"

  project              = var.project
  name                 = "satellite-api"
  network              = "default"
  lightning_docker     = var.lightning_docker
  charge_docker        = var.charge_docker
  sat_api_docker       = var.sat_api_docker
  sat_api_sse_docker   = var.sat_api_sse_docker
  node_exporter_docker = var.node_exporter_docker
  autossh_docker       = var.autossh_docker
  certbot_docker       = var.certbot_docker
  net                  = "testnet"
  env                  = local.env
  lb_svc_acct          = length(data.terraform_remote_state.blc-mainnet.outputs) > 1 ? data.terraform_remote_state.blc-mainnet.outputs.lb_svc_acct : ""
  ssh_key_net          = "_testnet"
  lightning_cmd        = "--testnet --conf=/root/.lightning/testnet/lightning.conf"

  create_resources = local.create_testnet

  # CI vars
  region            = var.region
  zone              = var.zone
  instance_type     = var.instance_type[1]
  timeout           = var.timeout
  prom_service_acct = var.prom_service_acct
  rpcpass           = var.rpcpass
  charge_token      = var.charge_token
  k8s_autossh_lb    = var.k8s_autossh_lb
  private_bucket    = var.private_bucket
}
google_compute_http_health_check.lb-health[0].self_link : "" 107 | 108 | create_resources = local.create_mainnet 109 | 110 | # CI vars 111 | region = var.region 112 | zone = var.zone 113 | instance_type = var.instance_type[0] 114 | host = var.host 115 | timeout = var.timeout 116 | prom_service_acct = var.prom_service_acct 117 | letsencrypt_email = var.letsencrypt_email 118 | public_bucket_url = var.public_bucket_url 119 | station1 = var.station1 120 | station2 = var.station2 121 | station3 = var.station3 122 | } 123 | 124 | module "tor" { 125 | source = "./modules/tor" 126 | 127 | project = var.project 128 | network = "default" 129 | name = "satapi-tor" 130 | gcloud_docker = var.gcloud_docker 131 | tor_docker = var.tor_docker 132 | node_exporter_docker = var.node_exporter_docker 133 | tor_lb = element( 134 | concat(google_compute_global_address.tor-lb.*.address, [""]), 135 | 0, 136 | ) 137 | 138 | create_resources = local.create_misc 139 | 140 | # CI vars 141 | region = var.region 142 | zone = var.zone 143 | instance_type = var.instance_type[0] 144 | onion_host = var.onion_host 145 | prom_service_acct = var.prom_service_acct 146 | } 147 | 148 | module "prometheus" { 149 | source = "./modules/prometheus" 150 | 151 | project = var.project 152 | network = "default" 153 | name = "satapi-prometheus" 154 | prom_docker = var.prom_docker 155 | node_exporter_docker = var.node_exporter_docker 156 | 157 | create_resources = local.create_misc 158 | 159 | # CI vars 160 | region = var.region 161 | zone = var.zone 162 | instance_type = var.instance_type[1] 163 | prom_allowed_source_ip = var.prom_allowed_source_ip 164 | prom_service_acct = var.prom_service_acct 165 | } 166 | 167 | module "dns" { 168 | source = "./modules/dns" 169 | 170 | project = var.project 171 | 172 | create_resources = local.create_misc 173 | 174 | # CI vars 175 | satellite_lb = var.satellite_lb 176 | satellite_api_lb = var.satellite_api_lb 177 | satellite_api_lb_staging = var.satellite_api_lb_staging 178 | 
from http import HTTPStatus
import datetime
import hmac

import requests

from sqlalchemy import and_, func

from constants import InvoiceStatus
import constants
from database import db
from error import get_http_error_resp
from models import Invoice
from order_helpers import maybe_mark_order_as_expired, maybe_mark_order_as_paid
from utils import hmac_sha256_digest


def new_invoice(order, bid):
    """Generate a lightning invoice

    Args:
        order: The order for which this invoice is being generated.
        bid: Bid amount in millisatoshis.
    Returns:
        A pair whose first element is a boolean indicating whether
        the invoice generation was successful or not. If False, then
        the second element is the error key. If True, then the second
        element is the newly generated invoice.
    """
    try:
        bid = int(bid)
        # generate Lightning invoice
        charged_response = requests.post(f"{constants.CHARGE_ROOT}/invoice",
                                         json={
                                             'msatoshi': bid,
                                             'description':
                                             constants.LN_INVOICE_DESCRIPTION,
                                             'expiry':
                                             constants.LN_INVOICE_EXPIRY,
                                             'metadata': {
                                                 'uuid':
                                                 order.uuid,
                                                 'sha256_message_digest':
                                                 order.message_digest
                                             }
                                         },
                                         timeout=(constants.CONNECTION_TIMEOUT,
                                                  constants.RESPONSE_TIMEOUT))
    except requests.exceptions.RequestException:
        return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')
    except ValueError:
        # int(bid) failed
        return False, get_http_error_resp('PARAM_COERCION', 'bid')

    if charged_response.status_code != HTTPStatus.CREATED:
        return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')

    lightning_invoice = charged_response.json()
    if 'id' not in lightning_invoice:
        return False, get_http_error_resp('LIGHTNING_CHARGE_INVOICE_ERROR')

    # NOTE(review): charged_response.content is bytes stored in a String
    # column; works with SQLite, but verify for other backends.
    invoice = Invoice(order_id=order.id,
                      amount=bid,
                      lid=lightning_invoice['id'],
                      invoice=charged_response.content,
                      status=InvoiceStatus.pending.value,
                      expires_at=datetime.datetime.utcnow() +
                      datetime.timedelta(seconds=constants.LN_INVOICE_EXPIRY))

    try:
        # register the webhook that Lightning Charge calls on payment
        charged_auth_token = hmac_sha256_digest(
            constants.LIGHTNING_WEBHOOK_KEY, invoice.lid)
        callback_url = (f"{constants.CALLBACK_URI_ROOT}/callback"
                        f"/{invoice.lid}/{charged_auth_token}")

        webhook_registration_response = requests.post(
            f"{constants.CHARGE_ROOT}/invoice/{invoice.lid}/webhook",
            json={'url': callback_url},
            timeout=(constants.CONNECTION_TIMEOUT, constants.RESPONSE_TIMEOUT))

        if webhook_registration_response.status_code != HTTPStatus.CREATED:
            return False, get_http_error_resp(
                'LIGHTNING_CHARGE_WEBHOOK_REGISTRATION_ERROR')

    except requests.exceptions.RequestException:
        return False, get_http_error_resp(
            'LIGHTNING_CHARGE_WEBHOOK_REGISTRATION_ERROR')

    return True, invoice


def get_and_authenticate_invoice(lid, charged_auth_token):
    """Fetch an invoice by Lightning id and verify its webhook auth token.

    Returns:
        Pair of (success flag, invoice-or-error-response).
    """
    invoice = Invoice.query.filter_by(lid=lid).first()

    if invoice is None:
        return False, get_http_error_resp('INVOICE_ID_NOT_FOUND_ERROR', lid)

    db_invoice_charged_auth_token = hmac_sha256_digest(
        constants.LIGHTNING_WEBHOOK_KEY, invoice.lid)

    # Fix: compare authentication tokens in constant time to avoid leaking
    # token information through timing differences (the original used !=).
    if not hmac.compare_digest(db_invoice_charged_auth_token,
                               charged_auth_token):
        return False, get_http_error_resp('INVALID_AUTH_TOKEN')

    return True, invoice


def pay_invoice(invoice):
    """Mark an invoice as paid and possibly mark its order as paid.

    Returns:
        None on success, otherwise the HTTP error response pair.
    """
    if invoice.status == InvoiceStatus.paid.value:
        return get_http_error_resp('INVOICE_ALREADY_PAID')
    if invoice.status == InvoiceStatus.expired.value:
        return get_http_error_resp('INVOICE_ALREADY_EXPIRED')

    invoice.status = InvoiceStatus.paid.value
    invoice.paid_at = datetime.datetime.utcnow()
    db.session.commit()
    maybe_mark_order_as_paid(invoice.order_id)
    return None


def get_pending_invoices(order_id):
    """Return all pending invoices associated with the given order."""
    return Invoice.query.filter(
        and_(Invoice.order_id == order_id,
             Invoice.status == constants.InvoiceStatus.pending.value)).all()


def expire_invoice(invoice):
    """Transition a pending invoice to expired (no-op otherwise)."""
    if invoice.status == InvoiceStatus.pending.value:
        invoice.status = constants.InvoiceStatus.expired.value
        db.session.commit()


def expire_unpaid_invoices():
    """Expire unpaid invoices

    Expire any unpaid invoice that has reached its expiration time. The
    corresponding order may be auto-expired as a result.

    Returns:
        Tuple with the list of invoices and the list of orders that got
        expired by this function.

    """
    invoices_to_expire = Invoice.query.filter(
        and_(Invoice.status == constants.InvoiceStatus.pending.value,
             func.datetime(Invoice.expires_at)
             < datetime.datetime.utcnow())).all()
    expired_orders = []
    for invoice in invoices_to_expire:
        expire_invoice(invoice)
        expired_order = maybe_mark_order_as_expired(invoice.order_id)
        if (expired_order is not None):
            expired_orders.append(expired_order)
    return invoices_to_expire, expired_orders
permissions: 0600 56 | owner: root 57 | content: | 58 | ${v3_host} 59 | 60 | - path: /etc/systemd/system/decrypt.service 61 | permissions: 0644 62 | owner: root 63 | content: | 64 | [Unit] 65 | Description=decrypt secrets against KMS 66 | Wants=gcr-online.target 67 | After=gcr-online.target 68 | 69 | [Service] 70 | Type=oneshot 71 | RemainAfterExit=true 72 | Environment=HOME=/home/bs 73 | ExecStartPre=/usr/bin/docker-credential-gcr configure-docker 74 | ExecStartPre=/usr/bin/docker pull ${gcloud_docker} 75 | ExecStart=/usr/bin/docker run \ 76 | --name=decrypt \ 77 | --rm \ 78 | -v /home/bs/tor/hidden_service_v3/:/root/secrets:rw \ 79 | "${gcloud_docker}" gcloud kms decrypt \ 80 | --location=${kms_location} \ 81 | --keyring=${kms_key_ring} \ 82 | --key=${kms_key} \ 83 | --ciphertext-file=/root/secrets/hs_ed25519_public_key.enc \ 84 | --plaintext-file=/root/secrets/hs_ed25519_public_key 85 | ExecStart=/usr/bin/docker run \ 86 | --name=decrypt \ 87 | --rm \ 88 | -v /home/bs/tor/hidden_service_v3/:/root/secrets:rw \ 89 | "${gcloud_docker}" gcloud kms decrypt \ 90 | --location=${kms_location} \ 91 | --keyring=${kms_key_ring} \ 92 | --key=${kms_key} \ 93 | --ciphertext-file=/root/secrets/hs_ed25519_secret_key.enc \ 94 | --plaintext-file=/root/secrets/hs_ed25519_secret_key 95 | ExecStartPost=-/bin/rm /home/bs/tor/hidden_service_v3/hs_ed25519_public_key.enc /home/bs/tor/hidden_service_v3/hs_ed25519_public_key.enc.b64 96 | ExecStartPost=-/bin/rm /home/bs/tor/hidden_service_v3/hs_ed25519_secret_key.enc /home/bs/tor/hidden_service_v3/hs_ed25519_secret_key.enc.b64 97 | 98 | - path: /etc/systemd/system/tor.service 99 | permissions: 0644 100 | owner: root 101 | content: | 102 | [Unit] 103 | Description=tor node 104 | Wants=gcr-online.target docker.service decrypt.service 105 | After=gcr-online.service docker.service decrypt.service 106 | 107 | [Service] 108 | Restart=always 109 | RestartSec=1 110 | Environment=HOME=/home/bs 111 | ExecStartPre=/usr/bin/docker-credential-gcr 
configure-docker 112 | ExecStartPre=/usr/bin/docker pull ${tor_docker} 113 | ExecStartPre=-/bin/chown -R bs:bs /home/bs/tor/ 114 | ExecStartPre=-/bin/chmod -R 2700 /home/bs/tor/ 115 | ExecStartPre=-/bin/chmod 0644 /home/bs/tor/hidden_service_v3/hs_ed25519_public_key 116 | ExecStartPre=-/bin/chmod 0600 /home/bs/tor/hidden_service_v3/hs_ed25519_secret_key 117 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9050 -j ACCEPT 118 | ExecStart=/usr/bin/docker run \ 119 | --network=host \ 120 | --pid=host \ 121 | --name=tor \ 122 | --tmpfs /tmp/ \ 123 | -v /home/bs/torrc:/home/tor/torrc:ro \ 124 | -v /home/bs/tor:/home/tor/tor:rw \ 125 | "${tor_docker}" tor -f /home/tor/torrc 126 | ExecStop=/usr/bin/docker stop tor 127 | ExecStopPost=/usr/bin/docker rm tor 128 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9050 -j ACCEPT 129 | 130 | [Install] 131 | WantedBy=multi-user.target 132 | 133 | - path: /etc/systemd/system/node-exporter.service 134 | permissions: 0644 135 | owner: root 136 | content: | 137 | [Unit] 138 | Description=prometheus node-exporter 139 | Wants=gcr-online.target docker.service 140 | After=gcr-online.service docker.service 141 | 142 | [Service] 143 | Restart=always 144 | RestartSec=1 145 | Environment=HOME=/home/exec 146 | ExecStartPre=/usr/bin/docker-credential-gcr configure-docker 147 | ExecStartPre=/usr/bin/docker pull ${node_exporter_docker} 148 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9100 -j ACCEPT 149 | ExecStart=/usr/bin/docker run \ 150 | --name=node-exporter \ 151 | --network=host \ 152 | --read-only \ 153 | -v /proc:/host/proc:ro \ 154 | -v /sys:/host/sys:ro \ 155 | -v /:/rootfs:ro \ 156 | -v metrics:/metrics:ro \ 157 | -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro \ 158 | "${node_exporter_docker}" --path.procfs /host/proc --path.sysfs /host/sys --collector.textfile.directory /metrics --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc($|/))" --collector.systemd 
import datetime
import io
import json
import os
import random
import string
from http import HTTPStatus

from database import db
from models import Invoice, Order
from utils import hmac_sha256_digest
import bidding
import constants


def rnd_string(n_bytes):
    """Generate random string with given number of bytes"""
    return ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(n_bytes))


def upload_test_file(client, msg, bid, regions=None, channel=None,
                     admin=False):
    """Post a message transmission order to the API

    Args:
        client: Flask test client used to send API calls.
        msg: Message to be uploaded as a file.
        bid: Bid in millisatoshis.
        regions: Optional list of regions for the transmission.
        channel: Optional logical channel on which to transmit.
        admin: Whether to post the order via the /admin/order route.

    Returns:
        The Flask response object.

    """
    # Use None (not a mutable []) as the default for regions to avoid the
    # shared-mutable-default pitfall
    regions = [] if regions is None else regions
    post_data = {'bid': bid, 'file': (io.BytesIO(msg.encode()), 'testfile')}

    if len(regions) > 0:
        post_data['regions'] = [regions]
    if channel:
        post_data['channel'] = channel
    endpoint = '/admin/order' if admin else '/order'

    return client.post(endpoint,
                       data=post_data,
                       content_type='multipart/form-data')


def place_order(client,
                n_bytes,
                regions=None,
                bid=None,
                channel=None,
                admin=False):
    """Place an order carrying a random message of the given size

    If no bid is provided, the minimum valid bid for the message size is used.

    Returns:
        The Flask response object of the order upload.

    """
    if bid is None:
        bid = bidding.get_min_bid(n_bytes)
    msg = rnd_string(n_bytes)
    return upload_test_file(client, msg, bid, regions, channel, admin)


def check_upload(order_uuid, expected_data):
    """Assert that the uploaded message was stored correctly on disk and DB"""
    path = os.path.join(constants.MSG_STORE_PATH, order_uuid)
    assert os.path.exists(path)

    with open(path) as fd:
        upload_data = fd.read()
    assert upload_data == expected_data

    db_order = Order.query.filter_by(uuid=order_uuid).first()
    assert db_order is not None


def check_invoice(generated_invoice, order_uuid):
    """Assert that the invoice is correctly associated with the order"""
    db_order = Order.query.filter_by(uuid=order_uuid).first()
    assert db_order is not None
    db_invoice = \
        Invoice.query.filter_by(lid=generated_invoice['id']).first()
    assert db_invoice is not None
    assert db_invoice.order_id == db_order.id
    assert db_invoice.amount == db_order.unpaid_bid


def pay_invoice(invoice, client):
    """Pay the given invoice through the payment callback endpoint"""
    charged_auth_token = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY,
                                            invoice.lid)
    rv = client.post(f'/callback/{invoice.lid}/{charged_auth_token}')
    assert rv.status_code == HTTPStatus.OK


def confirm_tx(tx_seq_num, regions, client):
    """Confirm the transmission with the given sequence number"""
    tx_rv = client.post(f'/order/tx/{tx_seq_num}', data={'regions': [regions]})
    assert tx_rv.status_code == HTTPStatus.OK


def new_invoice(order_id, invoice_status, amount):
    """Build a test Invoice object with a random Lightning id

    Args:
        order_id: Id of the order to which the invoice belongs.
        invoice_status: constants.InvoiceStatus to be set on the invoice.
        amount: Invoice amount in millisatoshis.

    Returns:
        The new Invoice object (not yet committed to the database).

    """
    assert (isinstance(invoice_status, constants.InvoiceStatus))
    lid = rnd_string(50)
    return Invoice(
        id=random.randint(1, 1000000),
        lid=lid,
        invoice=json.dumps({
            "id":
            lid,
            "msatoshi":
            amount,
            "description":
            "BSS Test",
            "rhash":
            "94855ac3b06543",
            "payreq":
            "lntb100n1psfy",
            "expires_at":
            str(datetime.datetime.utcnow() +
                datetime.timedelta(seconds=constants.LN_INVOICE_EXPIRY)),
            "created_at":
            str(datetime.datetime.utcnow()),
            "metadata": {
                "uuid": "7f9a5b81-5358-4be0-9af6-b8c6fbac9dcd",
                "sha256_message_digest":
                "a591a6d40bf420404a011733cfb7b190d62c6"
            },
            "status":
            "unpaid"
        }),
        order_id=order_id,
        status=invoice_status.value,
        amount=amount,
        expires_at=datetime.datetime.utcnow() +
        datetime.timedelta(seconds=constants.LN_INVOICE_EXPIRY))


def generate_test_order(mock_new_invoice,
                        client,
                        order_status=None,
                        invoice_status=constants.InvoiceStatus.pending,
                        tx_seq_num=None,
                        n_bytes=500,
                        bid=None,
                        order_id=1,
                        regions=None,
                        started_transmission_at=None,
                        channel=None,
                        admin=False):
    """Generate a valid order and add it to the database

    This function generates an order with a related invoice with
    given parameters and stores them in the database.

    Args:
        mock_new_invoice: A python mock for simulation
            orders.new_invoice function
        client: Flask client used to send api calls
        order_status: status to be set for the generated order,
            default input value is None but in the
            database it will be set to pending
        invoice_status: status to be set for the generated invoice,
            default is pending
        tx_seq_num: tx_seq_num value to be set for the generated
            order, default value is None
        n_bytes: length of generated message
        bid: amount of bid, default value is None, if None a minimum
            valid value will be set
        order_id: the id to be used when connecting invoice to an
            order, default value is 1
        regions: list of regions over which this order should be
            transmitted. The default value is None (empty list) implying
            the order should be sent over all regions.
        started_transmission_at: timestamp to be set as the order's
            transmission start, default value is None
        channel: Logical channel on which to transmit the order.
        admin: Whether to post the order via the /admin/order route.

    Returns:
        The json response of the create order endpoint.

    """
    assert (isinstance(invoice_status, constants.InvoiceStatus))

    if not bid:
        bid = bidding.get_min_bid(n_bytes)

    mock_new_invoice.return_value = (True,
                                     new_invoice(order_id, invoice_status,
                                                 bid))
    post_rv = place_order(client, n_bytes, regions, bid, channel, admin)
    assert post_rv.status_code == HTTPStatus.OK
    uuid = post_rv.get_json()['uuid']
    # Set order's sequence number and status
    db_order = Order.query.filter_by(uuid=uuid).first()

    if order_status:
        assert (isinstance(order_status, constants.OrderStatus))
        db_order.status = order_status.value

    if tx_seq_num:
        db_order.tx_seq_num = tx_seq_num

    if started_transmission_at:
        db_order.started_transmission_at = started_transmission_at

    db.session.commit()
    return post_rv.get_json()
satellite-api 36 | 37 | scrape_configs: 38 | - job_name: prometheus 39 | relabel_configs: 40 | - source_labels: 41 | - '__meta_gce_label_network' 42 | target_label: 'network' 43 | - source_labels: 44 | - '__meta_gce_label_name' 45 | target_label: 'name' 46 | - source_labels: 47 | - '__meta_gce_instance_name' 48 | target_label: 'instance_name' 49 | gce_sd_configs: 50 | - project: satellite-api 51 | filter: (labels.type = "prometheus") 52 | zone: us-west1-a 53 | port: 9100 54 | - project: satellite-api 55 | filter: (labels.type = "prometheus") 56 | zone: us-west1-b 57 | port: 9100 58 | - project: satellite-api 59 | filter: (labels.type = "prometheus") 60 | zone: us-west1-c 61 | port: 9100 62 | 63 | - job_name: satellite-api-node-exporter 64 | relabel_configs: 65 | - source_labels: 66 | - '__meta_gce_label_network' 67 | target_label: 'network' 68 | - source_labels: 69 | - '__meta_gce_label_name' 70 | target_label: 'name' 71 | - source_labels: 72 | - '__meta_gce_instance_name' 73 | target_label: 'instance_name' 74 | gce_sd_configs: 75 | - project: satellite-api 76 | zone: us-west1-a 77 | port: 9100 78 | - project: satellite-api 79 | zone: us-west1-b 80 | port: 9100 81 | - project: satellite-api 82 | zone: us-west1-c 83 | port: 9100 84 | 85 | - job_name: iridium-analyzer-node-exporter 86 | relabel_configs: 87 | - source_labels: 88 | - '__meta_gce_label_network' 89 | target_label: 'network' 90 | - source_labels: 91 | - '__meta_gce_label_name' 92 | target_label: 'name' 93 | - source_labels: 94 | - '__meta_gce_instance_name' 95 | target_label: 'instance_name' 96 | gce_sd_configs: 97 | - project: satellite-api 98 | zone: us-east1-b 99 | port: 9100 100 | - project: satellite-api 101 | zone: us-east1-c 102 | port: 9100 103 | - project: satellite-api 104 | zone: us-east1-d 105 | port: 9100 106 | 107 | - job_name: lightningd 108 | relabel_configs: 109 | - source_labels: 110 | - '__meta_gce_label_network' 111 | target_label: 'network' 112 | - source_labels: 113 | - 
'__meta_gce_label_name' 114 | target_label: 'name' 115 | - source_labels: 116 | - '__meta_gce_instance_name' 117 | target_label: 'instance_name' 118 | gce_sd_configs: 119 | - project: satellite-api 120 | filter: (labels.type = "lightning-app-blc") 121 | zone: us-west1-a 122 | port: 9750 123 | - project: satellite-api 124 | filter: (labels.type = "lightning-app-blc") 125 | zone: us-west1-b 126 | port: 9750 127 | - project: satellite-api 128 | filter: (labels.type = "lightning-app-blc") 129 | zone: us-west1-c 130 | port: 9750 131 | 132 | - path: /etc/systemd/system/prometheus.service 133 | permissions: 0644 134 | owner: root 135 | content: | 136 | [Unit] 137 | Description=prometheus-server instance 138 | Wants=gcr-online.target docker.service 139 | After=gcr-online.service docker.service 140 | 141 | [Service] 142 | Restart=always 143 | RestartSec=1 144 | Environment=HOME=/home/bs 145 | ExecStartPre=/usr/bin/docker pull ${prom_docker} 146 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9090 -j ACCEPT 147 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 80 -j ACCEPT 148 | ExecStartPre=/sbin/iptables -A PREROUTING -t nat -p tcp --dport 80 -j REDIRECT --to-port 9090 149 | ExecStart=/usr/bin/docker run \ 150 | --network=host \ 151 | -v /mnt/disks/data:/data:rw \ 152 | -v /home/bs/prometheus:/config:ro \ 153 | --read-only \ 154 | --name prometheus \ 155 | "${prom_docker}" --config.file=/config/prometheus.yml --web.enable-lifecycle --web.enable-admin-api --storage.tsdb.path=/data/metrics --storage.tsdb.retention=${retention} 156 | ExecStop=/usr/bin/docker stop prometheus 157 | ExecStopPost=-/usr/bin/docker rm prometheus 158 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9090 -j ACCEPT 159 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 80 -j ACCEPT 160 | ExecStopPost=/sbin/iptables -D PREROUTING -t nat -p tcp --dport 80 -j REDIRECT --to-port 9090 161 | 162 | [Install] 163 | WantedBy=multi-user.target 164 | 165 | - path: 
/etc/systemd/system/node-exporter.service 166 | permissions: 0644 167 | owner: root 168 | content: | 169 | [Unit] 170 | Description=prometheus node-exporter 171 | Wants=gcr-online.target docker.service 172 | After=gcr-online.service docker.service 173 | 174 | [Service] 175 | Restart=always 176 | RestartSec=1 177 | Environment=HOME=/home/bs 178 | ExecStartPre=/usr/bin/docker pull ${node_exporter_docker} 179 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9100 -j ACCEPT 180 | ExecStart=/usr/bin/docker run \ 181 | --name=node-exporter \ 182 | --network=host \ 183 | --read-only \ 184 | -v /proc:/host/proc:ro \ 185 | -v /sys:/host/sys:ro \ 186 | -v /:/rootfs:ro \ 187 | -v metrics:/metrics:ro \ 188 | -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro \ 189 | "${node_exporter_docker}" --path.procfs /host/proc --path.sysfs /host/sys --collector.textfile.directory /metrics --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc($|/))" --collector.systemd 190 | ExecStop=/usr/bin/docker stop node-exporter 191 | ExecStopPost=-/usr/bin/docker rm node-exporter 192 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9100 -j ACCEPT 193 | 194 | [Install] 195 | WantedBy=multi-user.target 196 | 197 | runcmd: 198 | - /bin/mkdir -p /mnt/disks/data/metrics 199 | - /bin/chown nobody:nobody /mnt/disks/data/metrics 200 | - systemctl daemon-reload 201 | - systemctl enable --now prometheus.service 202 | - systemctl enable --now node-exporter.service 203 | -------------------------------------------------------------------------------- /server/templates/queue.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Satellite API Queues 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 52 |
53 |
54 |
55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 |
CreatedBid per Byte (millisatoshis)Message SizeUnpaid BidOrder IDStatusTransmission Started
68 |
69 |
70 |
71 |
72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 |
CreatedBid per Byte (millisatoshis)Message SizeUnpaid BidOrder IDStatus
84 |
85 |
86 |
87 |
88 | 89 | 90 | 91 | 92 | 93 | {% if env == "development" %} 94 | 95 | {% endif %} 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 |
CreatedBid per Byte (millisatoshis)MessageMessage SizeUnpaid BidOrder IDStatusTransmission StartedTransmission Ended
105 |
106 |
107 |
108 | 109 | 170 | 171 | 172 | 173 | -------------------------------------------------------------------------------- /server/tests/test_invoices.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from http import HTTPStatus 3 | from unittest.mock import patch 4 | 5 | from database import db 6 | from constants import InvoiceStatus, OrderStatus 7 | from error import assert_error 8 | from models import Invoice, Order 9 | from utils import hmac_sha256_digest 10 | import bidding 11 | import constants 12 | import server 13 | 14 | from common import new_invoice, \ 15 | place_order, rnd_string, upload_test_file 16 | 17 | 18 | @pytest.fixture 19 | def client(mockredis): 20 | app = server.create_app(from_test=True) 21 | app.app_context().push() 22 | with app.test_client() as client: 23 | yield client 24 | server.teardown_app(app) 25 | 26 | 27 | def test_paid_invoice_callback_parameter_validation(client): 28 | rv = client.post('/callback') 29 | assert rv.status_code == HTTPStatus.NOT_FOUND 30 | 31 | rv = client.post('/callback/test_lid') 32 | assert rv.status_code == HTTPStatus.NOT_FOUND 33 | 34 | rv = client.get('/callback/test_lid/test_auth_token') 35 | assert rv.status_code == HTTPStatus.METHOD_NOT_ALLOWED 36 | 37 | 38 | @patch('orders.new_invoice') 39 | def test_paid_invoice_callback_invalid_input(mock_new_invoice, client): 40 | n_bytes = 500 41 | invoice = new_invoice(1, InvoiceStatus.paid, bidding.get_min_bid(n_bytes)) 42 | mock_new_invoice.return_value = (True, invoice) 43 | post_rv = place_order(client, n_bytes) 44 | assert post_rv.status_code == HTTPStatus.OK 45 | 46 | charged_auth_token = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 47 | invoice.lid) 48 | # invalid lid 49 | rv = client.post(f'/callback/some_text/{charged_auth_token}') 50 | assert rv.status_code == HTTPStatus.NOT_FOUND 51 | 52 | # invalid auth token 53 | rv = client.post(f'/callback/{invoice.lid}/some_text') 54 | assert rv.status_code == 
HTTPStatus.UNAUTHORIZED 55 | 56 | 57 | @patch('orders.new_invoice') 58 | def test_paid_invoice_callback_orphaned_invoice(mock_new_invoice, client): 59 | n_bytes = 500 60 | invoice = new_invoice(1, InvoiceStatus.paid, bidding.get_min_bid(n_bytes)) 61 | mock_new_invoice.return_value = (True, invoice) 62 | post_rv = place_order(client, n_bytes) 63 | assert post_rv.status_code == HTTPStatus.OK 64 | 65 | db_invoice = Invoice.query.filter_by(order_id=1).first() 66 | # nullify the order_id of an invoice to mimic an orphaned invoice 67 | db_invoice.order_id = None 68 | db.session.commit() 69 | 70 | charged_auth_token = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 71 | invoice.lid) 72 | 73 | rv = client.post(f'/callback/{invoice.lid}/{charged_auth_token}') 74 | assert rv.status_code == HTTPStatus.NOT_FOUND 75 | assert_error(rv.get_json(), 'ORPHANED_INVOICE') 76 | 77 | 78 | @patch('orders.new_invoice') 79 | def test_paid_invoice_callback_pay_twice(mock_new_invoice, client): 80 | n_bytes = 500 81 | invoice = new_invoice(1, InvoiceStatus.paid, bidding.get_min_bid(n_bytes)) 82 | mock_new_invoice.return_value = (True, invoice) 83 | post_rv = place_order(client, n_bytes) 84 | assert post_rv.status_code == HTTPStatus.OK 85 | 86 | charged_auth_token = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 87 | invoice.lid) 88 | rv = client.post(f'/callback/{invoice.lid}/{charged_auth_token}') 89 | assert rv.status_code == HTTPStatus.BAD_REQUEST 90 | assert_error(rv.get_json(), 'INVOICE_ALREADY_PAID') 91 | 92 | 93 | @patch('orders.new_invoice') 94 | def test_paid_invoice_callback_successfully(mock_new_invoice, client): 95 | n_bytes = 500 96 | invoice = new_invoice(1, InvoiceStatus.pending, 97 | bidding.get_min_bid(n_bytes)) 98 | mock_new_invoice.return_value = (True, invoice) 99 | post_rv = place_order(client, n_bytes) 100 | uuid_order = post_rv.get_json()['uuid'] 101 | assert post_rv.status_code == HTTPStatus.OK 102 | 103 | charged_auth_token = 
hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 104 | invoice.lid) 105 | rv = client.post(f'/callback/{invoice.lid}/{charged_auth_token}') 106 | assert rv.status_code == HTTPStatus.OK 107 | 108 | # refetch the order and the invoice from the database 109 | db_invoice = Invoice.query.filter_by(lid=invoice.lid).first() 110 | db_order = Order.query.filter_by(uuid=uuid_order).first() 111 | assert db_invoice.status == InvoiceStatus.paid.value 112 | assert db_order.status == OrderStatus.transmitting.value 113 | assert db_invoice.paid_at is not None 114 | 115 | 116 | @patch('orders.new_invoice') 117 | def test_pay_multiple_invoices(mock_new_invoice, client): 118 | n_bytes = 500 119 | first_bid = 1000 120 | total_bid = first_bid 121 | msg = rnd_string(n_bytes) 122 | invoice1 = new_invoice(1, InvoiceStatus.pending, first_bid) 123 | charged_auth_token1 = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 124 | invoice1.lid) 125 | mock_new_invoice.return_value = (True, invoice1) 126 | 127 | rv = upload_test_file(client, msg, first_bid) 128 | assert rv.status_code == HTTPStatus.OK 129 | post_json_resp = rv.get_json() 130 | uuid = post_json_resp['uuid'] 131 | auth_token = post_json_resp['auth_token'] 132 | 133 | # Bump the bid on the existing order 134 | second_bid = 2000 135 | total_bid += second_bid 136 | invoice2 = new_invoice(1, InvoiceStatus.pending, second_bid) 137 | charged_auth_token2 = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 138 | invoice2.lid) 139 | mock_new_invoice.return_value = (True, invoice2) 140 | bump_rv = client.post(f'/order/{uuid}/bump', 141 | data={ 142 | 'bid_increase': second_bid, 143 | }, 144 | headers={'X-Auth-Token': auth_token}) 145 | assert bump_rv.status_code == HTTPStatus.OK 146 | 147 | # Bump again 148 | third_bid = 3000 149 | total_bid += third_bid 150 | invoice3 = new_invoice(1, InvoiceStatus.pending, third_bid) 151 | charged_auth_token3 = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY, 152 | invoice3.lid) 153 | 
mock_new_invoice.return_value = (True, invoice3) 154 | bump_rv = client.post(f'/order/{uuid}/bump', 155 | data={ 156 | 'bid_increase': third_bid, 157 | }, 158 | headers={'X-Auth-Token': auth_token}) 159 | assert bump_rv.status_code == HTTPStatus.OK 160 | 161 | db_order = Order.query.filter_by(uuid=uuid).first() 162 | assert db_order.unpaid_bid == total_bid 163 | 164 | # pay the first invoice 165 | rv = client.post(f'/callback/{invoice1.lid}/{charged_auth_token1}') 166 | assert rv.status_code == HTTPStatus.OK 167 | # refetch the order and the invoice from the database 168 | db_invoice = Invoice.query.filter_by(lid=invoice1.lid).first() 169 | db_order = Order.query.filter_by(uuid=uuid).first() 170 | assert db_invoice.status == InvoiceStatus.paid.value 171 | assert db_invoice.paid_at is not None 172 | assert db_order.status == OrderStatus.transmitting.value 173 | assert db_order.bid == first_bid 174 | assert db_order.unpaid_bid == total_bid - first_bid 175 | 176 | # pay the second invoice 177 | rv = client.post(f'/callback/{invoice2.lid}/{charged_auth_token2}') 178 | assert rv.status_code == HTTPStatus.OK 179 | # refetch the order and the invoice from the database 180 | db_invoice = Invoice.query.filter_by(lid=invoice2.lid).first() 181 | db_order = Order.query.filter_by(uuid=uuid).first() 182 | assert db_invoice.status == InvoiceStatus.paid.value 183 | assert db_invoice.paid_at is not None 184 | assert db_order.status == OrderStatus.transmitting.value 185 | assert db_order.bid == first_bid + second_bid 186 | assert db_order.unpaid_bid == total_bid - first_bid - second_bid 187 | 188 | # pay the last invoice 189 | rv = client.post(f'/callback/{invoice3.lid}/{charged_auth_token3}') 190 | assert rv.status_code == HTTPStatus.OK 191 | # refetch the order and the invoice from database 192 | db_invoice = Invoice.query.filter_by(lid=invoice3.lid).first() 193 | db_order = Order.query.filter_by(uuid=uuid).first() 194 | assert db_invoice.status == InvoiceStatus.paid.value 195 | 
@patch('orders.new_invoice')
def test_try_to_pay_an_expired_invoice(mock_new_invoice, client):
    """An expired invoice must be rejected and leave the order untouched"""
    n_bytes = 500
    invoice = new_invoice(1, InvoiceStatus.expired,
                          bidding.get_min_bid(n_bytes))
    mock_new_invoice.return_value = (True, invoice)
    post_rv = place_order(client, n_bytes)
    assert post_rv.status_code == HTTPStatus.OK
    uuid_order = post_rv.get_json()['uuid']

    charged_auth_token = hmac_sha256_digest(constants.LIGHTNING_WEBHOOK_KEY,
                                            invoice.lid)
    rv = client.post(f'/callback/{invoice.lid}/{charged_auth_token}')
    assert rv.status_code == HTTPStatus.BAD_REQUEST
    assert_error(rv.get_json(), 'INVOICE_ALREADY_EXPIRED')

    # refetch the order and the invoice from the database
    # expectation is that none of them change their status
    db_invoice = Invoice.query.filter_by(lid=invoice.lid).first()
    db_order = Order.query.filter_by(uuid=uuid_order).first()
    assert db_invoice.status == InvoiceStatus.expired.value
    # Bug fix: the order status must be compared against the OrderStatus
    # enum (not InvoiceStatus) — the order should remain pending.
    assert db_order.status == OrderStatus.pending.value
    assert db_invoice.paid_at is None
6 | 7 | 8 | 9 | - [Satellite API](#satellite-api) 10 | - [Setup](#setup) 11 | - [Run](#run) 12 | - [Example Applications](#example-applications) 13 | - [REST API](#rest-api) 14 | - [POST /order](#post-order) 15 | - [POST /order/:uuid/bump](#post-orderuuidbump) 16 | - [GET /order/:uuid](#get-orderuuid) 17 | - [DELETE /order/:uuid](#delete-orderuuid) 18 | - [GET /orders/:state](#get-ordersstate) 19 | - [GET /message/:seq\_num](#get-messageseq_num) 20 | - [GET /info](#get-info) 21 | - [GET /subscribe/:channels](#get-subscribechannels) 22 | - [Queue Page](#queue-page) 23 | - [Future Work](#future-work) 24 | 25 | 26 | 27 | ## Setup 28 | 29 | The Satellite API comprises a RESTful API server and a transmitter daemon. The API server speaks JSON and is used for creating and managing message transmission orders and for processing lightning-charge payment callbacks. The transmitter daemon continuously dequeues paid messages and coordinates the corresponding satellite transmissions. 30 | 31 | The Blockstream Satellite API is dependent on [lightning-charge](https://github.com/ElementsProject/lightning-charge), which itself is dependent on [c-lightning](https://github.com/ElementsProject/lightning) and [bitcoin](https://github.com/bitcoin/bitcoin). The Satellite API server communicates with the Bitcoin Lightning Charge (BLC) stack to handle the Bitcoin Lightning payment required for each transmission order. 32 | 33 | ## Run ## 34 | 35 | A docker-compose script is available to bring up the Satellite API server, the transmitter daemon, and the other dependencies (BLC and Redis). To launch the container stack, run: 36 | 37 | ``` 38 | docker-compose up 39 | ``` 40 | 41 | ## Example Applications 42 | 43 | The Blockstream Satellite command-line interface (CLI) has commands to submit messages to the Satellite API for global broadcasting. It also has commands to receive those messages through an actual satellite receiver or a simulated/demo receiver for testing. 
Please refer to the [CLI documentation](https://blockstream.github.io/satellite/doc/api.html). Alternatively, if you are interested in implementing the communication with the Satellite API from scratch, the referred CLI can be used as a reference. The source code is available on the [Satellite repository](https://github.com/Blockstream/satellite/tree/master/blocksatcli/api). 44 | 45 | ## REST API ## 46 | 47 | Each call to an API endpoint responds with a JSON object, whether the call is successful or results in an error. 48 | 49 | The code samples below assume that you've set `SATELLITE_API` in your shell to the public base URL of your server. 50 | 51 | ### POST /order ### 52 | 53 | Place an order for a message transmission. The body of the POST must provide a `bid` in millisatoshis and a message, provided either as a `message` parameter string or as an HTTP form-based `file` upload. If the bid is below an allowed minimum millisatoshis per byte, a `BID_TOO_SMALL` (102) error is returned. 54 | 55 | For example, to place an order to transmit the message "Hello world" with an initial bid of 10,000 millisatoshi, issue an HTTP POST request like this: 56 | 57 | ```bash 58 | curl -F "bid=10000" -F "message=Hello World" $SATELLITE_API/order 59 | ``` 60 | 61 | Or, to place an order to transmit the file `hello_world.png` with an initial bid of 10,000 millisatoshi, issue an HTTP POST request like this: 62 | 63 | ```bash 64 | curl -F "bid=10000" -F "file=@/path/to/upload/file/hello_world.png" $SATELLITE_API/order 65 | ``` 66 | 67 | If successful, the response includes the JSON Lightning invoice as returned by Lightning Charge's [POST /invoice](https://github.com/ElementsProject/lightning-charge#post-invoice) and an authentication token that can be used to modify the order. Within the metadata of the Lightning invoice, metadata is included providing: the bid (in millisatoshis), the SHA256 digest of the uploaded message file, and a UUID for the order. 
68 | 69 | ```bash 70 | {"auth_token":"d784e322dad7ec2671086ce3ad94e05108f2501180d8228577fbec4115774750","uuid":"409348bc-6af0-4999-b715-4136753979df","lightning_invoice":{"id":"N0LOTYc9j0gWtQVjVW7pK","msatoshi":"514200","description":"BSS Test","rhash":"5e5c9d111bc76ce4bf9b211f12ca2d9b66b81ae9839b4e530b16cedbef653a3a","payreq":"lntb5142n1pd78922pp5tewf6ygmcakwf0umyy039j3dndntsxhfswd5u5ctzm8dhmm98gaqdqdgff4xgz5v4ehgxqzjccqp286gfgrcpvzl04sdg2f9sany7ptc5aracnd6kvr2nr0e0x5ajpmfhsjkqzw679ytqgnt6w4490jjrgcvuemz790salqyz9far68cpqtgq3q23el","expires_at":1541642146,"created_at":1541641546,"metadata":{"sha256_message_digest":"0e2bddf3bba1893b5eef660295ef12d6fc72870da539c328cf24e9e6dbb00f00","uuid":"409348bc-6af0-4999-b715-4136753979df"},"status":"unpaid"}} 71 | ``` 72 | 73 | The error codes that can be returned by this endpoint include `BID_TOO_SMALL` (102), `MESSAGE_FILE_TOO_SMALL` (117), `MESSAGE_FILE_TOO_LARGE` (118), `MESSAGE_MISSING` (126), and `ORDER_CHANNEL_UNAUTHORIZED_OP` (130). 74 | 75 | ### POST /order/:uuid/bump ### 76 | 77 | Increase the bid for an order sitting in the transmission queue. The `bid_increase` must be provided in the body of the POST. A Lightning invoice is returned for it and, when it is paid, the increase is added to the current bid. An `auth_token` must also be provided. For example, to increase the bid on the order placed above by 100,000 millisatoshis, issue a POST like this: 78 | 79 | ```bash 80 | curl -v -F "bid_increase=100000" -F "auth_token=d784e322dad7ec2671086ce3ad94e05108f2501180d8228577fbec4115774750" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df/bump 81 | ``` 82 | 83 | Response object is in the same format as for `POST /order`. 84 | 85 | As shown below for DELETE, the `auth_token` may alternatively be provided using the `X-Auth-Token` HTTP header. 86 | 87 | The error codes that can be returned by this endpoint include `INVALID_AUTH_TOKEN` (109), `ORDER_NOT_FOUND` (104), and `ORDER_CHANNEL_UNAUTHORIZED_OP` (130). 
88 | 89 | ### GET /order/:uuid ### 90 | 91 | Retrieve an order by UUID. Must provide the corresponding auth token to prove that it is yours. 92 | 93 | ```bash 94 | curl -v -H "X-Auth-Token: 5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df 95 | ``` 96 | 97 | The error codes that can be returned by this endpoint include `INVALID_AUTH_TOKEN` (109), `ORDER_NOT_FOUND` (104), and `ORDER_CHANNEL_UNAUTHORIZED_OP` (130). 98 | 99 | ### DELETE /order/:uuid ### 100 | 101 | To cancel an order, issue an HTTP DELETE request to the API endpoint `/order/:uuid/` providing the UUID of the order. An `auth_token` must also be provided. For example, to cancel the order above, issue a request like this: 102 | 103 | ```bash 104 | curl -v -X DELETE -F "auth_token=5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df 105 | ``` 106 | 107 | The `auth_token` may be provided as a parameter in the DELETE body as above or may be provided using the `X-Auth-Token` HTTP header, like this: 108 | 109 | ```bash 110 | curl -v -X DELETE -H "X-Auth-Token: 5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df 111 | ``` 112 | 113 | Error codes that can be returned by this endpoint include: `INVALID_AUTH_TOKEN` (109), `ORDER_NOT_FOUND` (104), `ORDER_CANCELLATION_ERROR` (120). 114 | 115 | ### GET /orders/:state ### 116 | 117 | Retrieve a list of up to 20 orders in a given state. The following states are supported: 118 | 119 | | State | Description | 120 | | ---------------- | ---------------------------------------------------------------------------------------------------------- | 121 | | `pending` | Orders waiting for payment. Sorted by creation time. | 122 | | `paid` | Orders already paid and waiting for transmission. Sorted by creation time. 
| 123 | | `transmitting` | Orders being transmitted over satellite. Sorted by the transmission start time. | 124 | | `confirming` | Orders whose transmissions are being confirmed (almost finished). Sorted by the transmission start time. | 125 | | `queued` | Combination of orders in `paid`, `transmitting`, and `confirming` state. Sorted by the order creation time | 126 | | `sent` | Orders already transmitted. Sorted by the transmission end time. | 127 | | `rx-pending` | Orders already transmitted but with pending Rx confirmations. Sorted by the transmission end time. | 128 | | `retransmitting` | Orders under retransmission in one or more regions. Sorted by the time of the last retransmission attempt. | 129 | | `received` | Orders completely transmitted and received in all targeted regions. Sorted by the transmission end time. | 130 | 131 | For example: 132 | ```bash 133 | curl $SATELLITE_API/orders/pending 134 | ``` 135 | 136 | For pagination or time filtering, optionally specify the `before` and/or `after` parameters (in ISO 8601 format) so that only orders in that time range are returned. 137 | 138 | ```bash 139 | curl $SATELLITE_API/orders/pending\?after=2023-02-10T00:00:00\&before=2023-02-10T23:59:59 140 | ``` 141 | 142 | Alternatively, specify the time range based on deltas in seconds relative to the current time. For instance, the following example returns the pending orders created within a window that starts two minutes ago and ends one minute ago. 143 | 144 | ```bash 145 | curl $SATELLITE_API/orders/pending\?after_delta=120\&before_delta=60 146 | ``` 147 | 148 | The response is a JSON array of records (one for each queued message). The revealed fields for each record include: `uuid`, `bid`, `bid_per_byte`, `message_size`, `message_digest`, `status`, `created_at`, `started_transmission_at`, and `ended_transmission_at`. 149 | 150 | ### GET /message/:seq_num 151 | 152 | Retrieve a transmitted message by its unique sequence number. 
For example: 153 | 154 | ```bash 155 | curl -v $SATELLITE_API/message/3 156 | ``` 157 | 158 | The error codes that can be returned by this endpoint include `SEQUENCE_NUMBER_NOT_FOUND` (114) and `ORDER_CHANNEL_UNAUTHORIZED_OP` (130). 159 | 160 | ### GET /info 161 | 162 | Returns information about the c-lightning node where satellite API payments are terminated. The response is a JSON object consisting of the node ID, port, IP addresses, and other information useful for opening payment channels. For example: 163 | 164 | ```bash 165 | {"id":"032c6ba19a2141c5fee6ac8b6ff6cf24456fd4e8e206716a39af3300876c3a4835","port":42259,"address":[],"version":"v0.5.2-2016-11-21-1937-ge97ee3d","blockheight":434,"network":"regtest"} 166 | ``` 167 | 168 | ### GET /subscribe/:channels 169 | 170 | Subscribe to one or more [server-sent events](https://en.wikipedia.org/wiki/Server-sent_events) channels. The `channels` parameter is a comma-separated list of event channels. Currently, the following channels are available: `transmissions`, `auth`, `gossip`, and `btc-src`. An event is broadcast on a channel each time a message transmission begins and ends on that channel. The event data consists of the order's JSON representation, including its current status. 171 | 172 | ```bash 173 | curl $SATELLITE_API/subscribe/:channels 174 | ``` 175 | 176 | ### Queue Page ### 177 | 178 | A simple table view of queued, pending and sent messages is available at `$SATELLITE_API/queue.html`. This page can be used for debugging and as an example for building a web front-end to the satellite API. 179 | 180 | ## Future Work ## 181 | 182 | * Configure `Flask-Limiter` or similar to block and throttle abusive requests. 183 | * Support bids priced in fiat currencies. 184 | * Report the top `bid_per_byte`, queue depth, and estimated time to transmit in the response of `POST /order`. 
--------------------------------------------------------------------------------
/server/tests/test_order_helpers.py:
--------------------------------------------------------------------------------
"""Tests for the order_helpers module.

Covers expiration of stale pending orders, cleanup of old message files,
single-order expiration checks, retransmission bookkeeping (TxRetry), and
the missing-Tx-confirmation queries.
"""
import os
import pytest
from datetime import datetime, timedelta
from http import HTTPStatus
from unittest.mock import patch

from constants import EXPIRE_PENDING_ORDERS_AFTER_DAYS, \
    InvoiceStatus, MESSAGE_FILE_RETENTION_TIME_DAYS, \
    OrderStatus, MSG_STORE_PATH
from database import db
from models import Order, TxRetry
from regions import Regions, region_number_list_to_code, region_number_to_id
import order_helpers
import server

from common import generate_test_order


@pytest.fixture
def client():
    """Yield a Flask test client backed by a fresh test app.

    The app context is pushed so that DB queries (Order.query, etc.) work
    inside the tests; teardown_app() runs after the client is closed.
    """
    app = server.create_app(from_test=True)
    app.app_context().push()
    with app.test_client() as client:
        yield client
    server.teardown_app(app)


@patch('orders.new_invoice')
def test_expire_pending_orders(mock_new_invoice, client):
    """Only pending orders older than the expiration window get expired."""
    to_be_expired_order_uuid = generate_test_order(mock_new_invoice,
                                                   client)['uuid']
    to_be_expired_db_order = Order.query.filter_by(
        uuid=to_be_expired_order_uuid).first()
    # Backdate the order past the expiration threshold
    to_be_expired_db_order.created_at = datetime.utcnow() - \
        timedelta(days=EXPIRE_PENDING_ORDERS_AFTER_DAYS
                  + 1)
    db.session.commit()

    pending_not_yet_expired_uuid = generate_test_order(mock_new_invoice,
                                                       client,
                                                       order_id=2)['uuid']

    expired_orders = order_helpers.expire_old_pending_orders()
    assert len(expired_orders) == 1
    assert expired_orders[0].uuid == to_be_expired_order_uuid

    # refetch and check
    # expectation is that the order gets expired
    to_be_expired_db_order = Order.query.filter_by(
        uuid=to_be_expired_order_uuid).first()
    assert to_be_expired_db_order.status == OrderStatus.expired.value

    # The pending order whose expiration time has not been reached yet should
    # not get expired
    pending_not_yet_expired_db_order = Order.query.filter_by(
        uuid=pending_not_yet_expired_uuid).first()
    assert pending_not_yet_expired_db_order.status == OrderStatus.pending.value


@patch('orders.new_invoice')
@pytest.mark.parametrize("status", [
    OrderStatus.paid, OrderStatus.transmitting, OrderStatus.sent,
    OrderStatus.received, OrderStatus.cancelled, OrderStatus.expired
])
def test_expire_non_pending_orders(mock_new_invoice, client, status):
    """Non-pending orders are never expired, regardless of age."""
    order_uuid = generate_test_order(mock_new_invoice,
                                     client,
                                     order_status=status)['uuid']
    db_order = Order.query.filter_by(uuid=order_uuid).first()
    # Backdate the order past the expiration threshold
    db_order.created_at = datetime.utcnow() - \
        timedelta(days=EXPIRE_PENDING_ORDERS_AFTER_DAYS
                  + 1)
    db.session.commit()

    order_helpers.expire_old_pending_orders()

    # refetch and check
    # expectation is that the order's status does not change
    db_order = Order.query.filter_by(uuid=order_uuid).first()
    assert db_order.status == status.value


@patch('orders.new_invoice')
def test_cleanup_old_message_files(mock_new_invoice, client):
    """Message files past the retention window are deleted; newer ones kept."""
    to_be_cleaned_order_uuid = generate_test_order(
        mock_new_invoice,
        client,
        order_id=1,
        invoice_status=InvoiceStatus.paid)['uuid']
    to_be_cleaned_db_order = Order.query.filter_by(
        uuid=to_be_cleaned_order_uuid).first()
    # Transmission ended just past the retention window -> eligible for cleanup
    to_be_cleaned_db_order.ended_transmission_at = datetime.utcnow() -\
        timedelta(days=MESSAGE_FILE_RETENTION_TIME_DAYS
                  + 1)
    db.session.commit()

    not_to_be_cleaned_order_uuid = generate_test_order(
        mock_new_invoice,
        client,
        order_id=1,
        invoice_status=InvoiceStatus.paid)['uuid']
    not_to_be_cleaned_db_order = Order.query.filter_by(
        uuid=not_to_be_cleaned_order_uuid).first()
    # Exactly at the retention boundary -> must NOT be cleaned up
    not_to_be_cleaned_db_order.ended_transmission_at = datetime.utcnow() -\
        timedelta(days=MESSAGE_FILE_RETENTION_TIME_DAYS)
    db.session.commit()

    cleaned_up_orders = order_helpers.cleanup_old_message_files()
    assert len(cleaned_up_orders) == 1
    assert cleaned_up_orders[0].uuid == to_be_cleaned_order_uuid

    # refetch and check
    message_path = os.path.join(MSG_STORE_PATH, to_be_cleaned_order_uuid)
    assert not os.path.exists(message_path)
    message_path = os.path.join(MSG_STORE_PATH, not_to_be_cleaned_order_uuid)
    assert os.path.exists(message_path)


@patch('orders.new_invoice')
@pytest.mark.parametrize("status", [
    OrderStatus.paid, OrderStatus.transmitting, OrderStatus.sent,
    OrderStatus.received, OrderStatus.cancelled, OrderStatus.expired
])
def test_maybe_mark_order_as_expired_for_invalid_order(mock_new_invoice,
                                                       client, status):
    """maybe_mark_order_as_expired returns None for missing/non-pending orders."""
    # when order does not exist
    assert order_helpers.maybe_mark_order_as_expired(1) is None

    # when order exists, but its status is not pending
    generate_test_order(mock_new_invoice,
                        client,
                        order_id=1,
                        order_status=status)
    assert order_helpers.maybe_mark_order_as_expired(1) is None


@patch('orders.new_invoice')
def test_maybe_mark_order_as_expired_pending_order_has_pending_invoice(
        mock_new_invoice, client):
    """A pending order with a still-pending invoice is not expired."""
    # when a pending order has a pending invoice
    generate_test_order(mock_new_invoice, client, order_id=1)
    assert order_helpers.maybe_mark_order_as_expired(1) is None


@patch('orders.new_invoice')
@pytest.mark.parametrize("invoice_status",
                         [InvoiceStatus.paid, InvoiceStatus.expired])
def test_maybe_mark_order_as_expired_successfully(mock_new_invoice, client,
                                                  invoice_status):
    """A pending order with no pending invoices gets marked as expired."""
    # when a pending order does not have any pending invoice
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               order_id=1,
                               invoice_status=invoice_status)['uuid']
    assert order_helpers.maybe_mark_order_as_expired(1) is not None
    db_order = Order.query.filter_by(uuid=uuid).first()
    assert db_order.status == OrderStatus.expired.value


@patch('orders.new_invoice')
def test_upsert_retransmission(mock_new_invoice, client, mockredis):
    """upsert_retransmission tracks (and updates) missing region confirmations."""
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               tx_seq_num=1,
                               order_status=OrderStatus.transmitting,
                               regions=[
                                   Regions.g18.value, Regions.e113.value,
                                   Regions.t11n_afr.value,
                                   Regions.t11n_eu.value
                               ])['uuid']

    db_order = Order.query.filter_by(uuid=uuid).first()
    # Confirm Tx on two of the four selected regions
    post_rv = client.post(
        '/order/tx/1',
        data={'regions': [[Regions.g18.value, Regions.e113.value]]})
    assert post_rv.status_code == HTTPStatus.OK
    db_order = Order.query.filter_by(uuid=uuid).first()
    order_helpers.upsert_retransmission(db_order)
    # t11n_afr and t11n_eu confirmations are missing
    retry_order = TxRetry.query.filter_by(order_id=db_order.id).first()
    assert retry_order.order_id == db_order.id
    assert retry_order.region_code == region_number_list_to_code(
        [Regions.t11n_afr.value, Regions.t11n_eu.value])

    post_rv = client.post('/order/tx/1',
                          data={'regions': [[Regions.t11n_eu.value]]})
    assert post_rv.status_code == HTTPStatus.OK
    db_order = Order.query.filter_by(uuid=uuid).first()
    order_helpers.upsert_retransmission(db_order)
    # only t11n_afr confirmations are missing
    # expectation is that the existing record in TxRetry gets updated
    retry_order = TxRetry.query.filter_by(order_id=db_order.id).all()
    assert len(retry_order) == 1
    assert retry_order[0].order_id == db_order.id
    assert retry_order[0].region_code == region_number_list_to_code(
        [Regions.t11n_afr.value])


@patch('orders.new_invoice')
def test_upsert_retransmission_for_order_without_regions(
        mock_new_invoice, client):
    """With no region selection, all regions must confirm the transmission."""
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               tx_seq_num=1,
                               order_status=OrderStatus.transmitting)['uuid']

    db_order = Order.query.filter_by(uuid=uuid).first()
    post_rv = client.post(
        '/order/tx/1',
        data={'regions': [[Regions.g18.value, Regions.e113.value]]})
    assert post_rv.status_code == HTTPStatus.OK
    db_order = Order.query.filter_by(uuid=uuid).first()
    order_helpers.upsert_retransmission(db_order)
    # No specific region was provided during order creation, so all regions
    # should confirm tx
    retry_order = TxRetry.query.filter_by(order_id=db_order.id).first()
    assert retry_order.order_id == db_order.id
    assert retry_order.region_code == region_number_list_to_code([
        Regions.t11n_afr.value, Regions.t11n_eu.value, Regions.t18v_c.value,
        Regions.t18v_ku.value
    ])


@patch('orders.new_invoice')
def test_upsert_retransmission_for_non_transmitting_order(
        mock_new_invoice, client):
    """No TxRetry record is created for an order that is not transmitting."""
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               order_status=OrderStatus.paid,
                               regions=[
                                   Regions.g18.value, Regions.e113.value,
                                   Regions.t11n_afr.value,
                                   Regions.t11n_eu.value
                               ])['uuid']
    db_order = Order.query.filter_by(uuid=uuid).first()
    order_helpers.upsert_retransmission(db_order)
    # There should be no retransmission record for this order yet since it's
    # not transmitting
    retry_order = TxRetry.query.filter_by(order_id=db_order.id).first()
    assert not retry_order


@patch('orders.new_invoice')
def test_get_missing_tx_confirmations(mock_new_invoice, client):
    """Missing confirmations shrink as regions confirm the transmission."""
    selected_regions = [
        Regions.g18.value, Regions.e113.value, Regions.t11n_afr.value,
        Regions.t11n_eu.value
    ]
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               tx_seq_num=1,
                               order_status=OrderStatus.transmitting,
                               regions=selected_regions)['uuid']
    db_order = Order.query.filter_by(uuid=uuid).first()

    # So far, the confirmations from the selected regions should all be missing
    missing_confirmations = order_helpers.get_missing_tx_confirmations(
        db_order)
    assert len(missing_confirmations) == len(selected_regions)
    assert (all([
        region_number_to_id(x) in missing_confirmations
        for x in selected_regions
    ]))

    # Send some Tx confirmations, including one for a region (T18V Ku) that was
    # not part of the original region selection
    post_rv = client.post(
        '/order/tx/1',
        data={
            'regions':
            [[Regions.g18.value, Regions.e113.value, Regions.t18v_ku.value]]
        })
    assert post_rv.status_code == HTTPStatus.OK
    db_order = Order.query.filter_by(uuid=uuid).first()
    missing_confirmations = order_helpers.get_missing_tx_confirmations(
        db_order)
    # Now, only T11N AFR and T11N EU should be missing
    assert (all([
        region_number_to_id(x) in missing_confirmations
        for x in [Regions.t11n_afr.value, Regions.t11n_eu.value]
    ]))


@patch('orders.new_invoice')
def test_get_missing_tx_confirmations_for_non_transmitting_order(
        mock_new_invoice, client):
    """No confirmations are expected from an order that never transmitted."""
    uuid = generate_test_order(mock_new_invoice,
                               client,
                               tx_seq_num=1,
                               order_status=OrderStatus.paid,
                               regions=[
                                   Regions.g18.value, Regions.e113.value,
                                   Regions.t11n_afr.value,
                                   Regions.t11n_eu.value
                               ])['uuid']
    db_order = Order.query.filter_by(uuid=uuid).first()
    # There should be no missing Tx confirmation record for this order yet
    # since it's not transmitting
    missing_confirmations = order_helpers.get_missing_tx_confirmations(
        db_order)
    assert len(missing_confirmations) == 0
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
-------------------------------------------------------------------------------- 1 | variables: 2 | GIT_SUBMODULE_STRATEGY: none 3 | CI_DISPOSABLE_ENVIRONMENT: "true" 4 | DOCKER_DRIVER: overlay2 5 | DOCKER_HOST: tcp://localhost:2375 6 | DOCKER_TLS_CERTDIR: "" 7 | API_IMAGE: us.gcr.io/satellite-api/satellite-api 8 | SSE_IMAGE: us.gcr.io/satellite-api/satellite-api-sse 9 | 10 | default: 11 | image: blockstream/gcloud-docker:502.0.0-27.3 12 | tags: 13 | - k8s 14 | 15 | stages: 16 | - test 17 | - build 18 | - plan 19 | - deploy 20 | 21 | before_script: 22 | - TMPF=$(mktemp) || exit 1 23 | - echo $GCLOUD_KEY > $TMPF 24 | - export GOOGLE_APPLICATION_CREDENTIALS=$TMPF 25 | - gcloud auth activate-service-account --key-file=$TMPF 26 | - gcloud auth configure-docker 27 | - gcloud auth list 28 | - gcloud --version 29 | 30 | # Run the unit tests 31 | unit_tests: 32 | stage: test 33 | image: python:3.9 34 | except: 35 | - cleanup_staging@satellite/satellite-api 36 | - /^staging_.*/ 37 | - /^prod_.*/ 38 | - /^testnet_prod_.*/ 39 | - /^misc_.*/ 40 | before_script: 41 | - pip install flake8 "yapf>=0.33" pytest pytest-cov 42 | - pip install -r server/requirements.txt 43 | - pip install -r server/test_requirements.txt 44 | script: 45 | - flake8 . 46 | - yapf --diff --recursive --verbose server/ 47 | - cd server/ && python -m pytest --cov=. 
48 | 49 | # Test the Satellite API docker image build 50 | docker_build: 51 | stage: build 52 | tags: 53 | - cloud 54 | services: 55 | - docker:27-dind 56 | only: 57 | - branches@satellite/satellite-api 58 | except: 59 | - cleanup_staging@satellite/satellite-api 60 | - /^staging_.*/ 61 | - /^prod_.*/ 62 | - /^misc_.*/ 63 | parallel: 64 | matrix: 65 | - IMAGE: $API_IMAGE 66 | DIR: server 67 | - IMAGE: $SSE_IMAGE 68 | DIR: sse 69 | script: 70 | - cd $DIR 71 | - docker pull $IMAGE:$CI_COMMIT_REF_SLUG || docker pull $IMAGE:latest || true 72 | - > 73 | docker build 74 | -f Dockerfile 75 | --network=host 76 | --cache-from $IMAGE:$CI_COMMIT_REF_SLUG 77 | --cache-from $IMAGE:latest 78 | --build-arg BUILDKIT_INLINE_CACHE=1 79 | . 80 | 81 | # Build and push the Satellite API docker image 82 | docker_push: 83 | stage: build 84 | tags: 85 | - cloud 86 | when: manual 87 | services: 88 | - docker:27-dind 89 | only: 90 | - branches@satellite/satellite-api 91 | parallel: 92 | matrix: 93 | - IMAGE: $API_IMAGE 94 | DIR: server 95 | - IMAGE: $SSE_IMAGE 96 | DIR: sse 97 | script: 98 | - cd $DIR 99 | - docker pull $IMAGE:$CI_COMMIT_REF_SLUG || docker pull $IMAGE:latest || true 100 | - > 101 | docker build 102 | -f Dockerfile 103 | --network=host 104 | --cache-from $IMAGE:$CI_COMMIT_REF_SLUG 105 | --cache-from $IMAGE:latest 106 | --build-arg BUILDKIT_INLINE_CACHE=1 107 | -t $IMAGE:$CI_COMMIT_SHA 108 | -t $IMAGE:$CI_COMMIT_REF_SLUG 109 | -t $IMAGE:latest 110 | . 
111 | - docker push $IMAGE:$CI_COMMIT_SHA 112 | - docker push $IMAGE:$CI_COMMIT_REF_SLUG 113 | - if [ $CI_COMMIT_BRANCH == "master" ]; then docker push $IMAGE:latest; fi 114 | 115 | # Build and push the Satellite API docker image on deployments 116 | tag_docker_push: 117 | stage: build 118 | tags: 119 | - cloud 120 | services: 121 | - docker:27-dind 122 | only: 123 | - /^staging_.*/ 124 | - /^prod_.*/ 125 | - /^testnet_prod_.*/ 126 | except: 127 | - branches@satellite/satellite-api 128 | parallel: 129 | matrix: 130 | - IMAGE: $API_IMAGE 131 | DIR: server 132 | - IMAGE: $SSE_IMAGE 133 | DIR: sse 134 | script: 135 | - cd $DIR 136 | - docker pull $IMAGE:$CI_COMMIT_REF_SLUG || docker pull $IMAGE:latest || true 137 | - > 138 | docker build 139 | -f Dockerfile 140 | --network=host 141 | --cache-from $IMAGE:$CI_COMMIT_REF_SLUG 142 | --cache-from $IMAGE:latest 143 | --build-arg BUILDKIT_INLINE_CACHE=1 144 | -t $IMAGE:$CI_COMMIT_SHA 145 | -t $IMAGE:$CI_COMMIT_REF_SLUG 146 | -t $IMAGE:latest 147 | . 
148 | - docker push $IMAGE:$CI_COMMIT_SHA 149 | - docker push $IMAGE:$CI_COMMIT_REF_SLUG 150 | - if [ $CI_COMMIT_BRANCH == "master" ]; then docker push $IMAGE:latest; fi 151 | 152 | # Any newly-pushed, WIP branch will be ran through plan (keep in mind docker images are using the latest tag, which may be an older one since the current commit may not match an available image) 153 | plan_staging: 154 | stage: plan 155 | only: 156 | - branches@satellite/satellite-api 157 | except: 158 | - cleanup_staging@satellite/satellite-api 159 | - /^staging_.*/ 160 | - /^prod_.*/ 161 | - /^testnet_prod_.*/ 162 | - /^misc_.*/ 163 | script: 164 | - (cd terraform 165 | && terraform init -input=false 166 | && terraform workspace select staging 167 | && terraform init -input=false 168 | && terraform plan 169 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 170 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 171 | -var "region=$REGION" 172 | -var "zone=$ZONE" 173 | -var "instance_type=$INSTANCE_TYPE" 174 | -var "host=$HOST_STAGING" 175 | -var "timeout=$TIMEOUT" 176 | -var "prom_service_acct=$PROM_SA" 177 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 178 | -var "private_bucket=$PRIVATE_BUCKET" 179 | -var "letsencrypt_email=$LE_EMAIL" 180 | -var "charge_token=$CHARGE_TOKEN" 181 | -var "rpcpass=$RPCPASS" 182 | -var "k8s_autossh_lb=$GKE_LB" 183 | -var "station1=$STATION_1" 184 | -var "station2=$STATION_2" 185 | -var "station3=$STATION_3" 186 | -input=false) 187 | 188 | # Tag with staging_v.* to deploy mainnet + LB to staging (e.g. 
staging_v0.1.1) 189 | deploy_staging: 190 | stage: deploy 191 | when: manual 192 | only: 193 | - /^staging_v.*/ 194 | script: 195 | - (cd terraform 196 | && terraform init -input=false 197 | && terraform workspace select staging 198 | && terraform init -input=false 199 | && terraform apply 200 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 201 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 202 | -var "region=$REGION" 203 | -var "zone=$ZONE" 204 | -var "instance_type=$INSTANCE_TYPE" 205 | -var "host=$HOST_STAGING" 206 | -var "timeout=$TIMEOUT" 207 | -var "prom_service_acct=$PROM_SA" 208 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 209 | -var "private_bucket=$PRIVATE_BUCKET" 210 | -var "letsencrypt_email=$LE_EMAIL" 211 | -var "charge_token=$CHARGE_TOKEN" 212 | -var "rpcpass=$RPCPASS" 213 | -var "k8s_autossh_lb=$GKE_LB" 214 | -var "station1=$STATION_1" 215 | -var "station2=$STATION_2" 216 | -var "station3=$STATION_3" 217 | -input=false -auto-approve) 218 | 219 | # Tag with prod_v.* to plan mainnet + LB to production (e.g. 
prod_v0.1.1) 220 | plan_production: 221 | stage: plan 222 | only: 223 | - /^prod_v.*/ 224 | script: 225 | - (cd terraform 226 | && terraform init -input=false 227 | && terraform workspace select prod 228 | && terraform init -input=false 229 | && terraform plan 230 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 231 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 232 | -var "region=$REGION" 233 | -var "zone=$ZONE" 234 | -var "instance_type=$INSTANCE_TYPE" 235 | -var "host=$HOST" 236 | -var "timeout=$TIMEOUT" 237 | -var "prom_service_acct=$PROM_SA" 238 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 239 | -var "private_bucket=$PRIVATE_BUCKET" 240 | -var "letsencrypt_email=$LE_EMAIL" 241 | -var "charge_token=$CHARGE_TOKEN" 242 | -var "rpcpass=$RPCPASS" 243 | -var "k8s_autossh_lb=$GKE_LB" 244 | -var "station1=$STATION_1" 245 | -var "station2=$STATION_2" 246 | -var "station3=$STATION_3" 247 | -input=false) 248 | 249 | # Tag with prod_v.* to deploy mainnet + LB to production (e.g. prod_v0.1.1) 250 | deploy_production: 251 | stage: deploy 252 | when: manual 253 | only: 254 | - /^prod_v.*/ 255 | script: 256 | - (cd terraform 257 | && terraform init -input=false 258 | && terraform workspace select prod 259 | && terraform init -input=false 260 | && terraform apply 261 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 262 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 263 | -var "region=$REGION" 264 | -var "zone=$ZONE" 265 | -var "instance_type=$INSTANCE_TYPE" 266 | -var "host=$HOST" 267 | -var "timeout=$TIMEOUT" 268 | -var "prom_service_acct=$PROM_SA" 269 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 270 | -var "private_bucket=$PRIVATE_BUCKET" 271 | -var "letsencrypt_email=$LE_EMAIL" 272 | -var "charge_token=$CHARGE_TOKEN" 273 | -var "rpcpass=$RPCPASS" 274 | -var "k8s_autossh_lb=$GKE_LB" 275 | -var "station1=$STATION_1" 276 | -var "station2=$STATION_2" 277 | -var "station3=$STATION_3" 278 | -input=false -auto-approve) 279 | 280 | # Tag with testnet_prod_v.* to 
plan testnet to production (e.g. testnet_prod_v0.1.1) 281 | plan_production_testnet: 282 | stage: plan 283 | only: 284 | - /^testnet_prod_v.*/ 285 | script: 286 | - (cd terraform 287 | && terraform init -input=false 288 | && terraform workspace select testnet-prod 289 | && terraform init -input=false 290 | && terraform plan 291 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 292 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 293 | -var "region=$REGION" 294 | -var "zone=$ZONE" 295 | -var "instance_type=$INSTANCE_TYPE" 296 | -var "timeout=$TIMEOUT" 297 | -var "prom_service_acct=$PROM_SA" 298 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 299 | -var "private_bucket=$PRIVATE_BUCKET" 300 | -var "letsencrypt_email=$LE_EMAIL" 301 | -var "charge_token=$CHARGE_TOKEN" 302 | -var "rpcpass=$RPCPASS_TESTNET" 303 | -var "k8s_autossh_lb=$GKE_LB" 304 | -input=false) 305 | 306 | # Tag with testnet_prod_v.* to deploy testnet to production (e.g. testnet_prod_v0.1.1) 307 | deploy_production_testnet: 308 | stage: deploy 309 | when: manual 310 | only: 311 | - /^testnet_prod_v.*/ 312 | script: 313 | - (cd terraform 314 | && terraform init -input=false 315 | && terraform workspace select testnet-prod 316 | && terraform init -input=false 317 | && terraform apply 318 | -var "sat_api_docker=$API_IMAGE:$CI_COMMIT_SHA" 319 | -var "sat_api_sse_docker=$SSE_IMAGE:$CI_COMMIT_SHA" 320 | -var "region=$REGION" 321 | -var "zone=$ZONE" 322 | -var "instance_type=$INSTANCE_TYPE" 323 | -var "timeout=$TIMEOUT" 324 | -var "prom_service_acct=$PROM_SA" 325 | -var "public_bucket_url=$PUBLIC_BUCKET_URL" 326 | -var "private_bucket=$PRIVATE_BUCKET" 327 | -var "letsencrypt_email=$LE_EMAIL" 328 | -var "charge_token=$CHARGE_TOKEN" 329 | -var "rpcpass=$RPCPASS_TESTNET" 330 | -var "k8s_autossh_lb=$GKE_LB" 331 | -input=false -auto-approve) 332 | 333 | # This plan gets triggered only for miscellaneous branches/tags (i.e. 
tor, prometheus, etc), so make sure the branch/tag name starts with misc_ 334 | plan_misc: 335 | stage: plan 336 | only: 337 | - /^misc_v.*/ 338 | script: 339 | - (echo -n "$V3_PK" > terraform/modules/tor/v3.pk) 340 | - (echo -n "$V3_PUBK" > terraform/modules/tor/v3.pubk) 341 | - (cd terraform 342 | && terraform init -input=false 343 | && terraform workspace select misc 344 | && terraform init -input=false 345 | && terraform plan 346 | -var "region=$REGION" 347 | -var "zone=$ZONE" 348 | -var "instance_type=$INSTANCE_TYPE" 349 | -var "onion_host=$ONION_HOST" 350 | -var "prom_allowed_source_ip=$PROMETHEUS_ALLOWED_SOURCE_IP" 351 | -var "prom_service_acct=$PROM_SA" 352 | -var "satellite_lb=$SATELLITE_LB" 353 | -var "satellite_api_lb=$SATELLITE_API_LB" 354 | -var "satellite_api_lb_staging=$SATELLITE_API_LB_STAGING" 355 | -var "blocksat_monitoring=$BLOCKSAT_MONITORING_LB" 356 | -input=false) 357 | 358 | # This deploys only tags/branches starting with misc_v.* (i.e. tor, prometheus, etc) 359 | deploy_misc: 360 | stage: deploy 361 | when: manual 362 | only: 363 | - /^misc_v.*/ 364 | script: 365 | - (echo -n "$V3_PK" > terraform/modules/tor/v3.pk) 366 | - (echo -n "$V3_PUBK" > terraform/modules/tor/v3.pubk) 367 | - (cd terraform 368 | && terraform init -input=false 369 | && terraform workspace select misc 370 | && terraform init -input=false 371 | && terraform apply 372 | -var "region=$REGION" 373 | -var "zone=$ZONE" 374 | -var "instance_type=$INSTANCE_TYPE" 375 | -var "onion_host=$ONION_HOST" 376 | -var "prom_allowed_source_ip=$PROMETHEUS_ALLOWED_SOURCE_IP" 377 | -var "prom_service_acct=$PROM_SA" 378 | -var "satellite_lb=$SATELLITE_LB" 379 | -var "satellite_api_lb=$SATELLITE_API_LB" 380 | -var "satellite_api_lb_staging=$SATELLITE_API_LB_STAGING" 381 | -var "blocksat_monitoring=$BLOCKSAT_MONITORING_LB" 382 | -input=false -auto-approve) 383 | 384 | # Pushing to this branch destroys the staging infrastructure 385 | cleanup_staging: 386 | stage: deploy 387 | only: 388 | - 
cleanup_staging@satellite/satellite-api 389 | script: 390 | - (cd terraform && terraform init -input=false && 391 | terraform workspace select staging && 392 | terraform destroy 393 | -target module.blc.google_compute_instance_group_manager.blc 394 | -target module.lb.google_compute_region_instance_group_manager.satapi-lb 395 | -auto-approve) 396 | - (cd terraform && terraform init -input=false && 397 | terraform workspace select testnet-staging && 398 | terraform destroy 399 | -target module.blc.google_compute_instance_group_manager.blc 400 | -auto-approve) 401 | -------------------------------------------------------------------------------- /terraform/modules/blc/cloud-init/blc.yaml: -------------------------------------------------------------------------------- 1 | bootcmd: 2 | - blkid /dev/disk/by-id/google-data || mkfs.ext4 -L data /dev/disk/by-id/google-data 3 | - mkdir -p /mnt/disks/data 4 | mounts: 5 | - [ 6 | /dev/disk/by-id/google-data, 7 | /mnt/disks/data, 8 | auto, 9 | "rw,noatime,discard,nobarrier,nodev", 10 | ] 11 | 12 | users: 13 | - name: bs 14 | uid: 2000 15 | 16 | write_files: 17 | - path: /etc/docker/daemon.json 18 | permissions: 0644 19 | owner: root 20 | content: | 21 | { 22 | "live-restore": true, 23 | "log-opts": { 24 | "tag": "{{.Name}}", 25 | "max-size": "1g", 26 | "max-files": "2" 27 | }, 28 | "storage-driver": "overlay2", 29 | "mtu": 1460 30 | } 31 | 32 | - path: /home/bs/check_containers.sh 33 | permissions: 0744 34 | owner: root 35 | content: | 36 | #!/bin/bash 37 | 38 | # Set the file path 39 | output_file="/var/tmp/tempmetrics" 40 | scrape_file="/var/tmp/nodeexporter/container_metrics.prom" 41 | 42 | # Check if the file exists, if not, create it 43 | if [[ ! 
-f "$output_file" ]]; then 44 | touch "$output_file" 45 | fi 46 | 47 | while true; do 48 | # Empty the file to start fresh 49 | > "$output_file" 50 | 51 | # Fetch the list of all container names and their statuses 52 | docker ps -a --format "{{.Names}} {{.Status}}" | while read -r line; do 53 | # Split the line into container name and status 54 | container_name=$(echo "$line" | awk '{print $1}') 55 | status=$(echo "$line" | awk '{print $2}') 56 | 57 | # Check if the container status is Running 58 | if [[ "$status" == "Up" ]]; then 59 | echo "running_container{cont=\"$container_name\"} 1" >> "$output_file" 60 | else 61 | echo "running_container{cont=\"$container_name\"} 0" >> "$output_file" 62 | fi 63 | done 64 | 65 | # move file for nodeexporter to scrape when finished writing it 66 | mv $output_file $scrape_file 67 | 68 | # Sleep for 30 seconds before the next iteration 69 | sleep 30 70 | done 71 | 72 | - path: /etc/systemd/system/check-containers.service 73 | permissions: 0644 74 | owner: root 75 | content: | 76 | [Unit] 77 | Description=Check container statuses every 30 secs 78 | Wants=check-containers.timer 79 | Requires=charge.service 80 | 81 | [Service] 82 | ExecStart=/bin/bash /home/bs/check_containers.sh 83 | Restart=always 84 | RestartSec=1 85 | User=root 86 | 87 | - path: /etc/systemd/system/check-containers.timer 88 | permissions: 0644 89 | owner: root 90 | content: | 91 | [Unit] 92 | Description=Run check-containers service after initial 7min delay 93 | 94 | [Timer] 95 | OnBootSec=420s 96 | Persistent=true 97 | 98 | [Install] 99 | WantedBy=timers.target 100 | 101 | - path: /etc/systemd/system/node-exporter.service 102 | permissions: 0644 103 | owner: root 104 | content: | 105 | [Unit] 106 | Description=Prometheus node-exporter 107 | Wants=gcr-online.target docker.service 108 | After=gcr-online.service docker.service 109 | 110 | [Service] 111 | Restart=always 112 | RestartSec=3 113 | Environment=HOME=/home/bs 114 | ExecStartPre=/usr/bin/docker pull
${node_exporter_docker} 115 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9100 -j ACCEPT 116 | ExecStart=/usr/bin/docker run \ 117 | --name=node-exporter \ 118 | --network=host \ 119 | --read-only \ 120 | -v /proc:/host/proc:ro \ 121 | -v /sys:/host/sys:ro \ 122 | -v /:/rootfs:ro \ 123 | -v /var/tmp/nodeexporter:/metrics:ro \ 124 | -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro \ 125 | "${node_exporter_docker}" --path.procfs /host/proc --path.sysfs /host/sys --collector.textfile.directory /metrics --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc($|/))" --collector.systemd 126 | ExecStop=/usr/bin/docker stop node-exporter 127 | ExecStopPost=/usr/bin/docker rm node-exporter 128 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9100 -j ACCEPT 129 | 130 | - path: /etc/systemd/system/autossh-key-downloader.service 131 | permissions: 0644 132 | owner: root 133 | content: | 134 | [Unit] 135 | Description=Download SSH privkey from GCS 136 | Wants=gcr-online.target 137 | After=gcr-online.target 138 | 139 | [Service] 140 | Type=oneshot 141 | RemainAfterExit=true 142 | Environment=HOME=/home/bs 143 | ExecStart=/usr/bin/docker run \ 144 | --name=autossh-key \ 145 | --tmpfs /root \ 146 | --tmpfs /tmp \ 147 | --rm \ 148 | -v /home/bs:/mnt/bs:rw \ 149 | "${certbot_docker}" /google-cloud-sdk/bin/gsutil -m cp -r ${private_bucket}/k8s_keys${ssh_key_net}/* /mnt/bs/ 150 | ExecStartPost=-/bin/chmod 0600 /home/bs/k8s_autossh.key 151 | ExecStopPost=-/bin/chmod 0600 /home/bs/k8s_autossh.key 152 | 153 | - path: /etc/systemd/system/k8s-autossh.service 154 | permissions: 0644 155 | owner: root 156 | content: | 157 | [Unit] 158 | Description=SSH tunnel to on-prem K8s node 159 | Wants=gcr-online.target 160 | After=autossh-key-downloader.service 161 | 162 | [Service] 163 | Restart=always 164 | RestartSec=5 165 | Environment=HOME=/home/bs 166 | ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport
${k8s_autossh_btc_port} -j ACCEPT 167 | ExecStart=/usr/bin/docker run \ 168 | --network=host \ 169 | --name=k8s-autossh \ 170 | -e AUTOSSH_GATETIME=0 \ 171 | -v /home/bs/k8s_autossh.key:/root/.ssh/id_ed25519:ro \ 172 | ${autossh_docker} ${k8s_autossh_btc_port}:localhost:${k8s_autossh_btc_port} -p ${k8s_autossh_ssh_port} root@${k8s_autossh_lb} 173 | ExecStop=/usr/bin/docker stop k8s-autossh 174 | ExecStopPost=/usr/bin/docker rm k8s-autossh 175 | ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport ${k8s_autossh_btc_port} -j ACCEPT 176 | 177 | - path: /home/bs/lightning.conf 178 | permissions: 0644 179 | owner: root 180 | content: | 181 | plugin-dir=/opt/plugins 182 | alias=satellite-api-${net} 183 | bitcoin-rpcuser=${net}-def 184 | bitcoin-rpcpassword=${rpcpass} 185 | announce-addr=${announce_addr} 186 | bind-addr=0.0.0.0 187 | 188 | - path: /etc/systemd/system/lightning.service 189 | permissions: 0644 190 | owner: root 191 | content: | 192 | [Unit] 193 | Description=Lightning node 194 | Wants=gcr-online.target 195 | After=k8s-autossh.service 196 | 197 | [Service] 198 | Restart=always 199 | RestartSec=3 200 | Environment=HOME=/home/bs 201 | ExecStartPre=/usr/bin/docker pull ${lightning_docker} 202 | ExecStartPre=/sbin/iptables -A INPUT -p tcp --dport ${lightning_port} -j ACCEPT 203 | ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9750 -j ACCEPT 204 | ExecStart=/usr/bin/docker run \ 205 | --network=host \ 206 | --pid=host \ 207 | --name=lightning \ 208 | --cap-add=SYS_PTRACE \ 209 | --memory=2g \ 210 | --log-opt max-size=1g \ 211 | -v /home/bs/lightning.conf:/root/.lightning${network_dir}/lightning.conf:ro \ 212 | -v /mnt/disks/data/lightning:/root/.lightning:rw \ 213 | "${lightning_docker}" ${lightning_cmd} 214 | ExecStop=/usr/bin/docker exec lightning lightning-cli stop 215 | ExecStopPost=/usr/bin/sleep 3 216 | ExecStopPost=/usr/bin/docker rm -f lightning 217 | ExecStopPost=/sbin/iptables -D INPUT -p tcp --dport ${lightning_port} -j ACCEPT 
218 | ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9750 -j ACCEPT 219 | 220 | - path: /etc/systemd/system/redis.service 221 | permissions: 0644 222 | owner: root 223 | content: | 224 | [Unit] 225 | Description=Redis db for server-side events 226 | Wants=gcr-online.target 227 | After=gcr-online.service 228 | 229 | [Service] 230 | Restart=always 231 | RestartSec=3 232 | Environment=HOME=/home/bs 233 | ExecStartPre=/usr/bin/docker pull redis:latest 234 | ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport ${redis_port} -j ACCEPT 235 | ExecStart=/usr/bin/docker run \ 236 | --network=host \ 237 | --pid=host \ 238 | --name=sse-redis-db \ 239 | "redis:latest" 240 | ExecStop=/usr/bin/docker stop sse-redis-db 241 | ExecStopPost=/usr/bin/docker rm sse-redis-db 242 | ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport ${redis_port} -j ACCEPT 243 | 244 | - path: /etc/systemd/system/api-server.service 245 | permissions: 0644 246 | owner: root 247 | content: | 248 | [Unit] 249 | Description=API server daemon 250 | Wants=gcr-online.target 251 | Requires=charge.service 252 | After=charge.service 253 | 254 | [Service] 255 | Restart=always 256 | RestartSec=3 257 | Environment=HOME=/home/bs 258 | ExecStartPre=/usr/bin/docker-credential-gcr configure-docker 259 | ExecStartPre=/usr/bin/docker pull ${sat_api_docker} 260 | ExecStartPre=/sbin/iptables -A INPUT -p tcp -s 10.138.0.0/16 --dport 9292 -j ACCEPT 261 | ExecStart=/usr/bin/docker run \ 262 | --network=host \ 263 | --pid=host \ 264 | --name=api-server \ 265 | --log-opt max-size=200m \ 266 | --log-opt max-file=3 \ 267 | -v /mnt/disks/data/satellite-api:/data \ 268 | -e "ENV=production" \ 269 | -e "CHARGE_ROOT=http://api-token:${charge_token}@127.0.0.1:9112" \ 270 | -e "CALLBACK_URI_ROOT=http://127.0.0.1:9292" \ 271 | -e "CHARGE_API_TOKEN=${charge_token}" \ 272 | -e "MIN_PER_BYTE_BID=1" \ 273 | -e "REDIS_URI=redis://127.0.0.1:6379" \ 274 | "${sat_api_docker}" 275 | ExecStop=/usr/bin/docker stop 
api-server 276 | ExecStopPost=/usr/bin/docker rm api-server 277 | ExecStopPost=/sbin/iptables -D INPUT -p tcp -s 10.138.0.0/16 --dport 9292 -j ACCEPT 278 | 279 | - path: /etc/systemd/system/sse-server.service 280 | permissions: 0644 281 | owner: root 282 | content: | 283 | [Unit] 284 | Description=Server-Side Events Server 285 | Wants=gcr-online.target 286 | After=redis.service 287 | 288 | [Service] 289 | Restart=always 290 | RestartSec=3 291 | Environment=HOME=/home/bs 292 | ExecStartPre=/usr/bin/docker-credential-gcr configure-docker 293 | ExecStartPre=/usr/bin/docker pull ${sat_api_sse_docker} 294 | ExecStartPre=/sbin/iptables -A INPUT -p tcp -s 10.138.0.0/16 --dport 4500 -j ACCEPT 295 | ExecStart=/usr/bin/docker run \ 296 | --network=host \ 297 | --pid=host \ 298 | --name=sse-server \ 299 | -e "SUB_CHANNELS=transmissions,gossip,btc-src,auth" \ 300 | -e "REDIS_URI=redis://127.0.0.1:6379" \ 301 | "${sat_api_sse_docker}" 302 | ExecStop=/usr/bin/docker stop sse-server 303 | ExecStopPost=/usr/bin/docker rm sse-server 304 | ExecStopPost=/sbin/iptables -D INPUT -p tcp -s 10.138.0.0/16 --dport 4500 -j ACCEPT 305 | 306 | - path: /etc/systemd/system/api-workers.service 307 | permissions: 0644 308 | owner: root 309 | content: | 310 | [Unit] 311 | Description=API Server Workers 312 | Wants=gcr-online.target 313 | After=api-server.service 314 | 315 | [Service] 316 | Restart=always 317 | RestartSec=3 318 | Environment=HOME=/home/bs 319 | ExecStart=/usr/bin/docker run \ 320 | --network=host \ 321 | --pid=host \ 322 | --name=api-workers \ 323 | -v /mnt/disks/data/satellite-api:/data \ 324 | -e "ENV=production" \ 325 | -e "REDIS_URI=redis://127.0.0.1:6379" \ 326 | "${sat_api_docker}" workers.sh 327 | ExecStop=/usr/bin/docker stop api-workers 328 | ExecStopPost=/usr/bin/docker rm api-workers 329 | 330 | - path: /etc/systemd/system/charge.service 331 | permissions: 0644 332 | owner: root 333 | content: | 334 | [Unit] 335 | Description=Charge instance 336 | Wants=gcr-online.target
337 | Requires=lightning.service 338 | After=lightning.service 339 | 340 | [Service] 341 | Restart=always 342 | RestartSec=200 343 | Environment=HOME=/home/bs 344 | ExecStartPre=/usr/bin/docker pull ${charge_docker} 345 | ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport 9112 -j ACCEPT 346 | ExecStart=/usr/bin/docker run \ 347 | --network=host \ 348 | --pid=host \ 349 | --name=charge \ 350 | -v /mnt/disks/data/lightning${network_dir}:/root/.lightning:ro \ 351 | -v /mnt/disks/data/charge:/data:rw \ 352 | -e "API_TOKEN=${charge_token}" \ 353 | "${charge_docker}" ${charge_cmd} 354 | ExecStop=/usr/bin/docker stop charge 355 | ExecStopPost=/usr/bin/docker rm charge 356 | ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport 9112 -j ACCEPT 357 | 358 | runcmd: 359 | - systemctl daemon-reload 360 | - systemctl enable --now autossh-key-downloader.service 361 | - systemctl enable --now k8s-autossh.service 362 | - systemctl enable --now lightning.service 363 | - systemctl enable --now charge.service 364 | - systemctl enable --now redis.service 365 | - systemctl enable --now api-server.service 366 | - systemctl enable --now api-workers.service 367 | - systemctl enable --now sse-server.service 368 | - systemctl enable --now node-exporter.service 369 | - systemctl enable --now check-containers.timer 370 | --------------------------------------------------------------------------------