├── disco
    ├── utils
    │   ├── __init__.py
    │   ├── randomname.py
    │   ├── stats.py
    │   ├── dns.py
    │   ├── apikeyinvites.py
    │   ├── encryption.py
    │   ├── subprocess.py
    │   ├── corsorigins.py
    │   ├── projectkeyvalues.py
    │   ├── keyvalues.py
    │   ├── envvariables.py
    │   ├── meta.py
    │   ├── imagecleanup.py
    │   ├── discofile.py
    │   ├── apikeys.py
    │   ├── syslog.py
    │   ├── logs.py
    │   ├── tunnels.py
    │   ├── commandruns.py
    │   ├── filesystem.py
    │   ├── commandoutputs.py
    │   ├── projectdomains.py
    │   └── events.py
    ├── endpoints
    │   ├── __init__.py
    │   ├── dependencies.py
    │   ├── events.py
    │   ├── corsorigins.py
    │   ├── apikeys.py
    │   ├── syslog.py
    │   ├── apikeyinvites.py
    │   ├── nodes.py
    │   ├── logs.py
    │   ├── scale.py
    │   ├── projectkeyvalues.py
    │   ├── projectdomains.py
    │   ├── envvariables.py
    │   ├── tunnels.py
    │   ├── run.py
    │   └── meta.py
    ├── scripts
    │   ├── __init__.py
    │   ├── setcorelogging.py
    │   └── leave_swarm.py
    ├── __init__.py
    ├── errors.py
    ├── config.py
    ├── alembic
    │   ├── script.py.mako
    │   ├── versions
    │   │   ├── eba27af20db2_0_2_0.py
    │   │   ├── 9087484963d4_0_18_0.py
    │   │   ├── b0b4edb3672a_0_27_0.py
    │   │   ├── 87c62632dfd1_0_5_0.py
    │   │   ├── 3fe4af6efa33_0_8_0a.py
    │   │   ├── b2c4ac1469de_0_14_0.py
    │   │   ├── 3eb8871ccb85_0_4_0.py
    │   │   ├── b570b8c2424d_0_12_0.py
    │   │   ├── 41a2f999a3e9_0_11_0.py
    │   │   ├── 7867432539d9_0_8_0b.py
    │   │   ├── 26877eda6774_0_17_0.py
    │   │   ├── 47da35039f6f_0_7_0b.py
    │   │   ├── 97e98737cba8_0_7_0a.py
    │   │   └── d0cba3cd3238_0_3_0.py
    │   └── env.py
    ├── models
    │   ├── db.py
    │   ├── keyvalue.py
    │   ├── apikeyusage.py
    │   ├── pendinggithubapp.py
    │   ├── __init__.py
    │   ├── projectkeyvalue.py
    │   ├── meta.py
    │   ├── projectdomain.py
    │   ├── corsorigin.py
    │   ├── projectgithubrepo.py
    │   ├── deploymentenvironmentvariable.py
    │   ├── githubapprepo.py
    │   ├── githubappinstallation.py
    │   ├── projectenvironmentvariable.py
    │   ├── githubapp.py
    │   ├── apikeyinvite.py
    │   ├── commandrun.py
    │   ├── project.py
    │   ├── apikey.py
    │   └── deployment.py
    ├── middleware.py
    ├── app.py
    └── auth.py
├── .dockerignore
├── bin
    ├── ruff
    └── mypy
├── .devcontainer
    └── devcontainer.json
├── requirements.in
├── docker-compose.yml
├── pyproject.toml
├── alembic.ini
├── BUILD.md
├── Dockerfile
├── LICENSE
├── dev-deploy.sh
├── requirements.txt
├── .gitignore
└── README.md

/disco/utils/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/disco/endpoints/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/disco/scripts/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/disco/__init__.py:
--------------------------------------------------------------------------------
__version__ = "0.27.0"

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
**/__pycache__
*.pyc
disco.egg-info
.git
--------------------------------------------------------------------------------
/bin/ruff:
--------------------------------------------------------------------------------
#!/bin/bash

docker compose run --no-deps --rm web ruff "$@"

--------------------------------------------------------------------------------
/bin/mypy:
--------------------------------------------------------------------------------
#!/bin/bash

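# Run mypy inside the web container so it checks against the image's pinned
# dependencies; --no-deps avoids starting any linked services.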
docker compose run --no-deps --rm web mypy --install-types --non-interactive "$@"

--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
{
    "name": "Project Dockerfile",
    "build": {
        "context": "..",
        "dockerfile": "../Dockerfile"
    }
}

--------------------------------------------------------------------------------
/disco/errors.py:
--------------------------------------------------------------------------------
class DiscoError(Exception):
    pass


class ProcessStatusError(DiscoError):
    def __init__(self, *args, status: int | None, **kwargs):
        self.status = status
        super().__init__(*args, **kwargs)

--------------------------------------------------------------------------------
/disco/config.py:
--------------------------------------------------------------------------------
CADDY_VERSION = "2.9.1"
SQLALCHEMY_DATABASE_URL = "sqlite:////disco/data/disco.sqlite3"
SQLALCHEMY_ASYNC_DATABASE_URL = "sqlite+aiosqlite:////disco/data/disco.sqlite3"
DISCO_TUNNEL_VERSION = "1.0.0"
BUSYBOX_VERSION = "1.37.0"

--------------------------------------------------------------------------------
/disco/utils/randomname.py:
--------------------------------------------------------------------------------
import asyncio

import friendlywords


def generate_random_name_sync() -> str:
    # sync because it reads from the file system
    return friendlywords.generate("po", separator="-")


async def generate_random_name() -> str:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, generate_random_name_sync)

--------------------------------------------------------------------------------
/requirements.in:
--------------------------------------------------------------------------------
aiohttp==3.11.13
aiofiles==24.1.0
aiosqlite==0.21.0
alembic==1.15.1
bcrypt==4.3.0
croniter==6.0.0
cryptography==44.0.2
fastapi==0.115.11
friendlywords==1.1.3
greenlet==3.1.1
mypy==1.15.0
pydantic==2.10.6
PyJWT==2.10.1
PyJWT[crypto]
requests==2.32.3
ruff==0.9.9
SQLAlchemy==2.0.38
sse-starlette==2.2.1
uvicorn==0.34.0

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'

services:
  web:
    build:
      context: .
      dockerfile: Dockerfile
    image: disco/disco-daemon-dev
    command: hypercorn --bind 0.0.0.0:6543 disco.app:app
    ports:
      - 6543:6543
    volumes:
      - .:/disco/app:cached
      - ./data:/disco/data
      - /disco/app/disco.egg-info
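      # Bare volume: keeps the image's generated disco.egg-info from being
      # hidden by the source bind mount above.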

networks:
  default:
    name: disco_default

--------------------------------------------------------------------------------
/disco/alembic/script.py.mako:
--------------------------------------------------------------------------------
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}

def upgrade():
    ${upgrades if upgrades else "pass"}

def downgrade():
    ${downgrades if downgrades else "pass"}

--------------------------------------------------------------------------------
/disco/alembic/versions/eba27af20db2_0_2_0.py:
--------------------------------------------------------------------------------
"""0.2.0

Revision ID: eba27af20db2
Revises: b09bcf2ef8df
Create Date: 2024-03-23 20:25:40.386154

"""

import sqlalchemy as sa
from alembic import op

revision = "eba27af20db2"
down_revision = "b09bcf2ef8df"
branch_labels = None
depends_on = None


def upgrade():
    op.add_column(
        "deployments",
        sa.Column("registry_host", sa.Unicode(length=2048), nullable=True),
    )


def downgrade():
    op.drop_column("deployments", "registry_host")

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[project]
name = "disco"
dynamic = ["version"]

[tool.setuptools]
packages = ["disco"]

[tool.setuptools.dynamic]
version = {attr = "disco.__version__"}

[project.scripts]
disco_worker = "disco.worker:main"
disco_init = "disco.scripts.init:main"
disco_update = "disco.scripts.update:main"
disco_set_core_logging = "disco.scripts.setcorelogging:main"
disco_leave_swarm = "disco.scripts.leave_swarm:main"

[tool.ruff.lint]
# Enable the isort rules.
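# (Import sorting is not part of ruff's default rule set, so "I" is opted into.)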
extend-select = ["I"]

[tool.mypy]
ignore_missing_imports = true

--------------------------------------------------------------------------------
/disco/alembic/versions/9087484963d4_0_18_0.py:
--------------------------------------------------------------------------------
"""0.18.0

Revision ID: 9087484963d4
Revises: 26877eda6774
Create Date: 2025-01-12 01:50:37.649205

"""

import sqlalchemy as sa
from alembic import op

revision = "9087484963d4"
down_revision = "26877eda6774"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.add_column(sa.Column("task_id", sa.String(length=32), nullable=True))


def downgrade():
    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.drop_column("task_id")

--------------------------------------------------------------------------------
/disco/alembic/versions/b0b4edb3672a_0_27_0.py:
--------------------------------------------------------------------------------
"""0.27.0

Revision ID: b0b4edb3672a
Revises: 9087484963d4
Create Date: 2025-11-22 22:32:42.378628

"""

import sqlalchemy as sa
from alembic import op

revision = "b0b4edb3672a"
down_revision = "9087484963d4"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("pending_github_apps", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("setup_url", sa.Unicode(length=1000), nullable=True)
        )


def downgrade():
    with op.batch_alter_table("pending_github_apps", schema=None) as batch_op:
        batch_op.drop_column("setup_url")

--------------------------------------------------------------------------------
/disco/models/db.py:
--------------------------------------------------------------------------------
import logging

from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
from sqlalchemy.orm import sessionmaker

from disco.config import SQLALCHEMY_ASYNC_DATABASE_URL, SQLALCHEMY_DATABASE_URL

log = logging.getLogger(__name__)


engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)


async_engine = create_async_engine(
    SQLALCHEMY_ASYNC_DATABASE_URL, connect_args={"check_same_thread": False}
)
AsyncSession = async_sessionmaker(autocommit=False, autoflush=False, bind=async_engine)

--------------------------------------------------------------------------------
/alembic.ini:
--------------------------------------------------------------------------------
[alembic]
script_location = disco/alembic
version_path_separator = os
sqlalchemy.url = sqlite:////disco/data/disco.sqlite3

[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
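# Migration output goes to stderr so it shows up in container logs.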
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

--------------------------------------------------------------------------------
/disco/scripts/setcorelogging.py:
--------------------------------------------------------------------------------
"""Set core syslogs."""

import asyncio
import logging
import sys

from disco.models.db import AsyncSession
from disco.utils import keyvalues
from disco.utils.syslog import set_core_syslogs, set_syslog_services

log = logging.getLogger(__name__)


def main() -> None:
    logging.basicConfig(level=logging.INFO)
    urls = sys.argv[1:]
    asyncio.run(main_async(urls))


async def main_async(urls: list[str]) -> None:
    async with AsyncSession.begin() as dbsession:
        disco_host = await keyvalues.get_value_str(dbsession, "DISCO_HOST")
        syslog_urls = await set_core_syslogs(dbsession, urls)
        await set_syslog_services(disco_host, syslog_urls)

--------------------------------------------------------------------------------
/BUILD.md:
--------------------------------------------------------------------------------
# Disco Daemon

## Build for Docker Hub

```bash
docker buildx build \
  --platform linux/amd64,linux/arm64/v8 \
  --tag letsdiscodev/daemon \
  --push \
  .
```

## Linters/Formatters

```
bin/ruff check --fix .
bin/ruff format .
bin/mypy .
```

## Generating an Alembic revision

```
docker compose build --no-cache web
docker compose run --rm web rm data/disco.sqlite3
docker compose run --rm web alembic upgrade head
docker compose run --rm web alembic revision --autogenerate -m "0.1.0"
```

## Regenerate requirements.txt

We edit `requirements.in` to list the dependencies.
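The pinned `requirements.txt` is then compiled from it with `uv`, run inside
the container so the resolution matches the image's Python: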
```bash
docker compose run --rm --no-deps web \
  uv pip compile requirements.in -o requirements.txt
```

--------------------------------------------------------------------------------
/disco/models/keyvalue.py:
--------------------------------------------------------------------------------
from datetime import datetime, timezone

from sqlalchemy import String, UnicodeText
from sqlalchemy.orm import Mapped, mapped_column

from disco.models.meta import Base, DateTimeTzAware


class KeyValue(Base):
    __tablename__ = "key_values"

    key: Mapped[str] = mapped_column(String(255), primary_key=True)
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    value: Mapped[str | None] = mapped_column(UnicodeText())

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.12.9
ENV PYTHONUNBUFFERED=0
RUN apt-get update && apt-get install -y ca-certificates curl
RUN install -m 0755 -d /etc/apt/keyrings
RUN curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
RUN chmod a+r /etc/apt/keyrings/docker.asc
RUN echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
  $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
  tee /etc/apt/sources.list.d/docker.list > /dev/null
RUN apt-get update
RUN apt-get install -y ssh docker-ce-cli
RUN pip install uv
WORKDIR /disco/app
ADD pyproject.toml /disco/app/
ADD requirements.txt /disco/app/
ADD alembic.ini /disco/app/alembic.ini
RUN pip install -r requirements.txt
ADD disco /disco/app/disco
RUN pip install -e .
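# No CMD here: the dev docker-compose.yml supplies the run command (hypercorn).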

--------------------------------------------------------------------------------
/disco/alembic/versions/87c62632dfd1_0_5_0.py:
--------------------------------------------------------------------------------
"""0.5.0

Revision ID: 87c62632dfd1
Revises: 3eb8871ccb85
Create Date: 2024-04-25 20:45:49.295620

"""

import sqlalchemy as sa
from alembic import op

revision = "87c62632dfd1"
down_revision = "3eb8871ccb85"
branch_labels = None
depends_on = None


def upgrade():
    op.add_column(
        "projects",
        sa.Column("github_webhook_secret", sa.String(length=32), nullable=True),
    )
    op.create_index(
        op.f("ix_projects_github_webhook_token"),
        "projects",
        ["github_webhook_token"],
        unique=False,
    )


def downgrade():
    op.drop_index(op.f("ix_projects_github_webhook_token"), table_name="projects")
    op.drop_column("projects", "github_webhook_secret")

--------------------------------------------------------------------------------
/disco/alembic/versions/3fe4af6efa33_0_8_0a.py:
--------------------------------------------------------------------------------
"""0.8.0a

Revision ID: 3fe4af6efa33
Revises: 47da35039f6f
Create Date: 2024-05-11 01:44:07.784733

"""

import sqlalchemy as sa
from alembic import op

revision = "3fe4af6efa33"
down_revision = "47da35039f6f"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("full_name", sa.Unicode(length=255), nullable=True)
        )
        batch_op.create_index(
            batch_op.f("ix_project_github_repos_full_name"), ["full_name"], unique=False
        )


def downgrade():
    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.drop_index(batch_op.f("ix_project_github_repos_full_name"))
        batch_op.drop_column("full_name")

--------------------------------------------------------------------------------
/disco/alembic/versions/b2c4ac1469de_0_14_0.py:
--------------------------------------------------------------------------------
"""0.14.0

Revision ID: b2c4ac1469de
Revises: b570b8c2424d
Create Date: 2024-06-11 00:37:13.190145

"""

import sqlalchemy as sa
from alembic import op

revision = "b2c4ac1469de"
down_revision = "b570b8c2424d"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.add_column(sa.Column("branch", sa.Unicode(length=255), nullable=True))

    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.add_column(sa.Column("branch", sa.Unicode(length=255), nullable=True))


def downgrade():
    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.drop_column("branch")

    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.drop_column("branch")

--------------------------------------------------------------------------------
/disco/alembic/versions/3eb8871ccb85_0_4_0.py:
--------------------------------------------------------------------------------
"""0.4.0

Revision ID: 3eb8871ccb85
Revises: d0cba3cd3238
Create Date: 2024-04-11 01:24:05.745167

| """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | revision = "3eb8871ccb85" 13 | down_revision = "d0cba3cd3238" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.drop_table("tasks") 20 | 21 | 22 | def downgrade(): 23 | op.create_table( 24 | "tasks", 25 | sa.Column("id", sa.VARCHAR(length=32), nullable=False), 26 | sa.Column("created", sa.DATETIME(), nullable=True), 27 | sa.Column("updated", sa.DATETIME(), nullable=True), 28 | sa.Column("name", sa.VARCHAR(length=255), nullable=False), 29 | sa.Column("status", sa.VARCHAR(length=32), nullable=False), 30 | sa.Column("body", sa.VARCHAR(length=10000), nullable=False), 31 | sa.Column("result", sa.VARCHAR(length=10000), nullable=True), 32 | sa.PrimaryKeyConstraint("id", name="pk_tasks"), 33 | ) 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Antoine Leclair and Greg Sadetsky 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
--------------------------------------------------------------------------------
/disco/models/apikeyusage.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import ApiKey
from disco.models.meta import Base, DateTimeTzAware


class ApiKeyUsage(Base):
    __tablename__ = "api_key_usages"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    api_key_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("api_keys.id"),
        nullable=False,
        index=True,
    )

    api_key: Mapped[ApiKey] = relationship(
        "ApiKey",
        back_populates="usages",
    )

    def log(self):
        return f"API_KEY_USAGE_{self.id}"

--------------------------------------------------------------------------------
/disco/middleware.py:
--------------------------------------------------------------------------------
import logging

from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware

from disco.models.db import Session
from disco.utils.corsorigins import get_all_cors_origins_sync

log = logging.getLogger(__name__)

with Session.begin() as dbsession:
    cors_origins = get_all_cors_origins_sync(dbsession)
    allowed_origins = [o.origin for o in cors_origins]

middleware = [
    Middleware(
        CORSMiddleware,
        allow_origins=allowed_origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
]


def update_cors(allowed_origins: list[str]) -> None:
    from disco.app import app

    log.info("Updating CORS allowed origins in middleware %s", allowed_origins)
    mw = app.middleware_stack
    while mw is not None:
        if isinstance(mw, CORSMiddleware):
            mw.allow_origins = allowed_origins
            log.info("CORS allowed origins in middleware updated")
            return
        mw = getattr(mw, "app", None)

--------------------------------------------------------------------------------
/disco/utils/stats.py:
--------------------------------------------------------------------------------
import asyncio

import aiohttp


class AsyncDockerStats:
    def __init__(self, docker_socket="/var/run/docker.sock"):
        self.docker_socket = docker_socket

    async def get_container_stats(self, container_id):
        async with aiohttp.UnixConnector(path=self.docker_socket) as connector:
            async with aiohttp.ClientSession(connector=connector) as session:
                url = f"http://localhost/containers/{container_id}/stats?stream=false"
                async with session.get(url) as response:
                    return await response.json()

    async def get_all_container_stats(self):
        async with aiohttp.UnixConnector(path=self.docker_socket) as connector:
            async with aiohttp.ClientSession(connector=connector) as session:
                async with session.get("http://localhost/containers/json") as response:
                    containers = await response.json()
                    tasks = [self.get_container_stats(c["Id"]) for c in containers]
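                    # One stats request per container, fetched concurrently.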
                    return [
                        stats
                        for stats in await asyncio.gather(*tasks)
                        if stats is not None
                    ]

--------------------------------------------------------------------------------
/disco/alembic/versions/b570b8c2424d_0_12_0.py:
--------------------------------------------------------------------------------
"""0.12.0

Revision ID: b570b8c2424d
Revises: 41a2f999a3e9
Create Date: 2024-06-05 01:12:09.118281

"""

import sqlalchemy as sa
from alembic import op

revision = "b570b8c2424d"
down_revision = "41a2f999a3e9"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.drop_index("ix_deployments_github_repo_id")
        batch_op.drop_constraint(
            "fk_deployments_github_repo_id_github_app_repos", type_="foreignkey"
        )
        batch_op.drop_column("github_repo_id")


def downgrade():
    with op.batch_alter_table("deployments", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("github_repo_id", sa.VARCHAR(length=32), nullable=True)
        )
        batch_op.create_foreign_key(
            "fk_deployments_github_repo_id_github_app_repos",
            "github_app_repos",
            ["github_repo_id"],
            ["id"],
        )
        batch_op.create_index(
            "ix_deployments_github_repo_id", ["github_repo_id"], unique=False
        )

--------------------------------------------------------------------------------
/disco/utils/dns.py:
--------------------------------------------------------------------------------
import asyncio
import socket

from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession
from sqlalchemy.orm.session import Session as DBSession

from disco.utils import keyvalues


def domain_points_to_here_sync(dbsession: DBSession, domain: str) -> bool:
    disco_host = keyvalues.get_value_sync(dbsession=dbsession, key="DISCO_HOST")
    assert disco_host is not None
    try:
        domain_ip = socket.gethostbyname(domain)
        disco_ip = socket.gethostbyname(disco_host)
    except socket.gaierror:
        return False
    return domain_ip == disco_ip


async def domain_points_to_here(dbsession: AsyncDBSession, domain: str) -> bool:
    disco_host = await keyvalues.get_value(dbsession=dbsession, key="DISCO_HOST")
    assert disco_host is not None

    def point_to_same_ip() -> bool:
        # TODO socket.gethostbyname async?
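        # (loop.getaddrinfo would avoid the executor hop, but gethostbyname
        # keeps the behavior identical to the sync variant above.)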
        try:
            domain_ip = socket.gethostbyname(domain)
            disco_ip = socket.gethostbyname(disco_host)
        except socket.gaierror:
            return False
        return domain_ip == disco_ip

    return await asyncio.get_event_loop().run_in_executor(None, point_to_same_ip)

--------------------------------------------------------------------------------
/disco/models/pendinggithubapp.py:
--------------------------------------------------------------------------------
import uuid
from datetime import datetime, timezone

from sqlalchemy import String, Unicode
from sqlalchemy.orm import Mapped, mapped_column

from disco.models.meta import Base, DateTimeTzAware


class PendingGithubApp(Base):
    __tablename__ = "pending_github_apps"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    expires: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        nullable=False,
    )
    state: Mapped[str] = mapped_column(String(32), nullable=False)
    setup_url: Mapped[str | None] = mapped_column(Unicode(1000), nullable=True)
    organization: Mapped[str | None] = mapped_column(Unicode(250), nullable=True)

    def log(self):
        return f"PENDING_GITHUB_APP_{self.id}"

--------------------------------------------------------------------------------
/disco/alembic/versions/41a2f999a3e9_0_11_0.py:
--------------------------------------------------------------------------------
"""0.11.0

Revision ID: 41a2f999a3e9
Revises: 7867432539d9
Create Date: 2024-05-18 00:49:24.293133

"""

import sqlalchemy as sa
from alembic import op

revision = "41a2f999a3e9"
down_revision = "7867432539d9"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("command_outputs", schema=None) as batch_op:
        batch_op.drop_index("ix_command_outputs_created")
        batch_op.drop_index("ix_command_outputs_source")

    op.drop_table("command_outputs")


def downgrade():
    op.create_table(
        "command_outputs",
        sa.Column("id", sa.VARCHAR(length=32), nullable=False),
        sa.Column("created", sa.DATETIME(), nullable=False),
        sa.Column("source", sa.VARCHAR(length=100), nullable=False),
        sa.Column("text", sa.TEXT(), nullable=True),
        sa.PrimaryKeyConstraint("id", name="pk_command_outputs"),
    )
    with op.batch_alter_table("command_outputs", schema=None) as batch_op:
        batch_op.create_index("ix_command_outputs_source", ["source"], unique=False)
        batch_op.create_index("ix_command_outputs_created", ["created"], unique=False)

--------------------------------------------------------------------------------
/disco/models/__init__.py:
--------------------------------------------------------------------------------
from sqlalchemy.orm import configure_mappers
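
# Import every model so configure_mappers() below can resolve all
# relationships between them.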
from disco.models.apikey import ApiKey  # noqa: F401
from disco.models.apikeyinvite import ApiKeyInvite  # noqa: F401
from disco.models.apikeyusage import ApiKeyUsage  # noqa: F401
from disco.models.commandrun import (
    CommandRun,  # noqa: F401
)
from disco.models.corsorigin import (
    CorsOrigin,  # noqa: F401
)
from disco.models.deployment import Deployment  # noqa: F401
from disco.models.deploymentenvironmentvariable import (
    DeploymentEnvironmentVariable,  # noqa: F401
)
from disco.models.githubapp import GithubApp  # noqa: F401
from disco.models.githubappinstallation import GithubAppInstallation  # noqa: F401
from disco.models.githubapprepo import GithubAppRepo  # noqa: F401
from disco.models.keyvalue import KeyValue  # noqa: F401
from disco.models.pendinggithubapp import PendingGithubApp  # noqa: F401
from disco.models.project import Project  # noqa: F401
from disco.models.projectdomain import ProjectDomain  # noqa: F401
from disco.models.projectenvironmentvariable import (
    ProjectEnvironmentVariable,  # noqa: F401
)
from disco.models.projectgithubrepo import ProjectGithubRepo  # noqa: F401
from disco.models.projectkeyvalue import ProjectKeyValue  # noqa: F401

configure_mappers()

--------------------------------------------------------------------------------
/disco/utils/apikeyinvites.py:
--------------------------------------------------------------------------------
import logging
from datetime import datetime, timedelta, timezone
from secrets import token_hex

from sqlalchemy.orm.session import Session as DBSession

from disco.models import ApiKey, ApiKeyInvite
from disco.utils.apikeys import create_api_key

log = logging.getLogger(__name__)


def create_api_key_invite(
    dbsession: DBSession, name: str, by_api_key: ApiKey
) -> ApiKeyInvite:
    invite = ApiKeyInvite(
        id=token_hex(16),
        name=name,
        expires=datetime.now(timezone.utc) + timedelta(days=1),
        by_api_key=by_api_key,
    )
    dbsession.add(invite)
    log.info("Created API Key invite %s by %s", invite.log(), by_api_key.log())
    return invite


def get_api_key_invite_by_id(
    dbsession: DBSession, invite_id: str
) -> ApiKeyInvite | None:
    return dbsession.query(ApiKeyInvite).filter(ApiKeyInvite.id == invite_id).first()


def invite_is_active(invite: ApiKeyInvite) -> bool:
    return invite.expires > datetime.now(timezone.utc) and invite.api_key_id is None


def use_api_key_invite(dbsession: DBSession, invite: ApiKeyInvite) -> ApiKey:
    assert invite.expires > datetime.now(timezone.utc)
    assert invite.api_key_id is None
    api_key = create_api_key(dbsession, invite.name)
    invite.api_key = api_key
    return api_key

--------------------------------------------------------------------------------
/disco/models/projectkeyvalue.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String, UnicodeText
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import (
        Project,
    )
from disco.models.meta import Base, DateTimeTzAware


class ProjectKeyValue(Base):
    __tablename__ = "project_key_values"

    key: Mapped[str] = mapped_column(String(255), primary_key=True)
    project_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("projects.id"),
        primary_key=True,
    )
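    # (key, project_id) is a composite primary key: one value per key per
    # project.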
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    value: Mapped[str | None] = mapped_column(UnicodeText())

    project: Mapped[Project] = relationship(
        "Project",
        back_populates="key_values",
    )

    def log(self):
        return f"PROJECT_KEY_VAL_{self.key} ({self.project.name})"

--------------------------------------------------------------------------------
/disco/models/meta.py:
--------------------------------------------------------------------------------
from datetime import timezone

import sqlalchemy.types as types
from sqlalchemy import DateTime
from sqlalchemy.ext.asyncio import AsyncAttrs
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.schema import MetaData

# Recommended naming convention used by Alembic, as various different database
# providers will autogenerate vastly different names making migrations more
# difficult. See: https://alembic.sqlalchemy.org/en/latest/naming.html
NAMING_CONVENTION = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(constraint_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
}

base_metadata = MetaData(naming_convention=NAMING_CONVENTION)


class Base(AsyncAttrs, DeclarativeBase):
    metadata = base_metadata


class DateTimeTzAware(types.TypeDecorator):
    impl = DateTime
    cache_ok = True

    def process_bind_param(self, value, dialect):
        if value is not None and value.tzinfo != timezone.utc:
            raise TypeError("tzinfo has to be timezone.utc")
        return value

    def process_result_value(self, value, dialect):
        if value is not None:
            value = value.replace(tzinfo=timezone.utc)
        return value

--------------------------------------------------------------------------------
/disco/scripts/leave_swarm.py:
--------------------------------------------------------------------------------
"""Script to leave Docker Swarm

It waits until it's the last container running,
then leaves the swarm.

Once the script is started, the swarm manager
will change the availability of the node to "drain",
so all services will be moved to other nodes.
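(Typically the equivalent of "docker node update --availability drain <node>".)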

"""

import logging
import subprocess
import time
from datetime import datetime, timedelta, timezone

log = logging.getLogger(__name__)


def main() -> None:
    logging.basicConfig(level=logging.INFO)
    wait_is_last_container()
    leave_swarm()


def ps_count() -> int:
    args = ["docker", "ps", "--format", "json"]
    process = subprocess.run(args, check=True, capture_output=True)
    return len(process.stdout.split(b"\n")) - 1


def wait_is_last_container():
    log.info("Waiting for all containers to be stopped")
    timeout = datetime.now(timezone.utc) + timedelta(days=1)
    while True:
        if datetime.now(timezone.utc) > timeout:
            log.error("Timed out")
            return
        count = ps_count()
        if count == 1:
            log.info("Done waiting")
            return
        log.info("There are %d other containers running, waiting...", count - 1)
        time.sleep(1)


def leave_swarm() -> None:
    log.info("Leaving swarm")
    args = ["docker", "swarm", "leave"]
    subprocess.run(args)

--------------------------------------------------------------------------------
/disco/alembic/env.py:
--------------------------------------------------------------------------------
import logging

from alembic import context

from disco.config import SQLALCHEMY_DATABASE_URL
from disco.models.meta import Base, DateTimeTzAware

config = context.config

target_metadata = Base.metadata


def render_item(type_, obj, autogen_context):
    if type_ == "type" and isinstance(obj, DateTimeTzAware):
        return "sa.DateTime()"
    # default rendering for other objects
    return False


def run_migrations_offline() -> None:
    context.configure(
        url=SQLALCHEMY_DATABASE_URL,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        render_item=render_item,
        render_as_batch=True,
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    logging.basicConfig(level=logging.INFO)

    from disco.models.db import engine

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        render_item=render_item,
        render_as_batch=True,
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

--------------------------------------------------------------------------------
/disco/models/projectdomain.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String, Unicode
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import (
        Project,
    )
from disco.models.meta import Base, DateTimeTzAware


class ProjectDomain(Base):
    __tablename__ = "project_domains"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    name: Mapped[str] = mapped_column(
        Unicode(255), nullable=False, index=True, unique=True
    )
    project_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("projects.id"),
        nullable=False,
        index=True,
    )

    project: Mapped[Project] = relationship(
        "Project",
        back_populates="domains",
    )

    def log(self):
        return f"PROJECT_DOMAIN_{self.id} ({self.name})"

--------------------------------------------------------------------------------
/disco/models/corsorigin.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String, Unicode
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import ApiKey
from disco.models.meta import Base, DateTimeTzAware


class CorsOrigin(Base):
    __tablename__ = "cors_origins"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    origin: Mapped[str] = mapped_column(
        Unicode(255), nullable=False, index=True, unique=True
    )
    by_api_key_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("api_keys.id"),
        nullable=False,
        index=True,
    )

    by_api_key: Mapped[ApiKey] = relationship(
        "ApiKey",
        foreign_keys=by_api_key_id,
        back_populates="created_cors_origins",
    )

    def log(self):
        return f"CORS_ORIGIN_{self.id} ({self.origin})"

--------------------------------------------------------------------------------
/disco/models/projectgithubrepo.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String, Unicode
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import (
        Project,
    )
from disco.models.meta import Base, DateTimeTzAware


class ProjectGithubRepo(Base):
    __tablename__ = "project_github_repos"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    project_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("projects.id"),
        nullable=False,
        index=True,
    )
    full_name: Mapped[str] = mapped_column(Unicode(255), nullable=False, index=True)
    branch: Mapped[str] = mapped_column(Unicode(255), nullable=True)
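    # branch is nullable; a null branch presumably means deployments follow
    # the repository's default branch.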

    project: Mapped[Project] = relationship("Project", back_populates="github_repo")

    def log(self):
        return f"PROJECT_GITHUB_REPO_{self.id} ({self.full_name})"

--------------------------------------------------------------------------------
/disco/endpoints/dependencies.py:
--------------------------------------------------------------------------------
from typing import Annotated

from fastapi import Depends, HTTPException, Path
from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession
from sqlalchemy.orm.session import Session as DBSession

from disco.models.db import AsyncSession, Session
from disco.utils.projects import get_project_by_name, get_project_by_name_sync


def get_db_sync():
    with Session.begin() as dbsession:
        yield dbsession


async def get_db():
    async with AsyncSession.begin() as dbsession:
        yield dbsession


async def get_project_name_from_url_wo_tx(
    project_name: Annotated[str, Path()],
):
    async with AsyncSession.begin() as dbsession:
        project = await get_project_by_name(dbsession, project_name)
        if project is None:
            raise HTTPException(status_code=404)
    yield project_name


def get_project_from_url_sync(
    project_name: Annotated[str, Path()],
    dbsession: Annotated[DBSession, Depends(get_db_sync)],
):
    project = get_project_by_name_sync(dbsession, project_name)
    if project is None:
        raise HTTPException(status_code=404)
    yield project


async def get_project_from_url(
    project_name: Annotated[str, Path()],
    dbsession: Annotated[AsyncDBSession, Depends(get_db)],
):
    project = await get_project_by_name(dbsession, project_name)
    if project is None:
        raise HTTPException(status_code=404)
    yield project

--------------------------------------------------------------------------------
/disco/endpoints/events.py:
--------------------------------------------------------------------------------
import json
import logging
from typing import Annotated

from fastapi import APIRouter, Depends, Header
from sse_starlette import ServerSentEvent
from sse_starlette.sse import EventSourceResponse

from disco.auth import get_api_key_wo_tx
from disco.utils.events import DiscoEvent, get_events_since, subscribe, unsubscribe

log = logging.getLogger(__name__)

router = APIRouter()


@router.get(
    "/api/disco/events",
    dependencies=[Depends(get_api_key_wo_tx)],
)
async def events_get(
    last_event_id: Annotated[str | None, Header()] = None,
):
    def sse_from_event(event: DiscoEvent) -> ServerSentEvent:
        return ServerSentEvent(
            id=event.id,
            event=event.type,
            data=json.dumps(
                {
                    "id": event.id,
                    "timestamp": event.timestamp.isoformat(),
                    "type": event.type,
                    "data": event.data,
                }
            ),
        )

    async def get_events():
        if last_event_id is not None:
            for event in get_events_since(last_event_id):
                yield sse_from_event(event)
        try:
            subscriber = subscribe()
            while True:
                event = await subscriber.get()
                yield sse_from_event(event)
        finally:
            unsubscribe(subscriber)
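
    # Replay missed events first (when Last-Event-ID was sent), then stream
    # live events until the client disconnects.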
    return EventSourceResponse(get_events())

--------------------------------------------------------------------------------
/disco/models/deploymentenvironmentvariable.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import ForeignKey, String, Unicode
from sqlalchemy.orm import Mapped, mapped_column, relationship

if TYPE_CHECKING:
    from disco.models import Deployment

from disco.models.meta import Base, DateTimeTzAware


class DeploymentEnvironmentVariable(Base):
    __tablename__ = "deployment_env_variables"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    value: Mapped[str] = mapped_column(Unicode(4000), nullable=False)
    deployment_id: Mapped[str] = mapped_column(
        String(32),
        ForeignKey("deployments.id"),
        nullable=False,
        index=True,
    )

    deployment: Mapped[Deployment] = relationship(
        "Deployment",
        back_populates="env_variables",
    )

    def log(self):
        return f"DEPLOY_ENV_VAR_{self.deployment_id}_{self.name}"

--------------------------------------------------------------------------------
/disco/models/githubapprepo.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import uuid
from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import (
    ForeignKey,
    Integer,
    String,
    Unicode,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship

from disco.models.meta import Base, DateTimeTzAware

if TYPE_CHECKING:
    from disco.models import GithubAppInstallation


class GithubAppRepo(Base):
    __tablename__ = "github_app_repos"

    id: Mapped[str] = mapped_column(
        String(32), default=lambda: uuid.uuid4().hex, primary_key=True
    )
    created: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    updated: Mapped[datetime] = mapped_column(
        DateTimeTzAware(),
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        nullable=False,
    )
    installation_id: Mapped[int] = mapped_column(
        Integer,
        ForeignKey("github_app_installations.id"),
        nullable=False,
        index=True,
    )
    full_name: Mapped[str] = mapped_column(Unicode(255), nullable=False, index=True)

    installation: Mapped[GithubAppInstallation] = relationship(
        "GithubAppInstallation",
        back_populates="github_app_repos",
    )

    def log(self):
        return f"GITHUB_APP_REPO_{self.id} ({self.full_name})"

--------------------------------------------------------------------------------
/disco/alembic/versions/7867432539d9_0_8_0b.py:
--------------------------------------------------------------------------------
"""0.8.0b

Revision ID: 7867432539d9
Revises: 3fe4af6efa33
Create Date: 2024-05-11 01:56:26.981025

"""

import sqlalchemy as sa
from alembic import op

revision = "7867432539d9"
down_revision = "3fe4af6efa33"
branch_labels = None
depends_on = None


def upgrade():
    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.alter_column(
            "full_name", existing_type=sa.VARCHAR(length=255), nullable=False
        )
        batch_op.drop_index("ix_project_github_repos_github_app_repo_id")
        batch_op.drop_constraint(
            "fk_project_github_repos_github_app_repo_id_github_app_repos",
            type_="foreignkey",
        )
        batch_op.drop_column("github_app_repo_id")


def downgrade():
    with op.batch_alter_table("project_github_repos", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("github_app_repo_id", sa.VARCHAR(length=32), nullable=False)
        )
        batch_op.create_foreign_key(
            "fk_project_github_repos_github_app_repo_id_github_app_repos",
            "github_app_repos",
            ["github_app_repo_id"],
            ["id"],
        )
        batch_op.create_index(
            "ix_project_github_repos_github_app_repo_id",
            ["github_app_repo_id"],
            unique=False,
        )
        batch_op.alter_column(
            "full_name", existing_type=sa.VARCHAR(length=255), nullable=True
        )

--------------------------------------------------------------------------------
/disco/alembic/versions/26877eda6774_0_17_0.py:
--------------------------------------------------------------------------------
"""0.17.0

Revision ID: 26877eda6774
Revises: b2c4ac1469de
Create Date: 2024-08-21 00:26:43.456565

"""

import sqlalchemy as sa
from alembic import op

revision = "26877eda6774"
down_revision = "b2c4ac1469de"
branch_labels = None
depends_on = None


def upgrade():
    op.create_table(
        "cors_origins",
        sa.Column("id", sa.String(length=32), nullable=False),
        sa.Column("created", sa.DateTime(), nullable=False),
        sa.Column("updated", sa.DateTime(), nullable=False),
        sa.Column("origin", sa.Unicode(length=255), nullable=False),
        sa.Column("by_api_key_id", sa.String(length=32), nullable=False),
        sa.ForeignKeyConstraint(
            ["by_api_key_id"],
            ["api_keys.id"],
            name=op.f("fk_cors_origins_by_api_key_id_api_keys"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_cors_origins")),
    )
    with op.batch_alter_table("cors_origins", schema=None) as batch_op:
        batch_op.create_index(
            batch_op.f("ix_cors_origins_by_api_key_id"), ["by_api_key_id"], unique=False
        )
        batch_op.create_index(
            batch_op.f("ix_cors_origins_origin"), ["origin"], unique=True
        )


def downgrade():
    with op.batch_alter_table("cors_origins", schema=None) as batch_op:
        batch_op.drop_index(batch_op.f("ix_cors_origins_origin"))
        batch_op.drop_index(batch_op.f("ix_cors_origins_by_api_key_id"))

    op.drop_table("cors_origins")

--------------------------------------------------------------------------------
/disco/utils/encryption.py:
--------------------------------------------------------------------------------
from base64 import standard_b64decode, standard_b64encode
from typing import overload

from cryptography.fernet import Fernet

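# encrypt/decrypt pass None through unchanged; the overloads give callers
# precise str-in/str-out types.
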
@overload
def encrypt(string: str) -> str: ...


@overload
def encrypt(string: None) -> None: ...


def encrypt(string: str | None) -> str | None:
    if string is None:
        return None
    cipher_suite = Fernet(_encryption_key())
    string_bytes = string.encode("utf-8")
    encoded_bytes = cipher_suite.encrypt(string_bytes)
    encoded_str = standard_b64encode(encoded_bytes).decode("ascii")
    return encoded_str


@overload
def decrypt(string: str) -> str: ...


@overload
def decrypt(string: None) -> None: ...


def decrypt(string: str | None) -> str | None:
    if string is None:
        return None
    cipher_suite = Fernet(_encryption_key())
    encoded_bytes = standard_b64decode(string)
    decoded_bytes = cipher_suite.decrypt(encoded_bytes)
    decoded_text = decoded_bytes.decode("utf-8")
    return decoded_text


def generate_key() -> bytes:
    return Fernet.generate_key()


_cached_encryption_key: bytes | None = None


def _encryption_key() -> bytes:
    global _cached_encryption_key
    if _cached_encryption_key is None:
        with open("/run/secrets/disco_encryption_key", "rb") as f:
            _cached_encryption_key = f.read()
    return _cached_encryption_key


def obfuscate(string: str) -> str:
    asterisks = "*" * (len(string) - 4)
    return f"{string[:3]}{asterisks}{string[-1]}"

--------------------------------------------------------------------------------
/disco/endpoints/corsorigins.py:
--------------------------------------------------------------------------------
import logging
from typing import Annotated

from fastapi import APIRouter, Depends
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession

from disco.auth import get_api_key
from disco.endpoints.dependencies import get_db
from disco.models import ApiKey
from disco.utils.corsorigins import allow_origin, get_all_cors_origins

log = logging.getLogger(__name__)

router = APIRouter()


class AddCorsOriginRequestBody(BaseModel):
    origin: str = Field(..., max_length=1024)


@router.get("/api/cors/origins", status_code=200, dependencies=[Depends(get_api_key)])
async def cors_origins_get(
    dbsession: Annotated[AsyncDBSession, Depends(get_db)],
):
    cors_origins = await get_all_cors_origins(dbsession)
    return {
        "corsOrigins": [
            {
                "id": o.id,
                "origin": o.origin,
            }
            for o in cors_origins
        ],
    }


@router.post("/api/cors/origins", status_code=200)
async def cors_origins_post(
    dbsession: Annotated[AsyncDBSession, Depends(get_db)],
    api_key: Annotated[ApiKey, Depends(get_api_key)],
    req_body: AddCorsOriginRequestBody,
):
    await allow_origin(dbsession=dbsession, origin=req_body.origin, by_api_key=api_key)
    cors_origins = await get_all_cors_origins(dbsession)
    return {
        "corsOrigins": [
            {
                "id": o.id,
                "origin": o.origin,
            }
            for o in cors_origins
        ],
    }

--------------------------------------------------------------------------------
/disco/models/githubappinstallation.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from datetime import datetime, timezone
from typing import TYPE_CHECKING

from sqlalchemy import (
| ForeignKey, 8 | Integer, 9 | UnicodeText, 10 | ) 11 | from sqlalchemy.orm import Mapped, mapped_column, relationship 12 | 13 | from disco.models.meta import Base, DateTimeTzAware 14 | 15 | if TYPE_CHECKING: 16 | from disco.models import GithubApp, GithubAppRepo 17 | 18 | 19 | class GithubAppInstallation(Base): 20 | __tablename__ = "github_app_installations" 21 | 22 | id: Mapped[int] = mapped_column(Integer, primary_key=True) # provided by Github 23 | created: Mapped[datetime] = mapped_column( 24 | DateTimeTzAware(), 25 | default=lambda: datetime.now(timezone.utc), 26 | nullable=False, 27 | ) 28 | updated: Mapped[datetime] = mapped_column( 29 | DateTimeTzAware(), 30 | default=lambda: datetime.now(timezone.utc), 31 | onupdate=lambda: datetime.now(timezone.utc), 32 | nullable=False, 33 | ) 34 | access_token: Mapped[str | None] = mapped_column(UnicodeText, nullable=True) 35 | access_token_expires: Mapped[datetime | None] = mapped_column( 36 | DateTimeTzAware(), nullable=True 37 | ) 38 | github_app_id: Mapped[int] = mapped_column( 39 | Integer, 40 | ForeignKey("github_apps.id"), 41 | nullable=False, 42 | index=True, 43 | ) 44 | 45 | github_app: Mapped[GithubApp] = relationship( 46 | "GithubApp", 47 | back_populates="installations", 48 | ) 49 | github_app_repos: Mapped[list[GithubAppRepo]] = relationship( 50 | "GithubAppRepo", 51 | back_populates="installation", 52 | ) 53 | 54 | def log(self): 55 | return f"GITHUB_APP_INSTALLATION_{self.id}" 56 | -------------------------------------------------------------------------------- /disco/utils/subprocess.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from asyncio import subprocess 4 | from typing import Sequence 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | async def call( 10 | args: Sequence[str], stdin: str | None = None 11 | ) -> tuple[list[str], list[str], subprocess.Process]: 12 | process = await asyncio.create_subprocess_exec( 13 | *args, 14 | stdin=subprocess.PIPE if stdin is not None else None, 15 | stdout=subprocess.PIPE, 16 | stderr=subprocess.PIPE, 17 | ) 18 | stdout, stderr = await process.communicate( 19 | input=stdin.encode("utf-8") if stdin is not None else None 20 | ) 21 | await process.wait() 22 | return decode_output(stdout), decode_output(stderr), process 23 | 24 | 25 | async def check_call( 26 | args: Sequence[str], stdin: str | None = None 27 | ) -> tuple[list[str], list[str], subprocess.Process]: 28 | stdout, stderr, process = await call(args=args, stdin=stdin) 29 | if process.returncode != 0: 30 | for line in stdout: 31 | log.info("Stdout: %s", line) 32 | for line in stderr: 33 | log.info("Stderr: %s", line) 34 | raise Exception(f"Process returned status {process.returncode}") 35 | return stdout, stderr, process 36 | 37 | 38 | def decode_output(output: bytes) -> list[str]: 39 | lines = [decode_text(line) for line in output.split(b"\n")] 40 | if len(lines[-1].strip()) == 0: 41 | lines = lines[:-1] 42 | return lines 43 | 44 | 45 | def decode_text(output_line: bytes) -> str: 46 | encodings = ["utf-8", "cp1252", "latin-1"] # latin-1 decodes any byte, so it must come last 47 | for encoding in encodings: 48 | try: 49 | return output_line.decode(encoding) 50 | except UnicodeDecodeError: 51 | pass 52 | return output_line.decode("utf-8", errors="replace") 53 | -------------------------------------------------------------------------------- /disco/alembic/versions/47da35039f6f_0_7_0b.py: -------------------------------------------------------------------------------- 1 | """0.7.0b 2 | 3 | Revision 
ID: 47da35039f6f 4 | Revises: 97e98737cba8 5 | Create Date: 2024-05-07 23:59:06.826118 6 | 7 | """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | revision = "47da35039f6f" 13 | down_revision = "97e98737cba8" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | with op.batch_alter_table("deployments", schema=None) as batch_op: 20 | batch_op.drop_column("domain") 21 | 22 | with op.batch_alter_table("github_apps", schema=None) as batch_op: 23 | batch_op.alter_column("owner_id", existing_type=sa.INTEGER(), nullable=False) 24 | batch_op.alter_column( 25 | "owner_login", existing_type=sa.VARCHAR(length=255), nullable=False 26 | ) 27 | batch_op.alter_column( 28 | "owner_type", existing_type=sa.VARCHAR(length=255), nullable=False 29 | ) 30 | 31 | with op.batch_alter_table("projects", schema=None) as batch_op: 32 | batch_op.drop_column("domain") 33 | 34 | 35 | def downgrade(): 36 | with op.batch_alter_table("projects", schema=None) as batch_op: 37 | batch_op.add_column(sa.Column("domain", sa.VARCHAR(length=255), nullable=True)) 38 | 39 | with op.batch_alter_table("github_apps", schema=None) as batch_op: 40 | batch_op.alter_column( 41 | "owner_type", existing_type=sa.VARCHAR(length=255), nullable=True 42 | ) 43 | batch_op.alter_column( 44 | "owner_login", existing_type=sa.VARCHAR(length=255), nullable=True 45 | ) 46 | batch_op.alter_column("owner_id", existing_type=sa.INTEGER(), nullable=True) 47 | 48 | with op.batch_alter_table("deployments", schema=None) as batch_op: 49 | batch_op.add_column(sa.Column("domain", sa.VARCHAR(length=255), nullable=True)) 50 | -------------------------------------------------------------------------------- /disco/utils/corsorigins.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import uuid 3 | from typing import Sequence 4 | 5 | from sqlalchemy import select 6 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 7 | from sqlalchemy.orm.session import Session as DBSession 8 | 9 | from disco.models import ApiKey, CorsOrigin 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | async def allow_origin( 15 | dbsession: AsyncDBSession, origin: str, by_api_key: ApiKey 16 | ) -> None: 17 | from disco.middleware import update_cors 18 | 19 | cors_origin = await get_cors_origin(dbsession, origin) 20 | if cors_origin is None: 21 | cors_origin = CorsOrigin( 22 | id=uuid.uuid4().hex, 23 | origin=origin, 24 | by_api_key=by_api_key, 25 | ) 26 | dbsession.add(cors_origin) 27 | all_origins = await get_all_cors_origins(dbsession) 28 | log.info( 29 | "Added CORS origin to allowed origins in database %s", cors_origin.log() 30 | ) 31 | origins = set([o.origin for o in all_origins] + [cors_origin.origin]) 32 | update_cors(list(origins)) 33 | else: 34 | log.info( 35 | "CORS origin already present in database, not adding %s", cors_origin.log() 36 | ) 37 | 38 | 39 | async def get_cors_origin(dbsession: AsyncDBSession, origin: str) -> CorsOrigin | None: 40 | stmt = select(CorsOrigin).where(CorsOrigin.origin == origin) 41 | result = await dbsession.execute(stmt) 42 | return result.scalars().first() 43 | 44 | 45 | async def get_all_cors_origins(dbsession: AsyncDBSession) -> Sequence[CorsOrigin]: 46 | stmt = select(CorsOrigin) 47 | result = await dbsession.execute(stmt) 48 | return result.scalars().all() 49 | 50 | 51 | def get_all_cors_origins_sync(dbsession: DBSession) -> Sequence[CorsOrigin]: 52 | stmt = select(CorsOrigin) 53 | result = dbsession.execute(stmt) 54 | 
return result.scalars().all() 55 | -------------------------------------------------------------------------------- /disco/models/projectenvironmentvariable.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import uuid 4 | from datetime import datetime, timezone 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import ForeignKey, String, Unicode 8 | from sqlalchemy.orm import Mapped, mapped_column, relationship 9 | 10 | if TYPE_CHECKING: 11 | from disco.models import ( 12 | ApiKey, 13 | Project, 14 | ) 15 | from disco.models.meta import Base, DateTimeTzAware 16 | 17 | 18 | class ProjectEnvironmentVariable(Base): 19 | __tablename__ = "project_env_variables" 20 | 21 | id: Mapped[str] = mapped_column( 22 | String(32), default=lambda: uuid.uuid4().hex, primary_key=True 23 | ) 24 | created: Mapped[datetime] = mapped_column( 25 | DateTimeTzAware(), 26 | default=lambda: datetime.now(timezone.utc), 27 | nullable=False, 28 | ) 29 | updated: Mapped[datetime] = mapped_column( 30 | DateTimeTzAware(), 31 | default=lambda: datetime.now(timezone.utc), 32 | onupdate=lambda: datetime.now(timezone.utc), 33 | nullable=False, 34 | ) 35 | name: Mapped[str] = mapped_column(String(255), nullable=False, index=True) 36 | value: Mapped[str] = mapped_column(Unicode(4000), nullable=False) 37 | project_id: Mapped[str] = mapped_column( 38 | String(32), 39 | ForeignKey("projects.id"), 40 | nullable=False, 41 | index=True, 42 | ) 43 | by_api_key_id: Mapped[str] = mapped_column( 44 | String(32), 45 | ForeignKey("api_keys.id"), 46 | nullable=False, 47 | index=True, 48 | ) 49 | 50 | project: Mapped[Project] = relationship( 51 | "Project", 52 | back_populates="env_variables", 53 | ) 54 | by_api_key: Mapped[ApiKey] = relationship( 55 | "ApiKey", 56 | back_populates="env_variables", 57 | ) 58 | 59 | def log(self): 60 | return f"PROJECT_ENV_VAR_{self.name}" 61 | -------------------------------------------------------------------------------- /disco/models/githubapp.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from datetime import datetime, timezone 4 | from typing import TYPE_CHECKING 5 | 6 | from sqlalchemy import Integer, String, Unicode, UnicodeText 7 | from sqlalchemy.orm import Mapped, mapped_column, relationship 8 | 9 | from disco.models.meta import Base, DateTimeTzAware 10 | 11 | if TYPE_CHECKING: 12 | from disco.models import GithubAppInstallation 13 | 14 | 15 | class GithubApp(Base): 16 | __tablename__ = "github_apps" 17 | 18 | id: Mapped[int] = mapped_column(Integer, primary_key=True) # provided by Github 19 | created: Mapped[datetime] = mapped_column( 20 | DateTimeTzAware(), 21 | default=lambda: datetime.now(timezone.utc), 22 | nullable=False, 23 | ) 24 | updated: Mapped[datetime] = mapped_column( 25 | DateTimeTzAware(), 26 | default=lambda: datetime.now(timezone.utc), 27 | onupdate=lambda: datetime.now(timezone.utc), 28 | nullable=False, 29 | ) 30 | slug: Mapped[str] = mapped_column(Unicode(255), nullable=False) 31 | name: Mapped[str] = mapped_column(Unicode(255), nullable=False) 32 | webhook_secret: Mapped[str] = mapped_column(String(32), nullable=False) 33 | pem: Mapped[str] = mapped_column(UnicodeText, nullable=False) 34 | client_secret: Mapped[str] = mapped_column(String(32), nullable=False) 35 | html_url: Mapped[str] = mapped_column(Unicode(2000), nullable=False) 36 | owner_id: Mapped[int] = mapped_column(Integer, nullable=False) 37 | 
owner_login: Mapped[str] = mapped_column(Unicode(255), nullable=False) 38 | owner_type: Mapped[str] = mapped_column(Unicode(255), nullable=False) 39 | app_info: Mapped[str] = mapped_column(UnicodeText, nullable=False) 40 | 41 | installations: Mapped[list[GithubAppInstallation]] = relationship( 42 | "GithubAppInstallation", 43 | back_populates="github_app", 44 | ) 45 | 46 | def log(self): 47 | return f"GITHUB_APP_{self.id} ({self.name})" 48 | -------------------------------------------------------------------------------- /disco/models/apikeyinvite.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from datetime import datetime, timezone 4 | from secrets import token_hex 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import ForeignKey, String, Unicode 8 | from sqlalchemy.orm import Mapped, mapped_column, relationship 9 | 10 | if TYPE_CHECKING: 11 | from disco.models import ApiKey 12 | from disco.models.meta import Base, DateTimeTzAware 13 | 14 | 15 | class ApiKeyInvite(Base): 16 | __tablename__ = "api_key_invites" 17 | 18 | id: Mapped[str] = mapped_column( 19 | String(32), default=lambda: token_hex(16), primary_key=True 20 | ) 21 | created: Mapped[datetime] = mapped_column( 22 | DateTimeTzAware(), 23 | default=lambda: datetime.now(timezone.utc), 24 | nullable=False, 25 | ) 26 | updated: Mapped[datetime] = mapped_column( 27 | DateTimeTzAware(), 28 | default=lambda: datetime.now(timezone.utc), 29 | onupdate=lambda: datetime.now(timezone.utc), 30 | nullable=False, 31 | ) 32 | name: Mapped[str] = mapped_column(Unicode(255), nullable=False) 33 | expires: Mapped[datetime] = mapped_column(DateTimeTzAware(), nullable=False) 34 | by_api_key_id: Mapped[str] = mapped_column( 35 | String(32), 36 | ForeignKey("api_keys.id"), 37 | nullable=False, 38 | index=True, 39 | ) 40 | api_key_id: Mapped[str | None] = mapped_column( 41 | String(32), 42 | ForeignKey("api_keys.id"), 43 | nullable=True, 44 | index=True, 45 | ) 46 | 47 | by_api_key: Mapped[ApiKey] = relationship( 48 | "ApiKey", 49 | foreign_keys=by_api_key_id, 50 | back_populates="created_api_key_invites", 51 | ) 52 | api_key: Mapped[ApiKey | None] = relationship( 53 | "ApiKey", 54 | foreign_keys=api_key_id, 55 | back_populates="from_invite", 56 | ) 57 | 58 | def log(self): 59 | return f"API_KEY_INVITE_{self.id} ({self.name})" 60 | -------------------------------------------------------------------------------- /disco/utils/projectkeyvalues.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from sqlalchemy.orm.session import Session as DBSession 4 | 5 | from disco.models import ApiKey, Project, ProjectKeyValue 6 | from disco.utils.encryption import decrypt, encrypt 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | def get_value(dbsession: DBSession, project: Project, key: str) -> str | None: 12 | key_value = dbsession.query(ProjectKeyValue).get( 13 | {"key": key, "project_id": project.id} 14 | ) 15 | if key_value is None: 16 | return None 17 | return decrypt(key_value.value) 18 | 19 | 20 | def get_all_key_values_for_project( 21 | dbsession: DBSession, project: Project 22 | ) -> list[ProjectKeyValue]: 23 | return ( 24 | dbsession.query(ProjectKeyValue) 25 | .filter(ProjectKeyValue.project == project) 26 | .all() 27 | ) 28 | 29 | 30 | def set_value( 31 | dbsession: DBSession, 32 | project: Project, 33 | key: str, 34 | value: str | None, 35 | by_api_key: ApiKey, 36 | ) -> None: 37 | log.info( 38 
| "Project key value set %s (%s) by %s", key, project.log(), by_api_key.log() 39 | ) 40 | key_value = dbsession.query(ProjectKeyValue).get( 41 | {"key": key, "project_id": project.id} 42 | ) 43 | if key_value is not None: 44 | key_value.value = encrypt(value) 45 | else: 46 | key_value = ProjectKeyValue( 47 | project=project, 48 | key=key, 49 | value=encrypt(value), 50 | ) 51 | dbsession.add(key_value) 52 | 53 | 54 | def delete_value( 55 | dbsession: DBSession, project: Project, key: str, by_api_key: ApiKey 56 | ) -> None: 57 | key_value = dbsession.query(ProjectKeyValue).get( 58 | {"key": key, "project_id": project.id} 59 | ) 60 | if key_value is not None: 61 | log.info( 62 | "Project key value deleted %s (%s) by %s", 63 | key, 64 | project.log(), 65 | by_api_key.log(), 66 | ) 67 | dbsession.delete(key_value) 68 | -------------------------------------------------------------------------------- /dev-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e # Exit immediately if a command exits with a non-zero status. 3 | set -o pipefail # The return value of a pipeline is the status of the last command to exit with a non-zero status. 4 | 5 | # --- Configuration --- 6 | if [ -z "$1" ]; then 7 | echo "Usage: $0 " 8 | echo "Example: $0 10.123.123.123" 9 | exit 1 10 | fi 11 | 12 | REMOTE_USER="root" 13 | REMOTE_HOST="$1" 14 | LOCAL_PATH="." # Assumes you run this from the root of the disco repo 15 | REMOTE_PATH="/root/disco-dev-src" # A dedicated directory on the server for our dev source 16 | DEV_IMAGE_TAG="disco-daemon:dev-latest" # A unique tag for our development image 17 | 18 | # --- Logging --- 19 | echo "INFO: Starting Disco Daemon development deployment..." 20 | echo "----------------------------------------------------" 21 | echo " Remote Host: $REMOTE_HOST" 22 | echo " Local Path: $LOCAL_PATH" 23 | echo " Remote Path: $REMOTE_PATH" 24 | echo " Dev Image Tag: $DEV_IMAGE_TAG" 25 | echo "----------------------------------------------------" 26 | 27 | # --- Phase 1: Sync Code --- 28 | echo "" 29 | echo "INFO: Phase 1: Synchronizing source code to remote server..." 30 | rsync -avz --delete \ 31 | --exclude '.git/' \ 32 | --exclude '.venv/' \ 33 | --exclude '__pycache__/' \ 34 | --exclude '*.pyc' \ 35 | "$LOCAL_PATH/" "${REMOTE_USER}@${REMOTE_HOST}:${REMOTE_PATH}/" 36 | echo "SUCCESS: Code synchronization complete." 37 | 38 | # --- Phase 2: Remote Build --- 39 | echo "" 40 | echo "INFO: Phase 2: Building development image on remote server..." 41 | ssh "${REMOTE_USER}@${REMOTE_HOST}" "DOCKER_BUILDKIT=1 docker build -t ${DEV_IMAGE_TAG} ${REMOTE_PATH}/" 42 | echo "SUCCESS: Remote build complete. Image tagged as ${DEV_IMAGE_TAG}." 43 | 44 | # --- Phase 3: Hot-Swap Service --- 45 | echo "" 46 | echo "INFO: Phase 3: Updating the 'disco' service to use the new image..." 47 | ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker service update --image ${DEV_IMAGE_TAG} --force disco" 48 | echo "SUCCESS: 'disco' service update initiated." 49 | echo "" 50 | echo "INFO: Tailing logs to confirm restart..." 
51 | ssh "${REMOTE_USER}@${REMOTE_HOST}" "docker service logs --tail 20 disco" 52 | -------------------------------------------------------------------------------- /disco/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from contextlib import asynccontextmanager 4 | 5 | from fastapi import FastAPI 6 | 7 | from disco.endpoints import ( 8 | apikeyinvites, 9 | apikeys, 10 | cgi, 11 | corsorigins, 12 | deployments, 13 | envvariables, 14 | events, 15 | githubapps, 16 | logs, 17 | meta, 18 | nodes, 19 | projectdomains, 20 | projectkeyvalues, 21 | projects, 22 | run, 23 | scale, 24 | syslog, 25 | tunnels, 26 | volumes, 27 | ) 28 | from disco.middleware import middleware 29 | from disco.utils.asyncworker import async_worker 30 | from disco.utils.deployments import ( 31 | cleanup_deployments_on_disco_boot, 32 | enqueue_deployments_on_disco_boot, 33 | ) 34 | 35 | logging.basicConfig(level=logging.INFO) 36 | 37 | log = logging.getLogger(__name__) 38 | 39 | log.info("Initializing Disco daemon") 40 | 41 | 42 | @asynccontextmanager 43 | async def lifespan(app: FastAPI): 44 | loop = asyncio.get_running_loop() 45 | async_worker.set_loop(loop) 46 | worker_task = loop.create_task(async_worker.work()) 47 | await cleanup_deployments_on_disco_boot() 48 | await enqueue_deployments_on_disco_boot() 49 | yield 50 | async_worker.stop() 51 | await worker_task 52 | 53 | 54 | app = FastAPI(lifespan=lifespan, middleware=middleware) 55 | 56 | app.include_router(meta.router) 57 | app.include_router(projects.router) 58 | app.include_router(volumes.router) 59 | app.include_router(deployments.router) 60 | app.include_router(run.router) 61 | app.include_router(envvariables.router) 62 | app.include_router(projectdomains.router) 63 | app.include_router(projectkeyvalues.router) 64 | app.include_router(logs.router) 65 | app.include_router(nodes.router) 66 | app.include_router(scale.router) 67 | app.include_router(apikeys.router) 68 | app.include_router(apikeyinvites.router) 69 | app.include_router(syslog.router) 70 | app.include_router(tunnels.router) 71 | app.include_router(corsorigins.router) 72 | app.include_router(cgi.router) 73 | app.include_router(githubapps.router) 74 | app.include_router(events.router) 75 | 76 | 77 | @app.get("/") 78 | def root_get(): 79 | return {"disco": True} 80 | 81 | 82 | log.info("Ready to disco") 83 | -------------------------------------------------------------------------------- /disco/utils/keyvalues.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 2 | from sqlalchemy.orm.session import Session as DBSession 3 | 4 | from disco.models import KeyValue 5 | 6 | 7 | class KeyNotFoundError(Exception): 8 | pass 9 | 10 | 11 | def get_value_str_sync(dbsession: DBSession, key: str) -> str: 12 | key_value = dbsession.query(KeyValue).get(key) 13 | if key_value is None: 14 | raise KeyNotFoundError(f"Key {key} not found") 15 | return key_value.value 16 | 17 | 18 | async def get_value_str(dbsession: AsyncDBSession, key: str) -> str: 19 | key_value = await dbsession.get(KeyValue, key) 20 | if key_value is None: 21 | raise KeyNotFoundError(f"Key {key} not found") 22 | if key_value.value is None: 23 | raise KeyNotFoundError(f"Key {key} has value None") 24 | return key_value.value 25 | 26 | 27 | async def get_value(dbsession: AsyncDBSession, key: str) -> str | None: 28 | key_value = await dbsession.get(KeyValue, key) 29 | if 
key_value is None: 30 | return None 31 | return key_value.value 32 | 33 | 34 | def get_value_sync(dbsession: DBSession, key: str) -> str | None: 35 | key_value = dbsession.query(KeyValue).get(key) 36 | if key_value is None: 37 | return None 38 | return key_value.value 39 | 40 | 41 | def set_value_sync(dbsession: DBSession, key: str, value: str | None) -> None: 42 | key_value = dbsession.query(KeyValue).get(key) 43 | if key_value is not None: 44 | key_value.value = value 45 | else: 46 | key_value = KeyValue( 47 | key=key, 48 | value=value, 49 | ) 50 | dbsession.add(key_value) 51 | 52 | 53 | async def set_value(dbsession: AsyncDBSession, key: str, value: str | None) -> None: 54 | key_value = await dbsession.get(KeyValue, key) 55 | if key_value is not None: 56 | key_value.value = value 57 | else: 58 | key_value = KeyValue( 59 | key=key, 60 | value=value, 61 | ) 62 | dbsession.add(key_value) 63 | 64 | 65 | def delete_value_sync(dbsession: DBSession, key: str) -> None: 66 | key_value = dbsession.query(KeyValue).get(key) 67 | if key_value is not None: 68 | dbsession.delete(key_value) 69 | -------------------------------------------------------------------------------- /disco/endpoints/apikeys.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Annotated 3 | 4 | from fastapi import APIRouter, Depends, HTTPException, Path 5 | from pydantic import BaseModel, Field 6 | from sqlalchemy.orm.session import Session as DBSession 7 | 8 | from disco.auth import get_api_key_sync 9 | from disco.endpoints.dependencies import get_db_sync 10 | from disco.models import ApiKey 11 | from disco.utils.apikeys import ( 12 | delete_api_key, 13 | get_all_api_keys, 14 | get_api_key_by_public_key_sync, 15 | ) 16 | from disco.utils.encryption import obfuscate 17 | 18 | log = logging.getLogger(__name__) 19 | 20 | router = APIRouter(dependencies=[Depends(get_api_key_sync)]) 21 | 22 | 23 | def get_api_key_from_url( 24 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 25 | public_key: Annotated[str, Path()], 26 | ): 27 | api_key = get_api_key_by_public_key_sync(dbsession, public_key) 28 | if api_key is None: 29 | raise HTTPException(status_code=404) 30 | yield api_key 31 | 32 | 33 | @router.get("/api/api-keys") 34 | def api_keys_get(dbsession: Annotated[DBSession, Depends(get_db_sync)]): 35 | api_keys = get_all_api_keys(dbsession) 36 | return { 37 | "apiKeys": [ 38 | { 39 | "name": api_key.name, 40 | "publicKey": api_key.public_key, 41 | "privateKey": obfuscate(api_key.id), 42 | "lastUsed": api_key.usages[0].created.isoformat() 43 | if len(api_key.usages) > 0 44 | else None, 45 | } 46 | for api_key in api_keys 47 | ], 48 | } 49 | 50 | 51 | class NewApiKeyRequestBody(BaseModel): 52 | name: str = Field(..., max_length=255) 53 | 54 | 55 | @router.delete("/api/api-keys/{public_key}", status_code=200) 56 | def api_key_delete( 57 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 58 | api_key: Annotated[ApiKey, Depends(get_api_key_from_url)], 59 | by_api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 60 | ): 61 | api_keys = get_all_api_keys(dbsession) 62 | if len(api_keys) == 1: 63 | assert api_key == api_keys[0] 64 | raise HTTPException(422, "Can't delete last API key.") 65 | delete_api_key(api_key=api_key, by_api_key=by_api_key) 66 | return {"deleted": True} 67 | -------------------------------------------------------------------------------- /disco/alembic/versions/97e98737cba8_0_7_0a.py: 
-------------------------------------------------------------------------------- 1 | """0.7.0 Part A 2 | 3 | Revision ID: 97e98737cba8 4 | Revises: 5540c20f9acd 5 | Create Date: 2024-05-07 19:28:07.696067 6 | 7 | """ 8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | revision = "97e98737cba8" 13 | down_revision = "5540c20f9acd" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.create_table( 20 | "project_domains", 21 | sa.Column("id", sa.String(length=32), nullable=False), 22 | sa.Column("created", sa.DateTime(), nullable=False), 23 | sa.Column("updated", sa.DateTime(), nullable=False), 24 | sa.Column("name", sa.Unicode(length=255), nullable=False), 25 | sa.Column("project_id", sa.String(length=32), nullable=False), 26 | sa.ForeignKeyConstraint( 27 | ["project_id"], 28 | ["projects.id"], 29 | name=op.f("fk_project_domains_project_id_projects"), 30 | ), 31 | sa.PrimaryKeyConstraint("id", name=op.f("pk_project_domains")), 32 | ) 33 | with op.batch_alter_table("project_domains", schema=None) as batch_op: 34 | batch_op.create_index( 35 | batch_op.f("ix_project_domains_name"), ["name"], unique=True 36 | ) 37 | batch_op.create_index( 38 | batch_op.f("ix_project_domains_project_id"), ["project_id"], unique=False 39 | ) 40 | 41 | with op.batch_alter_table("github_apps", schema=None) as batch_op: 42 | batch_op.add_column(sa.Column("owner_id", sa.Integer(), nullable=True)) 43 | batch_op.add_column( 44 | sa.Column("owner_login", sa.Unicode(length=255), nullable=True) 45 | ) 46 | batch_op.add_column( 47 | sa.Column("owner_type", sa.Unicode(length=255), nullable=True) 48 | ) 49 | 50 | 51 | def downgrade(): 52 | with op.batch_alter_table("github_apps", schema=None) as batch_op: 53 | batch_op.drop_column("owner_type") 54 | batch_op.drop_column("owner_login") 55 | batch_op.drop_column("owner_id") 56 | 57 | with op.batch_alter_table("project_domains", schema=None) as batch_op: 58 | batch_op.drop_index(batch_op.f("ix_project_domains_project_id")) 59 | batch_op.drop_index(batch_op.f("ix_project_domains_name")) 60 | 61 | op.drop_table("project_domains") 62 | -------------------------------------------------------------------------------- /disco/models/commandrun.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import uuid 4 | from datetime import datetime, timezone 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import ( 8 | ForeignKey, 9 | Integer, 10 | String, 11 | Unicode, 12 | UnicodeText, 13 | ) 14 | from sqlalchemy.orm import Mapped, mapped_column, relationship 15 | 16 | if TYPE_CHECKING: 17 | from disco.models import ApiKey, Deployment, Project 18 | from disco.models.meta import Base, DateTimeTzAware 19 | 20 | 21 | class CommandRun(Base): 22 | __tablename__ = "command_runs" 23 | 24 | id: Mapped[str] = mapped_column( 25 | String(32), default=lambda: uuid.uuid4().hex, primary_key=True 26 | ) 27 | created: Mapped[datetime] = mapped_column( 28 | DateTimeTzAware(), default=lambda: datetime.now(timezone.utc) 29 | ) 30 | updated: Mapped[datetime] = mapped_column( 31 | DateTimeTzAware(), 32 | default=lambda: datetime.now(timezone.utc), 33 | onupdate=lambda: datetime.now(timezone.utc), 34 | ) 35 | number: Mapped[int] = mapped_column(Integer, nullable=False, index=True) 36 | service: Mapped[str] = mapped_column(Unicode(), nullable=False) 37 | command: Mapped[str] = mapped_column(UnicodeText(), nullable=False) 38 | status: Mapped[str] = mapped_column(String(32), 
nullable=False) 39 | project_id: Mapped[str] = mapped_column( 40 | String(32), 41 | ForeignKey("projects.id"), 42 | nullable=False, 43 | index=True, 44 | ) 45 | deployment_id: Mapped[str | None] = mapped_column( 46 | String(32), 47 | ForeignKey("deployments.id"), 48 | nullable=True, 49 | index=True, 50 | ) 51 | by_api_key_id: Mapped[str] = mapped_column( 52 | String(32), 53 | ForeignKey("api_keys.id"), 54 | nullable=False, 55 | index=True, 56 | ) 57 | 58 | project: Mapped[Project] = relationship( 59 | "Project", 60 | back_populates="command_runs", 61 | ) 62 | by_api_key: Mapped[ApiKey] = relationship( 63 | "ApiKey", 64 | back_populates="command_runs", 65 | ) 66 | deployment: Mapped[Deployment] = relationship( 67 | "Deployment", 68 | back_populates="command_runs", 69 | ) 70 | 71 | def log(self): 72 | return f"COMMAND_RUN_{self.id} ({self.project.name} {self.number})" 73 | -------------------------------------------------------------------------------- /disco/models/project.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import uuid 4 | from datetime import datetime, timezone 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import String, Unicode 8 | from sqlalchemy.orm import Mapped, mapped_column, relationship 9 | 10 | if TYPE_CHECKING: 11 | from disco.models import ( 12 | CommandRun, 13 | Deployment, 14 | ProjectDomain, 15 | ProjectEnvironmentVariable, 16 | ProjectGithubRepo, 17 | ProjectKeyValue, 18 | ) 19 | from disco.models.meta import Base, DateTimeTzAware 20 | 21 | 22 | class Project(Base): 23 | __tablename__ = "projects" 24 | 25 | id: Mapped[str] = mapped_column( 26 | String(32), default=lambda: uuid.uuid4().hex, primary_key=True 27 | ) 28 | created: Mapped[datetime] = mapped_column( 29 | DateTimeTzAware(), 30 | default=lambda: datetime.now(timezone.utc), 31 | nullable=False, 32 | ) 33 | updated: Mapped[datetime] = mapped_column( 34 | DateTimeTzAware(), 35 | default=lambda: datetime.now(timezone.utc), 36 | onupdate=lambda: datetime.now(timezone.utc), 37 | nullable=False, 38 | ) 39 | name: Mapped[str] = mapped_column(Unicode(255), nullable=False) 40 | deployment_type: Mapped[str | None] = mapped_column(Unicode(255), nullable=True) 41 | 42 | command_runs: Mapped[list[CommandRun]] = relationship( 43 | "CommandRun", back_populates="project", order_by="CommandRun.number.desc()" 44 | ) 45 | deployments: Mapped[list[Deployment]] = relationship( 46 | "Deployment", back_populates="project", order_by="Deployment.number.desc()" 47 | ) 48 | env_variables: Mapped[list[ProjectEnvironmentVariable]] = relationship( 49 | "ProjectEnvironmentVariable", 50 | back_populates="project", 51 | ) 52 | key_values: Mapped[list[ProjectKeyValue]] = relationship( 53 | "ProjectKeyValue", 54 | back_populates="project", 55 | ) 56 | github_repo: Mapped[ProjectGithubRepo | None] = relationship( 57 | "ProjectGithubRepo", 58 | back_populates="project", 59 | uselist=False, 60 | ) 61 | domains: Mapped[list[ProjectDomain]] = relationship( 62 | "ProjectDomain", back_populates="project", order_by="ProjectDomain.name" 63 | ) 64 | 65 | def log(self): 66 | return f"PROJECT_{self.id} ({self.name})" 67 | -------------------------------------------------------------------------------- /disco/alembic/versions/d0cba3cd3238_0_3_0.py: -------------------------------------------------------------------------------- 1 | """0.3.0 2 | 3 | Revision ID: d0cba3cd3238 4 | Revises: eba27af20db2 5 | Create Date: 2024-04-03 01:34:39.255972 6 | 7 | """ 
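# This revision adds the api_key_invites table and renames api_keys.log_id to public_key (also adding a "deleted" timestamp and an index on public_key).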
8 | 9 | import sqlalchemy as sa 10 | from alembic import op 11 | 12 | revision = "d0cba3cd3238" 13 | down_revision = "eba27af20db2" 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.create_table( 20 | "api_key_invites", 21 | sa.Column("id", sa.String(length=32), nullable=False), 22 | sa.Column("created", sa.DateTime(), nullable=True), 23 | sa.Column("updated", sa.DateTime(), nullable=True), 24 | sa.Column("name", sa.Unicode(length=255), nullable=False), 25 | sa.Column("expires", sa.DateTime(), nullable=False), 26 | sa.Column("by_api_key_id", sa.String(length=32), nullable=False), 27 | sa.Column("api_key_id", sa.String(length=32), nullable=True), 28 | sa.ForeignKeyConstraint( 29 | ["api_key_id"], 30 | ["api_keys.id"], 31 | name=op.f("fk_api_key_invites_api_key_id_api_keys"), 32 | ), 33 | sa.ForeignKeyConstraint( 34 | ["by_api_key_id"], 35 | ["api_keys.id"], 36 | name=op.f("fk_api_key_invites_by_api_key_id_api_keys"), 37 | ), 38 | sa.PrimaryKeyConstraint("id", name=op.f("pk_api_key_invites")), 39 | ) 40 | op.create_index( 41 | op.f("ix_api_key_invites_api_key_id"), 42 | "api_key_invites", 43 | ["api_key_id"], 44 | unique=False, 45 | ) 46 | op.create_index( 47 | op.f("ix_api_key_invites_by_api_key_id"), 48 | "api_key_invites", 49 | ["by_api_key_id"], 50 | unique=False, 51 | ) 52 | op.execute("ALTER TABLE api_keys RENAME COLUMN log_id TO public_key;") 53 | op.add_column("api_keys", sa.Column("deleted", sa.DateTime(), nullable=True)) 54 | op.create_index( 55 | op.f("ix_api_keys_public_key"), "api_keys", ["public_key"], unique=False 56 | ) 57 | 58 | 59 | def downgrade(): 60 | op.drop_index(op.f("ix_api_keys_public_key"), table_name="api_keys") 61 | op.drop_column("api_keys", "deleted") 62 | op.execute("ALTER TABLE api_keys RENAME COLUMN public_key TO log_id;") 63 | op.drop_index( 64 | op.f("ix_api_key_invites_by_api_key_id"), table_name="api_key_invites" 65 | ) 66 | op.drop_index(op.f("ix_api_key_invites_api_key_id"), table_name="api_key_invites") 67 | op.drop_table("api_key_invites") 68 | -------------------------------------------------------------------------------- /disco/endpoints/syslog.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from enum import Enum 3 | from typing import Annotated 4 | 5 | from fastapi import APIRouter, Depends 6 | from pydantic import BaseModel, Field 7 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 8 | 9 | from disco.auth import get_api_key_wo_tx 10 | from disco.endpoints.dependencies import get_db 11 | from disco.models.db import AsyncSession 12 | from disco.utils import keyvalues 13 | from disco.utils.apikeys import get_valid_api_key_by_id 14 | from disco.utils.syslog import ( 15 | add_syslog_url, 16 | get_syslog_urls, 17 | remove_syslog_url, 18 | set_syslog_services, 19 | ) 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | router = APIRouter(dependencies=[Depends(get_api_key_wo_tx)]) 24 | 25 | 26 | class SyslogAction(Enum): 27 | add = "add" 28 | remove = "remove" 29 | 30 | 31 | class AddRemoveSyslogReqBody(BaseModel): 32 | action: SyslogAction 33 | url: str = Field(..., pattern=r"^syslog(\+tls)?://\S+:\d+$") 34 | 35 | 36 | @router.post("/api/syslog") 37 | async def syslog_post( 38 | api_key_id: Annotated[str, Depends(get_api_key_wo_tx)], 39 | add_remove_syslog: AddRemoveSyslogReqBody, 40 | ): 41 | async with AsyncSession.begin() as dbsession: 42 | api_key = await get_valid_api_key_by_id(dbsession, api_key_id) 43 | assert api_key is not None 
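        # Both branches below persist the change and return the complete updated list of syslog URLs, which is then pushed to the logging services.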
44 | if add_remove_syslog.action == SyslogAction.add: 45 | syslog_urls = await add_syslog_url( 46 | dbsession, add_remove_syslog.url, api_key 47 | ) 48 | else: 49 | assert add_remove_syslog.action == SyslogAction.remove 50 | syslog_urls = await remove_syslog_url( 51 | dbsession, add_remove_syslog.url, api_key 52 | ) 53 | disco_host = await keyvalues.get_value_str(dbsession, "DISCO_HOST") 54 | await set_syslog_services(disco_host=disco_host, syslog_urls=syslog_urls) 55 | return { 56 | "urls": [ 57 | syslog_url["url"] 58 | for syslog_url in syslog_urls 59 | if syslog_url["type"] != "CORE" 60 | ], 61 | } 62 | 63 | 64 | @router.get("/api/syslog") 65 | async def syslog_get( 66 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 67 | ): 68 | syslog_urls = await get_syslog_urls(dbsession) 69 | return { 70 | "urls": [ 71 | syslog_url["url"] 72 | for syslog_url in syslog_urls 73 | if syslog_url["type"] != "CORE" 74 | ], 75 | } 76 | -------------------------------------------------------------------------------- /disco/utils/envvariables.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | from sqlalchemy import select 4 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 5 | from sqlalchemy.orm.session import Session as DBSession 6 | 7 | from disco.models import ApiKey, Project, ProjectEnvironmentVariable 8 | from disco.utils import events 9 | from disco.utils.encryption import encrypt 10 | 11 | 12 | async def get_env_variable_by_name( 13 | dbsession: AsyncDBSession, 14 | project: Project, 15 | name: str, 16 | ) -> ProjectEnvironmentVariable | None: 17 | stmt = ( 18 | select(ProjectEnvironmentVariable) 19 | .where(ProjectEnvironmentVariable.project == project) 20 | .where(ProjectEnvironmentVariable.name == name) 21 | .limit(1) 22 | ) 23 | result = await dbsession.execute(stmt) 24 | return result.scalars().first() 25 | 26 | 27 | def get_env_variables_for_project_sync( 28 | dbsession: DBSession, project: Project 29 | ) -> list[ProjectEnvironmentVariable]: 30 | return ( 31 | dbsession.query(ProjectEnvironmentVariable) 32 | .filter(ProjectEnvironmentVariable.project == project) 33 | .order_by(ProjectEnvironmentVariable.name) 34 | .all() 35 | ) 36 | 37 | 38 | async def set_env_variables( 39 | dbsession: AsyncDBSession, 40 | project: Project, 41 | env_variables: list[tuple[str, str]], 42 | by_api_key: ApiKey, 43 | ) -> None: 44 | for name, value in env_variables: 45 | existed = False 46 | for env_variable in await project.awaitable_attrs.env_variables: 47 | if env_variable.name == name: 48 | existed = True 49 | env_variable.value = encrypt(value) 50 | env_variable.by_api_key = by_api_key 51 | events.env_variable_updated(project_name=project.name, env_var=name) 52 | if not existed: 53 | env_variable = ProjectEnvironmentVariable( 54 | id=uuid.uuid4().hex, 55 | name=name, 56 | value=encrypt(value), 57 | project=project, 58 | by_api_key=by_api_key, 59 | ) 60 | dbsession.add(env_variable) 61 | events.env_variable_created(project_name=project.name, env_var=name) 62 | 63 | 64 | async def delete_env_variable( 65 | dbsession: AsyncDBSession, 66 | env_variable: ProjectEnvironmentVariable, 67 | ) -> None: 68 | project: Project = await env_variable.awaitable_attrs.project 69 | events.env_variable_removed(project_name=project.name, env_var=env_variable.name) 70 | await dbsession.delete(env_variable) 71 | -------------------------------------------------------------------------------- /disco/models/apikey.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from datetime import datetime, timezone 4 | from secrets import token_hex 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import String, Unicode 8 | from sqlalchemy.orm import Mapped, mapped_column, relationship 9 | 10 | if TYPE_CHECKING: 11 | from disco.models import ( 12 | ApiKeyInvite, 13 | ApiKeyUsage, 14 | CommandRun, 15 | CorsOrigin, 16 | Deployment, 17 | ProjectEnvironmentVariable, 18 | ) 19 | 20 | from disco.models.meta import Base, DateTimeTzAware 21 | 22 | 23 | class ApiKey(Base): 24 | __tablename__ = "api_keys" 25 | 26 | id: Mapped[str] = mapped_column( 27 | String(32), default=lambda: token_hex(16), primary_key=True 28 | ) 29 | created: Mapped[datetime] = mapped_column( 30 | DateTimeTzAware(), 31 | default=lambda: datetime.now(timezone.utc), 32 | nullable=False, 33 | ) 34 | updated: Mapped[datetime] = mapped_column( 35 | DateTimeTzAware(), 36 | default=lambda: datetime.now(timezone.utc), 37 | onupdate=lambda: datetime.now(timezone.utc), 38 | nullable=False, 39 | ) 40 | name: Mapped[str] = mapped_column(Unicode(255), nullable=False) 41 | public_key: Mapped[str] = mapped_column( 42 | String(32), default=lambda: token_hex(16), nullable=False, index=True 43 | ) 44 | deleted: Mapped[datetime | None] = mapped_column(DateTimeTzAware()) 45 | 46 | created_api_key_invites: Mapped[list[ApiKeyInvite]] = relationship( 47 | "ApiKeyInvite", 48 | foreign_keys="ApiKeyInvite.by_api_key_id", 49 | back_populates="by_api_key", 50 | ) 51 | created_cors_origins: Mapped[list[CorsOrigin]] = relationship( 52 | "CorsOrigin", 53 | foreign_keys="CorsOrigin.by_api_key_id", 54 | back_populates="by_api_key", 55 | ) 56 | from_invite: Mapped[ApiKeyInvite | None] = relationship( 57 | "ApiKeyInvite", foreign_keys="ApiKeyInvite.api_key_id", back_populates="api_key" 58 | ) 59 | command_runs: Mapped[list[CommandRun]] = relationship( 60 | "CommandRun", back_populates="by_api_key", order_by="CommandRun.number.desc()" 61 | ) 62 | deployments: Mapped[list[Deployment]] = relationship( 63 | "Deployment", order_by="Deployment.number.desc()" 64 | ) 65 | env_variables: Mapped[list[ProjectEnvironmentVariable]] = relationship( 66 | "ProjectEnvironmentVariable", back_populates="by_api_key" 67 | ) 68 | usages: Mapped[list[ApiKeyUsage]] = relationship( 69 | "ApiKeyUsage", order_by="desc(ApiKeyUsage.created)", back_populates="api_key" 70 | ) 71 | 72 | def log(self): 73 | return f"API_KEY_{self.public_key} ({self.name})" 74 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file was autogenerated by uv via the following command: 2 | # uv pip compile requirements.in -o requirements.txt 3 | aiofiles==24.1.0 4 | # via -r requirements.in 5 | aiohappyeyeballs==2.4.4 6 | # via aiohttp 7 | aiohttp==3.11.13 8 | # via -r requirements.in 9 | aiosignal==1.3.2 10 | # via aiohttp 11 | aiosqlite==0.21.0 12 | # via -r requirements.in 13 | alembic==1.15.1 14 | # via -r requirements.in 15 | annotated-types==0.6.0 16 | # via pydantic 17 | anyio==4.8.0 18 | # via 19 | # sse-starlette 20 | # starlette 21 | attrs==25.1.0 22 | # via aiohttp 23 | bcrypt==4.3.0 24 | # via -r requirements.in 25 | certifi==2024.2.2 26 | # via requests 27 | cffi==1.16.0 28 | # via cryptography 29 | charset-normalizer==3.3.2 30 | # via requests 31 | click==8.1.7 32 | # via uvicorn 33 | 
croniter==6.0.0 34 | # via -r requirements.in 35 | cryptography==44.0.2 36 | # via 37 | # -r requirements.in 38 | # pyjwt 39 | fastapi==0.115.11 40 | # via -r requirements.in 41 | friendlywords==1.1.3 42 | # via -r requirements.in 43 | frozenlist==1.5.0 44 | # via 45 | # aiohttp 46 | # aiosignal 47 | greenlet==3.1.1 48 | # via 49 | # -r requirements.in 50 | # sqlalchemy 51 | h11==0.14.0 52 | # via uvicorn 53 | idna==3.6 54 | # via 55 | # anyio 56 | # requests 57 | # yarl 58 | mako==1.3.2 59 | # via alembic 60 | markupsafe==2.1.5 61 | # via mako 62 | multidict==6.1.0 63 | # via 64 | # aiohttp 65 | # yarl 66 | mypy==1.15.0 67 | # via -r requirements.in 68 | mypy-extensions==1.0.0 69 | # via mypy 70 | propcache==0.2.1 71 | # via 72 | # aiohttp 73 | # yarl 74 | pycparser==2.21 75 | # via cffi 76 | pydantic==2.10.6 77 | # via 78 | # -r requirements.in 79 | # fastapi 80 | pydantic-core==2.27.2 81 | # via pydantic 82 | pyjwt==2.10.1 83 | # via -r requirements.in 84 | python-dateutil==2.9.0.post0 85 | # via croniter 86 | pytz==2024.1 87 | # via croniter 88 | requests==2.32.3 89 | # via -r requirements.in 90 | ruff==0.9.9 91 | # via -r requirements.in 92 | six==1.16.0 93 | # via python-dateutil 94 | sniffio==1.3.1 95 | # via anyio 96 | sqlalchemy==2.0.38 97 | # via 98 | # -r requirements.in 99 | # alembic 100 | sse-starlette==2.2.1 101 | # via -r requirements.in 102 | starlette==0.46.0 103 | # via 104 | # fastapi 105 | # sse-starlette 106 | typing-extensions==4.12.2 107 | # via 108 | # aiosqlite 109 | # alembic 110 | # anyio 111 | # fastapi 112 | # mypy 113 | # pydantic 114 | # pydantic-core 115 | # sqlalchemy 116 | urllib3==2.2.1 117 | # via requests 118 | uvicorn==0.34.0 119 | # via -r requirements.in 120 | yarl==1.18.3 121 | # via aiohttp 122 | -------------------------------------------------------------------------------- /disco/utils/meta.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import subprocess 4 | 5 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 6 | from sqlalchemy.orm.session import Session as DBSession 7 | 8 | from disco.models import ApiKey 9 | from disco.utils import caddy, docker, keyvalues 10 | from disco.utils.subprocess import decode_text 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def update_disco( 16 | dbsession: DBSession, image: str = "letsdiscodev/daemon:latest", pull: bool = True 17 | ) -> None: 18 | if is_updating(dbsession): 19 | raise Exception("An update is already in progress") 20 | save_is_updating(dbsession) 21 | if pull: 22 | asyncio.run(docker.pull(image)) 23 | _run_cmd( 24 | [ 25 | "docker", 26 | "run", 27 | "--rm", 28 | "--detach", 29 | "--label", 30 | "disco.log.core=true", 31 | "--env", 32 | f"DISCO_IMAGE={image}", 33 | "--mount", 34 | "source=disco-data,target=/disco/data", 35 | "--mount", 36 | "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock", 37 | image, 38 | "disco_update", 39 | ] 40 | ) 41 | 42 | 43 | def _run_cmd(args: list[str], timeout=600) -> str: 44 | process = subprocess.Popen( 45 | args=args, 46 | stdout=subprocess.PIPE, 47 | stderr=subprocess.STDOUT, 48 | ) 49 | assert process.stdout is not None 50 | output = "" 51 | for line in process.stdout: 52 | decoded_line = decode_text(line) 53 | output += decoded_line 54 | process.wait() 55 | if process.returncode != 0: 56 | raise Exception(f"Docker returned status {process.returncode}:\n{output}") 57 | return output 58 | 59 | 60 | def is_updating(dbsession: DBSession) -> 
bool: 61 | updating = keyvalues.get_value_sync(dbsession, "DISCO_IS_UPDATING") 62 | return updating is not None 63 | 64 | 65 | def save_is_updating(dbsession: DBSession) -> None: 66 | keyvalues.set_value_sync(dbsession, "DISCO_IS_UPDATING", "true") 67 | 68 | 69 | def save_done_updating(dbsession: DBSession) -> None: 70 | keyvalues.delete_value_sync(dbsession, "DISCO_IS_UPDATING") 71 | 72 | 73 | async def set_disco_host( 74 | dbsession: AsyncDBSession, host: str, by_api_key: ApiKey 75 | ) -> None: 76 | from disco.utils import docker 77 | 78 | prev_host = await keyvalues.get_value_str(dbsession=dbsession, key="DISCO_HOST") 79 | log.info( 80 | "Setting Disco host from %s to %s by %s", prev_host, host, by_api_key.log() 81 | ) 82 | await caddy.update_disco_host(host) 83 | await keyvalues.set_value(dbsession=dbsession, key="DISCO_HOST", value=host) 84 | syslog_services = await docker.list_syslog_services() 85 | for syslog_service in syslog_services: 86 | await docker.update_syslog_hostname( 87 | service_name=syslog_service.name, disco_host=host 88 | ) 89 | -------------------------------------------------------------------------------- /disco/utils/imagecleanup.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from dataclasses import dataclass 3 | from typing import Sequence 4 | 5 | from disco.models import Deployment 6 | from disco.models.db import AsyncSession 7 | from disco.utils import docker 8 | from disco.utils.projects import get_all_projects 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | 13 | async def remove_unused_images() -> None: 14 | log.info("Cleaning up Docker images") 15 | images = await docker.ls_images_swarm() 16 | active_projects = await get_active_projects() 17 | images_to_remove = [ 18 | (image, tag) 19 | for image, tag in images 20 | if should_remove_image(image=image, tag=tag, active_projects=active_projects) 21 | ] 22 | for image, tag in images_to_remove: 23 | if tag == "": 24 | image_str = image 25 | else: 26 | image_str = f"{image}:{tag}" 27 | log.info("Removing Docker image %s", image_str) 28 | await docker.rm_image_swarm(image_str) 29 | log.info("Done cleaning up Docker images") 30 | 31 | 32 | @dataclass 33 | class ActiveProject: 34 | project_name: str 35 | deployment_number: int 36 | 37 | 38 | def should_remove_image( 39 | image: str, tag: str, active_projects: list[ActiveProject] 40 | ) -> bool: 41 | if not image.startswith("disco/project-"): 42 | # we're currently not removing images that Disco may not have added 43 | return False 44 | for active_project in active_projects: 45 | if not image.startswith(f"disco/project-{active_project.project_name}-"): 46 | continue # does not match this project 47 | try: 48 | deployment_number = int(tag) 49 | except ValueError: 50 | continue # is not a deployment number 51 | if deployment_number == active_project.deployment_number: 52 | return False # project matches, deployment matches 53 | return True 54 | 55 | 56 | async def get_active_projects() -> list[ActiveProject]: 57 | active_projects = [] 58 | async with AsyncSession.begin() as dbsession: 59 | projects = await get_all_projects(dbsession) 60 | for project in projects: 61 | deployments: Sequence[ 62 | Deployment 63 | ] = await project.awaitable_attrs.deployments 64 | for deployment in deployments: 65 | if deployment.status in [ 66 | "QUEUED", 67 | "PREPARING", 68 | "REPLACING", 69 | "COMPLETE", 70 | ]: 71 | # add all deployments that are live or will be live 72 | active_project = ActiveProject( 73 | 
project_name=project.name, 74 | deployment_number=deployment.number, 75 | ) 76 | active_projects.append(active_project) 77 | if deployment.status == "COMPLETE": 78 | # but don't go back farther than current live deployment 79 | break 80 | return active_projects 81 | -------------------------------------------------------------------------------- /disco/utils/discofile.py: -------------------------------------------------------------------------------- 1 | from decimal import Decimal 2 | from enum import Enum 3 | 4 | from pydantic import BaseModel, Field 5 | 6 | 7 | class Volume(BaseModel): 8 | name: str 9 | destination_path: str = Field(..., alias="destinationPath") 10 | 11 | 12 | class PublishedPort(BaseModel): 13 | published_as: int = Field(..., alias="publishedAs") 14 | from_container_port: int = Field(..., alias="fromContainerPort") 15 | protocol: str = "tcp" 16 | 17 | 18 | class Image(BaseModel): 19 | dockerfile: str = "Dockerfile" 20 | context: str = "." 21 | 22 | 23 | class ServiceType(str, Enum): 24 | container = "container" 25 | static = "static" 26 | generator = "generator" 27 | command = "command" 28 | cron = "cron" 29 | cgi = "cgi" 30 | 31 | 32 | class Health(BaseModel): 33 | command: str 34 | 35 | 36 | class Service(BaseModel): 37 | type: ServiceType = ServiceType.container 38 | public_path: str | None = Field( 39 | "dist", 40 | alias="publicPath", 41 | ) 42 | image: str = "default" 43 | port: int = 8000 44 | command: str | None = None 45 | build: str | None = None 46 | published_ports: list[PublishedPort] = Field( 47 | [], 48 | alias="publishedPorts", 49 | ) 50 | volumes: list[Volume] = [] 51 | schedule: str = Field("* * * * *", pattern=r"^\*|\d+ \*|\d+ \*|\d+ \*|\d+ \*|\d+$") 52 | exposed_internally: bool = Field( 53 | False, 54 | alias="exposedInternally", 55 | ) 56 | timeout: int = 300 # commands, static site generation, crons 57 | health: Health | None = None 58 | extra_swarm_params: str | None = Field( 59 | None, 60 | alias="extraSwarmParams", 61 | ) 62 | 63 | 64 | class DiscoFile(BaseModel): 65 | version: Decimal 66 | services: dict[str, Service] = {} 67 | images: dict[str, Image] = {} 68 | 69 | 70 | DEFAULT_DISCO_FILE = """{ 71 | "version": "1.0", 72 | "services": { 73 | "web": {} 74 | } 75 | }""" 76 | 77 | 78 | def get_disco_file_from_str(disco_file_str: str | None) -> DiscoFile: 79 | if disco_file_str is None: 80 | disco_file_str = DEFAULT_DISCO_FILE 81 | disco_file = DiscoFile.model_validate_json(disco_file_str) 82 | if _should_add_default_image(disco_file): 83 | disco_file.images["default"] = Image( 84 | dockerfile="Dockerfile", 85 | context=".", 86 | ) 87 | return disco_file 88 | 89 | 90 | def _should_add_default_image(disco_file: DiscoFile) -> bool: 91 | if "default" in disco_file.images: 92 | # already defined 93 | return False 94 | for service in disco_file.services.values(): 95 | if service.image != "default": 96 | continue 97 | if service.type == ServiceType.static and service.command is None: 98 | continue 99 | if service.build is not None: 100 | # uses build command, does not rely on images 101 | continue 102 | # at this point, it uses default and will execute something 103 | return True 104 | # no service used the default image, no need to add it 105 | return False 106 | -------------------------------------------------------------------------------- /disco/models/deployment.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import uuid 4 | from datetime import datetime, 
timezone 5 | from typing import TYPE_CHECKING 6 | 7 | from sqlalchemy import ForeignKey, Integer, String, Unicode 8 | from sqlalchemy.orm import Mapped, mapped_column, relationship 9 | 10 | if TYPE_CHECKING: 11 | from disco.models import ( 12 | ApiKey, 13 | CommandRun, 14 | DeploymentEnvironmentVariable, 15 | Project, 16 | ) 17 | from disco.models.meta import Base, DateTimeTzAware 18 | 19 | 20 | class Deployment(Base): 21 | __tablename__ = "deployments" 22 | 23 | id: Mapped[str] = mapped_column( 24 | String(32), default=lambda: uuid.uuid4().hex, primary_key=True 25 | ) 26 | created: Mapped[datetime] = mapped_column( 27 | DateTimeTzAware(), 28 | default=lambda: datetime.now(timezone.utc), 29 | nullable=False, 30 | ) 31 | updated: Mapped[datetime] = mapped_column( 32 | DateTimeTzAware(), 33 | default=lambda: datetime.now(timezone.utc), 34 | onupdate=lambda: datetime.now(timezone.utc), 35 | nullable=False, 36 | ) 37 | number: Mapped[int] = mapped_column(Integer, nullable=False, index=True) 38 | status: Mapped[str] = mapped_column(String(32), nullable=False) 39 | commit_hash: Mapped[str | None] = mapped_column(String(200), nullable=True) 40 | disco_file: Mapped[str | None] = mapped_column(Unicode(5000), nullable=True) 41 | project_name: Mapped[str] = mapped_column(Unicode(255), nullable=False) 42 | github_repo_full_name: Mapped[str | None] = mapped_column( 43 | Unicode(2048), nullable=True 44 | ) 45 | branch: Mapped[str] = mapped_column(Unicode(255), nullable=True) 46 | registry_host: Mapped[str | None] = mapped_column(Unicode(2048), nullable=True) 47 | project_id: Mapped[str] = mapped_column( 48 | String(32), 49 | ForeignKey("projects.id"), 50 | nullable=False, 51 | index=True, 52 | ) 53 | prev_deployment_id: Mapped[str | None] = mapped_column( 54 | String(32), 55 | ForeignKey("deployments.id"), 56 | nullable=True, 57 | index=True, 58 | ) 59 | by_api_key_id: Mapped[str | None] = mapped_column( 60 | String(32), 61 | ForeignKey("api_keys.id"), 62 | nullable=True, 63 | index=True, 64 | ) 65 | task_id: Mapped[str | None] = mapped_column( 66 | String(32), 67 | nullable=True, 68 | ) 69 | 70 | project: Mapped[Project] = relationship( 71 | "Project", 72 | back_populates="deployments", 73 | ) 74 | by_api_key: Mapped[ApiKey | None] = relationship( 75 | "ApiKey", 76 | back_populates="deployments", 77 | ) 78 | prev_deployment: Mapped[Deployment | None] = relationship( 79 | "Deployment", 80 | ) 81 | command_runs: Mapped[list[CommandRun]] = relationship( 82 | "CommandRun", back_populates="deployment", order_by="CommandRun.number.desc()" 83 | ) 84 | env_variables: Mapped[list[DeploymentEnvironmentVariable]] = relationship( 85 | "DeploymentEnvironmentVariable", back_populates="deployment" 86 | ) 87 | 88 | def log(self): 89 | return f"DEPLOYMENT_{self.id} ({self.project_name} {self.number})" 90 | -------------------------------------------------------------------------------- /disco/endpoints/apikeyinvites.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Annotated 3 | 4 | from fastapi import APIRouter, Depends, HTTPException, Path 5 | from fastapi.responses import PlainTextResponse 6 | from pydantic import BaseModel, Field 7 | from sqlalchemy.orm.session import Session as DBSession 8 | 9 | import disco 10 | from disco.auth import get_api_key_sync 11 | from disco.endpoints.dependencies import get_db_sync 12 | from disco.models import ApiKey, ApiKeyInvite 13 | from disco.utils import keyvalues 14 | from disco.utils.apikeyinvites 
import ( 15 | create_api_key_invite, 16 | get_api_key_invite_by_id, 17 | invite_is_active, 18 | use_api_key_invite, 19 | ) 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | router = APIRouter() 24 | 25 | 26 | class NewApiKeyRequestBody(BaseModel): 27 | name: str = Field(..., max_length=255) 28 | 29 | 30 | @router.post("/api/api-key-invites", status_code=201) 31 | def api_keys_post( 32 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 33 | api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 34 | req_body: NewApiKeyRequestBody, 35 | ): 36 | disco_host = keyvalues.get_value_sync(dbsession, "DISCO_HOST") 37 | invite = create_api_key_invite( 38 | dbsession=dbsession, 39 | name=req_body.name, 40 | by_api_key=api_key, 41 | ) 42 | return { 43 | "apiKeyInvite": { 44 | "url": f"https://{disco_host}/api-key-invites/{invite.id}", 45 | "expires": invite.expires.isoformat(), 46 | }, 47 | } 48 | 49 | 50 | def get_api_key_invite_from_url( 51 | invite_id: Annotated[str, Path()], 52 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 53 | ): 54 | invite = get_api_key_invite_by_id(dbsession, invite_id) 55 | if invite is None: 56 | raise HTTPException(status_code=404) 57 | yield invite 58 | 59 | 60 | RESP_TXT = ( 61 | "To accept invite, install Disco CLI (https://letsdisco.dev) " 62 | "and run this command:\n\n " 63 | "disco invite:accept https://{disco_host}/api-key-invites/{invite_id}" 64 | ) 65 | 66 | 67 | @router.get("/api-key-invites/{invite_id}", response_class=PlainTextResponse) 68 | def api_key_invite_get( 69 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 70 | invite: Annotated[ApiKeyInvite, Depends(get_api_key_invite_from_url)], 71 | ): 72 | disco_host = keyvalues.get_value_sync(dbsession, "DISCO_HOST") 73 | return RESP_TXT.format(disco_host=disco_host, invite_id=invite.id) 74 | 75 | 76 | @router.post("/api-key-invites/{invite_id}") 77 | def api_key_invite_post( 78 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 79 | invite: Annotated[ApiKeyInvite, Depends(get_api_key_invite_from_url)], 80 | ): 81 | if not invite_is_active(invite): 82 | raise HTTPException(422, "Invite expired") 83 | api_key = use_api_key_invite(dbsession, invite) 84 | return { 85 | "apiKey": { 86 | "name": api_key.name, 87 | "privateKey": api_key.id, 88 | "publicKey": api_key.public_key, 89 | }, 90 | "meta": { 91 | "version": disco.__version__, 92 | "discoHost": keyvalues.get_value_sync(dbsession, "DISCO_HOST"), 93 | "registryHost": keyvalues.get_value_sync(dbsession, "REGISTRY_HOST"), 94 | }, 95 | } 96 | -------------------------------------------------------------------------------- /disco/utils/apikeys.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime, timezone 3 | from secrets import token_hex 4 | 5 | from sqlalchemy import select 6 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 7 | from sqlalchemy.orm.session import Session as DBSession 8 | 9 | from disco.models import ApiKey, ApiKeyUsage 10 | from disco.utils import events 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def create_api_key(dbsession: DBSession, name: str) -> ApiKey: 16 | api_key = ApiKey( 17 | id=token_hex(16), 18 | name=name, 19 | public_key=token_hex(16), 20 | ) 21 | dbsession.add(api_key) 22 | log.info("Created API key %s", api_key.log()) 23 | events.api_key_created(public_key=api_key.public_key, name=name) 24 | return api_key 25 | 26 | 27 | def get_valid_api_key_by_id_sync( 28 | dbsession: DBSession, 
api_key_id: str 29 | ) -> ApiKey | None: 30 | api_key = get_api_key_by_id_sync(dbsession, api_key_id) 31 | if api_key is None: 32 | return None 33 | if api_key.deleted is not None: 34 | return None 35 | return api_key 36 | 37 | 38 | async def get_valid_api_key_by_id( 39 | dbsession: AsyncDBSession, api_key_id: str 40 | ) -> ApiKey | None: 41 | api_key = await get_api_key_by_id(dbsession, api_key_id) 42 | if api_key is None: 43 | return None 44 | if api_key.deleted is not None: 45 | return None 46 | return api_key 47 | 48 | 49 | def get_all_api_keys(dbsession: DBSession) -> list[ApiKey]: 50 | return ( 51 | dbsession.query(ApiKey) 52 | .filter(ApiKey.deleted.is_(None)) 53 | .order_by(ApiKey.created.asc()) 54 | .all() 55 | ) 56 | 57 | 58 | def get_api_key_by_id_sync(dbsession: DBSession, api_key_id: str) -> ApiKey | None: 59 | return dbsession.query(ApiKey).filter(ApiKey.id == api_key_id).first() 60 | 61 | 62 | async def get_api_key_by_id( 63 | dbsession: AsyncDBSession, api_key_id: str 64 | ) -> ApiKey | None: 65 | return await dbsession.get(ApiKey, api_key_id) 66 | 67 | 68 | def get_api_key_by_public_key_sync( 69 | dbsession: DBSession, public_key: str 70 | ) -> ApiKey | None: 71 | stmt = ( 72 | select(ApiKey) 73 | .where(ApiKey.public_key == public_key) 74 | .where(ApiKey.deleted.is_(None)) 75 | ) 76 | result = dbsession.execute(stmt) 77 | return result.scalars().first() 78 | 79 | 80 | async def get_api_key_by_public_key( 81 | dbsession: AsyncDBSession, public_key: str 82 | ) -> ApiKey | None: 83 | stmt = ( 84 | select(ApiKey) 85 | .where(ApiKey.public_key == public_key) 86 | .where(ApiKey.deleted.is_(None)) 87 | ) 88 | result = await dbsession.execute(stmt) 89 | return result.scalars().first() 90 | 91 | 92 | def delete_api_key(api_key: ApiKey, by_api_key: ApiKey) -> None: 93 | assert api_key.deleted is None 94 | log.info("Marking API key as deleted %s by %s", api_key.log(), by_api_key.log()) 95 | api_key.deleted = datetime.now(timezone.utc) 96 | events.api_key_removed(public_key=api_key.public_key, name=api_key.name) 97 | 98 | 99 | def record_api_key_usage_sync(dbsession: DBSession, api_key: ApiKey) -> None: 100 | dbsession.add(ApiKeyUsage(created=datetime.now(timezone.utc), api_key=api_key)) 101 | 102 | 103 | async def record_api_key_usage(dbsession: AsyncDBSession, api_key: ApiKey) -> None: 104 | dbsession.add(ApiKeyUsage(created=datetime.now(timezone.utc), api_key=api_key)) 105 | -------------------------------------------------------------------------------- /disco/endpoints/nodes.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from datetime import datetime, timedelta, timezone 4 | from typing import Annotated 5 | 6 | from fastapi import APIRouter, Depends, HTTPException 7 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 8 | 9 | from disco.auth import get_api_key_wo_tx 10 | from disco.endpoints.dependencies import get_db 11 | from disco.utils import docker, keyvalues 12 | from disco.utils.randomname import generate_random_name 13 | 14 | log = logging.getLogger(__name__) 15 | 16 | router = APIRouter(dependencies=[Depends(get_api_key_wo_tx)]) 17 | 18 | 19 | @router.get("/api/disco/swarm/join-token") 20 | async def join_token_get(dbsession: Annotated[AsyncDBSession, Depends(get_db)]): 21 | return { 22 | "joinToken": await docker.get_swarm_join_token(), 23 | "ip": await keyvalues.get_value(dbsession, "DISCO_ADVERTISE_ADDR"), 24 | "dockerVersion": await docker.get_docker_version(), 25 | 
"registryHost": await keyvalues.get_value(dbsession, "REGISTRY_HOST"), 26 | } 27 | 28 | 29 | @router.get("/api/disco/swarm/nodes") 30 | async def get_node_list(): 31 | node_ids = await docker.get_node_list() 32 | nodes = await docker.get_node_details(node_ids) 33 | for node in nodes: 34 | if "disco-name" not in node.labels: 35 | node.labels["disco-name"] = await generate_random_name() 36 | await docker.set_node_label( 37 | node_id=node.id, key="disco-name", value=node.labels["disco-name"] 38 | ) 39 | return { 40 | "nodes": [ 41 | { 42 | "created": node.created, 43 | "name": node.labels["disco-name"], 44 | "state": node.state, 45 | "address": node.address, 46 | "isLeader": node.labels.get("disco-role") == "main", 47 | } 48 | for node in nodes 49 | ], 50 | } 51 | 52 | 53 | @router.delete("/api/disco/swarm/nodes/{node_name}") 54 | async def node_delete(node_name: str): 55 | log.info("Removing node %s", node_name) 56 | node_ids = await docker.get_node_list() 57 | nodes = await docker.get_node_details(node_ids) 58 | node_id = None 59 | for node in nodes: 60 | if node.labels.get("disco-name") == node_name: 61 | if node.labels.get("disco-role") == "main": 62 | raise HTTPException(422, "Can't remove main node") 63 | node_id = node.id 64 | if node_id is None: 65 | log.info("Didn't find node %s", node_name) 66 | raise HTTPException(status_code=404) 67 | log.info("Starting swarm leaver job for node %s", node_name) 68 | service_name = await docker.leave_swarm(node_id=node_id) 69 | log.info("Draining node %s", node_name) 70 | await docker.drain_node(node_id=node_id) 71 | log.info("Removing swarm leaver service for node %s", node_name) 72 | await docker.rm_service(service_name) 73 | timeout = datetime.now(timezone.utc) + timedelta(minutes=20) 74 | while datetime.now(timezone.utc) < timeout: 75 | try: 76 | log.info("Removing node %s", node_name) 77 | await docker.remove_node(node_id=node_id) 78 | log.info("Removed node %s", node_name) 79 | return {} 80 | except Exception: 81 | log.info("Failed to remove, node, waiting 5 seconds") 82 | await asyncio.sleep(5) 83 | log.info("Removing node --force %s", node_name) 84 | await docker.remove_node(node_id=node_id, force=True) 85 | log.info("Removed node --force %s", node_name) 86 | return {} 87 | -------------------------------------------------------------------------------- /disco/endpoints/logs.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | import random 5 | 6 | from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException 7 | from sse_starlette import ServerSentEvent 8 | from sse_starlette.sse import EventSourceResponse 9 | 10 | from disco.auth import get_api_key_wo_tx 11 | from disco.models.db import AsyncSession 12 | from disco.utils import docker 13 | from disco.utils.logs import LOGSPOUT_CMD, JsonLogServer, monitor_syslog 14 | from disco.utils.projects import get_project_by_name 15 | 16 | log = logging.getLogger(__name__) 17 | 18 | router = APIRouter(dependencies=[Depends(get_api_key_wo_tx)]) 19 | 20 | 21 | @router.get("/api/logs") 22 | async def logs_all(background_tasks: BackgroundTasks): 23 | return EventSourceResponse( 24 | read_logs( 25 | project_name=None, service_name=None, background_tasks=background_tasks 26 | ) 27 | ) 28 | 29 | 30 | @router.get("/api/logs/{project_name}") 31 | async def logs_project( 32 | project_name: str, 33 | background_tasks: BackgroundTasks, 34 | ): 35 | async with AsyncSession.begin() as dbsession: 36 | project = await 
get_project_by_name(dbsession, project_name) 37 | if project is None: 38 | raise HTTPException(status_code=404) 39 | return EventSourceResponse( 40 | read_logs( 41 | project_name=project_name, 42 | service_name=None, 43 | background_tasks=background_tasks, 44 | ) 45 | ) 46 | 47 | 48 | @router.get("/api/logs/{project_name}/{service_name}") 49 | async def logs_project_service( 50 | project_name: str, 51 | service_name: str, 52 | background_tasks: BackgroundTasks, 53 | ): 54 | async with AsyncSession.begin() as dbsession: 55 | project = await get_project_by_name(dbsession, project_name) 56 | if project is None: 57 | raise HTTPException(status_code=404) 58 | return EventSourceResponse( 59 | read_logs( 60 | project_name=project_name, 61 | service_name=service_name, 62 | background_tasks=background_tasks, 63 | ) 64 | ) 65 | 66 | 67 | async def read_logs( 68 | project_name: str | None, 69 | service_name: str | None, 70 | background_tasks: BackgroundTasks, 71 | ): 72 | port = random.randint(10000, 65535) 73 | logspout_cmd = LOGSPOUT_CMD.copy() 74 | assert logspout_cmd[4] == "{name}" 75 | syslog_service_name = f"disco-syslog-{port}" 76 | await monitor_syslog(syslog_service_name) 77 | logspout_cmd[4] = syslog_service_name 78 | logspout_cmd[-1] = logspout_cmd[-1].format(port=port) 79 | transport = None 80 | log_queue: asyncio.Queue[dict[str, str | dict[str, str]]] = asyncio.Queue() 81 | await asyncio.create_subprocess_exec(*logspout_cmd) 82 | loop = asyncio.get_running_loop() 83 | transport, _ = await loop.create_datagram_endpoint( 84 | lambda: JsonLogServer( 85 | log_queue=log_queue, project_name=project_name, service_name=service_name 86 | ), 87 | local_addr=("0.0.0.0", port), 88 | ) 89 | try: 90 | while True: 91 | log_obj = await log_queue.get() 92 | yield ServerSentEvent( 93 | event="output", 94 | data=json.dumps(log_obj), 95 | ) 96 | finally: 97 | log.info("HTTP Connection for logs disconnected") 98 | if transport is not None: 99 | try: 100 | transport.close() 101 | log.info("Closed datagram log endpoint") 102 | except Exception: 103 | log.exception("Exception closing transport") 104 | background_tasks.add_task(docker.rm_service, syslog_service_name) 105 | -------------------------------------------------------------------------------- /disco/utils/syslog.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from typing import Literal, TypedDict 4 | 5 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 6 | 7 | from disco.models import ApiKey 8 | from disco.utils import docker, keyvalues 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | SYSLOG_URLS_KEY = "SYSLOG_URLS" 13 | 14 | 15 | class SyslogUrl(TypedDict): 16 | url: str 17 | type: Literal["CORE", "GLOBAL"] 18 | 19 | 20 | async def add_syslog_url( 21 | dbsession: AsyncDBSession, url: str, by_api_key: ApiKey 22 | ) -> list[SyslogUrl]: 23 | syslog_urls = await get_syslog_urls(dbsession) 24 | if url not in [syslog_url["url"] for syslog_url in syslog_urls]: 25 | log.info("Adding syslog URL %s by %s", url, by_api_key.log()) 26 | syslog_urls.append( 27 | { 28 | "url": url, 29 | "type": "GLOBAL", 30 | } 31 | ) 32 | await _save_syslog_urls(dbsession, syslog_urls) 33 | return syslog_urls 34 | 35 | 36 | async def remove_syslog_url( 37 | dbsession: AsyncDBSession, url: str, by_api_key: ApiKey 38 | ) -> list[SyslogUrl]: 39 | syslog_urls = await get_syslog_urls(dbsession) 40 | if url in [syslog_url["url"] for syslog_url in syslog_urls]: 41 | log.info("Removing syslog 
URL %s by %s", url, by_api_key.log()) 42 | # filter instead of list.remove(); a "CORE" entry must not raise here 43 | syslog_urls = [ 44 | syslog_url 45 | for syslog_url in syslog_urls 46 | if not (syslog_url["url"] == url and syslog_url["type"] == "GLOBAL") 47 | ] 48 | await _save_syslog_urls(dbsession, syslog_urls) 49 | return syslog_urls 50 | 51 | 52 | async def get_syslog_urls(dbsession: AsyncDBSession) -> list[SyslogUrl]: 53 | urls_str = await keyvalues.get_value(dbsession, SYSLOG_URLS_KEY) 54 | if urls_str is None: 55 | urls_str = "[]" 56 | syslog_urls = json.loads(urls_str) 57 | return syslog_urls 58 | 59 | 60 | async def set_core_syslogs( 61 | dbsession: AsyncDBSession, urls: list[str] 62 | ) -> list[SyslogUrl]: 63 | log.info("Updating core Syslogs: %s", urls) 64 | syslog_urls = await get_syslog_urls(dbsession) 65 | other_syslog_urls = [ 66 | syslog_url for syslog_url in syslog_urls if syslog_url["type"] != "CORE" 67 | ] 68 | core_syslog_urls: list[SyslogUrl] = [{"url": url, "type": "CORE"} for url in urls] 69 | new_syslog_urls = core_syslog_urls + other_syslog_urls 70 | await _save_syslog_urls(dbsession, new_syslog_urls) 71 | return new_syslog_urls 72 | 73 | 74 | async def _save_syslog_urls( 75 | dbsession: AsyncDBSession, syslog_urls: list[SyslogUrl] 76 | ) -> None: 77 | await keyvalues.set_value(dbsession, SYSLOG_URLS_KEY, json.dumps(syslog_urls)) 78 | 79 | 80 | async def set_syslog_services(disco_host: str, syslog_urls: list[SyslogUrl]) -> None: 81 | existing_services = await docker.list_syslog_services() 82 | # add missing services 83 | for syslog_url in syslog_urls: 84 | already_exists = False 85 | for existing_service in existing_services: 86 | if syslog_url["url"] == existing_service.url: 87 | already_exists = True 88 | if not already_exists: 89 | await docker.start_syslog_service( 90 | disco_host=disco_host, 91 | url=syslog_url["url"], 92 | type=syslog_url["type"], 93 | ) 94 | # remove extra services 95 | for existing_service in existing_services: 96 | should_still_exist = False 97 | for syslog_url in syslog_urls: 98 | if syslog_url["url"] == existing_service.url: 99 | should_still_exist = True 100 | if not should_still_exist: 101 | log.info("Stopping Syslog service %s", existing_service.url) 102 | await docker.rm_service(existing_service.name) 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Disco Data 2 | data/ 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/#use-with-ide 113 | .pdm.toml 114 | 115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
163 | #.idea/ 164 | -------------------------------------------------------------------------------- /disco/endpoints/scale.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import Annotated 4 | 5 | from fastapi import APIRouter, Depends 6 | from fastapi.exceptions import RequestValidationError 7 | from pydantic import BaseModel, ValidationError 8 | from pydantic_core import InitErrorDetails, PydanticCustomError 9 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 10 | from sqlalchemy.orm.session import Session as DBSession 11 | 12 | from disco.auth import get_api_key_sync 13 | from disco.endpoints.dependencies import ( 14 | get_db, 15 | get_db_sync, 16 | get_project_from_url, 17 | get_project_from_url_sync, 18 | ) 19 | from disco.models import ApiKey, Project 20 | from disco.utils import docker 21 | from disco.utils.deployments import get_live_deployment, get_live_deployment_sync 22 | from disco.utils.discofile import ServiceType, get_disco_file_from_str 23 | 24 | log = logging.getLogger(__name__) 25 | 26 | router = APIRouter(dependencies=[Depends(get_api_key_sync)]) 27 | 28 | 29 | @router.get("/api/projects/{project_name}/scale") 30 | async def scale_get( 31 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 32 | project: Annotated[Project, Depends(get_project_from_url)], 33 | ): 34 | deployment = await get_live_deployment(dbsession, project) 35 | if deployment is None: 36 | services = [] 37 | else: 38 | services = await docker.list_services_for_deployment( 39 | project.name, deployment.number 40 | ) 41 | return { 42 | "services": [ 43 | { 44 | "name": service.name, 45 | "scale": service.replicas, 46 | } 47 | for service in services 48 | ] 49 | } 50 | 51 | 52 | class ScaleRequestBody(BaseModel): 53 | services: dict[str, int] 54 | 55 | 56 | @router.post("/api/projects/{project_name}/scale") 57 | def scale_post( 58 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 59 | project: Annotated[Project, Depends(get_project_from_url_sync)], 60 | api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 61 | req_body: ScaleRequestBody, 62 | ): 63 | deployment = get_live_deployment_sync(dbsession, project) 64 | if deployment is None: 65 | services = set() 66 | else: 67 | disco_file = get_disco_file_from_str(deployment.disco_file) 68 | services = set( 69 | [ 70 | service 71 | for service in disco_file.services 72 | if disco_file.services[service].type == ServiceType.container 73 | ] 74 | ) 75 | invalid_services = [] 76 | for service in req_body.services: 77 | if service not in services: 78 | invalid_services.append(service) 79 | if len(invalid_services) > 0: 80 | raise RequestValidationError( 81 | errors=( 82 | ValidationError.from_exception_data( 83 | "ValueError", 84 | [ 85 | InitErrorDetails( 86 | type=PydanticCustomError( 87 | "value_error", 88 | "Service name not in current deployment", 89 | ), 90 | loc=("body", "services"), 91 | input=service, 92 | ) 93 | for service in invalid_services 94 | ], 95 | ) 96 | ).errors() 97 | ) 98 | if len(req_body.services) > 0: 99 | assert deployment is not None 100 | log.info( 101 | "Scaling services for project %s %s by %s", 102 | project.log(), 103 | " ".join([f"{s}={n}" for s, n in req_body.services.items()]), 104 | api_key.log(), 105 | ) 106 | internal_name_scale = dict( 107 | ( 108 | docker.service_name( 109 | deployment.project_name, service, deployment.number 110 | ), 111 | scale, 112 | ) 113 | for service, scale in req_body.services.items() 
114 | ) 115 | asyncio.run(docker.scale(internal_name_scale)) 116 | -------------------------------------------------------------------------------- /disco/endpoints/projectkeyvalues.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Annotated 3 | 4 | from fastapi import APIRouter, Depends, HTTPException, Path 5 | from fastapi.exceptions import RequestValidationError 6 | from pydantic import BaseModel, Field, ValidationError 7 | from pydantic_core import InitErrorDetails, PydanticCustomError 8 | from sqlalchemy.orm.session import Session as DBSession 9 | 10 | from disco.auth import get_api_key_sync 11 | from disco.endpoints.dependencies import get_db_sync, get_project_from_url_sync 12 | from disco.models import ApiKey, Project 13 | from disco.utils.encryption import decrypt 14 | from disco.utils.projectkeyvalues import ( 15 | delete_value, 16 | get_all_key_values_for_project, 17 | get_value, 18 | set_value, 19 | ) 20 | 21 | log = logging.getLogger(__name__) 22 | 23 | router = APIRouter(dependencies=[Depends(get_api_key_sync)]) 24 | 25 | 26 | @router.get("/api/projects/{project_name}/keyvalues") 27 | def key_values_get( 28 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 29 | project: Annotated[Project, Depends(get_project_from_url_sync)], 30 | ): 31 | key_values = get_all_key_values_for_project(dbsession, project) 32 | return { 33 | "keyValues": dict( 34 | [(key_value.key, decrypt(key_value.value)) for key_value in key_values] 35 | ) 36 | } 37 | 38 | 39 | def get_value_from_key_in_url( 40 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 41 | project: Annotated[Project, Depends(get_project_from_url_sync)], 42 | key: Annotated[str, Path(max_length=255)], 43 | ): 44 | value = get_value( 45 | dbsession=dbsession, 46 | project=project, 47 | key=key, 48 | ) 49 | if value is None: 50 | raise HTTPException(status_code=404) 51 | yield value 52 | 53 | 54 | @router.get("/api/projects/{project_name}/keyvalues/{key}") 55 | def key_value_get( 56 | value: Annotated[str, Depends(get_value_from_key_in_url)], 57 | ): 58 | return { 59 | "value": value, 60 | } 61 | 62 | 63 | class SetKeyValueRequestBody(BaseModel): 64 | value: str = Field(..., max_length=2097152) 65 | previous_value: str | None = Field(None, alias="previousValue", max_length=2097152) 66 | 67 | 68 | @router.put("/api/projects/{project_name}/keyvalues/{key}") 69 | def key_value_put( 70 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 71 | key: Annotated[str, Path(max_length=255)], 72 | req_body: SetKeyValueRequestBody, 73 | project: Annotated[Project, Depends(get_project_from_url_sync)], 74 | api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 75 | ): 76 | prev_value = get_value(dbsession=dbsession, project=project, key=key) 77 | if "previous_value" in req_body.model_fields_set: 78 | if req_body.previous_value != prev_value: 79 | raise RequestValidationError( 80 | errors=( 81 | ValidationError.from_exception_data( 82 | "ValueError", 83 | [ 84 | InitErrorDetails( 85 | type=PydanticCustomError( 86 | "value_error", "Previous value mismatch" 87 | ), 88 | loc=("body", "previousValue"), 89 | input=req_body.previous_value, 90 | ) 91 | ], 92 | ) 93 | ).errors() 94 | ) 95 | set_value( 96 | dbsession=dbsession, 97 | project=project, 98 | key=key, 99 | value=req_body.value, 100 | by_api_key=api_key, 101 | ) 102 | return {"value": req_body.value} 103 | 104 | 105 | @router.delete("/api/projects/{project_name}/keyvalues/{key}") 106 | def key_value_delete( 
107 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 108 | project: Annotated[Project, Depends(get_project_from_url_sync)], 109 | key: Annotated[str, Path(max_length=255)], 110 | api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 111 | ): 112 | delete_value( 113 | dbsession=dbsession, 114 | project=project, 115 | key=key, 116 | by_api_key=api_key, 117 | ) 118 | return {"deleted": True} 119 | -------------------------------------------------------------------------------- /disco/utils/logs.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | from dataclasses import dataclass 5 | from datetime import datetime, timedelta, timezone 6 | 7 | from disco.utils import docker 8 | from disco.utils.subprocess import check_call 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | 13 | @dataclass 14 | class ActiveSyslog: 15 | expires: datetime 16 | service_name: str 17 | 18 | 19 | syslog_list_lock = asyncio.Lock() 20 | _active_syslogs: list[ActiveSyslog] = [] 21 | 22 | LOGSPOUT_CMD = [ 23 | "docker", 24 | "service", 25 | "create", 26 | "--name", 27 | "{name}", 28 | "--mode", 29 | "global", 30 | "--env", 31 | "BACKLOG=false", 32 | "--env", 33 | 'RAW_FORMAT={ "container" : "{{`{{ .Container.Name }}`}}", ' 34 | '"labels": {{`{{ toJSON .Container.Config.Labels }}`}}, ' 35 | '"timestamp": "{{`{{ .Time.Format "2006-01-02T15:04:05Z07:00" }}`}}", ' 36 | '"message": {{`{{ toJSON .Data }}`}} }', 37 | "--mount", 38 | "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock", 39 | "--network", 40 | "disco-logging", 41 | "--env", 42 | "ALLOW_TTY=true", 43 | "--label", 44 | "disco.syslogs", 45 | "--log-driver", 46 | "json-file", 47 | "--log-opt", 48 | "max-size=20m", 49 | "--log-opt", 50 | "max-file=5", 51 | "gliderlabs/logspout:latest", 52 | "raw://disco:{port}", 53 | ] 54 | 55 | 56 | class JsonLogServer(asyncio.DatagramProtocol): 57 | def __init__( 58 | self, 59 | log_queue, 60 | project_name: str | None = None, 61 | service_name: str | None = None, 62 | ): 63 | self.log_queue = log_queue 64 | self.project_name = project_name 65 | self.service_name = service_name 66 | 67 | def connection_made(self, transport): 68 | self.transport = transport 69 | 70 | def datagram_received(self, data, addr): 71 | try: 72 | json_str = data.decode("utf-8") 73 | except UnicodeDecodeError: 74 | log.error("Failed to UTF-8 decode log str: %s", data) 75 | return 76 | try: 77 | log_obj = json.loads(json_str) 78 | except json.decoder.JSONDecodeError: 79 | log.error("Failed to JSON decode log str: %s", json_str) 80 | return 81 | if self.project_name is not None: 82 | if log_obj["labels"].get("disco.project.name") != self.project_name: 83 | return 84 | if self.service_name is not None: 85 | if log_obj["labels"].get("disco.service.name") != self.service_name: 86 | return 87 | self.log_queue.put_nowait(log_obj) 88 | 89 | def connection_lost(self, exception): 90 | try: 91 | self.transport.close() 92 | except Exception: 93 | pass 94 | 95 | 96 | async def monitor_syslog(service_name: str) -> None: 97 | global _active_syslogs 98 | log.info("Adding %s to the list of monitored syslogs", service_name) 99 | async with syslog_list_lock: 100 | _active_syslogs.append( 101 | ActiveSyslog( 102 | service_name=service_name, 103 | expires=datetime.now(timezone.utc) + timedelta(hours=24), 104 | ) 105 | ) 106 | 107 | 108 | async def get_active_syslogs() -> list[str]: 109 | global _active_syslogs 110 | async with syslog_list_lock: 111 | _active_syslogs 
= [ 112 | sl for sl in _active_syslogs if sl.expires > datetime.now(timezone.utc) 113 | ] 114 | return [sl.service_name for sl in _active_syslogs] 115 | 116 | 117 | async def get_running_syslogs() -> list[str]: 118 | args = [ 119 | "docker", 120 | "service", 121 | "ls", 122 | "--filter", 123 | "label=disco.syslogs", 124 | "--format", 125 | "{{ .Name }}", 126 | ] 127 | stdout, _, _ = await check_call(args) 128 | return stdout 129 | 130 | 131 | async def clean_up_rogue_syslogs() -> None: 132 | active_syslogs = set(await get_active_syslogs()) 133 | running_syslogs = await get_running_syslogs() 134 | for running_syslog in running_syslogs: 135 | if running_syslog not in active_syslogs: 136 | log.warning("Killing rogue syslog %s", running_syslog) 137 | await docker.rm_service(running_syslog) 138 | -------------------------------------------------------------------------------- /disco/utils/tunnels.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from dataclasses import dataclass 4 | from datetime import datetime, timedelta, timezone 5 | 6 | from disco import config 7 | from disco.utils import docker 8 | from disco.utils.subprocess import check_call 9 | 10 | log = logging.getLogger(__name__) 11 | 12 | 13 | @dataclass 14 | class ActiveTunnel: 15 | expires: datetime 16 | service_name: str 17 | 18 | 19 | tunnel_list_lock = asyncio.Lock() 20 | _active_tunnels: list[ActiveTunnel] = [] 21 | 22 | TUNNEL_CMD = [ 23 | "docker", 24 | "service", 25 | "create", 26 | "--name", 27 | "{name}", 28 | "--env", 29 | "PASSWORD={password}", 30 | "--publish", 31 | "published={host_port},target=22,protocol=tcp", 32 | "--network", 33 | "disco-main", 34 | "--label", 35 | "disco.tunnels", 36 | "--log-driver", 37 | "json-file", 38 | "--log-opt", 39 | "max-size=20m", 40 | "--log-opt", 41 | "max-file=5", 42 | f"letsdiscodev/sshtunnel:{config.DISCO_TUNNEL_VERSION}", 43 | ] 44 | 45 | 46 | def get_service_name(port: int) -> str: 47 | return f"disco-tunnel-{port}" 48 | 49 | 50 | async def monitor_tunnel(service_name: str) -> None: 51 | global _active_tunnels 52 | log.info("Adding %s to the list of monitored tunnels", service_name) 53 | async with tunnel_list_lock: 54 | _active_tunnels.append( 55 | ActiveTunnel( 56 | service_name=service_name, 57 | expires=datetime.now(timezone.utc) + timedelta(minutes=5), 58 | ) 59 | ) 60 | 61 | 62 | async def extend_tunnel_expiration(service_name: str) -> None: 63 | global _active_tunnels 64 | log.info("Setting expiration of tunnel %s to 5 minutes from now", service_name) 65 | async with tunnel_list_lock: 66 | for tunnel in _active_tunnels: 67 | if tunnel.service_name == service_name: 68 | tunnel.expires = datetime.now(timezone.utc) + timedelta(minutes=5) 69 | return 70 | log.warning("Couldn't find active tunnel %s, not extending", service_name) 71 | 72 | 73 | async def close_tunnel(service_name: str) -> None: 74 | global _active_tunnels 75 | log.info("Closing tunnel %s", service_name) 76 | async with tunnel_list_lock: 77 | for tunnel in _active_tunnels: 78 | if tunnel.service_name == service_name: 79 | _active_tunnels.remove(tunnel) 80 | running_tunnels = await get_running_tunnels() 81 | if service_name in running_tunnels: 82 | await docker.rm_service(service_name) 83 | 84 | 85 | async def get_active_tunnels() -> list[str]: 86 | global _active_tunnels 87 | async with tunnel_list_lock: 88 | return [sl.service_name for sl in _active_tunnels] 89 | 90 | 91 | async def get_expired_tunnels() -> list[str]: 92 | global 
_active_tunnels 93 | async with tunnel_list_lock: 94 | return [ 95 | sl.service_name 96 | for sl in _active_tunnels 97 | if sl.expires < datetime.now(timezone.utc) 98 | ] 99 | 100 | 101 | async def get_running_tunnels() -> list[str]: 102 | args = [ 103 | "docker", 104 | "service", 105 | "ls", 106 | "--filter", 107 | "label=disco.tunnels", 108 | "--format", 109 | "{{ .Name }}", 110 | ] 111 | stdout, _, _ = await check_call(args) 112 | return stdout 113 | 114 | 115 | async def stop_expired_tunnels() -> None: 116 | """Close tunnels that just expired within the last minute. 117 | 118 | If for some reason, the CLI doesn't tell Disco to close the tunnel, 119 | we catch that it expired and we close it. 120 | 121 | """ 122 | expired_tunnels = await get_expired_tunnels() 123 | for expired_tunnel in expired_tunnels: 124 | log.warning("Killing expired tunnel %s", expired_tunnel) 125 | await close_tunnel(expired_tunnel) 126 | 127 | 128 | async def clean_up_rogue_tunnels() -> None: 129 | """Close tunnels that could still run but we don't know about. 130 | 131 | E.g. if the server was restarted while a tunnel was running. 132 | The tunnel would still run but Disco wouldn't know about it. 133 | 134 | """ 135 | active_tunnels = set(await get_active_tunnels()) 136 | running_tunnels = await get_running_tunnels() 137 | for running_tunnel in running_tunnels: 138 | if running_tunnel not in active_tunnels: 139 | log.warning("Killing rogue tunnel %s", running_tunnel) 140 | await close_tunnel(running_tunnel) 141 | -------------------------------------------------------------------------------- /disco/endpoints/projectdomains.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Annotated 3 | 4 | from fastapi import APIRouter, Depends, HTTPException, Path 5 | from fastapi.exceptions import RequestValidationError 6 | from pydantic import BaseModel, Field, ValidationError 7 | from pydantic_core import InitErrorDetails, PydanticCustomError 8 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 9 | 10 | from disco.auth import get_api_key 11 | from disco.endpoints.dependencies import get_db, get_project_from_url 12 | from disco.models import ApiKey, Project, ProjectDomain 13 | from disco.utils import keyvalues 14 | from disco.utils.projectdomains import ( 15 | DOMAIN_REGEX, 16 | add_domain, 17 | get_domain_by_id, 18 | get_domain_by_name, 19 | remove_domain, 20 | ) 21 | 22 | log = logging.getLogger(__name__) 23 | 24 | router = APIRouter(dependencies=[Depends(get_api_key)]) 25 | 26 | 27 | @router.get("/api/projects/{project_name}/domains") 28 | async def domains_get( 29 | project: Annotated[Project, Depends(get_project_from_url)], 30 | ): 31 | domains = await project.awaitable_attrs.domains 32 | return { 33 | "domains": [ 34 | { 35 | "id": domain.id, 36 | "name": domain.name, 37 | } 38 | for domain in domains 39 | ] 40 | } 41 | 42 | 43 | class AddDomainReqBody(BaseModel): 44 | domain: str = Field(..., pattern=DOMAIN_REGEX) 45 | 46 | 47 | @router.post("/api/projects/{project_name}/domains", status_code=201) 48 | async def domains_post( 49 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 50 | project: Annotated[Project, Depends(get_project_from_url)], 51 | api_key: Annotated[ApiKey, Depends(get_api_key)], 52 | req_body: AddDomainReqBody, 53 | ): 54 | domain = await get_domain_by_name(dbsession, req_body.domain) 55 | if domain is not None: 56 | raise RequestValidationError( 57 | errors=( 58 | ValidationError.from_exception_data( 
59 | "ValueError", 60 | [ 61 | InitErrorDetails( 62 | type=PydanticCustomError( 63 | "value_error", "Domain already taken by a project" 64 | ), 65 | loc=("body", "domain"), 66 | input=req_body.domain, 67 | ) 68 | ], 69 | ) 70 | ).errors() 71 | ) 72 | disco_host = await keyvalues.get_value_str(dbsession, "DISCO_HOST") 73 | assert disco_host is not None 74 | if req_body.domain == disco_host: 75 | raise RequestValidationError( 76 | errors=( 77 | ValidationError.from_exception_data( 78 | "ValueError", 79 | [ 80 | InitErrorDetails( 81 | type=PydanticCustomError( 82 | "value_error", 83 | "Domain already taken by Disco", 84 | ), 85 | loc=("body", "domain"), 86 | input=req_body.domain, 87 | ) 88 | ], 89 | ) 90 | ).errors() 91 | ) 92 | await add_domain( 93 | dbsession=dbsession, 94 | project=project, 95 | domain_name=req_body.domain, 96 | by_api_key=api_key, 97 | ) 98 | return {} 99 | 100 | 101 | async def get_domain_from_url( 102 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 103 | project: Annotated[Project, Depends(get_project_from_url)], 104 | domain_id: Annotated[str, Path()], 105 | ): 106 | domain = await get_domain_by_id( 107 | dbsession=dbsession, 108 | domain_id=domain_id, 109 | ) 110 | if domain is None: 111 | raise HTTPException(status_code=404) 112 | if domain.project_id != project.id: 113 | raise HTTPException(status_code=404) 114 | yield domain 115 | 116 | 117 | @router.delete("/api/projects/{project_name}/domains/{domain_id}", status_code=204) 118 | async def domain_delete( 119 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 120 | domain: Annotated[ProjectDomain, Depends(get_domain_from_url)], 121 | api_key: Annotated[ApiKey, Depends(get_api_key)], 122 | ): 123 | await remove_domain( 124 | dbsession=dbsession, 125 | domain=domain, 126 | by_api_key=api_key, 127 | ) 128 | return {} 129 | -------------------------------------------------------------------------------- /disco/utils/commandruns.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import uuid 3 | from typing import Awaitable, Callable 4 | 5 | from sqlalchemy.orm.session import Session as DBSession 6 | 7 | from disco.models import ApiKey, CommandRun, Deployment, Project 8 | from disco.utils import commandoutputs, docker, keyvalues 9 | from disco.utils.discofile import DiscoFile, ServiceType, get_disco_file_from_str 10 | from disco.utils.encryption import decrypt 11 | from disco.utils.projects import volume_name_for_project 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | 16 | def create_command_run( 17 | dbsession: DBSession, 18 | project: Project, 19 | deployment: Deployment, 20 | service: str, 21 | command: str, 22 | timeout: int, 23 | by_api_key: ApiKey, 24 | ) -> tuple[CommandRun, Callable[[], Awaitable[None]]]: 25 | disco_file: DiscoFile = get_disco_file_from_str(deployment.disco_file) 26 | assert deployment.status == "COMPLETE" 27 | assert service in disco_file.services 28 | number = get_next_run_number(dbsession, project) 29 | command_run = CommandRun( 30 | id=uuid.uuid4().hex, 31 | number=number, 32 | service=service, 33 | command=command, 34 | status="CREATED", 35 | project=project, 36 | deployment=deployment, 37 | by_api_key=by_api_key, 38 | ) 39 | dbsession.add(command_run) 40 | registry_host = keyvalues.get_value_sync(dbsession, "REGISTRY_HOST") 41 | image = docker.get_image_name_for_service( 42 | disco_file=disco_file, 43 | service_name=service, 44 | registry_host=registry_host, 45 | project_name=project.name, 46 | 
deployment_number=deployment.number, 47 | ) 48 | project_name = project.name 49 | run_number = command_run.number 50 | run_id = command_run.id 51 | if disco_file.services[service].type == ServiceType.command: 52 | command = f"{disco_file.services[service].command} {command}" 53 | env_variables = [ 54 | (env_var.name, decrypt(env_var.value)) for env_var in deployment.env_variables 55 | ] 56 | env_variables += [ 57 | ("DISCO_PROJECT_NAME", project_name), 58 | ("DISCO_SERVICE_NAME", service), 59 | ("DISCO_HOST", keyvalues.get_value_str_sync(dbsession, "DISCO_HOST")), 60 | ("DISCO_DEPLOYMENT_NUMBER", str(deployment.number)), 61 | ] 62 | if deployment.commit_hash is not None: 63 | env_variables += [ 64 | ("DISCO_COMMIT", deployment.commit_hash), 65 | ] 66 | 67 | network = docker.deployment_network_name(project.name, deployment.number) 68 | volumes = [ 69 | ("volume", volume_name_for_project(v.name, project.id), v.destination_path) 70 | for v in disco_file.services[service].volumes 71 | ] 72 | 73 | async def func() -> None: 74 | await commandoutputs.init(commandoutputs.run_source(run_id)) 75 | 76 | async def log_output(output: str) -> None: 77 | await commandoutputs.store_output(commandoutputs.run_source(run_id), output) 78 | 79 | async def log_output_terminate(): 80 | await commandoutputs.terminate(commandoutputs.run_source(run_id)) 81 | 82 | name = f"{project_name}-run.{run_number}" 83 | try: 84 | await docker.run( 85 | image=image, 86 | project_name=project_name, 87 | name=name, 88 | env_variables=env_variables, 89 | volumes=volumes, 90 | networks=[network, "disco-main"], 91 | command=command, 92 | timeout=timeout, 93 | stdout=log_output, 94 | stderr=log_output, 95 | ) 96 | except TimeoutError: 97 | await log_output(f"Timed out after {timeout} seconds\n") 98 | except docker.CommandRunProcessStatusError as ex: 99 | await log_output(f"Exited with code {ex.status}\n") 100 | except Exception: 101 | log.exception("Error when running command %s (%s)", command, name) 102 | await log_output("Internal Disco error\n") 103 | finally: 104 | await log_output_terminate() 105 | 106 | return command_run, func 107 | 108 | 109 | def get_command_run_by_number( 110 | dbsession: DBSession, project: Project, number: int 111 | ) -> CommandRun | None: 112 | return ( 113 | dbsession.query(CommandRun) 114 | .filter(CommandRun.project == project) 115 | .filter(CommandRun.number == number) 116 | .first() 117 | ) 118 | 119 | 120 | def get_next_run_number(dbsession: DBSession, project: Project) -> int: 121 | run = ( 122 | dbsession.query(CommandRun) 123 | .filter(CommandRun.project == project) 124 | .order_by(CommandRun.number.desc()) 125 | .first() 126 | ) 127 | if run is None: 128 | number = 0 129 | else: 130 | number = run.number 131 | return number + 1 132 | -------------------------------------------------------------------------------- /disco/endpoints/envvariables.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Annotated 3 | 4 | from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Path 5 | from pydantic import BaseModel, Field 6 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 7 | from sqlalchemy.orm.session import Session as DBSession 8 | 9 | from disco.auth import get_api_key, get_api_key_sync 10 | from disco.endpoints.dependencies import ( 11 | get_db, 12 | get_db_sync, 13 | get_project_from_url, 14 | get_project_from_url_sync, 15 | ) 16 | from disco.models import ApiKey, Project, 
ProjectEnvironmentVariable 17 | from disco.utils.deploymentflow import enqueue_deployment 18 | from disco.utils.deployments import maybe_create_deployment 19 | from disco.utils.encryption import decrypt 20 | from disco.utils.envvariables import ( 21 | delete_env_variable, 22 | get_env_variable_by_name, 23 | get_env_variables_for_project_sync, 24 | set_env_variables, 25 | ) 26 | 27 | log = logging.getLogger(__name__) 28 | 29 | router = APIRouter(dependencies=[Depends(get_api_key_sync)]) 30 | 31 | 32 | @router.get("/api/projects/{project_name}/env") 33 | def env_variables_get( 34 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 35 | project: Annotated[Project, Depends(get_project_from_url_sync)], 36 | ): 37 | env_variables = get_env_variables_for_project_sync(dbsession, project) 38 | return { 39 | "envVariables": [ 40 | { 41 | "name": env_variable.name, 42 | "value": decrypt(env_variable.value), 43 | } 44 | for env_variable in env_variables 45 | ] 46 | } 47 | 48 | 49 | class EnvVariable(BaseModel): 50 | name: str = Field(..., pattern=r"^[a-zA-Z_]+[a-zA-Z0-9_]*$", max_length=255) 51 | value: str = Field(..., max_length=4000) 52 | 53 | 54 | class ReqEnvVariables(BaseModel): 55 | env_variables: list[EnvVariable] = Field(..., alias="envVariables", min_length=1) 56 | 57 | 58 | @router.post("/api/projects/{project_name}/env") 59 | async def env_variables_post( 60 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 61 | project: Annotated[Project, Depends(get_project_from_url)], 62 | api_key: Annotated[ApiKey, Depends(get_api_key)], 63 | req_env_variables: ReqEnvVariables, 64 | background_tasks: BackgroundTasks, 65 | ): 66 | await set_env_variables( 67 | dbsession=dbsession, 68 | project=project, 69 | env_variables=[ 70 | (env_var.name, env_var.value) for env_var in req_env_variables.env_variables 71 | ], 72 | by_api_key=api_key, 73 | ) 74 | deployment = await maybe_create_deployment( 75 | dbsession=dbsession, 76 | project=project, 77 | commit_hash=None, 78 | disco_file=None, 79 | by_api_key=api_key, 80 | ) 81 | if deployment is not None: 82 | background_tasks.add_task(enqueue_deployment, deployment.id) 83 | return { 84 | "deployment": { 85 | "number": deployment.number, 86 | } 87 | if deployment is not None 88 | else None, 89 | } 90 | 91 | 92 | async def get_env_variable_from_url( 93 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 94 | project: Annotated[Project, Depends(get_project_from_url)], 95 | env_var_name: Annotated[str, Path()], 96 | ): 97 | env_variable = await get_env_variable_by_name( 98 | dbsession=dbsession, 99 | project=project, 100 | name=env_var_name, 101 | ) 102 | if env_variable is None: 103 | raise HTTPException(status_code=404) 104 | yield env_variable 105 | 106 | 107 | @router.get("/api/projects/{project_name}/env/{env_var_name}") 108 | async def env_variable_get( 109 | env_variable: Annotated[ 110 | ProjectEnvironmentVariable, Depends(get_env_variable_from_url) 111 | ], 112 | ): 113 | return { 114 | "envVariable": { 115 | "name": env_variable.name, 116 | "value": decrypt(env_variable.value), 117 | } 118 | } 119 | 120 | 121 | @router.delete("/api/projects/{project_name}/env/{env_var_name}") 122 | async def env_variable_delete( 123 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 124 | env_variable: Annotated[ 125 | ProjectEnvironmentVariable, Depends(get_env_variable_from_url) 126 | ], 127 | api_key: Annotated[ApiKey, Depends(get_api_key)], 128 | background_tasks: BackgroundTasks, 129 | ): 130 | await delete_env_variable( 131 | 
dbsession=dbsession, 132 | env_variable=env_variable, 133 | ) 134 | deployment = await maybe_create_deployment( 135 | dbsession=dbsession, 136 | project=env_variable.project, 137 | commit_hash=None, 138 | disco_file=None, 139 | by_api_key=api_key, 140 | ) 141 | 142 | if deployment is not None: 143 | background_tasks.add_task(enqueue_deployment, deployment.id) 144 | return { 145 | "deployment": { 146 | "number": deployment.number, 147 | } 148 | if deployment is not None 149 | else None, 150 | } 151 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | # Disco Daemon 3 | 4 | The server-side engine for the Disco open-source PaaS. 5 | 6 | [header images: Disco logo; badges: License, Discord, Ask DeepWiki] 7 | 8 | 9 | 10 | 11 | 12 | 
13 | 14 | **Disco Daemon** is the core server-side component of the [Disco](https://disco.cloud) deployment platform. It runs on your server, acting as the brain and workhorse that manages your applications, automates deployments, and handles the underlying infrastructure. 15 | 16 | While this repository contains the daemon's source code, you typically won't interact with it directly. Instead, you'll use the [**Disco CLI**](https://github.com/letsdiscodev/cli) to install, manage, and communicate with the daemon. 17 | 18 | ## What is Disco? 19 | 20 | Disco is an open-source web deployment platform that lets you host web apps on your own server or Raspberry Pi with the simplicity of a managed PaaS. It helps you **Deploy Any Web App, Pay Less, and Own It All**. 21 | 22 | The Disco ecosystem consists of two main parts: 23 | * [**`disco-cli`**](https://github.com/letsdiscodev/cli): The command-line interface you use on your local machine to manage your servers and projects. 24 | * **`disco-daemon`** (This repo): The agent that runs on your server, executing commands sent by the CLI. 25 | 26 | ## How the Daemon Works 27 | 28 | The Disco Daemon is a self-contained system designed for reliability and ease of use. When you initialize a server with `disco init`, the CLI installs and configures this daemon for you. From then on, the daemon listens for API requests to carry out tasks. 29 | 30 | At its core, the daemon is built on a modern, robust tech stack: 31 | 32 | * **FastAPI**: Exposes a clean, secure REST API for the `disco-cli` to interact with. 33 | * **Docker Swarm**: Manages and orchestrates your applications as containerized services, providing resilience and scalability out of the box. 34 | * **Caddy**: Provides an integrated, fully-managed reverse proxy with automatic HTTPS, certificate renewal, and zero-config routing for your projects. 35 | * **SQLAlchemy & Alembic**: Manages the persistent state of your projects, domains, and deployments in a local SQLite database, with seamless schema migrations. 36 | 37 | ### The Deployment Flow 38 | 39 | When you deploy a project using `git push` or the CLI: 40 | 41 | 1. **Trigger**: The daemon receives a request via a GitHub webhook or a direct API call. 42 | 2. **Queue**: The deployment is added to a queue to be processed sequentially. 43 | 3. **Prepare**: The daemon checks out your code, reads the `disco.json` file, and builds a Docker image. 44 | 4. **Deploy**: A new service is started in Docker Swarm with zero downtime. Caddy automatically configures routing and TLS for any specified domains. 45 | 5. **Cleanup**: Once the new version is healthy, the old version is gracefully shut down. 46 | 47 | ## Key Features 48 | 49 | * **Zero-Downtime Deployments**: Seamlessly rolls out new versions of your applications. 50 | * **Automatic HTTPS**: Caddy integration provides free, auto-renewing SSL/TLS certificates. 51 | * **Git-Based & CLI-Driven Workflows**: Deploy via a simple `git push` or `disco deploy`. 52 | * **Built on Docker Swarm**: Leverages a production-grade container orchestrator for stability. 53 | * **Extensible with Hooks**: Run pre-deployment and post-deployment scripts. 54 | * **Self-Contained & Lightweight**: Runs efficiently on anything from a large cloud VM to a Raspberry Pi. 55 | 56 | ## Getting Started 57 | 58 | **You should not clone this repository to get started.** 59 | 60 | The intended way to use Disco is through the **[Disco CLI](https://github.com/letsdiscodev/cli)**. 
The CLI will automatically install and manage the daemon on your server for you. 61 | 62 | 1. **Install the Disco CLI on your local machine:** 63 | ```bash 64 | curl https://cli-assets.letsdisco.dev/install.sh | sh 65 | ``` 66 | 67 | 2. **Initialize your server:** 68 | Point a domain to your server's IP, then run the command below. It will connect via SSH, install Docker, and set up the Disco Daemon. 69 | ```bash 70 | disco init root@disco.example.com 71 | ``` 72 | 73 | From there, the CLI will guide you through connecting your GitHub account and deploying your first project. 74 | 75 | ## Development and Contribution 76 | 77 | Interested in contributing to the Disco Daemon? That's great! 78 | 79 | We welcome bug reports, feature requests, and pull requests. Please check out the [Issues](https://github.com/letsdiscodev/disco-daemon/issues) tab or join our [Discord](https://discord.gg/7J4vb5uUwU) to chat with the community. 80 | 81 | ## License 82 | 83 | This project is licensed under the [MIT License](LICENSE). 84 | -------------------------------------------------------------------------------- /disco/endpoints/tunnels.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import random 4 | from secrets import token_hex 5 | from typing import Annotated 6 | 7 | from fastapi import APIRouter, Depends, HTTPException, Path 8 | from fastapi.exceptions import RequestValidationError 9 | from pydantic import BaseModel, ValidationError 10 | from pydantic_core import InitErrorDetails, PydanticCustomError 11 | 12 | from disco.auth import get_api_key_wo_tx 13 | from disco.models.db import AsyncSession 14 | from disco.utils import docker 15 | from disco.utils.deployments import get_live_deployment 16 | from disco.utils.discofile import get_disco_file_from_str 17 | from disco.utils.projects import get_project_by_name 18 | from disco.utils.subprocess import decode_text 19 | from disco.utils.tunnels import ( 20 | TUNNEL_CMD, 21 | close_tunnel, 22 | extend_tunnel_expiration, 23 | get_service_name, 24 | monitor_tunnel, 25 | ) 26 | 27 | log = logging.getLogger(__name__) 28 | 29 | router = APIRouter(dependencies=[Depends(get_api_key_wo_tx)]) 30 | 31 | 32 | class CreateTunnelReqBody(BaseModel): 33 | project: str 34 | service: str 35 | 36 | 37 | @router.post("/api/tunnels", status_code=201) 38 | async def tunnels_post(req_body: CreateTunnelReqBody): 39 | async with AsyncSession.begin() as dbsession: 40 | project = await get_project_by_name(dbsession, req_body.project) 41 | if project is None: 42 | raise RequestValidationError( 43 | errors=( 44 | ValidationError.from_exception_data( 45 | "ValueError", 46 | [ 47 | InitErrorDetails( 48 | type=PydanticCustomError( 49 | "value_error", "Project name not found" 50 | ), 51 | loc=("body", "project"), 52 | input=req_body.project, 53 | ) 54 | ], 55 | ) 56 | ).errors() 57 | ) 58 | 59 | deployment = await get_live_deployment(dbsession, project) 60 | if deployment is None: 61 | raise HTTPException(422, "Project does not have an active deployment") 62 | disco_file = get_disco_file_from_str(deployment.disco_file) 63 | if req_body.service not in disco_file.services: 64 | raise RequestValidationError( 65 | errors=( 66 | ValidationError.from_exception_data( 67 | "ValueError", 68 | [ 69 | InitErrorDetails( 70 | type=PydanticCustomError( 71 | "value_error", 72 | f"Service not found in {list(disco_file.services)}", 73 | ), 74 | loc=("body", "service"), 75 | input=req_body.service, 76 | ) 77 | ], 78 | ) 79 | 
).errors() 80 | ) 81 | service = disco_file.services[req_body.service] 82 | host = ( 83 | f"{req_body.project}-{req_body.service}" 84 | if service.exposed_internally 85 | else docker.service_name( 86 | req_body.project, 87 | req_body.service, 88 | deployment.number, 89 | ) 90 | ) 91 | tunnel_cmd = TUNNEL_CMD.copy() 92 | port = random.randint(10000, 65535) 93 | password = token_hex(16) 94 | tunnel_service_name = get_service_name(port) 95 | assert tunnel_cmd[4] == "{name}" 96 | assert tunnel_cmd[6] == "PASSWORD={password}" 97 | assert tunnel_cmd[8] == "published={host_port},target=22,protocol=tcp" 98 | tunnel_cmd[4] = tunnel_cmd[4].format(name=tunnel_service_name) 99 | tunnel_cmd[6] = tunnel_cmd[6].format(password=password) 100 | tunnel_cmd[8] = tunnel_cmd[8].format(host_port=port) 101 | await monitor_tunnel(tunnel_service_name) 102 | process = await asyncio.create_subprocess_exec( 103 | *tunnel_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE 104 | ) 105 | 106 | async def read_stdout() -> None: 107 | assert process.stdout is not None 108 | async for line in process.stdout: 109 | log.info(decode_text(line)[:-1]) 110 | 111 | async def read_stderr() -> None: 112 | assert process.stderr is not None 113 | async for line in process.stderr: 114 | log.info(decode_text(line)[:-1]) 115 | 116 | tasks = [ 117 | asyncio.create_task(read_stdout()), 118 | asyncio.create_task(read_stderr()), 119 | ] 120 | timeout = 20 121 | try: 122 | async with asyncio.timeout(timeout): 123 | await asyncio.gather(*tasks) 124 | except TimeoutError: 125 | process.terminate() 126 | raise Exception(f"Running command failed, timeout after {timeout} seconds") 127 | 128 | await process.wait() 129 | if process.returncode != 0: 130 | raise Exception(f"Docker returned status {process.returncode}") 131 | return { 132 | "tunnel": { 133 | "host": host, 134 | "password": password, 135 | "port": port, 136 | } 137 | } 138 | 139 | 140 | @router.post("/api/tunnels/{port}") 141 | async def tunnel_post(port: Annotated[int, Path()]): 142 | await extend_tunnel_expiration(get_service_name(port)) 143 | return {} 144 | 145 | 146 | @router.delete("/api/tunnels/{port}") 147 | async def tunnel_delete(port: Annotated[int, Path()]): 148 | await close_tunnel(get_service_name(port)) 149 | return {} 150 | -------------------------------------------------------------------------------- /disco/utils/filesystem.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | import shutil 5 | from pathlib import Path 6 | 7 | import aiofiles.os 8 | 9 | log = logging.getLogger(__name__) 10 | 11 | 12 | async def rmtree(path: str) -> None: 13 | def inner_rmtree() -> None: 14 | shutil.rmtree(path) 15 | 16 | await asyncio.get_event_loop().run_in_executor(None, inner_rmtree) 17 | 18 | 19 | async def path_unlink(path: str, missing_ok: bool = False) -> None: 20 | def inner_path_unlink() -> None: 21 | f = Path(path) 22 | f.unlink(missing_ok=missing_ok) 23 | 24 | await asyncio.get_event_loop().run_in_executor(None, inner_path_unlink) 25 | 26 | 27 | def projects_root() -> str: 28 | return "/disco/projects" 29 | 30 | 31 | def project_path(project_name: str) -> str: 32 | return f"/disco/projects/{project_name}" 33 | 34 | 35 | def project_path_on_host(host_home: str, project_name: str) -> str: 36 | return f"{host_home}{project_path(project_name)}" 37 | 38 | 39 | async def project_folder_exists(project_name: str): 40 | return await 
aiofiles.os.path.isdir(project_path(project_name)) 41 | 42 | 43 | async def read_disco_file( 44 | project_name: str, disco_json_path: str = "disco.json" 45 | ) -> str | None: 46 | path = f"{project_path(project_name)}/{disco_json_path}" 47 | log.info("Reading disco file %s", path) 48 | if not await aiofiles.os.path.isfile(path): 49 | log.info("Disco file does not exist, not reading %s", path) 50 | return None 51 | async with aiofiles.open(path, "r", encoding="utf-8") as f: 52 | return await f.read() 53 | 54 | 55 | def static_sites_root() -> str: 56 | return "/disco/srv" 57 | 58 | 59 | def static_site_deployments_path(project_name: str) -> str: 60 | return f"/disco/srv/{project_name}" 61 | 62 | 63 | def static_site_deployment_path(project_name: str, deployment_number: int) -> str: 64 | return f"{static_site_deployments_path(project_name)}/{deployment_number}" 65 | 66 | 67 | def static_site_deployment_path_host_machine( 68 | host_home: str, project_name: str, deployment_number: int 69 | ) -> str: 70 | path = static_site_deployment_path(project_name, deployment_number) 71 | return f"{host_home}{path}" 72 | 73 | 74 | def create_static_site_deployment_directory_sync( 75 | host_home: str, project_name: str, deployment_number: int 76 | ) -> str: 77 | path = static_site_deployment_path(project_name, deployment_number) 78 | os.makedirs(path) 79 | return static_site_deployment_path_host_machine( 80 | host_home, project_name, deployment_number 81 | ) 82 | 83 | 84 | async def create_static_site_deployment_directory( 85 | host_home: str, project_name: str, deployment_number: int 86 | ) -> str: 87 | path = static_site_deployment_path(project_name, deployment_number) 88 | await aiofiles.os.makedirs(path) 89 | return static_site_deployment_path_host_machine( 90 | host_home, project_name, deployment_number 91 | ) 92 | 93 | 94 | async def remove_project_static_deployments_if_any(project_name: str) -> None: 95 | path = static_site_deployments_path(project_name) 96 | if await aiofiles.os.path.isdir(path): 97 | await rmtree(path) 98 | 99 | 100 | def static_site_src_public_path(project_name: str, public_path: str) -> str: 101 | path = os.path.abspath(f"{project_path(project_name)}/{public_path}") 102 | if not path.startswith(f"{project_path(project_name)}/"): 103 | # prevent traversal attacks 104 | raise Exception("publicPath must be inside project folder") 105 | return path 106 | 107 | 108 | async def copy_static_site_src_to_deployment_folder( 109 | project_name: str, public_path: str, deployment_number: int 110 | ) -> None: 111 | src_path = static_site_src_public_path(project_name, public_path) 112 | dst_path = static_site_deployment_path(project_name, deployment_number) 113 | 114 | def copytree_sync(): 115 | shutil.copytree(src_path, dst_path) 116 | 117 | loop = asyncio.get_running_loop() 118 | await loop.run_in_executor(None, copytree_sync) 119 | 120 | 121 | def _certificate_directory(domain: str) -> str: 122 | return f"/disco/caddy/data/caddy/certificates/acme-v02.api.letsencrypt.org-directory/{domain}" 123 | 124 | 125 | def get_caddy_key_crt(domain: str) -> str: 126 | path = f"{_certificate_directory(domain)}/{domain}.crt" 127 | with open(path, "r", encoding="utf-8") as f: 128 | return f.read() 129 | 130 | 131 | def set_caddy_key_crt(domain: str, value: str) -> None: 132 | directory = _certificate_directory(domain) 133 | if not os.path.isdir(directory): 134 | os.makedirs(directory) 135 | path = f"{directory}/{domain}.crt" 136 | with open(path, "w", encoding="utf-8") as f: 137 | f.write(value) 138 | 139 | 
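# Illustrative sketch (editor's note, not part of the original module): the
# Caddy certificate helpers in this file mirror Caddy's on-disk storage
# layout, so importing an existing certificate for a domain is three writes.
# The domain and the PEM/JSON payload variables below are hypothetical.
#
#     from disco.utils import filesystem
#
#     domain = "app.example.com"  # hypothetical domain
#     filesystem.set_caddy_key_crt(domain, crt_pem)     # PEM certificate chain
#     filesystem.set_caddy_key_key(domain, key_pem)     # PEM private key
#     filesystem.set_caddy_key_meta(domain, meta_json)  # Caddy metadata JSON
#
# Each helper writes into _certificate_directory(domain), i.e. under
# /disco/caddy/data/caddy/certificates/acme-v02.api.letsencrypt.org-directory/,
# which is exactly where Caddy looks for managed certificates.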
140 | def get_caddy_key_key(domain: str) -> str: 141 | path = f"{_certificate_directory(domain)}/{domain}.key" 142 | with open(path, "r", encoding="utf-8") as f: 143 | return f.read() 144 | 145 | 146 | def set_caddy_key_key(domain: str, value: str) -> None: 147 | directory = _certificate_directory(domain) 148 | if not os.path.isdir(directory): 149 | os.makedirs(directory) 150 | path = f"{directory}/{domain}.key" 151 | with open(path, "w", encoding="utf-8") as f: 152 | f.write(value) 153 | 154 | 155 | def get_caddy_key_meta(domain: str) -> str: 156 | path = f"{_certificate_directory(domain)}/{domain}.json" 157 | with open(path, "r", encoding="utf-8") as f: 158 | return f.read() 159 | 160 | 161 | def set_caddy_key_meta(domain: str, value: str) -> None: 162 | directory = _certificate_directory(domain) 163 | if not os.path.isdir(directory): 164 | os.makedirs(directory) 165 | path = f"{directory}/{domain}.json" 166 | with open(path, "w", encoding="utf-8") as f: 167 | f.write(value) 168 | -------------------------------------------------------------------------------- /disco/utils/commandoutputs.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | import uuid 5 | from dataclasses import dataclass 6 | from datetime import datetime, timedelta, timezone 7 | 8 | from sqlalchemy import String, UnicodeText, select 9 | from sqlalchemy.ext.asyncio import ( 10 | AsyncAttrs, 11 | AsyncEngine, 12 | async_sessionmaker, 13 | create_async_engine, 14 | ) 15 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 16 | from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column 17 | from sqlalchemy.schema import MetaData 18 | 19 | from disco.models.meta import NAMING_CONVENTION, DateTimeTzAware 20 | from disco.utils.filesystem import path_unlink 21 | 22 | log = logging.getLogger(__name__) 23 | 24 | base_metadata = MetaData(naming_convention=NAMING_CONVENTION) 25 | 26 | 27 | class Base(AsyncAttrs, DeclarativeBase): 28 | metadata = base_metadata 29 | 30 | 31 | class CommandOutput(Base): 32 | __tablename__ = "command_outputs" 33 | 34 | id: Mapped[str] = mapped_column( 35 | String(32), default=lambda: uuid.uuid4().hex, primary_key=True 36 | ) 37 | created: Mapped[datetime] = mapped_column( 38 | DateTimeTzAware(), 39 | default=lambda: datetime.now(timezone.utc), 40 | index=True, 41 | nullable=False, 42 | ) 43 | # None means no more content 44 | text: Mapped[str | None] = mapped_column(UnicodeText()) 45 | 46 | 47 | @dataclass 48 | class Output: 49 | id: str 50 | created: datetime 51 | text: str | None 52 | 53 | 54 | @dataclass 55 | class OutputDbConnection: 56 | last_used: datetime 57 | engine: AsyncEngine 58 | session: async_sessionmaker[AsyncDBSession] 59 | 60 | 61 | _dbs_lock = asyncio.Lock() # when adding/removing dbs 62 | _dbs: dict[str, OutputDbConnection] = {} 63 | 64 | 65 | def _db_url(source: str) -> str: 66 | return f"sqlite+aiosqlite:///{_db_file_path(source)}" 67 | 68 | 69 | def _db_file_path(source: str) -> str: 70 | return f"/disco/data/commandoutputs/{source}.sqlite3" 71 | 72 | 73 | async def _db_connection(source: str) -> OutputDbConnection: 74 | global _dbs 75 | if source not in _dbs: 76 | async with _dbs_lock: 77 | if source not in _dbs: # double check now that we have the lock 78 | engine = create_async_engine( 79 | _db_url(source), connect_args={"check_same_thread": False} 80 | ) 81 | session = async_sessionmaker( 82 | autocommit=False, autoflush=False, bind=engine 83 | ) 84 | _dbs[source] = 
OutputDbConnection( 85 | engine=engine, 86 | session=session, 87 | last_used=datetime.now(timezone.utc), 88 | ) 89 | _dbs[source].last_used = datetime.now(timezone.utc) 90 | return _dbs[source] 91 | 92 | 93 | async def init(source: str) -> None: 94 | directory = "/disco/data/commandoutputs" 95 | if not os.path.isdir(directory): 96 | os.makedirs(directory) 97 | engine = (await _db_connection(source)).engine 98 | async with engine.begin() as conn: 99 | await conn.run_sync(base_metadata.create_all) 100 | 101 | 102 | async def _dispose(source: str) -> None: 103 | async with _dbs_lock: 104 | if source in _dbs: 105 | log.info("Disposing of DB connection for command output %s", source) 106 | await _dbs[source].engine.dispose() 107 | del _dbs[source] 108 | 109 | 110 | async def store_output(source: str, text: str) -> None: 111 | AsyncSession = (await _db_connection(source)).session 112 | async with AsyncSession.begin() as dbsession: 113 | _log(dbsession=dbsession, source=source, text=text) 114 | 115 | 116 | async def terminate(source: str) -> None: 117 | AsyncSession = (await _db_connection(source)).session 118 | async with AsyncSession.begin() as dbsession: 119 | _log(dbsession=dbsession, source=source, text=None) 120 | 121 | 122 | def _log(dbsession: AsyncDBSession, source: str, text: str | None) -> None: 123 | cmd_output = CommandOutput( 124 | text=text, 125 | ) 126 | dbsession.add(cmd_output) 127 | 128 | 129 | async def get_next(source: str, after: datetime | None = None) -> Output | None: 130 | AsyncSession = (await _db_connection(source)).session 131 | async with AsyncSession.begin() as dbsession: 132 | stmt = select(CommandOutput) 133 | if after is not None: 134 | stmt = stmt.where(CommandOutput.created > after) 135 | stmt = stmt.order_by(CommandOutput.created).limit(1) 136 | result = await dbsession.execute(stmt) 137 | cmd_output = result.scalars().first() 138 | if cmd_output is None: 139 | return None 140 | return Output( 141 | id=cmd_output.id, created=cmd_output.created, text=cmd_output.text 142 | ) 143 | 144 | 145 | async def delete_output_for_source(source: str) -> None: 146 | await path_unlink(path=_db_file_path(source), missing_ok=True) 147 | 148 | 149 | async def get_by_id(source: str, output_id: str) -> Output | None: 150 | AsyncSession = (await _db_connection(source)).session 151 | async with AsyncSession.begin() as dbsession: 152 | cmd_output = await dbsession.get(CommandOutput, output_id) 153 | if cmd_output is None: 154 | return None 155 | return Output( 156 | id=cmd_output.id, 157 | created=cmd_output.created, 158 | text=cmd_output.text, 159 | ) 160 | 161 | 162 | async def clean_up_db_connections() -> None: 163 | global _dbs 164 | six_hours_ago = datetime.now(timezone.utc) - timedelta(hours=6) 165 | old_db_sources = set() 166 | for source, db in _dbs.items(): 167 | if db.last_used < six_hours_ago: 168 | old_db_sources.add(source) 169 | for source in old_db_sources: 170 | await _dispose(source) 171 | 172 | 173 | def deployment_source(deployment_id: str) -> str: 174 | return f"deployment_{deployment_id}" 175 | 176 | 177 | def run_source(run_id: str) -> str: 178 | return f"run_{run_id}" 179 | -------------------------------------------------------------------------------- /disco/auth.py: -------------------------------------------------------------------------------- 1 | from typing import Annotated 2 | 3 | import jwt 4 | from fastapi import Depends, HTTPException 5 | from fastapi.security import ( 6 | HTTPAuthorizationCredentials, 7 | HTTPBasic, 8 | HTTPBasicCredentials, 9 | 
HTTPBearer, 10 | ) 11 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 12 | from sqlalchemy.orm.session import Session as DBSession 13 | 14 | from disco.endpoints.dependencies import get_db, get_db_sync 15 | from disco.models.db import AsyncSession 16 | from disco.utils import keyvalues 17 | from disco.utils.apikeys import ( 18 | get_api_key_by_public_key, 19 | get_api_key_by_public_key_sync, 20 | get_valid_api_key_by_id, 21 | get_valid_api_key_by_id_sync, 22 | record_api_key_usage, 23 | record_api_key_usage_sync, 24 | ) 25 | 26 | basic_header = HTTPBasic(auto_error=False) 27 | bearer_header = HTTPBearer(auto_error=False) 28 | 29 | 30 | def get_api_key_sync( 31 | basic_credentials: Annotated[HTTPBasicCredentials | None, Depends(basic_header)], 32 | bearer_credentials: Annotated[ 33 | HTTPAuthorizationCredentials | None, Depends(bearer_header) 34 | ], 35 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 36 | ): 37 | api_key_str = None 38 | if basic_credentials is not None: 39 | api_key_str = basic_credentials.username 40 | elif bearer_credentials is not None: 41 | bearer_jwt = bearer_credentials.credentials 42 | try: 43 | headers = jwt.get_unverified_header(bearer_jwt) 44 | except jwt.PyJWTError: 45 | headers = None 46 | if headers is not None: 47 | public_key = headers["kid"] 48 | api_key_for_public_key = get_api_key_by_public_key_sync( 49 | dbsession, public_key 50 | ) 51 | if api_key_for_public_key is not None: 52 | disco_host = keyvalues.get_value_str_sync(dbsession, "DISCO_HOST") 53 | try: 54 | jwt.decode( 55 | bearer_jwt, 56 | api_key_for_public_key.id, 57 | algorithms=["HS256"], 58 | audience=disco_host, 59 | options=dict( 60 | verify_signature=True, 61 | verify_exp=True, 62 | ), 63 | ) 64 | api_key_str = api_key_for_public_key.id 65 | except jwt.PyJWTError: 66 | pass 67 | if api_key_str is None: 68 | raise HTTPException(status_code=401) 69 | api_key = get_valid_api_key_by_id_sync(dbsession, api_key_str) 70 | if api_key is None: 71 | raise HTTPException(status_code=403) 72 | record_api_key_usage_sync(dbsession, api_key) 73 | yield api_key 74 | 75 | 76 | async def get_api_key( 77 | basic_credentials: Annotated[HTTPBasicCredentials | None, Depends(basic_header)], 78 | bearer_credentials: Annotated[ 79 | HTTPAuthorizationCredentials | None, Depends(bearer_header) 80 | ], 81 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 82 | ): 83 | api_key_str = None 84 | if basic_credentials is not None: 85 | api_key_str = basic_credentials.username 86 | elif bearer_credentials is not None: 87 | bearer_jwt = bearer_credentials.credentials 88 | try: 89 | headers = jwt.get_unverified_header(bearer_jwt) 90 | except jwt.PyJWTError: 91 | headers = None 92 | if headers is not None: 93 | public_key = headers["kid"] 94 | api_key_for_public_key = await get_api_key_by_public_key( 95 | dbsession, public_key 96 | ) 97 | if api_key_for_public_key is not None: 98 | disco_host = await keyvalues.get_value_str(dbsession, "DISCO_HOST") 99 | try: 100 | jwt.decode( 101 | bearer_jwt, 102 | api_key_for_public_key.id, 103 | algorithms=["HS256"], 104 | audience=disco_host, 105 | options=dict( 106 | verify_signature=True, 107 | verify_exp=True, 108 | ), 109 | ) 110 | api_key_str = api_key_for_public_key.id 111 | except jwt.PyJWTError: 112 | pass 113 | if api_key_str is None: 114 | raise HTTPException(status_code=401) 115 | api_key = await get_valid_api_key_by_id(dbsession, api_key_str) 116 | if api_key is None: 117 | raise HTTPException(status_code=403) 118 | await 
record_api_key_usage(dbsession, api_key) 119 | yield api_key 120 | 121 | 122 | async def get_api_key_wo_tx( 123 | basic_credentials: Annotated[HTTPBasicCredentials | None, Depends(basic_header)], 124 | bearer_credentials: Annotated[ 125 | HTTPAuthorizationCredentials | None, Depends(bearer_header) 126 | ], 127 | ): 128 | api_key_id = None 129 | async with AsyncSession.begin() as dbsession: 130 | api_key_str = None 131 | if basic_credentials is not None: 132 | api_key_str = basic_credentials.username 133 | elif bearer_credentials is not None: 134 | bearer_jwt = bearer_credentials.credentials 135 | try: 136 | headers = jwt.get_unverified_header(bearer_jwt) 137 | except jwt.PyJWTError: 138 | headers = None 139 | if headers is not None: 140 | public_key = headers["kid"] 141 | api_key_for_public_key = await get_api_key_by_public_key( 142 | dbsession, public_key 143 | ) 144 | if api_key_for_public_key is not None: 145 | disco_host = await keyvalues.get_value_str(dbsession, "DISCO_HOST") 146 | try: 147 | jwt.decode( 148 | bearer_jwt, 149 | api_key_for_public_key.id, 150 | algorithms=["HS256"], 151 | audience=disco_host, 152 | options=dict( 153 | verify_signature=True, 154 | verify_exp=True, 155 | ), 156 | ) 157 | api_key_str = api_key_for_public_key.id 158 | except jwt.PyJWTError: 159 | pass 160 | if api_key_str is None: 161 | raise HTTPException(status_code=401) 162 | api_key = await get_valid_api_key_by_id(dbsession, api_key_str) 163 | if api_key is None: 164 | raise HTTPException(status_code=403) 165 | api_key_id = api_key.id 166 | await record_api_key_usage(dbsession, api_key) 167 | 168 | yield api_key_id 169 | -------------------------------------------------------------------------------- /disco/utils/projectdomains.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import uuid 3 | 4 | from sqlalchemy import select 5 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 6 | from sqlalchemy.orm.session import Session as DBSession 7 | 8 | from disco.models import ApiKey, Project, ProjectDomain 9 | from disco.utils import caddy, docker, events 10 | from disco.utils.deployments import get_live_deployment 11 | from disco.utils.discofile import ServiceType, get_disco_file_from_str 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | DOMAIN_REGEX = ( 16 | r"^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$" 17 | ) 18 | 19 | 20 | async def add_domain( 21 | dbsession: AsyncDBSession, 22 | project: Project, 23 | domain_name: str, 24 | by_api_key: ApiKey, 25 | ) -> ProjectDomain: 26 | domain = ProjectDomain( 27 | id=uuid.uuid4().hex, 28 | name=domain_name, 29 | project=project, 30 | ) 31 | dbsession.add(domain) 32 | log.info( 33 | "%s added domain to project: %s %s", 34 | by_api_key.log(), 35 | project.log(), 36 | domain.log(), 37 | ) 38 | www_apex_domain_name = _get_apex_www_redirect_for_domain(domain_name) 39 | www_apex_domain = ( 40 | (await get_domain_by_name(dbsession, www_apex_domain_name)) 41 | if www_apex_domain_name is not None 42 | else None 43 | ) 44 | if www_apex_domain is not None: 45 | # we're adding example.com, but 46 | # www.example.com already had a redirect from example.com 47 | log.info( 48 | "Removing domain redirect from %s to %s", 49 | www_apex_domain_name, 50 | www_apex_domain.name, 51 | ) 52 | await caddy.remove_apex_www_redirects(www_apex_domain.id) 53 | domains: list[ProjectDomain] = await project.awaitable_attrs.domains 54 | await caddy.set_domains_for_project( 55 | 
project_name=project.name, domains=[d.name for d in domains] 56 | ) 57 | if www_apex_domain_name is not None and www_apex_domain is None: 58 | # we're adding www.example.com and example.com is free 59 | log.info( 60 | "Adding domain redirect from %s to %s", www_apex_domain_name, domain.name 61 | ) 62 | await caddy.add_apex_www_redirects( 63 | domain_id=domain.id, 64 | from_domain=www_apex_domain_name, 65 | to_domain=domain.name, 66 | ) 67 | project_domains = await project.awaitable_attrs.domains 68 | if len(project_domains) == 1: 69 | # just added first domain, need to set what it's serving 70 | assert project_domains[0] == domain 71 | await serve_live_deployment(dbsession, project) 72 | events.domain_created(project_name=project.name, domain=domain.name) 73 | return domain 74 | 75 | 76 | async def remove_domain( 77 | dbsession: AsyncDBSession, domain: ProjectDomain, by_api_key: ApiKey 78 | ) -> None: 79 | project = await domain.awaitable_attrs.project 80 | domain_id = domain.id 81 | domain_name = domain.name 82 | log.info( 83 | "%s is removing domain from project: %s %s", 84 | by_api_key.log(), 85 | project.log(), 86 | domain.log(), 87 | ) 88 | domains: list[ProjectDomain] = await project.awaitable_attrs.domains 89 | domains.remove(domain) 90 | await dbsession.delete(domain) 91 | await caddy.set_domains_for_project( 92 | project_name=project.name, domains=[d.name for d in domains] 93 | ) 94 | www_apex_domain_name = _get_apex_www_redirect_for_domain(domain_name) 95 | www_apex_domain = ( 96 | (await get_domain_by_name(dbsession, www_apex_domain_name)) 97 | if www_apex_domain_name is not None 98 | else None 99 | ) 100 | if www_apex_domain_name is not None: 101 | if www_apex_domain is None: 102 | # removing www.example.com and example.com doesn't exist, 103 | # meaning we had a redirect we should remove 104 | log.info( 105 | "Removing domain redirect from %s to %s", 106 | www_apex_domain_name, 107 | domain_name, 108 | ) 109 | await caddy.remove_apex_www_redirects(domain_id) 110 | else: 111 | # removing www.example.com and example.com exists, 112 | # meaning we're freeing www.example.com so we should create a redirect 113 | await caddy.add_apex_www_redirects( 114 | domain_id=www_apex_domain.id, 115 | from_domain=domain_name, 116 | to_domain=www_apex_domain.name, 117 | ) 118 | events.domain_removed(project_name=project.name, domain=domain_name) 119 | 120 | 121 | def _get_apex_www_redirect_for_domain(domain_name: str) -> str | None: 122 | parts = domain_name.split(".") 123 | if len(parts) == 2: 124 | # example.com, return www.example.com 125 | return ".".join(["www"] + parts) 126 | if len(parts) == 3 and parts[0] == "www": 127 | # www.example.com, return example.com 128 | return ".".join(parts[1:]) 129 | # site.example.com, or a.site.example.com, return None 130 | return None 131 | 132 | 133 | async def get_domain_by_id( 134 | dbsession: AsyncDBSession, domain_id: str 135 | ) -> ProjectDomain | None: 136 | return await dbsession.get(ProjectDomain, domain_id) 137 | 138 | 139 | async def get_domain_by_name( 140 | dbsession: AsyncDBSession, domain_name: str 141 | ) -> ProjectDomain | None: 142 | stmt = select(ProjectDomain).where(ProjectDomain.name == domain_name).limit(1) 143 | result = await dbsession.execute(stmt) 144 | return result.scalars().first() 145 | 146 | 147 | def get_domain_by_name_sync( 148 | dbsession: DBSession, domain_name: str 149 | ) -> ProjectDomain | None: 150 | stmt = select(ProjectDomain).where(ProjectDomain.name == domain_name).limit(1) 151 | result = dbsession.execute(stmt) 
152 | return result.scalars().first() 153 | 154 | 155 | async def serve_live_deployment(dbsession: AsyncDBSession, project: Project) -> None: 156 | deployment = await get_live_deployment(dbsession, project) 157 | if deployment is None: 158 | return # nothing to serve 159 | if deployment.disco_file is None: 160 | return # nothing to serve 161 | disco_file = get_disco_file_from_str(deployment.disco_file) 162 | if "web" not in disco_file.services: 163 | return # nothing to serve 164 | if disco_file.services["web"].type == ServiceType.container: 165 | internal_service_name = docker.service_name( 166 | project.name, "web", deployment.number 167 | ) 168 | await caddy.serve_service( 169 | project.name, 170 | internal_service_name, 171 | port=disco_file.services["web"].port or 8000, 172 | ) 173 | elif disco_file.services["web"].type == ServiceType.static: 174 | await caddy.serve_static_site(project.name, deployment.number) 175 | -------------------------------------------------------------------------------- /disco/endpoints/run.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | from datetime import datetime 5 | from typing import Annotated 6 | 7 | from fastapi import APIRouter, BackgroundTasks, Depends, Header, HTTPException 8 | from fastapi.exceptions import RequestValidationError 9 | from pydantic import BaseModel, Field, ValidationError 10 | from pydantic_core import InitErrorDetails, PydanticCustomError 11 | from sqlalchemy.orm.session import Session as DBSession 12 | from sse_starlette import ServerSentEvent 13 | from sse_starlette.sse import EventSourceResponse 14 | 15 | from disco.auth import get_api_key_sync, get_api_key_wo_tx 16 | from disco.endpoints.dependencies import get_db_sync, get_project_from_url_sync 17 | from disco.models import ApiKey, Project 18 | from disco.models.db import Session 19 | from disco.utils import commandoutputs 20 | from disco.utils.commandruns import create_command_run, get_command_run_by_number 21 | from disco.utils.deployments import get_live_deployment_sync 22 | from disco.utils.discofile import DiscoFile, ServiceType, get_disco_file_from_str 23 | from disco.utils.projects import get_project_by_name_sync 24 | 25 | log = logging.getLogger(__name__) 26 | 27 | router = APIRouter() 28 | 29 | 30 | class RunReqBody(BaseModel): 31 | command: str = Field(..., max_length=4000) 32 | service: str | None 33 | timeout: int 34 | 35 | 36 | @router.post( 37 | "/api/projects/{project_name}/runs", 38 | status_code=202, 39 | dependencies=[Depends(get_api_key_sync)], 40 | ) 41 | def run_post( 42 | dbsession: Annotated[DBSession, Depends(get_db_sync)], 43 | project: Annotated[Project, Depends(get_project_from_url_sync)], 44 | api_key: Annotated[ApiKey, Depends(get_api_key_sync)], 45 | req_body: RunReqBody, 46 | background_tasks: BackgroundTasks, 47 | ): 48 | deployment = get_live_deployment_sync(dbsession, project) 49 | if deployment is None: 50 | raise HTTPException(422, "Must deploy first") 51 | disco_file: DiscoFile = get_disco_file_from_str(deployment.disco_file) 52 | if req_body.service is None: 53 | if len(list(disco_file.services.keys())) == 0: 54 | raise HTTPException(422) 55 | if ( 56 | "web" in disco_file.services 57 | and disco_file.services["web"].type != ServiceType.static 58 | ): 59 | service = "web" 60 | else: 61 | services = list( 62 | [ 63 | name 64 | for name, service in disco_file.services.items() 65 | if service.type != ServiceType.static 66 | ] 67 | ) 68 | if 
len(services) == 0: 69 | raise HTTPException(422, "No service in this project can run commands") 70 | service = services[0] 71 | else: 72 | if req_body.service not in disco_file.services: 73 | raise RequestValidationError( 74 | errors=( 75 | ValidationError.from_exception_data( 76 | "ValueError", 77 | [ 78 | InitErrorDetails( 79 | type=PydanticCustomError( 80 | "value_error", 81 | f'Service "{req_body.service}" not in Discofile: {list(disco_file.services.keys())}', 82 | ), 83 | loc=("body", "service"), 84 | input=req_body.service, 85 | ) 86 | ], 87 | ) 88 | ).errors() 89 | ) 90 | if disco_file.services[req_body.service].type == ServiceType.static: 91 | raise RequestValidationError( 92 | errors=( 93 | ValidationError.from_exception_data( 94 | "ValueError", 95 | [ 96 | InitErrorDetails( 97 | type=PydanticCustomError( 98 | "value_error", 99 | f'Service "{req_body.service}" can\'t run commands', 100 | ), 101 | loc=("body", "service"), 102 | input=req_body.service, 103 | ) 104 | ], 105 | ) 106 | ).errors() 107 | ) 108 | service = req_body.service 109 | command_run, func = create_command_run( 110 | dbsession=dbsession, 111 | project=project, 112 | deployment=deployment, 113 | service=service, 114 | command=req_body.command, 115 | timeout=req_body.timeout, 116 | by_api_key=api_key, 117 | ) 118 | background_tasks.add_task(func) 119 | return { 120 | "run": { 121 | "number": command_run.number, 122 | }, 123 | } 124 | 125 | 126 | @router.get( 127 | "/api/projects/{project_name}/runs/{run_number}/output", 128 | dependencies=[Depends(get_api_key_wo_tx)], 129 | ) 130 | async def run_output_get( 131 | project_name: str, 132 | run_number: int, 133 | last_event_id: Annotated[str | None, Header()] = None, 134 | ): 135 | with Session.begin() as dbsession: 136 | project = get_project_by_name_sync(dbsession, project_name) 137 | if project is None: 138 | raise HTTPException(status_code=404) 139 | run = get_command_run_by_number(dbsession, project, run_number) 140 | if run is None: 141 | raise HTTPException(status_code=404) 142 | after = None 143 | source = commandoutputs.run_source(run.id) 144 | if last_event_id is not None: 145 | output = await commandoutputs.get_by_id(source, last_event_id) 146 | if output is not None: 147 | after = output.created 148 | 149 | # TODO refactor, this is copy-pasted from deployment output 150 | async def get_run_output(source: str, after: datetime | None): 151 | while True: 152 | output = await commandoutputs.get_next(source, after=after) 153 | if output is not None: 154 | if output.text is None: 155 | yield ServerSentEvent( 156 | id=output.id, 157 | event="end", 158 | data="", 159 | ) 160 | return 161 | after = output.created 162 | yield ServerSentEvent( 163 | id=output.id, 164 | event="output", 165 | data=json.dumps( 166 | { 167 | "timestamp": output.created.isoformat(), 168 | "text": output.text, 169 | } 170 | ), 171 | ) 172 | if output is None: 173 | await asyncio.sleep(0.1) 174 | 175 | return EventSourceResponse(get_run_output(source, after)) 176 | -------------------------------------------------------------------------------- /disco/endpoints/meta.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import logging 4 | from datetime import datetime, timezone 5 | from enum import Enum 6 | from typing import Annotated 7 | 8 | from fastapi import APIRouter, Depends, HTTPException 9 | from fastapi.exceptions import RequestValidationError 10 | from pydantic import BaseModel, Field, ValidationError 11 | from
pydantic_core import InitErrorDetails, PydanticCustomError 12 | from sqlalchemy.ext.asyncio import AsyncSession as AsyncDBSession 13 | from sqlalchemy.orm.session import Session as DBSession 14 | from sse_starlette import EventSourceResponse, ServerSentEvent 15 | 16 | import disco 17 | from disco.auth import get_api_key, get_api_key_wo_tx 18 | from disco.endpoints.dependencies import get_db, get_db_sync 19 | from disco.models import ApiKey 20 | from disco.utils import docker, keyvalues 21 | from disco.utils.meta import set_disco_host, update_disco 22 | from disco.utils.projectdomains import DOMAIN_REGEX 23 | from disco.utils.projects import get_project_by_domain 24 | from disco.utils.stats import AsyncDockerStats 25 | 26 | log = logging.getLogger(__name__) 27 | 28 | router = APIRouter(dependencies=[Depends(get_api_key_wo_tx)]) 29 | 30 | 31 | @router.get("/api/disco/meta") 32 | async def meta_get( 33 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 34 | api_key: Annotated[ApiKey, Depends(get_api_key)], 35 | ): 36 | return { 37 | "version": disco.__version__, 38 | "discoHost": await keyvalues.get_value(dbsession, "DISCO_HOST"), 39 | "registryHost": await keyvalues.get_value(dbsession, "REGISTRY_HOST"), 40 | "publicKey": api_key.public_key, 41 | "docker": {"version": await docker.get_docker_version()}, 42 | } 43 | 44 | 45 | class UpdateRequestBody(BaseModel): 46 | image: str = Field("letsdiscodev/daemon:latest", pattern=r"^[^-].*$") 47 | pull: bool = True 48 | 49 | 50 | @router.post("/api/disco/upgrade") 51 | def upgrade_post( 52 | dbsession: Annotated[DBSession, Depends(get_db_sync)], req_body: UpdateRequestBody 53 | ): 54 | update_disco(dbsession=dbsession, image=req_body.image, pull=req_body.pull) 55 | return {"updating": True} 56 | 57 | 58 | class RegistryAuthType(Enum): 59 | basic = "basic" 60 | 61 | 62 | class SetRegistryRequestBody(BaseModel): 63 | host: str = Field(..., pattern=DOMAIN_REGEX) 64 | authType: RegistryAuthType 65 | username: str 66 | password: str 67 | 68 | 69 | @router.post("/api/disco/registry") 70 | async def registry_post( 71 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 72 | req_body: SetRegistryRequestBody, 73 | ): 74 | disco_host_home = await keyvalues.get_value(dbsession, "HOST_HOME") 75 | assert disco_host_home is not None 76 | registry_host = await keyvalues.get_value(dbsession, "REGISTRY_HOST") 77 | if registry_host is not None: 78 | await docker.logout( 79 | disco_host_home=disco_host_home, 80 | host=registry_host, 81 | ) 82 | await keyvalues.set_value(dbsession=dbsession, key="REGISTRY_HOST", value=None) 83 | await docker.login( 84 | disco_host_home=disco_host_home, 85 | host=req_body.host, 86 | username=req_body.username, 87 | password=req_body.password, 88 | ) 89 | await keyvalues.set_value( 90 | dbsession=dbsession, key="REGISTRY_HOST", value=req_body.host 91 | ) 92 | return { 93 | "version": disco.__version__, 94 | "discoHost": await keyvalues.get_value(dbsession, "DISCO_HOST"), 95 | "registryHost": await keyvalues.get_value(dbsession, "REGISTRY_HOST"), 96 | } 97 | 98 | 99 | @router.delete("/api/disco/registry") 100 | async def registry_delete( 101 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 102 | ): 103 | disco_host_home = await keyvalues.get_value(dbsession, "HOST_HOME") 104 | assert disco_host_home is not None 105 | registry_host = await keyvalues.get_value(dbsession, "REGISTRY_HOST") 106 | if registry_host is not None: 107 | node_ids = await docker.get_node_list() 108 | if len(node_ids) > 1: 109 | raise 
HTTPException(422, "Can't unset registry with multiple nodes running") 110 | await docker.logout( 111 | disco_host_home=disco_host_home, 112 | host=registry_host, 113 | ) 114 | await keyvalues.set_value(dbsession=dbsession, key="REGISTRY_HOST", value=None) 115 | return { 116 | "version": disco.__version__, 117 | "discoHost": await keyvalues.get_value(dbsession, "DISCO_HOST"), 118 | "registryHost": await keyvalues.get_value(dbsession, "REGISTRY_HOST"), 119 | } 120 | 121 | 122 | class SetDiscoHostRequestBody(BaseModel): 123 | host: str = Field(..., pattern=DOMAIN_REGEX) 124 | 125 | 126 | @router.post("/api/disco/host") 127 | async def host_post( 128 | dbsession: Annotated[AsyncDBSession, Depends(get_db)], 129 | api_key: Annotated[ApiKey, Depends(get_api_key)], 130 | req_body: SetDiscoHostRequestBody, 131 | ): 132 | project = await get_project_by_domain(dbsession, req_body.host) 133 | if project is not None: 134 | raise RequestValidationError( 135 | errors=( 136 | ValidationError.from_exception_data( 137 | "ValueError", 138 | [ 139 | InitErrorDetails( 140 | type=PydanticCustomError( 141 | "value_error", 142 | "Domain already taken by another project", 143 | ), 144 | loc=("body", "host"), 145 | input=req_body.host, 146 | ) 147 | ], 148 | ) 149 | ).errors() 150 | ) 151 | await set_disco_host(dbsession=dbsession, host=req_body.host, by_api_key=api_key) 152 | return { 153 | "version": disco.__version__, 154 | "discoHost": await keyvalues.get_value_str(dbsession, "DISCO_HOST"), 155 | "registryHost": await keyvalues.get_value(dbsession, "REGISTRY_HOST"), 156 | } 157 | 158 | 159 | @router.get("/api/disco/stats-experimental") 160 | async def stats_experimental(): 161 | return EventSourceResponse(read_stats()) 162 | 163 | 164 | async def read_stats(): 165 | log.info("Starting stats") 166 | async_docker_stats = AsyncDockerStats() 167 | try: 168 | while True: 169 | containers_stats = await async_docker_stats.get_all_container_stats() 170 | df = await docker.host_df() 171 | node_stats = { 172 | "node_name": "leader", 173 | "read": datetime.now(timezone.utc).isoformat(), 174 | "stats": containers_stats, 175 | "df": { 176 | "used": df.used, 177 | "available": df.available, 178 | }, 179 | } 180 | yield ServerSentEvent( 181 | event="stats", 182 | data=json.dumps(node_stats), 183 | ) 184 | await asyncio.sleep(3)  # don't block the event loop between samples 185 | finally: 186 | log.info("Stopping stats") 187 | -------------------------------------------------------------------------------- /disco/utils/events.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import json 5 | from dataclasses import dataclass 6 | from datetime import datetime, timedelta, timezone 7 | from typing import TYPE_CHECKING, Any, Iterable 8 | 9 | if TYPE_CHECKING: 10 | from disco.utils.deployments import DEPLOYMENT_STATUS 11 | import logging 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | 16 | @dataclass 17 | class DiscoEvent: 18 | id: str 19 | timestamp: datetime 20 | type: str 21 | data: dict[str, Any] 22 | 23 | 24 | _subscribers: list[asyncio.Queue[DiscoEvent]] = [] 25 | _events: list[DiscoEvent] = [] 26 | 27 | 28 | def deployment_created( 29 | project_name: str, deployment_number: int, status: DEPLOYMENT_STATUS 30 | ) -> None: 31 | _publish_event( 32 | _event( 33 | type="deployment:created", 34 | data={ 35 | "project": { 36 | "name": project_name, 37 | }, 38 | "deployment": { 39 | "number": deployment_number, 40 | "status": status, 41 | }, 42 | }, 43 | ) 44 | ) 45 | 46 | 47 | def
deployment_status( 48 | project_name: str, deployment_number: int, status: DEPLOYMENT_STATUS 49 | ) -> None: 50 | _publish_event( 51 | _event( 52 | type="deployment:status", 53 | data={ 54 | "project": { 55 | "name": project_name, 56 | }, 57 | "deployment": { 58 | "number": deployment_number, 59 | "status": status, 60 | }, 61 | }, 62 | ) 63 | ) 64 | 65 | 66 | def env_variable_created(project_name: str, env_var: str) -> None: 67 | _publish_event( 68 | _event( 69 | type="envVar:created", 70 | data={ 71 | "project": { 72 | "name": project_name, 73 | }, 74 | "envVar": { 75 | "name": env_var, 76 | }, 77 | }, 78 | ) 79 | ) 80 | 81 | 82 | def env_variable_updated(project_name: str, env_var: str) -> None: 83 | _publish_event( 84 | _event( 85 | type="envVar:updated", 86 | data={ 87 | "project": { 88 | "name": project_name, 89 | }, 90 | "envVar": { 91 | "name": env_var, 92 | }, 93 | }, 94 | ) 95 | ) 96 | 97 | 98 | def env_variable_removed(project_name: str, env_var: str) -> None: 99 | _publish_event( 100 | _event( 101 | type="envVar:removed", 102 | data={ 103 | "project": { 104 | "name": project_name, 105 | }, 106 | "envVar": { 107 | "name": env_var, 108 | }, 109 | }, 110 | ) 111 | ) 112 | 113 | 114 | def project_created(project_name: str) -> None: 115 | _publish_event( 116 | _event( 117 | type="project:created", 118 | data={ 119 | "project": { 120 | "name": project_name, 121 | }, 122 | }, 123 | ) 124 | ) 125 | 126 | 127 | def project_removed(project_name: str) -> None: 128 | _publish_event( 129 | _event( 130 | type="project:removed", 131 | data={ 132 | "project": { 133 | "name": project_name, 134 | }, 135 | }, 136 | ) 137 | ) 138 | 139 | 140 | def domain_created(project_name: str, domain: str) -> None: 141 | _publish_event( 142 | _event( 143 | type="domain:created", 144 | data={ 145 | "project": { 146 | "name": project_name, 147 | }, 148 | "domain": {"name": domain}, 149 | }, 150 | ) 151 | ) 152 | 153 | 154 | def domain_removed(project_name: str, domain: str) -> None: 155 | _publish_event( 156 | _event( 157 | type="domain:removed", 158 | data={ 159 | "project": { 160 | "name": project_name, 161 | }, 162 | "domain": {"name": domain}, 163 | }, 164 | ) 165 | ) 166 | 167 | 168 | def api_key_created(public_key: str, name: str) -> None: 169 | _publish_event( 170 | _event( 171 | type="apiKey:created", 172 | data={ 173 | "apiKey": { 174 | "publicKey": public_key, 175 | "name": name, 176 | }, 177 | }, 178 | ) 179 | ) 180 | 181 | 182 | def api_key_removed(public_key: str, name: str) -> None: 183 | _publish_event( 184 | _event( 185 | type="apiKey:removed", 186 | data={ 187 | "apiKey": { 188 | "publicKey": public_key, 189 | "name": name, 190 | }, 191 | }, 192 | ) 193 | ) 194 | 195 | 196 | def github_apps_updated() -> None: 197 | _publish_event(_event(type="github:apps:updated", data={})) 198 | 199 | 200 | def github_repos_updated() -> None: 201 | _publish_event(_event(type="github:repos:updated", data={})) 202 | 203 | 204 | def _event(type: str, data: dict[str, Any]) -> DiscoEvent: 205 | dt = datetime.now(timezone.utc) 206 | event = DiscoEvent( 207 | id=dt.isoformat(), 208 | timestamp=dt, 209 | type=type, 210 | data=data, 211 | ) 212 | return event 213 | 214 | 215 | def _publish_event(event: DiscoEvent) -> None: 216 | log.info( 217 | "Dispatching event %s: %s", 218 | event.type, 219 | json.dumps( 220 | { 221 | "id": event.id, 222 | "timestamp": event.timestamp.isoformat(), 223 | "type": event.type, 224 | "data": event.data, 225 | } 226 | ), 227 | ) 228 | _events.append(event) 229 | while len(_events) > 0 and 
_events[0].timestamp < datetime.now( 230 | timezone.utc 231 | ) - timedelta(hours=1): 232 | _events.pop(0) 233 | for subscriber in _subscribers: 234 | subscriber.put_nowait(event) 235 | 236 | 237 | def subscribe() -> asyncio.Queue[DiscoEvent]: 238 | log.info("Adding event subscriber") 239 | subscriber = asyncio.Queue[DiscoEvent]() 240 | _subscribers.append(subscriber) 241 | return subscriber 242 | 243 | 244 | def unsubscribe(subscriber: asyncio.Queue[DiscoEvent]) -> None: 245 | if subscriber in _subscribers: 246 | log.info("Removing event subscriber") 247 | _subscribers.remove(subscriber) 248 | 249 | 250 | def get_events_since(event_id: str) -> Iterable[DiscoEvent]: 251 | found = False 252 | for event in _events: 253 | if found: 254 | yield event 255 | continue 256 | if event.id == event_id: 257 | found = True 258 | continue 259 | --------------------------------------------------------------------------------
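A minimal sketch of how an in-process consumer could use the events module above, e.g. to back a server-sent-events endpoint: replay events missed since the client's Last-Event-ID, then tail the live queue. The generator name and the replay-then-tail flow are illustrative assumptions, not part of the codebase; the queues in _subscribers are in-memory only, so events do not survive a daemon restart.

from disco.utils.events import DiscoEvent, get_events_since, subscribe, unsubscribe


async def tail_events(last_event_id: str | None = None):
    # Replay anything missed since the client's Last-Event-ID. _publish_event()
    # prunes events older than one hour, so replay covers at most that window.
    if last_event_id is not None:
        for event in get_events_since(last_event_id):
            yield event
    # Tail live events. In this simple sketch, an event published between the
    # replay above and subscribe() below would be missed.
    queue = subscribe()
    try:
        while True:
            event: DiscoEvent = await queue.get()
            yield event
    finally:
        # Unsubscribe so _publish_event() stops filling a dead queue after the
        # client disconnects.
        unsubscribe(queue)

This mirrors the polling pattern in run_output_get() in disco/endpoints/run.py, except that command output is read back from per-run SQLite files rather than an in-memory queue, so it survives restarts at the cost of a 0.1 s polling loop.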