├── zapusk ├── __init__.py ├── lib │ ├── __init__.py │ ├── json_serdes.py │ ├── json_serdes_test.py │ └── create_jobitem.py ├── client │ ├── __init__.py │ ├── printer.py │ ├── command_config_jobs.py │ ├── command_config_groups.py │ ├── printer_test.py │ ├── command_run.py │ ├── command_cancel.py │ ├── command_tail.py │ ├── command_list.py │ ├── command_config_jobs_test.py │ ├── command_config_groups_test.py │ ├── command_run_test.py │ ├── command_cancel_test.py │ ├── command_testcase.py │ ├── output.py │ ├── command.py │ ├── command_tail_test.py │ ├── command_exec.py │ ├── command_manager.py │ ├── command_list_test.py │ ├── command_waybar.py │ ├── output_test.py │ ├── command_waybar_test.py │ ├── command_exec_test.py │ ├── api_client.py │ ├── __main__.py │ └── api_client_test.py ├── server │ ├── __init__.py │ ├── error_response.py │ ├── controller_config.py │ ├── api.py │ ├── controller_testcase.py │ ├── controller_scheduled_jobs.py │ ├── controller_config_test.py │ ├── controller_jobs.py │ ├── controller_scheduled_jobs_test.py │ └── controller_jobs_test.py ├── services │ ├── scheduler_service │ │ ├── __init__.py │ │ ├── service.py │ │ └── service_test.py │ ├── config │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── yaml_filereader.py │ │ ├── config_parser.py │ │ ├── service.py │ │ ├── config_parser_test.py │ │ └── service_test.py │ ├── executor_manager │ │ ├── backends │ │ │ ├── __init__.py │ │ │ └── kawka │ │ │ │ ├── __init__.py │ │ │ │ ├── state.py │ │ │ │ ├── args_consumer.py │ │ │ │ ├── backend.py │ │ │ │ ├── consumer_test.py │ │ │ │ ├── backend_test.py │ │ │ │ ├── executor.py │ │ │ │ ├── args_consumer_test.py │ │ │ │ ├── consumer.py │ │ │ │ └── executor_test.py │ │ ├── __init__.py │ │ ├── service.py │ │ └── service_test.py │ └── __init__.py ├── kawka │ ├── __init__.py │ ├── linked_list.py │ ├── linked_list_test.py │ ├── producer_test.py │ ├── topic_test.py │ ├── producer.py │ ├── consumer.py │ ├── topic.py │ ├── consumer_group.py │ ├── topic_iterator.py │ ├── 
consumer_group_test.py │ └── consumer_test.py ├── models │ ├── id_field.py │ ├── __init__.py │ ├── config.py │ ├── job_group.py │ ├── id_field_test.py │ ├── base_model.py │ ├── job_config.py │ ├── scheduled_job.py │ ├── base_model_test.py │ └── job.py ├── logger.py └── __main__.py ├── .gitignore ├── .coveragerc ├── .imgs └── zapusk.png ├── pytest.ini ├── config.example.yaml ├── .github └── workflows │ ├── publish.yml │ └── test.yml ├── pyproject.toml ├── LICENSE.md └── README.md /zapusk/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zapusk/lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zapusk/client/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .coverage 3 | htmlcov/ 4 | dist/ 5 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = *_test.py,__main__.py,__init__.py 3 | -------------------------------------------------------------------------------- /.imgs/zapusk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/a/zapusk/master/.imgs/zapusk.png -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | timeout = 20 3 | ; log_cli = 1 4 | ; log_cli_level = DEBUG 5 | 6 | 
from .service import SchedulerService

# `__all__` (lowercase) is the only spelling Python's import machinery
# honors for `from ... import *`; the original `__ALL__` was silently
# ignored.
__all__ = ["SchedulerService"]
import json
from flask import Response


def error_response(error: str, status: int):
    """
    Build a JSON error response for the API controllers.

    Args:
        error: human-readable error message.
        status: HTTP status code for the response.

    Returns:
        flask.Response whose body is ``{"error": <message>}``.
    """
    return Response(
        json.dumps({"error": error}),
        status=status,
        # Without an explicit mimetype Flask labels the JSON body as
        # text/html, which misleads strict HTTP clients and tooling.
        mimetype="application/json",
    )
from .config import Config
from .job_config import JobConfig
from .job_group import JobGroup
from .job import Job
from .id_field import IdField
from .scheduled_job import ScheduledJob

# `__all__` (lowercase) controls `from zapusk.models import *`.
# The original `__ALL__` spelling was ignored by Python and listed
# "JobI", which is not a name defined in this package (should be "Job").
__all__ = [
    "Config",
    "JobConfig",
    "JobGroup",
    "Job",
    "IdField",
    "ScheduledJob",
]
class ExecutorManagerState:
    """
    Mutable registry of the kawka consumer groups and producers that are
    currently alive, keyed by identifier.
    """

    running_consumergroups: dict
    running_producers: dict

    def __init__(self):
        # A fresh state is simply a reset one.
        self.reset()

    def reset(self):
        """
        Drop every tracked consumer group and producer.
        """
        self.running_consumergroups = {}
        self.running_producers = {}
from dataclasses import dataclass
from typing import Optional

from .base_model import BaseModel


@dataclass(eq=False)
class JobGroup(BaseModel):
    """
    A named pool of jobs executed with bounded parallelism, plus optional
    shell callbacks fired when a job in the group finishes or fails.
    """

    id: str
    parallel: int
    on_finish: Optional[str] = None
    on_fail: Optional[str] = None

    def __post_init__(self):
        # A pool that may run zero (or fewer) jobs at once can never
        # execute anything, so reject it eagerly.
        if not self.parallel > 0:
            raise ValueError("`parallel` must be a positive number")
import TestCase 2 | 3 | import pytest 4 | from .printer import Printer 5 | 6 | 7 | class TestPrinter(TestCase): 8 | @pytest.fixture(autouse=True) 9 | def capsys(self, capsys): 10 | self.capsys = capsys 11 | 12 | def test_printer_should_print(self): 13 | printer = Printer() 14 | 15 | printer.print("test") 16 | out, _ = self.capsys.readouterr() 17 | 18 | self.assertEqual(out, "test\n") 19 | -------------------------------------------------------------------------------- /zapusk/client/command_run.py: -------------------------------------------------------------------------------- 1 | from .api_client import ApiClientError 2 | from .command import Command 3 | 4 | 5 | class CommandRun(Command): 6 | def run( 7 | self, 8 | job_config_id: str, 9 | ): 10 | try: 11 | created_job = self.api_client.create_job( 12 | { 13 | "job_config_id": job_config_id, 14 | } 15 | ) 16 | self.print_json(created_job) 17 | except ApiClientError as ex: 18 | self.print_error(ex) 19 | -------------------------------------------------------------------------------- /zapusk/server/controller_config.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint 2 | from zapusk.lib.json_serdes import JsonSerdes 3 | 4 | 5 | def create_config_api(config_service): 6 | jobgroups_api = Blueprint("jobgroups", __name__) 7 | 8 | @jobgroups_api.route("/config/groups/") 9 | def groups_list(): 10 | return JsonSerdes.serialize(config_service.list_jobgroups()) 11 | 12 | @jobgroups_api.route("/config/jobs/") 13 | def job_list(): 14 | return JsonSerdes.serialize(config_service.list_jobs()) 15 | 16 | return jobgroups_api 17 | -------------------------------------------------------------------------------- /zapusk/kawka/linked_list.py: -------------------------------------------------------------------------------- 1 | class LinkedList[T]: 2 | """ 3 | Simple linked list implementation 4 | """ 5 | 6 | next: "LinkedList[T] | None" = None 7 | """ 8 | Link to a next 
element 9 | """ 10 | 11 | def __init__(self, data: T): 12 | self.data = data 13 | 14 | def append(self, data: T): 15 | """ 16 | Appends an element to the linked list 17 | """ 18 | self.next = LinkedList(data) 19 | return self.next 20 | 21 | def __str__(self): 22 | return f"linked_list.{self.data}" 23 | -------------------------------------------------------------------------------- /config.example.yaml: -------------------------------------------------------------------------------- 1 | # Example configuration for 2 | port: 9876 3 | 4 | job_groups: 5 | - id: default 6 | parallel: 10 7 | - id: sequential 8 | parallel: 1 9 | - id: parallel 10 | parallel: 2 11 | 12 | 13 | jobs: 14 | - name: Sleep 10 Seconds 15 | id: sleep_10 16 | command: sleep 10 17 | cwd: /var/ 18 | 19 | - name: Sleep 30 Seconds 20 | group: parallel 21 | id: sleep_30 22 | command: sleep 30 23 | 24 | - name: Configurable Sleep 25 | id: sleep 26 | group: sequential 27 | args_command: "zenity --entry --text 'Sleep Time'" 28 | command: "sleep $1" 29 | -------------------------------------------------------------------------------- /zapusk/client/command_cancel.py: -------------------------------------------------------------------------------- 1 | from .api_client import ApiClientError 2 | from .command import Command 3 | 4 | 5 | class CommandCancel(Command): 6 | def run(self, job_id: str | int, scheduled: bool = False): 7 | try: 8 | if scheduled: 9 | cancelled_job = self.api_client.cancel_scheduled_job(job_id) 10 | self.print_json(cancelled_job) 11 | return 12 | 13 | cancelled_job = self.api_client.cancel_job(job_id) 14 | self.print_json(cancelled_job) 15 | 16 | except ApiClientError as ex: 17 | self.print_error(ex) 18 | -------------------------------------------------------------------------------- /zapusk/kawka/linked_list_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from .linked_list import LinkedList 3 | 4 | 5 | 
from unittest import TestCase

from .id_field import IdField


class TestCounter(TestCase):
    """IdField must hand out monotonically increasing ids per key."""

    def test_should_get_new_id(self):
        self.assertEqual(IdField.next("test"), 1)
        IdField.reset("test")

    def test_should_increment_id(self):
        generated = [IdField.next("test") for _ in range(5)]
        self.assertEqual(generated, [1, 2, 3, 4, 5])
        IdField.reset("test")
from typing import Any
from zapusk.models.job import JOB_STATE_ENUM

from .api_client import ApiClientError
from .command import Command


class CommandList(Command):
    """`zapusk list` — print jobs (or scheduled jobs) as JSON."""

    def run(
        self,
        filter: Any = None,
        scheduled: bool = False,
    ):
        """
        Fetch jobs from the server and print them.

        Args:
            filter: job state to keep (a JOB_STATE_ENUM value); `None`
                or "ALL" keeps everything. Only applies to regular jobs.
            scheduled: when True, list scheduled jobs instead.
        """
        try:
            if scheduled:
                jobs = self.api_client.list_scheduled_jobs()
                self.print_json(jobs)
                return

            jobs = self.api_client.list_jobs()

            if filter and filter != "ALL":
                jobs = [i for i in jobs if i["state"] == filter]

            self.print_json(jobs)
            return

        # Match the sibling commands: catch only API errors and report them
        # via `print_error`. The original broad `except Exception` called
        # `self.handle_error`, which appears nowhere else in the client and
        # would itself raise AttributeError, masking the real failure.
        except ApiClientError as ex:
            self.print_error(ex)
from flask import Flask

from .controller_jobs import create_jobs_api
from .controller_config import create_config_api
from .controller_scheduled_jobs import create_scheduled_jobs_api


def create_app(
    executor_manager_service,
    config_service,
    scheduler_service,
):
    """
    Assemble the Flask application from the jobs, config and
    scheduled-jobs blueprints, wiring in the given services.
    """
    app = Flask(__name__)

    # Registration order is preserved: jobs, config, scheduled jobs.
    blueprints = (
        create_jobs_api(
            config_service=config_service,
            executor_manager_service=executor_manager_service,
        ),
        create_config_api(
            config_service=config_service,
        ),
        create_scheduled_jobs_api(
            scheduler_service=scheduler_service,
            config_service=config_service,
        ),
    )

    for blueprint in blueprints:
        app.register_blueprint(blueprint)

    return app
from dataclasses import fields
import inspect


class BaseModel:
    """
    Shared base for the dataclass models: field-wise equality against
    either another instance of the same type or a plain dict, plus a
    lenient `from_dict` constructor.
    """

    def _fields_match(self, lookup) -> bool:
        # Single comparison loop shared by both __eq__ branches
        # (the original duplicated it verbatim).
        for fld in fields(self):  # type: ignore
            if getattr(self, fld.name) != lookup(fld.name):
                return False
        return True

    def __eq__(self, value: object, /) -> bool:
        """
        Compare field-by-field with a same-type instance, or with a dict
        keyed by field names (missing keys compare as None).
        """
        if isinstance(value, type(self)):
            return self._fields_match(lambda name: getattr(value, name))
        if isinstance(value, dict):
            return self._fields_match(value.get)
        return False

    # Defining __eq__ already makes instances unhashable; state it
    # explicitly so readers don't have to remember the implicit rule.
    __hash__ = None  # type: ignore

    @classmethod
    def from_dict(cls, env):
        """
        Build an instance from `env`, silently dropping keys that are not
        constructor parameters.
        """
        return cls(
            **{k: v for k, v in env.items() if k in inspect.signature(cls).parameters}
        )
| test: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: ['3.12'] 18 | 19 | name: Python ${{ matrix.python-version }} test 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: install poetry 28 | run: pip install poetry 29 | 30 | - name: install deps 31 | run: poetry install 32 | 33 | - name: test 34 | run: poetry run pytest --cache-clear --cov=zapusk | tee pytest-coverage.txt 35 | 36 | - name: Pytest coverage comment 37 | if: matrix.python-version == '3.12' 38 | uses: MishaKav/pytest-coverage-comment@main 39 | with: 40 | pytest-coverage-path: ./pytest-coverage.txt 41 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2024 Anton Shuvalov 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
import json
import responses

from .command_testcase import CommandTestCase


class TestCommandConfigJobs(CommandTestCase):
    """`zapusk config-jobs` against a mocked HTTP API."""

    @responses.activate
    def test_should_print_config_jobs(self):
        # Renamed from the copy-pasted `test_should_cancel_job`:
        # this case lists job configs, it cancels nothing.
        data = [
            {"id": 1},
            {"id": 2},
        ]

        responses.get(
            "http://example.com/config/jobs/", status=200, json={"data": data}
        )

        self.command_manager.config_jobs.run()
        json_data = json.loads(self.printer.print.call_args[0][0])

        self.assertEqual(json_data, data)

    @responses.activate
    def test_should_handle_error(self):
        responses.get(
            "http://example.com/config/jobs/", status=400, json={"error": "ERROR"}
        )

        self.command_manager.config_jobs.run()
        args = self.printer.print.call_args[0]
        message = json.loads(args[0])

        self.assertEqual(message, {"error": {"message": "ERROR"}})
from dataclasses import dataclass
from croniter import croniter
from datetime import datetime, timezone
from typing import Optional

from .base_model import BaseModel
from .job_config import JobConfig


@dataclass(eq=False)
class ScheduledJob(BaseModel):
    """
    A job config paired with its cron schedule state: tracks when it last
    ran and when it should run next.
    """

    job_config: JobConfig

    next: int = 0
    """
    Next execution time
    """

    last_run: Optional[datetime] = None
    """
    Last time the job ran
    """

    def __post_init__(self):
        now = datetime.now(timezone.utc)
        if self.job_config.schedule:
            self.__iter = croniter(self.job_config.schedule, start_time=now)
        else:
            # f-string prefix was missing originally, so the literal text
            # "{self.job_config}" was raised instead of the config id.
            raise ValueError(
                f"Job config {self.job_config} contains no `schedule` property"
            )

        self.next = self.__iter.get_next(start_time=now)

    def record_run(self, now: datetime):
        """Remember that the job ran at `now` and advance the schedule."""
        self.last_run = now
        self.next = self.__iter.get_next(start_time=now)

    def __str__(self) -> str:
        return f"scheduled.{self.job_config}"
-------------------------------------------------------------------------------- /zapusk/kawka/topic_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from .topic_iterator import End 4 | from .topic import Topic 5 | 6 | 7 | class TestTopic(TestCase): 8 | def test_topic_len_0(self): 9 | topic = Topic(name="test") 10 | 11 | self.assertEqual(len(topic), 0) 12 | 13 | def test_topic_len_3(self): 14 | topic = Topic(name="test") 15 | topic.add(1) 16 | topic.add(2) 17 | topic.add(3) 18 | 19 | self.assertEqual(len(topic), 3) 20 | 21 | def test_topic_str(self): 22 | topic = Topic(name="test") 23 | 24 | self.assertEqual("topic.test", f"{topic}") 25 | 26 | def test_topic_iter_non_block(self): 27 | topic = Topic(name="test") 28 | [topic.add(i) for i in range(10)] 29 | 30 | self.assertEqual( 31 | list(range(10)), 32 | list(topic.iter(head=topic.head, block=False)), 33 | ) 34 | 35 | def test_topic_iter_block(self): 36 | topic = Topic(name="test") 37 | [topic.add(i) for i in range(10)] 38 | topic.add(End) 39 | 40 | self.assertEqual( 41 | list(range(10)), 42 | list(topic.iter(head=topic.head)), 43 | ) 44 | -------------------------------------------------------------------------------- /zapusk/client/command_run_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import responses 3 | from responses import matchers 4 | 5 | from .command_testcase import CommandTestCase 6 | 7 | 8 | class TestCommandRun(CommandTestCase): 9 | @responses.activate 10 | def test_should_run_job(self): 11 | # TODO: check only tail command has been run 12 | data = [{"id": 1}] 13 | 14 | responses.post( 15 | "http://example.com/jobs/", 16 | status=200, 17 | json={"data": data}, 18 | match=[ 19 | matchers.json_params_matcher( 20 | { 21 | "job_config_id": "echo", 22 | } 23 | ) 24 | ], 25 | ) 26 | 27 | self.command_manager.run.run(job_config_id="echo") 28 | json_data = 
json.loads(self.printer.print.call_args[0][0]) 29 | 30 | self.assertEqual(json_data, data) 31 | 32 | @responses.activate 33 | def test_should_handle_error(self): 34 | responses.post("http://example.com/jobs/", status=400, json={"error": "ERROR"}) 35 | 36 | self.command_manager.run.run(job_config_id="echo") 37 | args = self.printer.print.call_args[0] 38 | message = json.loads(args[0]) 39 | 40 | self.assertEqual(message, {"error": {"message": "ERROR"}}) 41 | -------------------------------------------------------------------------------- /zapusk/services/executor_manager/service.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from zapusk.models import Job 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | class ExecutorManagerService: 8 | """ 9 | JobLog service is a generic interface for a given backend to interact 10 | with the pipeline 11 | """ 12 | 13 | def __init__(self, backend=None): 14 | logger.info("Start joblog") 15 | 16 | if not backend: 17 | raise Exception("ExecutorManagerService backend isn't configured") 18 | 19 | self.__backend = backend 20 | self.__backend.start() 21 | 22 | def get(self, job_id: int) -> Job | None: 23 | """ 24 | returns a job by its id 25 | """ 26 | return self.__backend.get(job_id) 27 | 28 | def list(self) -> list[Job]: 29 | """ 30 | returns all jobs in the pipeline 31 | """ 32 | return self.__backend.list() 33 | 34 | def add(self, job_item: Job) -> Job: 35 | """ 36 | adds a job to the pipeline 37 | """ 38 | return self.__backend.add(job_item) 39 | 40 | def cancel(self, job_item: Job) -> Job: 41 | """ 42 | cancels a job 43 | """ 44 | return self.__backend.cancel(job_item) 45 | 46 | def terminate(self) -> None: 47 | self.__backend.terminate() 48 | -------------------------------------------------------------------------------- /zapusk/client/command_cancel_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | 
import responses 3 | 4 | from .command_testcase import CommandTestCase 5 | 6 | 7 | class TestCommandCancel(CommandTestCase): 8 | @responses.activate 9 | def test_should_cancel_job(self): 10 | responses.delete("http://example.com/jobs/1", status=200, json={"data": True}) 11 | 12 | self.command_manager.cancel.run(job_id=1) 13 | json_data = json.loads(self.printer.print.call_args[0][0]) 14 | 15 | self.assertEqual(json_data, True) 16 | 17 | @responses.activate 18 | def test_should_cancel_scheduled_job(self): 19 | responses.delete( 20 | "http://example.com/scheduled-jobs/1", status=200, json={"data": True} 21 | ) 22 | 23 | self.command_manager.cancel.run(job_id=1, scheduled=True) 24 | json_data = json.loads(self.printer.print.call_args[0][0]) 25 | 26 | self.assertEqual(json_data, True) 27 | 28 | @responses.activate 29 | def test_should_handle_error(self): 30 | responses.delete( 31 | "http://example.com/jobs/1", status=400, json={"error": "ERROR"} 32 | ) 33 | 34 | self.command_manager.cancel.run(job_id=1) 35 | args = self.printer.print.call_args[0] 36 | message = json.loads(args[0]) 37 | 38 | self.assertEqual(message, {"error": {"message": "ERROR"}}) 39 | -------------------------------------------------------------------------------- /zapusk/client/command_testcase.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from unittest.mock import MagicMock 3 | from zapusk.client.api_client import ApiClient 4 | from testfixtures import TempDirectory 5 | 6 | from zapusk.services.config.service import ConfigService 7 | 8 | from .output import Output 9 | from .command_manager import CommandManager 10 | 11 | 12 | CONFIG_DATA = "" 13 | 14 | 15 | class CommandTestCase(TestCase): 16 | def setUp(self) -> None: 17 | self.temp_dir = TempDirectory() 18 | self.config_file = self.temp_dir / "config.yml" 19 | self.config_file.write_text(CONFIG_DATA) 20 | self.config_service = ConfigService( 21 | 
config_path=f"{self.temp_dir.path}/config.yml" 22 | ) 23 | 24 | self.printer = MagicMock() 25 | self.output = Output( 26 | printer=self.printer, 27 | ) 28 | self.api_client = ApiClient(base_url="http://example.com") 29 | 30 | self.command_manager = CommandManager( 31 | output=self.output, 32 | api_client=self.api_client, 33 | colors=False, 34 | config_service=self.config_service, 35 | ) 36 | return super().setUp() 37 | 38 | def tearDown(self) -> None: 39 | self.temp_dir.cleanup() 40 | self.printer.reset_mock() 41 | return super().tearDown() 42 | -------------------------------------------------------------------------------- /zapusk/client/output.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | from pygments import highlight, lexers, formatters 4 | 5 | from .printer import Printer 6 | 7 | 8 | class Output: 9 | """ 10 | Manages output to a terminal 11 | """ 12 | 13 | def __init__(self, printer=Printer()) -> None: 14 | self.printer = printer 15 | 16 | def json(self, json_data, colors=False, one_line=False, **kwargs): 17 | """ 18 | Prints colored JSON into stdout or stderr 19 | """ 20 | if one_line: 21 | self.printer.print(json.dumps(json_data), **kwargs) 22 | return 23 | 24 | formatted_json = json.dumps(json_data, indent=2) 25 | 26 | if not colors: 27 | self.printer.print(formatted_json, **kwargs) 28 | return 29 | 30 | colorful_json = highlight( 31 | formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter() 32 | ) 33 | self.printer.print(colorful_json, **kwargs) 34 | 35 | def error(self, exception, **kwargs): 36 | """ 37 | Prints JSON to stderr 38 | """ 39 | error = {"error": {"message": exception.message}} 40 | self.json(error, file=sys.stderr, **kwargs) 41 | 42 | def text(self, *args, **kwargs): 43 | """ 44 | Prints text 45 | """ 46 | self.printer.print(*args, **kwargs) 47 | -------------------------------------------------------------------------------- 
/zapusk/services/executor_manager/backends/kawka/args_consumer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | from datetime import datetime 4 | import subprocess 5 | 6 | 7 | from zapusk.kawka import Consumer 8 | from zapusk.models import Job 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class ArgsConsumer(Consumer): 14 | def process(self, job: Job): 15 | logger.info(f"{self}: received a job {job} to get args for") 16 | 17 | sink = (self.context or {})["sink"] 18 | 19 | if not job.args_command: 20 | sink.add(job) 21 | return 22 | 23 | command = job.args_command 24 | proc = subprocess.Popen( 25 | command, 26 | shell=True, 27 | stdout=subprocess.PIPE, 28 | stderr=subprocess.PIPE, 29 | env={**os.environ}, 30 | cwd=job.cwd, 31 | ) 32 | exit_code = proc.wait() 33 | out, err = proc.communicate() 34 | 35 | if err or exit_code: 36 | logger.warning(f"{exit_code}: {str(err, 'utf-8')}") 37 | job.state = Job.JOB_STATE_ENUM.FAILED 38 | job.updated_at = datetime.now() 39 | return 40 | 41 | arguments = str(out, "utf-8").split() 42 | logger.info(f"{self} recieved arguments for a job {job}: {arguments}") 43 | job.args = arguments 44 | sink.add(job) 45 | return 46 | -------------------------------------------------------------------------------- /zapusk/kawka/producer.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .topic import Topic, End 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | class Producer: 8 | End = End 9 | 10 | terminated = False 11 | 12 | def __init__(self, name, block=True): 13 | self.name = name 14 | 15 | self.__topic = Topic(name=self.name) 16 | self.__block = block 17 | logger.info(f"{self}: initialized") 18 | 19 | def add(self, msg): 20 | # TODO: probably, not needed 21 | logger.info(f"{self}: collected a message {msg}") 22 | 23 | if self.terminated: 24 | return 25 | 26 | if msg == End: 27 | 
self.terminated = True 28 | logger.info(f"{self}: terminated") 29 | 30 | self.__topic.add(msg) 31 | 32 | def all(self, block=None): 33 | """ 34 | Iterate over all items from head 35 | """ 36 | if block is not None: 37 | return self.__topic.iter(block=block, head=self.__topic.head) 38 | 39 | return self.__topic.iter(block=self.__block, head=self.__topic.head) 40 | 41 | def __len__(self): 42 | return len(self.__topic) 43 | 44 | def __iter__(self): 45 | """ 46 | Iterate from current message 47 | """ 48 | return self.__topic.iter(block=self.__block, head=self.__topic.last) 49 | 50 | def __str__(self) -> str: 51 | return f"producer.{self.name}" 52 | -------------------------------------------------------------------------------- /zapusk/kawka/consumer.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from threading import Thread 3 | from typing import Any, Optional 4 | 5 | from .producer import Producer 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class Consumer(Thread): 11 | def __init__( 12 | self, 13 | producer: Producer, 14 | name: Optional[str] = None, 15 | from_head=False, 16 | context: Optional[dict[str, Any]] = None, 17 | *args, 18 | **kwargs, 19 | ): 20 | super(Consumer, self).__init__(*args, **kwargs) 21 | self.context: dict[str, Any] = context or {} 22 | self.producer = producer 23 | self.from_head = from_head 24 | self.name = name if name else type(self).__name__ 25 | 26 | def on_end(self): 27 | logger.info(f"{self} reached the very end of the {self.producer}") 28 | pass 29 | 30 | def process(self, msg, *args, **kwargs): 31 | logger.info(f"{self}: process {msg}") # pragma: no cover 32 | 33 | def run(self): 34 | logger.info(f"{self}: start polling events") 35 | iterator = self.producer.all() if self.from_head else self.producer 36 | 37 | for msg in iterator: 38 | logger.info(f"{self}: message received {msg}") 39 | self.process(msg) 40 | logger.info(f"{self}: waiting for upcoming message") 
41 | 42 | self.on_end() 43 | 44 | def __str__(self) -> str: 45 | return f"consumer.{self.name}" 46 | -------------------------------------------------------------------------------- /zapusk/client/command.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import TYPE_CHECKING 3 | from requests.exceptions import ConnectionError 4 | 5 | from .api_client import ApiClient, ApiClientError 6 | from .output import Output 7 | 8 | if TYPE_CHECKING: # pragma: no cover 9 | from .command_manager import CommandManager 10 | 11 | 12 | class Command: 13 | def __init__( 14 | self, 15 | manager: CommandManager, 16 | api_client: ApiClient, 17 | output: Output, 18 | colors=False, 19 | ) -> None: 20 | self.api_client = api_client 21 | self.colors = colors 22 | self.output = output 23 | self.manager = manager 24 | 25 | def run(self, *args, **kwargs): ... # pragma: no cover 26 | 27 | def print_json(self, json_data, one_line=False): 28 | self.output.json(json_data, colors=self.colors, one_line=one_line) 29 | 30 | def print_error(self, exception): 31 | self.output.error(exception, colors=self.colors) 32 | 33 | def handle_error(self, ex): 34 | if type(ex) ==ApiClientError: 35 | self.print_error(ex) 36 | return 37 | 38 | if type(ex) == ConnectionError: 39 | if "Connection refused by Responses" not in str(ex): 40 | self.print_error(ApiClientError("Can not connect to the server. 
import threading

from .linked_list import LinkedList
from .topic_iterator import TopicIterator, Start, End


# A topic cell holds either a user message of type T or one of the
# Start/End sentinel marker classes.
type L[T] = T | type[Start] | type[End]


class Topic[T]:
    """
    Topic is a linked list data structure designed to add, store and iterate over its messages.
    """

    head: LinkedList[L[T]]
    """
    Link to the first element of the topic
    """

    last: LinkedList[L[T]]
    """
    Link to the last element of the topic
    """

    def __init__(self, name: str):
        self.name = name
        # `received` shares `mutex`, so appending and the iterators'
        # wait/notify are serialized on the same lock.
        self.mutex = threading.Lock()
        self.received = threading.Condition(self.mutex)

        # The head is a Start sentinel node; real messages are appended
        # after it (iterators skip the sentinel).
        self.head = LinkedList(Start)
        self.last = self.head

    def __len__(self):
        # O(n): walks the list with a non-blocking iterator and counts.
        iter = TopicIterator(block=False, topic=self, head=self.head)
        return len(list(iter))

    def add(self, data: T):
        """
        Append a new item to a topic
        """
        with self.mutex:
            self.last = self.last.append(data)
            # Wake one iterator blocked in `received.wait()`, if any.
            self.received.notify()
        return self

    def iter(self, block=True, head=None):
        """
        Creates a blocking or non-blocking iterator over a topic.
        """
        return TopicIterator(block=block, topic=self, head=head)

    def __str__(self) -> str:
        return f"topic.{self.name}"
self.api_client.create_job( 37 | { 38 | "command": command, 39 | "group_id": group_id, 40 | "name": name, 41 | "cwd": cwd, 42 | } 43 | ) 44 | 45 | if tail: 46 | self.manager.tail.run(created_job["id"]) 47 | return 48 | 49 | self.print_json(created_job) 50 | 51 | except Exception as ex: 52 | self.handle_error(ex) 53 | -------------------------------------------------------------------------------- /zapusk/models/base_model_test.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from unittest import TestCase 3 | 4 | from .base_model import BaseModel 5 | 6 | 7 | class TestBaseModel(TestCase): 8 | def test_base_model_should_compare_dict_and_dataclass(self): 9 | @dataclass(eq=False) 10 | class Model(BaseModel): 11 | attr: int = 1 12 | 13 | model = Model() 14 | 15 | self.assertEqual(model, {"attr": 1}) 16 | 17 | def test_base_model_should_compare_dict_and_dataclass_fail(self): 18 | @dataclass(eq=False) 19 | class Model(BaseModel): 20 | attr: int = 1 21 | 22 | model = Model() 23 | 24 | self.assertNotEqual(model, {"attr": 2}) 25 | 26 | def test_base_model_should_compare_dataclasses(self): 27 | @dataclass(eq=False) 28 | class Model(BaseModel): 29 | attr: int 30 | 31 | model1 = Model(attr=1) 32 | model2 = Model(attr=1) 33 | 34 | self.assertEqual(model1, model2) 35 | 36 | def test_base_model_should_compare_dataclasses_fail(self): 37 | @dataclass(eq=False) 38 | class Model(BaseModel): 39 | attr: int 40 | 41 | model1 = Model(attr=1) 42 | model2 = Model(attr=2) 43 | 44 | self.assertNotEqual(model1, model2) 45 | 46 | def test_base_model_should_compare_other_types(self): 47 | @dataclass(eq=False) 48 | class Model(BaseModel): 49 | attr: int = 1 50 | 51 | model = Model() 52 | 53 | self.assertNotEqual(model, 1) 54 | self.assertNotEqual(model, "1") 55 | self.assertNotEqual(model, None) 56 | -------------------------------------------------------------------------------- 
/zapusk/services/executor_manager/service_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest import mock 3 | 4 | from .service import ExecutorManagerService 5 | 6 | 7 | class MockBackend: 8 | def start(self): 9 | pass 10 | 11 | def get(self): 12 | pass 13 | 14 | def list(self): 15 | pass 16 | 17 | def add(self): 18 | pass 19 | 20 | def cancel(self): 21 | pass 22 | 23 | 24 | @pytest.mark.parametrize( 25 | "service_method_name,backend_method_name,args,return_value", 26 | [ 27 | ("get", "get", [1], {"id": 1}), 28 | ("list", "list", [], [1, 2, 3]), 29 | ("add", "add", [1], [1]), 30 | ("cancel", "cancel", [1], [1]), 31 | ], 32 | ) 33 | def test_method_call_proxied_to_the_backend( 34 | service_method_name, 35 | backend_method_name, 36 | args, 37 | return_value, 38 | ): 39 | backend = MockBackend() 40 | setattr( 41 | backend, 42 | backend_method_name, 43 | mock.MagicMock( 44 | name=backend_method_name, 45 | return_value=return_value, 46 | ), 47 | ) 48 | 49 | service = ExecutorManagerService(backend=backend) 50 | method = getattr(service, service_method_name) 51 | result = method(*args) 52 | 53 | mocked_method = getattr(backend, backend_method_name) 54 | mocked_method.assert_called_once_with(*args) 55 | 56 | assert result == return_value 57 | 58 | 59 | def test_executor_manager_service_should_fail_without_backend(): 60 | try: 61 | ExecutorManagerService() 62 | except Exception as ex: 63 | assert ex.args[0] == "ExecutorManagerService backend isn't configured" 64 | -------------------------------------------------------------------------------- /zapusk/services/executor_manager/backends/kawka/backend.py: -------------------------------------------------------------------------------- 1 | import os 2 | import signal 3 | from datetime import datetime 4 | from time import sleep 5 | 6 | from zapusk.kawka import ConsumerGroup, Producer 7 | from zapusk.models import Job 8 | 9 | from .consumer import 
ExecutorManagerConsumer 10 | 11 | 12 | class ExecutorManagerKawkaBackend: 13 | def start(self): 14 | self._producer = Producer(name="executor_manager_producer") 15 | self._consumer = ConsumerGroup( 16 | producer=self._producer, 17 | Consumer=ExecutorManagerConsumer, 18 | parallel=1, 19 | name="executor_manager", 20 | ) 21 | self._consumer.start() 22 | sleep(0.1) 23 | 24 | def add(self, job_item: Job) -> Job: 25 | self._producer.add(job_item) 26 | return job_item 27 | 28 | def list(self) -> list[Job]: 29 | return list(self._producer.all(block=False)) 30 | 31 | def get(self, job_id: int) -> Job | None: 32 | for job_item in self.list(): 33 | if job_item.id != job_id: 34 | continue 35 | return job_item 36 | return None 37 | 38 | def cancel(self, job_item: Job) -> Job: 39 | if job_item.state in [ 40 | Job.JOB_STATE_ENUM.PENDING, 41 | Job.JOB_STATE_ENUM.RUNNING, 42 | ]: 43 | job_item.state = Job.JOB_STATE_ENUM.CANCELLED 44 | job_item.updated_at = datetime.now() 45 | if job_item.pid: 46 | os.kill(job_item.pid, signal.SIGTERM) 47 | 48 | return job_item 49 | 50 | def terminate(self): 51 | self._producer.add(Producer.End) 52 | sleep(1) 53 | self._consumer.join(1) 54 | -------------------------------------------------------------------------------- /zapusk/services/config/config_parser.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from zapusk.models import Config, JobGroup, JobConfig 4 | 5 | from .constants import DEFAULT_JOB_GROUPS, DEFAULT_PORT, DEFAULT_COLORS 6 | 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class ConfigParser: 12 | 13 | @classmethod 14 | def parse(cls, data: dict): 15 | if not data: 16 | data = {} 17 | 18 | port = data.get("port", DEFAULT_PORT) 19 | colors = data.get("colors", DEFAULT_COLORS) 20 | job_groups = cls.__parse_job_groups(data.get("job_groups", {})) 21 | jobs = cls.__parse_jobs(data.get("jobs", []), list(job_groups.keys())) 22 | 23 | return Config( 24 | port=port, 25 
| colors=colors, 26 | job_groups=job_groups, 27 | jobs=jobs, 28 | ) 29 | 30 | @classmethod 31 | def __parse_job_groups(cls, data: list): 32 | logger.debug(f"Parsing job groups") 33 | if not data: 34 | return DEFAULT_JOB_GROUPS 35 | 36 | job_groups = {**DEFAULT_JOB_GROUPS} 37 | for v in data: 38 | _id = v["id"] 39 | job_groups[_id] = JobGroup(**v) 40 | logger.debug(f"Parsed {job_groups[_id]}") 41 | 42 | return job_groups 43 | 44 | @classmethod 45 | def __parse_jobs(cls, jobs: list, known_groups: list[str]): 46 | logger.debug(f"Parsing job configs") 47 | logger.debug(f"Known job_groups: {known_groups}") 48 | 49 | retval = {} 50 | for v in jobs: 51 | j = JobConfig.from_dict(v) 52 | 53 | if j.group not in known_groups: 54 | raise ValueError(f"Unknown job_group `{j.group}` in {j}") 55 | 56 | logger.debug(f"Parsed {j}") 57 | retval[v["id"]] = j 58 | 59 | return retval 60 | -------------------------------------------------------------------------------- /zapusk/kawka/consumer_group.py: -------------------------------------------------------------------------------- 1 | from typing import TypeVar, Generic 2 | 3 | from .consumer import Consumer 4 | from .producer import Producer 5 | 6 | 7 | class ConsumerGroupIterator: 8 | def __init__(self, producer, from_head): 9 | self.from_head = from_head 10 | self.producer = producer 11 | self.iterator = iter(self.producer.all() if self.from_head else self.producer) 12 | 13 | def __iter__(self): 14 | return self 15 | 16 | def __next__(self): 17 | return next(self.iterator) 18 | 19 | def __str__(self) -> str: 20 | return f"cg_iter.{self.producer.name}" 21 | 22 | 23 | C = TypeVar("C", bound=Consumer) 24 | 25 | 26 | class ConsumerGroup(Generic[C]): 27 | _consumers: list[C] 28 | 29 | def __init__( 30 | self, 31 | producer: Producer, 32 | Consumer: type[C], 33 | parallel=1, 34 | from_head=False, 35 | name=None, 36 | context=None, 37 | ): 38 | self.context = context 39 | self.producer = producer 40 | self.Consumer = Consumer 41 | 
self.consumerGroupIterator = ConsumerGroupIterator( 42 | from_head=from_head, producer=self.producer 43 | ) 44 | self.parallel = parallel 45 | self._consumers = [] 46 | self.from_head = from_head 47 | 48 | self.name = name 49 | if not self.name: 50 | self.name = type(self).__name__ 51 | 52 | self._consumers = [ 53 | Consumer( 54 | producer=self.consumerGroupIterator, # type: ignore 55 | name=f"{self.name}_{i}", 56 | context=self.context, 57 | ) 58 | for i in range(self.parallel) 59 | ] 60 | 61 | def start(self): 62 | [c.start() for c in self._consumers] 63 | 64 | def join(self, timeout: int): 65 | [c.join(timeout) for c in self._consumers] 66 | -------------------------------------------------------------------------------- /zapusk/client/command_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from zapusk.client.api_client import ApiClient 3 | from zapusk.client.command_tail import CommandTail 4 | from zapusk.services.config.service import ConfigService 5 | from .command_exec import CommandExec 6 | from .command_run import CommandRun 7 | from .command_cancel import CommandCancel 8 | from .command_list import CommandList 9 | from .command_waybar import CommandWaybar 10 | from .command_config_jobs import CommandConfigJobs 11 | from .command_config_groups import CommandConfigGroups 12 | from .output import Output 13 | 14 | 15 | class CommandManager: 16 | def __init__( 17 | self, 18 | config_service=None, 19 | output=Output(), 20 | colors=None, 21 | api_client: Optional[ApiClient] = None, 22 | ) -> None: 23 | self.output = output 24 | self.config_service = config_service if config_service else ConfigService() 25 | config = self.config_service.get_config() 26 | 27 | self.api_client = ( 28 | api_client 29 | if api_client 30 | else ApiClient( 31 | base_url=f"http://localhost:{config.port}/", 32 | ) 33 | ) 34 | 35 | self.colors = ( 36 | colors if colors != None else 
self.config_service.get_config().colors 37 | ) 38 | kwargs = { 39 | "colors": self.colors, 40 | "output": self.output, 41 | "api_client": self.api_client, 42 | "manager": self, 43 | } 44 | 45 | self.exec = CommandExec(**kwargs) 46 | self.run = CommandRun(**kwargs) 47 | self.cancel = CommandCancel(**kwargs) 48 | self.list = CommandList(**kwargs) 49 | self.waybar = CommandWaybar(**kwargs) 50 | self.tail = CommandTail(**kwargs) 51 | self.config_jobs = CommandConfigJobs(**kwargs) 52 | self.config_groups = CommandConfigGroups(**kwargs) 53 | -------------------------------------------------------------------------------- /zapusk/server/controller_testcase.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | from testfixtures import Replacer, TempDirectory 3 | 4 | from zapusk.services import ( 5 | ConfigService, 6 | SchedulerService, 7 | ExecutorManagerService, 8 | ExecutorManagerKawkaBackend, 9 | ) 10 | 11 | from .api import create_app 12 | 13 | 14 | class ControllerTestCase(TestCase): 15 | maxDiff = None 16 | config_data = "" 17 | 18 | def before_create_services(self): ... 
19 | 20 | def setUp(self) -> None: 21 | self.replace = Replacer() 22 | 23 | self.temp_dir = TempDirectory() 24 | self.config_file = self.temp_dir / "config.yml" 25 | self.config_file.write_text(self.config_data) 26 | 27 | self.before_create_services() 28 | 29 | self.executor_manager_service = ExecutorManagerService( 30 | backend=ExecutorManagerKawkaBackend(), 31 | ) 32 | self.config_service = ConfigService( 33 | config_path=f"{self.temp_dir.path}/config.yml" 34 | ) 35 | self.scheduler_service = SchedulerService( 36 | config_service=self.config_service, 37 | executor_manager_service=self.executor_manager_service, 38 | ) 39 | self.scheduler_service.start() 40 | 41 | self.app = create_app( 42 | executor_manager_service=self.executor_manager_service, 43 | config_service=self.config_service, 44 | scheduler_service=self.scheduler_service, 45 | ) 46 | self.test_client = self.app.test_client() 47 | 48 | def tearDown(self) -> None: 49 | self.executor_manager_service.terminate() 50 | self.scheduler_service.terminate() 51 | self.temp_dir.cleanup() 52 | self.replace.restore() 53 | 54 | def write_config(self, data): 55 | self.config_file.write_text(data) 56 | 57 | def replace_in_environ(self, key, value): 58 | self.replace.in_environ(key, value) 59 | -------------------------------------------------------------------------------- /zapusk/__main__.py: -------------------------------------------------------------------------------- 1 | #! 
/bin/python 2 | import logging 3 | from docopt import docopt 4 | import importlib.metadata 5 | 6 | from zapusk.server import create_app 7 | from zapusk.logger import set_loglevel 8 | from zapusk.services.config.service import ConfigService 9 | from zapusk.services import ( 10 | ExecutorManagerService, 11 | ExecutorManagerKawkaBackend, 12 | ) 13 | from zapusk.services.scheduler_service.service import SchedulerService 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | doc = """zapusk 18 | 19 | Simple background job runner 20 | 21 | Usage: 22 | zapusk-server -h | --help 23 | zapusk-server --version 24 | zapusk-server [--config=PATH] [--verbose] 25 | 26 | Options: 27 | -h --help Show this screen 28 | --version Show version. 29 | -v --verbose Enable logging 30 | 31 | --config PATH Define custom config 32 | 33 | 34 | Examples: 35 | pusk start --config ~/.config/pusk/pusk.yml 36 | """ 37 | 38 | version = importlib.metadata.version("zapusk") 39 | 40 | 41 | def main(): 42 | args = docopt(doc, version=version) 43 | print(args) 44 | 45 | if "--verbose" in args: 46 | set_loglevel("DEBUG") 47 | logger.info("Verbose logging has been enabled") 48 | 49 | logger.info(f"{args}") 50 | logger.info("Start") 51 | 52 | executor_manager_service = ExecutorManagerService( 53 | backend=ExecutorManagerKawkaBackend(), 54 | ) 55 | 56 | config_service = ConfigService(args["--config"]) 57 | 58 | scheduler_service = SchedulerService( 59 | executor_manager_service=executor_manager_service, 60 | config_service=config_service, 61 | ) 62 | scheduler_service.start() 63 | 64 | app = create_app( 65 | executor_manager_service=executor_manager_service, 66 | config_service=config_service, 67 | scheduler_service=scheduler_service, 68 | ) 69 | app.run(host="0.0.0.0", port=config_service.get_config().port) 70 | 71 | 72 | if __name__ == "__main__": 73 | main() 74 | -------------------------------------------------------------------------------- /zapusk/kawka/topic_iterator.py: 
class TopicIterator:
    """Iterator over a topic's singly linked message list.

    With ``block=True`` the iterator waits on the topic's ``received``
    condition for new messages instead of stopping at the tail; iteration
    terminates only when an ``End`` marker node is reached.  With
    ``block=False`` it yields whatever is currently in the topic and stops.
    """

    def __init__(self, topic, head, block=True):
        # topic: owner of the `received` condition used to wait/notify;
        # head: linked-list node to start from.
        self.topic = topic
        self.block = block

        # `prev` trails one node behind `cur` so that, after draining the
        # list, we can re-check `prev.next` for messages appended later.
        self.prev = None
        self.cur = head

    def __iter__(self):
        logger.debug(f"{self}: initialized")
        return self

    def __next__(self):
        # Skip Start node
        if self.cur and self.cur.data == Start:
            [self.prev, self.cur] = [self.cur, self.cur.next]

        if self.block:
            # If iterator reached the end of a topic, let's terminate
            if self.cur and self.cur.data is End:
                logger.debug(f"{self}: iterator is over. StopIteration")
                # Pass the notification along so other readers blocked on
                # the condition also observe the End marker.
                with self.topic.received:
                    self.topic.received.notify()
                raise StopIteration

            # Drained the list: wait until the producer appends something.
            if self.prev and not self.cur:
                if not self.prev.next:
                    logger.debug(
                        f"{self}: waiting for upcoming message. self.prev:{self.prev.data}"
                    )
                    with self.topic.received:
                        self.topic.received.wait()

                self.cur = self.prev.next

                # if self.cur is @End, terminate iterator and notify all other readers
                if self.cur and self.cur.data is End:
                    logger.debug(f"{self}: StopIteration")
                    with self.topic.received:
                        self.topic.received.notify()
                    raise StopIteration

            [self.prev, self.cur] = [self.cur, self.cur.next]
            logger.debug(f"{self}: returns {self.prev.data}")
            return self.prev.data

        # Non-block iteration
        if not self.cur or self.cur.data is End:
            logger.debug(f"{self}: StopIteration")
            raise StopIteration

        [self.prev, self.cur] = [self.cur, self.cur.next]
        return self.prev.data

    def __str__(self) -> str:
        return f"iter.{self.topic.name}"
producer=input_producer, 32 | ) 33 | executor_manager.start() 34 | 35 | self.Popen.set_command("get_args", stdout=b"hello world", stderr=b"") 36 | self.Popen.set_command( 37 | "my_command hello world", stdout=b"hello world", stderr=b"" 38 | ) 39 | 40 | item = create_jobitem(command="my_command", args_command="get_args") 41 | 42 | input_producer.add(item) 43 | input_producer.add(Producer.End) 44 | 45 | sleep(1) 46 | executor_manager.join(2) 47 | 48 | self.assertEqual( 49 | self.Popen.all_calls[0], 50 | call.Popen( 51 | "get_args", 52 | shell=True, 53 | env={**os.environ}, 54 | cwd="/home/", 55 | stdout=-1, 56 | stderr=-1, 57 | ), 58 | ) 59 | self.assertEqual( 60 | self.Popen.all_calls[3], 61 | call.Popen( 62 | "my_command hello world", 63 | shell=True, 64 | env={**os.environ}, 65 | cwd="/home/", 66 | stdout=mock.ANY, 67 | stderr=mock.ANY, 68 | ), 69 | ) 70 | self.assertEqual(item.state, Job.JOB_STATE_ENUM.FINISHED) 71 | -------------------------------------------------------------------------------- /zapusk/client/command_list_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import responses 3 | 4 | from zapusk.models.job import JOB_STATE_ENUM 5 | 6 | from .command_testcase import CommandTestCase 7 | 8 | 9 | class TestCommandList(CommandTestCase): 10 | @responses.activate 11 | def test_should_list_jobs(self): 12 | data = [ 13 | { 14 | "id": 1, 15 | "state": JOB_STATE_ENUM.PENDING, 16 | }, 17 | { 18 | "id": 2, 19 | "state": JOB_STATE_ENUM.RUNNING, 20 | }, 21 | ] 22 | 23 | responses.get("http://example.com/jobs/", status=200, json={"data": data}) 24 | 25 | self.command_manager.list.run() 26 | json_data = json.loads(self.printer.print.call_args[0][0]) 27 | 28 | self.assertEqual(json_data, data) 29 | 30 | @responses.activate 31 | def test_should_list_jobs_with_filter(self): 32 | data = [ 33 | { 34 | "id": 1, 35 | "state": JOB_STATE_ENUM.PENDING, 36 | }, 37 | { 38 | "id": 2, 39 | "state": 
JOB_STATE_ENUM.RUNNING, 40 | }, 41 | ] 42 | 43 | responses.get("http://example.com/jobs/", status=200, json={"data": data}) 44 | 45 | self.command_manager.list.run(filter=JOB_STATE_ENUM.PENDING) 46 | json_data = json.loads(self.printer.print.call_args[0][0]) 47 | 48 | self.assertEqual(json_data, [data[0]]) 49 | 50 | @responses.activate 51 | def test_should_list_scheduled_jobs(self): 52 | data = [ 53 | { 54 | "id": 1, 55 | }, 56 | ] 57 | 58 | responses.get( 59 | "http://example.com/scheduled-jobs/", status=200, json={"data": data} 60 | ) 61 | 62 | self.command_manager.list.run(scheduled=True) 63 | json_data = json.loads(self.printer.print.call_args[0][0]) 64 | 65 | self.assertEqual(json_data, data) 66 | 67 | @responses.activate 68 | def test_should_list_jobs_error(self): 69 | responses.get("http://example.com/jobs/", status=400, json={"error": "ERROR"}) 70 | 71 | self.command_manager.list.run() 72 | json_data = json.loads(self.printer.print.call_args[0][0]) 73 | 74 | self.assertEqual(json_data, {"error": {"message": "ERROR"}}) 75 | -------------------------------------------------------------------------------- /zapusk/services/executor_manager/backends/kawka/backend_test.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | from unittest import TestCase 3 | 4 | from zapusk.lib.create_jobitem import create_jobitem 5 | from zapusk.models import Job 6 | 7 | from .backend import ExecutorManagerKawkaBackend 8 | 9 | 10 | class TestKawkaBackend(TestCase): 11 | def test_kawka_backend_add(self): 12 | backend = ExecutorManagerKawkaBackend() 13 | backend.start() 14 | 15 | item = create_jobitem(command="echo 1") 16 | backend.add(item) 17 | backend.terminate() 18 | 19 | self.assertEqual(item.state, Job.JOB_STATE_ENUM.FINISHED) 20 | 21 | def test_kawka_backend_get(self): 22 | backend = ExecutorManagerKawkaBackend() 23 | backend.start() 24 | 25 | item = create_jobitem(command="echo 1") 26 | backend.add(item) 27 | 28 | 
backend.terminate() 29 | 30 | res = backend.get(item.id) 31 | 32 | self.assertEqual(res, item) 33 | 34 | def test_kawka_backend_get_none(self): 35 | backend = ExecutorManagerKawkaBackend() 36 | backend.start() 37 | 38 | item = create_jobitem(command="echo 1") 39 | backend.add(item) 40 | 41 | backend.terminate() 42 | 43 | res = backend.get(999) 44 | 45 | self.assertEqual(res, None) 46 | 47 | def test_kawka_backend_list(self): 48 | backend = ExecutorManagerKawkaBackend() 49 | backend.start() 50 | 51 | backend.add(create_jobitem(command="echo 1")) 52 | backend.add(create_jobitem(command="echo 2")) 53 | backend.add(create_jobitem(command="echo 3")) 54 | 55 | backend.terminate() 56 | 57 | res = backend.list() 58 | 59 | self.assertEqual(len(res), 3) 60 | self.assertEqual(res[0].command, "echo 1") 61 | self.assertEqual(res[1].command, "echo 2") 62 | self.assertEqual(res[2].command, "echo 3") 63 | 64 | def test_kawka_backend_cancel(self): 65 | backend = ExecutorManagerKawkaBackend() 66 | backend.start() 67 | 68 | item = create_jobitem(command="sleep 10") 69 | backend.add(item) 70 | 71 | sleep(1) 72 | 73 | res = backend.get(item.id) 74 | 75 | if not res: 76 | raise Exception("Fail") 77 | 78 | self.assertEqual(res.state, Job.JOB_STATE_ENUM.RUNNING) 79 | 80 | backend.cancel(item) 81 | sleep(1) 82 | 83 | self.assertEqual(item.state, Job.JOB_STATE_ENUM.CANCELLED) 84 | 85 | backend.terminate() 86 | -------------------------------------------------------------------------------- /zapusk/client/command_waybar.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from itertools import islice 3 | import json 4 | import human_readable 5 | import dateutil 6 | 7 | from .api_client import ApiClientError 8 | from .command import Command 9 | 10 | 11 | STATE_LOOKUP = { 12 | "PENDING": " ", 13 | "RUNNING": " ", 14 | "FINISHED": " ", 15 | "FAILED": " ", 16 | "CANCELLED": " ", 17 | } 18 | 19 | TIME_PREFIX_LOOKUP = { 20 | "PENDING": 
"queued", 21 | "RUNNING": "started", 22 | "FINISHED": "finished", 23 | "FAILED": "failed", 24 | "CANCELLED": "cancelled", 25 | } 26 | 27 | 28 | class CommandWaybar(Command): 29 | def run(self): 30 | try: 31 | all_jobs = self.api_client.list_jobs() 32 | self.output.json( 33 | { 34 | "text": self.__build_text(all_jobs), 35 | "tooltip": self.__build_tooltip(all_jobs), 36 | }, 37 | one_line=True, 38 | ) 39 | 40 | except ApiClientError as ex: 41 | self.output.text("{" + f'"text": "{ex}"' + "}") 42 | 43 | def __build_text(self, all_jobs): 44 | pending_jobs = [i for i in all_jobs if i["state"] == "PENDING"] 45 | running_jobs = [i for i in all_jobs if i["state"] == "RUNNING"] 46 | finished_jobs = [i for i in all_jobs if i["state"] == "FINISHED"] 47 | failed_jobs = [i for i in all_jobs if i["state"] == "FAILED"] 48 | cancelled_jobs = [i for i in all_jobs if i["state"] == "CANCELLED"] 49 | 50 | return " ".join( 51 | [ 52 | f"{STATE_LOOKUP['PENDING']} {len(pending_jobs)}", 53 | f"{STATE_LOOKUP['RUNNING']} {len(running_jobs)}", 54 | f"{STATE_LOOKUP['FINISHED']} {len(finished_jobs)}", 55 | f"{STATE_LOOKUP['FAILED']} {len(failed_jobs)}", 56 | f"{STATE_LOOKUP['CANCELLED']} {len(cancelled_jobs)}", 57 | ] 58 | ) 59 | 60 | def __build_tooltip(self, all_jobs): 61 | LAST_JOBS_AMOUNT = 20 62 | now = datetime.datetime.now().timestamp() 63 | last_jobs = islice(reversed(all_jobs), LAST_JOBS_AMOUNT) 64 | 65 | return "\r\n".join( 66 | [ 67 | f"{i['name']}(id={i["id"]}) {TIME_PREFIX_LOOKUP[i['state']]} {human_readable.date_time(now - self.__parse(i['updated_at']))}" 68 | for i in last_jobs 69 | ] 70 | ) 71 | 72 | def __parse(self, date_str): 73 | return dateutil.parser.parse(date_str, ignoretz=True).timestamp() # type: ignore 74 | -------------------------------------------------------------------------------- /zapusk/services/executor_manager/backends/kawka/executor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 
import subprocess 4 | from time import time 5 | from datetime import datetime 6 | 7 | from zapusk.kawka import Consumer 8 | from zapusk.models import Job 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class Executor(Consumer): 14 | def process(self, job: Job): 15 | logger.info(f"{self} received a job to run {job}") 16 | 17 | logfile_path = f"/tmp/zapusk-{time()}.log" 18 | 19 | if job.state == Job.JOB_STATE_ENUM.CANCELLED: 20 | logger.info("Skipping cancelled job {job.id}") 21 | return 22 | 23 | job.state = Job.JOB_STATE_ENUM.RUNNING 24 | job.log = logfile_path 25 | job.consumed_by = self.name 26 | job.updated_at = datetime.now() 27 | job.command = " ".join([job.command, *job.args]) 28 | 29 | logger.info(f"Run a command {job.command}") 30 | 31 | with open(logfile_path, "w") as logfile: 32 | proc = subprocess.Popen( 33 | job.command, 34 | shell=True, 35 | stdout=logfile, 36 | stderr=logfile, 37 | env={**os.environ}, 38 | cwd=job.cwd, 39 | ) 40 | job.pid = proc.pid 41 | 42 | exit_code = proc.wait() 43 | 44 | job.exit_code = exit_code 45 | if job.state == Job.JOB_STATE_ENUM.CANCELLED: 46 | logger.info(f"Job {job.id} has been cancelled") 47 | return 48 | 49 | if exit_code == 0: 50 | job.state = Job.JOB_STATE_ENUM.FINISHED 51 | job.updated_at = datetime.now() 52 | logger.info(f"{self.name} finished {job} job") 53 | 54 | on_finish = job.on_finish or job.group_config.on_finish 55 | if on_finish: 56 | subprocess.Popen( 57 | on_finish.format(job=job), 58 | shell=True, 59 | env={**os.environ}, 60 | cwd=job.cwd, 61 | ) 62 | 63 | else: 64 | job.state = Job.JOB_STATE_ENUM.FAILED 65 | job.updated_at = datetime.now() 66 | 67 | on_fail = job.on_fail or job.group_config.on_fail 68 | if on_fail: 69 | subprocess.Popen( 70 | on_fail.format(job=job), 71 | shell=True, 72 | env={**os.environ}, 73 | cwd=job.cwd, 74 | ) 75 | 76 | logger.info(f"{self.name} failed {job} job") 77 | -------------------------------------------------------------------------------- 
/zapusk/services/executor_manager/backends/kawka/args_consumer_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | 3 | from zapusk.kawka import Producer 4 | from zapusk.lib.create_jobitem import create_jobitem 5 | from zapusk.models import Job 6 | 7 | from .args_consumer import ArgsConsumer 8 | 9 | 10 | class ArgsConsumerTest(TestCase): 11 | 12 | def test_should_run_args_command_and_add_arguments_to_a_jobitem(self): 13 | input_producer = Producer("input_producer") 14 | sink_producer = Producer("sink_producer") 15 | 16 | args_consumer = ArgsConsumer( 17 | name="args_consumer", 18 | producer=input_producer, 19 | context={ 20 | "sink": sink_producer, 21 | }, 22 | ) 23 | 24 | args_consumer.start() 25 | 26 | item = create_jobitem(command="echo", args_command="echo 1") 27 | input_producer.add(item) 28 | input_producer.add(Producer.End) 29 | 30 | args_consumer.join() 31 | 32 | self.assertEqual(item.args, ["1"]) 33 | self.assertEqual(item.state, Job.JOB_STATE_ENUM.PENDING) 34 | self.assertEqual(len(list(sink_producer.all(block=False))), 1) 35 | 36 | def test_should_pass_items_without_args_command_to_the_sink(self): 37 | input_producer = Producer("input_producer") 38 | sink_producer = Producer("sink_producer") 39 | 40 | args_consumer = ArgsConsumer( 41 | name="args_consumer", 42 | producer=input_producer, 43 | context={ 44 | "sink": sink_producer, 45 | }, 46 | ) 47 | 48 | args_consumer.start() 49 | 50 | item = create_jobitem(command="echo") 51 | input_producer.add(item) 52 | input_producer.add(Producer.End) 53 | 54 | args_consumer.join() 55 | 56 | self.assertEqual(item.args, []) 57 | self.assertEqual(item.state, Job.JOB_STATE_ENUM.PENDING) 58 | self.assertEqual(len(list(sink_producer.all(block=False))), 1) 59 | 60 | def test_should_set_state_to_failed_args_command_fails(self): 61 | input_producer = Producer("input_producer") 62 | sink_producer = Producer("sink_producer") 63 | 64 | args_consumer = 
def create_scheduled_jobs_api(
    scheduler_service: SchedulerService,
    config_service: ConfigService,
):
    """Create the Flask blueprint exposing scheduled-job endpoints.

    Routes:
        GET    /scheduled-jobs/                -- list scheduled jobs
        POST   /scheduled-jobs/                -- schedule a new job
        DELETE /scheduled-jobs/<scheduled_id>  -- delete a scheduled job
    """
    scheduled_jobs_api = Blueprint("scheduled_jobs", __name__)

    @scheduled_jobs_api.route("/scheduled-jobs/")
    def scheduled_jobs_list():
        scheduled_jobs = scheduler_service.list()
        return JsonSerdes.serialize(scheduled_jobs)

    @scheduled_jobs_api.route("/scheduled-jobs/", methods=["POST"])
    def scheduled_jobs_add():
        body = request.json or {}

        command = body.get("command", None)
        if not command:
            return abort(
                error_response(
                    status=400,
                    error="Request body contains no `command`",
                )
            )

        name = body.get("name", None)
        group_id = body.get("group_id", None)

        # Validate the group only when one was supplied.
        if group_id:
            group = config_service.get_job_group(group_id)
            if not group:
                return abort(
                    error_response(
                        status=404,
                        error=f"Unknown group `{group_id}`",
                    )
                )

        schedule = body.get("schedule", None)

        if not schedule:
            return abort(
                error_response(
                    status=400,
                    # plain string — there is nothing to interpolate
                    error="Request body contains no `schedule`",
                )
            )

        # Single quotes inside the f-string keep this valid before
        # Python 3.12 (quote reuse is PEP 701, 3.12+ only).
        cmd_id = f"scheduled.{IdField.next('scheduled')}"

        job_config = JobConfig(
            id=cmd_id,
            name=name or f"{group_id}.{cmd_id}",
            schedule=schedule,
            command=command,
        )

        is_added = scheduler_service.add(job_config)

        if not is_added:
            return abort(
                error_response(
                    status=500,
                    error="Scheduled job hasn't been added",
                )
            )

        return JsonSerdes.serialize(job_config)

    # BUG FIX: the handler takes `scheduled_id`, but the rule had no URL
    # variable, so Flask could never supply it (every DELETE raised a
    # TypeError).  The rule must declare the <scheduled_id> converter.
    @scheduled_jobs_api.route("/scheduled-jobs/<scheduled_id>", methods=["DELETE"])
    def scheduled_jobs_cancel(scheduled_id: str):
        return JsonSerdes.serialize(scheduler_service.delete(scheduled_id))

    return scheduled_jobs_api
"parallel": 1, 46 | }, 47 | { 48 | "id": "parallel", 49 | "on_fail": None, 50 | "on_finish": None, 51 | "parallel": 2, 52 | }, 53 | ] 54 | }, 55 | ) 56 | 57 | def test_config_jobs_list(self): 58 | self.write_config(CONFIG_DATA) 59 | self.replace_in_environ("HOME", "/home/kanye") 60 | res = self.test_client.get("/config/jobs/") 61 | data = json.loads(res.data) 62 | self.assertEqual( 63 | data, 64 | { 65 | "data": [ 66 | { 67 | "args_command": None, 68 | "command": "test1", 69 | "group": "default", 70 | "id": "test1", 71 | "name": "Test1", 72 | "on_fail": None, 73 | "on_finish": None, 74 | "schedule": None, 75 | "cwd": "/home/", 76 | }, 77 | { 78 | "args_command": None, 79 | "command": "test2", 80 | "group": "default", 81 | "id": "test2", 82 | "name": "Test2", 83 | "on_fail": None, 84 | "on_finish": None, 85 | "schedule": None, 86 | "cwd": "/home/kanye", 87 | }, 88 | ] 89 | }, 90 | ) 91 | -------------------------------------------------------------------------------- /zapusk/client/output_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from unittest import TestCase 3 | from unittest.mock import patch 4 | 5 | 6 | from .output import Output 7 | from .printer import Printer 8 | 9 | 10 | class MockPrinter(Printer): 11 | def print(self, *args, **kwargs): 12 | pass 13 | 14 | 15 | class TestOutput(TestCase): 16 | def setUp(self) -> None: 17 | self.mock_printer = MockPrinter() 18 | return super().setUp() 19 | 20 | def test_should_print_to_stdout(self): 21 | with patch.object(self.mock_printer, "print") as mock: 22 | output = Output(printer=self.mock_printer) 23 | output.text("Hello World!") 24 | 25 | mock.assert_called_with("Hello World!") 26 | 27 | def test_should_print_to_stderr(self): 28 | with patch.object(self.mock_printer, "print") as mock: 29 | output = Output(printer=self.mock_printer) 30 | output.text("Hello World!", file=sys.stderr) 31 | 32 | mock.assert_called_with("Hello World!", file=sys.stderr) 33 | 
34 | def test_should_print_json(self): 35 | with patch.object(self.mock_printer, "print") as mock: 36 | output = Output(printer=self.mock_printer) 37 | output.json({"key": "val"}) 38 | 39 | mock.assert_called_with('{\n "key": "val"\n}') 40 | 41 | def test_should_print_json_one_line(self): 42 | with patch.object(self.mock_printer, "print") as mock: 43 | output = Output(printer=self.mock_printer) 44 | output.json({"key": "val"}, one_line=True) 45 | 46 | mock.assert_called_with('{"key": "val"}') 47 | 48 | def test_should_print_json_with_colors(self): 49 | with patch.object(self.mock_printer, "print") as mock: 50 | output = Output(printer=self.mock_printer) 51 | output.json({"key": "val"}, colors=True) 52 | 53 | mock.assert_called_with( 54 | '{\x1b[37m\x1b[39;49;00m\n\x1b[37m \x1b[39;49;00m\x1b[94m"key"\x1b[39;49;00m:\x1b[37m \x1b[39;49;00m\x1b[33m"val"\x1b[39;49;00m\x1b[37m\x1b[39;49;00m\n}\x1b[37m\x1b[39;49;00m\n' 55 | ) 56 | 57 | def test_should_print_json_error(self): 58 | with patch.object(self.mock_printer, "print") as mock: 59 | output = Output(printer=self.mock_printer) 60 | output.json({"key": "val"}, file=sys.stderr) 61 | 62 | mock.assert_called_with('{\n "key": "val"\n}', file=sys.stderr) 63 | 64 | def test_should_print_error(self): 65 | with patch.object(self.mock_printer, "print") as mock: 66 | output = Output(printer=self.mock_printer) 67 | 68 | class MockException: 69 | message = "Hello World!" 
    @responses.activate
    def test_should_run_job(self):
        """Waybar output: per-state counters in `text`, recent jobs in `tooltip`."""
        # Fixed offsets from "now" make the human-readable relative times
        # in the tooltip deterministic.
        now = datetime.datetime.now()
        ago_1m = str(now - datetime.timedelta(minutes=1))
        ago_1h = str(now - datetime.timedelta(hours=1))
        ago_1d = str(now - datetime.timedelta(days=1))
        ago_7d = str(now - datetime.timedelta(days=7))

        # One job per state, so every counter in `text` is exactly 1.
        data = [
            {
                "id": 1,
                "name": "P",
                "state": JOB_STATE_ENUM.PENDING,
                "updated_at": ago_1m,
            },
            {
                "id": 2,
                "name": "R",
                "state": JOB_STATE_ENUM.RUNNING,
                "updated_at": ago_1h,
            },
            {
                "id": 3,
                "name": "C",
                "state": JOB_STATE_ENUM.CANCELLED,
                "updated_at": ago_1d,
            },
            {
                "id": 4,
                "name": "D",
                "state": JOB_STATE_ENUM.FINISHED,
                "updated_at": ago_7d,
            },
            {
                "id": 5,
                "name": "F",
                "state": JOB_STATE_ENUM.FAILED,
                "updated_at": ago_1m,
            },
        ]

        responses.get(
            "http://example.com/jobs/",
            status=200,
            json={"data": data},
            match=[],
        )

        self.command_manager.waybar.run()
        json_data = json.loads(self.printer.print.call_args[0][0])

        # The \uf4ab etc. glyphs are the icon-font state markers.
        self.assertEqual(
            json_data["text"], "\uf4ab 1 \uf144 1 \uf058 1 \uf06a 1 \uf057 1"
        )
        # Tooltip lists jobs newest-first (reversed input order).
        self.assertEqual(
            json_data["tooltip"],
            "\r\n".join(
                [
                    "F(id=5) failed a minute ago",
                    "D(id=4) finished 7 days ago",
                    "C(id=3) cancelled a day ago",
                    "R(id=2) started an hour ago",
                    "P(id=1) queued a minute ago",
                ]
            ),
        )
range(100)] 51 | producer.add(Producer.End) 52 | 53 | cg.join(5) 54 | 55 | results = [c.results for c in cg._consumers] 56 | results = list(itertools.chain.from_iterable(results)) 57 | 58 | consumed_by = list(map(lambda x: x["consumed_by"], results)) 59 | 60 | self.assertEqual(len(results), 100) 61 | 62 | self.assertEqual(any(map(lambda x: x == "DummyGroup_0", consumed_by)), True) 63 | self.assertEqual(any(map(lambda x: x == "DummyGroup_1", consumed_by)), True) 64 | 65 | def test_consumergroup_sink(self): 66 | input_producer = Producer(name="input_producer", block=True) 67 | sink_producer = Producer(name="sink_producer", block=True) 68 | 69 | class SinkConsumer(Consumer): 70 | def process(self, msg): 71 | self.context["sink"].add(msg) # type: ignore 72 | 73 | cg = ConsumerGroup( 74 | name="SinkGroup", 75 | producer=input_producer, 76 | Consumer=SinkConsumer, 77 | parallel=1, 78 | context={ 79 | "sink": sink_producer, 80 | }, 81 | ) 82 | cg.start() 83 | 84 | [input_producer.add({"id": i}) for i in range(100)] 85 | input_producer.add(Producer.End) 86 | 87 | cg.join(2) 88 | 89 | self.assertEqual(len(list(sink_producer.all(block=False))), 100) 90 | -------------------------------------------------------------------------------- /zapusk/services/config/service.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from os.path import isfile 4 | from typing import Optional, cast 5 | 6 | from zapusk.models.job_group import JobGroup 7 | from .config_parser import ConfigParser 8 | from .yaml_filereader import YamlFileReader 9 | 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class ConfigService: 15 | config_path: str | None 16 | 17 | def __init__( 18 | self, 19 | config_path: Optional[str] = None, 20 | file_reader=YamlFileReader(), 21 | parser=ConfigParser(), 22 | ): 23 | self.file_reader = file_reader 24 | self.parser = parser 25 | self.config_path = self.__get_config_path(config_path) 26 | 27 
| def __get_config_path(self, config_path): 28 | """ 29 | Returns a path to the config file considering evnironment configuration 30 | """ 31 | if config_path: 32 | if isfile(config_path): 33 | return os.path.expanduser(config_path) 34 | return None 35 | 36 | config_dir = os.path.join( 37 | os.environ.get("APPDATA") 38 | or os.environ.get("XDG_CONFIG_HOME") 39 | or os.path.join(os.environ["HOME"], ".config"), 40 | "zapusk", 41 | ) 42 | 43 | logger.info(f"Config Dir: {config_dir}") 44 | 45 | logger.debug(f"Try to load config file: {config_dir}/config.yaml") 46 | if isfile(f"{config_dir}/config.yaml"): 47 | logger.debug(f"Loaded config file: {config_dir}/config.yaml") 48 | return f"{config_dir}/config.yaml" 49 | 50 | logger.debug(f"Try to load config file: {config_dir}/config.yml") 51 | if isfile(f"{config_dir}/config.yml"): 52 | logger.debug(f"Loaded config file: {config_dir}/config.yml") 53 | return f"{config_dir}/config.yml" 54 | else: 55 | return None 56 | 57 | def get_config(self): 58 | if self.config_path: 59 | config = self.file_reader.read(self.config_path) 60 | else: 61 | config = {} 62 | return self.parser.parse(config) 63 | 64 | def list_jobs(self): 65 | config = self.get_config() 66 | return list(config.jobs.values()) 67 | 68 | def list_jobgroups(self): 69 | config = self.get_config() 70 | return list(config.job_groups.values()) 71 | 72 | def get_job(self, job_id: str): 73 | config = self.get_config() 74 | 75 | for job in config.jobs.values(): 76 | if job.id == job_id: 77 | return job 78 | 79 | return None 80 | 81 | def get_job_group(self, job_group_id: str): 82 | config = self.get_config() 83 | 84 | for job_group in config.job_groups.values(): 85 | if job_group.id == job_group_id: 86 | return job_group 87 | 88 | return None 89 | 90 | def get_job_group_or_default(self, job_group_id: str): 91 | job_group = self.get_job_group(job_group_id) 92 | 93 | if not job_group: 94 | job_group = cast(JobGroup, self.get_job_group("default")) 95 | 96 | return job_group 
import logging

from zapusk.kawka import Consumer, ConsumerGroup, Producer
from zapusk.models import Job

from .args_consumer import ArgsConsumer
from .executor import Executor
from .state import ExecutorManagerState


logger = logging.getLogger(__name__)
# Module-level singleton: every ExecutorManagerConsumer instance shares this
# state object via the `state` class attribute below.
executorManagerState = ExecutorManagerState()


class ExecutorManagerConsumer(Consumer):
    """
    Top-level consumer that fans incoming jobs out into per-job-group
    pipelines.

    For each job group it lazily creates two producer/consumer-group pairs:

    * an "args" stage (ArgsConsumer, parallel=1) that resolves a job's
      arguments and forwards the job into the "run" producer, and
    * a "run" stage (Executor, parallel=group_config.parallel) that
      actually executes the job.
    """

    # Shared across all instances; reset() in __init__ clears any pipelines
    # left over from a previous consumer.
    state = executorManagerState

    def __init__(self, block=True, *args, **kwargs):
        # `block` is propagated to every Producer created for a job group.
        self.state.reset()
        self.block = block
        super().__init__(*args, **kwargs)

    def join(self, timeout=None, *args, **kwargs):
        # First wait for every per-group consumer group to drain, then
        # signal their producers to stop by enqueueing the Producer.End
        # sentinel, and finally join this consumer itself.
        for cgs in self.state.running_consumergroups.values():
            [args_cg, run_cg] = cgs
            args_cg.join(timeout)
            run_cg.join(timeout)

        for ps in self.state.running_producers.values():
            [args_ps, run_ps] = ps
            args_ps.add(Producer.End)
            run_ps.add(Producer.End)

        super().join(*args, **kwargs)

    def process(self, job: Job):
        # Route the job into its group's "args" stage; producers and
        # consumer groups are created on first use per group id.
        group_config = job.group_config
        [args_producer, _] = self.__get_or_create_producers(group_config)
        self.__get_or_create_consumergroups(group_config)
        args_producer.add(job)

    def __get_or_create_producers(self, group_config):
        """Return the [args, run] producer pair for a group, creating it once."""
        if group_config.id not in self.state.running_producers:
            args_producer = Producer(
                name=f"producer_{group_config.id}_args", block=self.block
            )
            run_producer = Producer(
                name=f"producer_{group_config.id}_run", block=self.block
            )

            self.state.running_producers[group_config.id] = [
                args_producer,
                run_producer,
            ]
            return [args_producer, run_producer]

        return self.state.running_producers[group_config.id]

    def __get_or_create_consumergroups(self, group_config):
        """Return the [args, run] consumer-group pair for a group, creating it once."""
        if group_config.id not in self.state.running_consumergroups:
            [args_producer, run_producer] = self.__get_or_create_producers(group_config)
            # The args stage is sequential (parallel=1) and forwards resolved
            # jobs into the run producer via its "sink" context entry.
            args_cg = ConsumerGroup(
                name=f"{group_config.id}_args",
                producer=args_producer,
                Consumer=ArgsConsumer,
                parallel=1,
                context={"sink": run_producer},
            )
            args_cg.start()

            # NOTE(review): the run stage also receives its *own* producer as
            # "sink"; feeding a stage back into itself looks suspicious —
            # confirm whether Executor actually uses context["sink"].
            run_cg = ConsumerGroup(
                name=f"{group_config.id}_run",
                producer=run_producer,
                Consumer=Executor,
                parallel=group_config.parallel,
                context={"sink": run_producer},
            )
            run_cg.start()

            self.state.running_consumergroups[group_config.id] = [args_cg, run_cg]
            return [args_cg, run_cg]

        return self.state.running_consumergroups[group_config.id]
consumer.start() 38 | consumer.join() 39 | 40 | self.assertEqual(len(consumer.results), 10) 41 | self.assertEqual(all(map(lambda x: x["consumed"], consumer.results)), True) 42 | 43 | def test_read_from_block_producer_head(self): 44 | producer = Producer(name="DummyProducer", block=True) 45 | consumer = DummyConsumer(producer=producer, from_head=True) 46 | 47 | [producer.add({"id": i, "consumed": False}) for i in range(10)] 48 | producer.add(Producer.End) 49 | 50 | consumer.start() 51 | consumer.join() 52 | 53 | self.assertEqual(len(consumer.results), 10) 54 | self.assertEqual(all(map(lambda x: x["consumed"], consumer.results)), True) 55 | 56 | def test_read_from_producer_tail(self): 57 | producer = Producer(name="DummyProducer", block=True) 58 | 59 | # This events should be ignored, because no consumer yet 60 | [producer.add({"id": i, "consumed": False}) for i in range(-10, 0)] 61 | 62 | # Now start a consumer. It will take only the last item with id -1 63 | consumer = DummyConsumer(producer=producer) 64 | consumer.results = [] 65 | consumer.start() 66 | 67 | # And handle this events 68 | [producer.add({"id": i, "consumed": False}) for i in range(10)] 69 | producer.add(Producer.End) 70 | 71 | consumer.join() 72 | self.assertEqual(len(consumer.results), 11) 73 | self.assertEqual(all(map(lambda x: x["consumed"], consumer.results)), True) 74 | self.assertEqual( 75 | list(map(lambda x: x["id"], consumer.results)), list(range(-1, 10)) 76 | ) 77 | 78 | def test_slow_consumer(self): 79 | producer = Producer(name="DummyProducer", block=True) 80 | consumer = SleepyConsumer(producer=producer, sleep=1) 81 | consumer.start() 82 | 83 | producer.add({"id": 1, "consumed": False}) 84 | sleep(0.5) 85 | producer.add({"id": 2, "consumed": False}) 86 | producer.add(Producer.End) 87 | 88 | consumer.join() 89 | 90 | self.assertEqual(len(consumer.results), 2) 91 | self.assertEqual(all(map(lambda x: x["consumed"], consumer.results)), True) 92 | 
from datetime import datetime, timezone
import logging
from threading import Thread
from time import sleep

from zapusk.models import Job, ScheduledJob
from zapusk.services.config import ConfigService
from zapusk.services.executor_manager import ExecutorManagerService


logger = logging.getLogger(__name__)


class SchedulerService:
    """
    Polls scheduled job configs and submits due jobs to the executor
    manager.

    The polling loop runs in a background thread started by ``start()`` and
    stops within one ``interval`` after ``terminate()`` is called.
    """

    __terminated = False

    def __init__(
        self,
        config_service: ConfigService,
        executor_manager_service: ExecutorManagerService,
        interval: float = 1,
    ) -> None:
        """
        Args:
            config_service: source of job/job-group configuration; when
                falsy a default ``ConfigService`` is created.
            executor_manager_service: queue that actually runs the jobs.
            interval: polling period of the scheduler loop, in seconds.
        """
        self.__interval = interval
        self.__config_service = config_service if config_service else ConfigService()
        self.__executor_manager_service = executor_manager_service
        # BUG FIX: read jobs through self.__config_service so the
        # ConfigService() fallback above actually takes effect; the original
        # accessed the (possibly falsy) parameter directly and would crash.
        self.__scheduled_jobs = [
            j for j in self.__config_service.list_jobs() if j.schedule
        ]
        logger.info(f"Scheduled jobs detected {[i.id for i in self.__scheduled_jobs]}")

        self.__data: dict[str, ScheduledJob] = {}
        # Handle to the polling thread (set by start()) so it is not lost
        # and can be joined after terminate().
        self.__thread: Thread | None = None

    def start(self) -> None:
        """
        Reads config service, schedules jobs and starts the scheduler
        loop in a background thread.
        """
        self.add_from_config()
        self.__thread = Thread(target=self.__start_thread)
        self.__thread.start()

    def add(self, job_config):
        """
        Schedule a job from a JobConfig.

        Returns True on success, False when ScheduledJob rejects the
        config (e.g. an invalid cron expression).
        """
        try:
            scheduled_job = ScheduledJob(
                job_config=job_config,
            )
            self.__data[job_config.id] = scheduled_job
            return True
        except ValueError as ex:
            logger.info(ex.args[0])
            return False

    def delete(self, job_config_id):
        """
        Remove the scheduled job with the given config id.

        Returns True when something was removed, False otherwise.
        """
        if job_config_id in self.__data:
            del self.__data[job_config_id]
            return True
        return False

    def list(self):
        """
        List all scheduled job configs.
        """
        return [sj.job_config for sj in self.__data.values()]

    def add_from_config(self) -> None:
        """
        Reads config from self.config_service and schedules all jobs.
        """
        for job_config in self.__scheduled_jobs:
            # Just a type guard, was checked in __init__
            if not job_config.schedule:  # pragma: no cover
                continue
            self.add(job_config)

    def terminate(self) -> None:
        """Ask the polling loop to stop; it exits within one interval."""
        self.__terminated = True

    def __start_thread(self) -> None:
        # Polling loop: runs until terminate() flips the flag.
        while not self.__terminated:
            now = datetime.now(timezone.utc)
            for scheduled_item in self.__data.values():
                logger.debug(f"Checking schedule for {scheduled_item}")
                logger.debug(
                    f"NEXT:{datetime.fromtimestamp(scheduled_item.next)} < NOW:{now}"
                )
                if scheduled_item.next < now.timestamp():
                    self.__run_job(scheduled_item, now)

            sleep(self.__interval)

    def __run_job(self, scheduled_job: ScheduledJob, now: datetime) -> None:
        # Resolve the job's group (falling back to "default"), record the
        # run so `next` advances, and enqueue the job for execution.
        job_config = scheduled_job.job_config
        job_group = self.__config_service.get_job_group_or_default(
            job_config.group or "default"
        )

        logger.info(f"Adding a job {scheduled_job.job_config} to the queue")
        scheduled_job.record_run(now)
        self.__executor_manager_service.add(
            Job.from_config(
                group_config=job_group,
                config=job_config,
            )
        )
{"id": 1}}, 18 | match=[ 19 | matchers.json_params_matcher( 20 | { 21 | "command": "echo 1", 22 | "group_id": "echo", 23 | "name": "Echo", 24 | "cwd": "/home/anton/", 25 | } 26 | ) 27 | ], 28 | ) 29 | 30 | self.command_manager.exec.run( 31 | command="echo 1", 32 | group_id="echo", 33 | name="Echo", 34 | cwd="/home/anton/", 35 | ) 36 | json_data = json.loads(self.printer.print.call_args[0][0]) 37 | 38 | self.assertEqual(json_data, {"id": 1}) 39 | 40 | @responses.activate 41 | def test_should_exec_scheduled_job(self): 42 | responses.post( 43 | "http://example.com/scheduled-jobs/", 44 | status=200, 45 | json={"data": {"id": 1}}, 46 | match=[ 47 | matchers.json_params_matcher( 48 | { 49 | "command": "echo 1", 50 | "group_id": "echo", 51 | "name": "Echo", 52 | "cwd": "/home/anton/", 53 | "schedule": "*/1 * * * *", 54 | } 55 | ) 56 | ], 57 | ) 58 | 59 | self.command_manager.exec.run( 60 | command="echo 1", 61 | group_id="echo", 62 | name="Echo", 63 | schedule="*/1 * * * *", 64 | cwd="/home/anton/", 65 | ) 66 | json_data = json.loads(self.printer.print.call_args[0][0]) 67 | 68 | self.assertEqual(json_data, {"id": 1}) 69 | 70 | @responses.activate 71 | def test_should_handle_error(self): 72 | responses.post( 73 | "http://example.com/jobs/", 74 | status=400, 75 | json={"error": "ERROR"}, 76 | ) 77 | 78 | self.command_manager.exec.run( 79 | command="echo 1", 80 | cwd="/home/anton/", 81 | ) 82 | args = self.printer.print.call_args[0] 83 | message = json.loads(args[0]) 84 | 85 | self.assertEqual(message, {"error": {"message": "ERROR"}}) 86 | 87 | @responses.activate 88 | def test_should_tail_job(self): 89 | responses.post( 90 | "http://example.com/jobs/", 91 | status=200, 92 | json={"data": {"id": 1}}, 93 | ) 94 | responses.get( 95 | "http://example.com/jobs/1", 96 | status=200, 97 | json={ 98 | "data": { 99 | "id": 1, 100 | "log": "/var/tail.log", 101 | "cwd": "/home/anton/", 102 | }, 103 | }, 104 | ) 105 | 106 | with patch( 107 | "zapusk.client.command_tail.tail", 
return_value=["log line 1", "log line 2"] 108 | ): 109 | self.command_manager.exec.run( 110 | command="echo 1", 111 | tail=True, 112 | cwd="/home/anton/", 113 | ) 114 | 115 | log_line1 = self.printer.print.call_args_list[0] 116 | log_line2 = self.printer.print.call_args_list[1] 117 | 118 | self.assertEqual(log_line1, call("log line 1", end="")) 119 | self.assertEqual(log_line2, call("log line 2", end="")) 120 | -------------------------------------------------------------------------------- /zapusk/models/job.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from datetime import datetime 3 | from enum import Enum 4 | import os 5 | from typing import Optional 6 | 7 | from .id_field import IdField 8 | from .job_config import JobConfig 9 | from .job_group import JobGroup 10 | 11 | 12 | class JOB_STATE_ENUM(str, Enum): 13 | """ 14 | Enum contains possible job states 15 | """ 16 | 17 | PENDING = "PENDING" 18 | """ 19 | Job is added, but hasn't been picked up by any consumer yet. 20 | """ 21 | 22 | RUNNING = "RUNNING" 23 | """ 24 | Job has been picked up by a consumer. 25 | """ 26 | 27 | FINISHED = "FINISHED" 28 | """ 29 | Job has been finished with zero exit code. 30 | """ 31 | 32 | FAILED = "FAILED" 33 | """ 34 | Job has been finished with non-zero exit code. 
35 | """ 36 | 37 | CANCELLED = "CANCELLED" 38 | """ 39 | Job has been cancelled before completion 40 | """ 41 | 42 | 43 | @dataclass 44 | class Job: 45 | """ 46 | Job model 47 | """ 48 | 49 | JOB_STATE_ENUM = JOB_STATE_ENUM 50 | 51 | def __str__(self): 52 | return f"job.{self.job_config_id}.{self.id}" 53 | 54 | @staticmethod 55 | def from_config(group_config: JobGroup, config: JobConfig): 56 | """ 57 | returns a new JobItem created from JobConfig object 58 | """ 59 | return Job( 60 | group_config=group_config, 61 | command=config.command, 62 | args_command=config.args_command, 63 | group=config.group, 64 | job_config_id=config.id, 65 | name=config.name, 66 | on_finish=config.on_finish, 67 | on_fail=config.on_fail, 68 | cwd=config.cwd, 69 | ) 70 | 71 | group_config: JobGroup 72 | """ 73 | Contains jobconfig for job started with it 74 | """ 75 | 76 | command: str 77 | """ 78 | A shell command to be executed when job becomes `RUNNING`. 79 | """ 80 | 81 | name: str 82 | """ 83 | Job human-readable name 84 | """ 85 | 86 | group: str = "default" 87 | """ 88 | job_group id 89 | """ 90 | 91 | cwd: str = field(default_factory=lambda: os.environ["HOME"]) 92 | """ 93 | current working dir 94 | """ 95 | 96 | job_config_id: Optional[str] = None 97 | """ 98 | job_config id 99 | """ 100 | 101 | args_command: Optional[str] = None 102 | """ 103 | A command to get arguments to execute job with 104 | """ 105 | 106 | args: list[str] = field(default_factory=list) 107 | 108 | id: int = field(default_factory=lambda: IdField.next("job_item")) 109 | """ 110 | Unique Job id generated when it's created 111 | """ 112 | 113 | on_finish: Optional[str] = None 114 | """ 115 | A command to execute after job has been successfuly finished 116 | """ 117 | 118 | on_fail: Optional[str] = None 119 | """ 120 | A command to execute after job has been successfuly finished 121 | """ 122 | 123 | state: JOB_STATE_ENUM = JOB_STATE_ENUM.PENDING 124 | """ 125 | defines current state in the pipeline, such as 
import os
from flask import Blueprint, Response, abort, request
from zapusk.lib.json_serdes import JsonSerdes
from zapusk.models import Job, JobConfig, IdField
from .error_response import error_response


def create_jobs_api(config_service, executor_manager_service):
    """
    Build and return the ``/jobs/`` Flask blueprint.

    Routes:
        GET    /jobs/<job_id>  -- fetch one job
        GET    /jobs/          -- list all jobs
        POST   /jobs/          -- start a job from a config id or a raw command
        DELETE /jobs/<job_id>  -- cancel a job
    """
    jobs_api = Blueprint("jobs", __name__)

    # NOTE: the <job_id> URL placeholders were restored; without them this
    # rule would collide with the list route and Flask could not bind the
    # handler's `job_id` parameter.
    @jobs_api.route("/jobs/<job_id>")
    def job_get(job_id: str):
        """Return a single job by its numeric id (404 when unknown)."""
        job = executor_manager_service.get(int(job_id))
        if not job:
            return abort(
                error_response(status=404, error=f"Job with id {job_id} not found")
            )

        return JsonSerdes.serialize(job)

    @jobs_api.route("/jobs/")
    def job_list():
        """Return every job known to the executor manager."""
        jobs = executor_manager_service.list()
        return JsonSerdes.serialize(jobs)

    @jobs_api.route("/jobs/", methods=["POST"])
    def job_add():
        """
        Start a job.

        The body either references a configured job via `job_config_id` or
        carries a raw `command` (with optional `group_id`, `name`, `cwd`).
        """
        body = request.json or {}

        job_config_id = body.get("job_config_id", None)
        cwd = body.get("cwd", os.environ["HOME"])

        # if no config id, let's try to execute it as a command
        if not job_config_id:
            command = body.get("command", None)
            if not command:
                return abort(
                    error_response(
                        status=400,
                        error="Request body contains no `command` or `job_config_id`",
                    )
                )

            group_id = body.get("group_id", None)
            name = body.get("name", None)

            job_group = config_service.get_job_group(group_id or "default")

            if not job_group:
                return abort(
                    error_response(
                        status=404,
                        error=f'group_id "{group_id}" not found',
                    )
                )

            # Single quotes inside the f-string: same-quote nesting is a
            # SyntaxError before Python 3.12.
            cmd_id = f"command.{IdField.next('command')}"
            job_item = Job.from_config(
                group_config=job_group,
                config=JobConfig(
                    id=cmd_id,
                    name=name or f"{job_group.id}.{cmd_id}",
                    command=command,
                    cwd=cwd,
                ),
            )
            executor_manager_service.add(job_item)

            return JsonSerdes.serialize(job_item)

        job_config = config_service.get_job(job_config_id)

        if not job_config:
            return abort(
                error_response(
                    status=404,
                    error=f"Job with id `{job_config_id}` not found",
                )
            )

        job_group = config_service.get_job_group(job_config.group)

        if not job_group:  # pragma: no cover
            # this technically not possible, because config_parser will fail first
            return abort(
                error_response(
                    status=404,
                    error=f"Job configuration for {job_config.id} contains unknown jobgroup `{job_config.group}`",
                )
            )

        job_item = Job.from_config(
            group_config=job_group,
            config=job_config,
        )
        executor_manager_service.add(job_item)

        return JsonSerdes.serialize(job_item)

    @jobs_api.route("/jobs/<job_id>", methods=["DELETE"])
    def job_delete(job_id):
        """Cancel a job by id (404 when unknown)."""
        job_item = executor_manager_service.get(int(job_id))
        if not job_item:
            return abort(
                error_response(
                    status=404,
                    error=f"Job with id `{job_id}` not found",
                )
            )

        cancelled_job = executor_manager_service.cancel(job_item)
        return JsonSerdes.serialize(cancelled_job)

    return jobs_api
list_jobs(self): 65 | res = self.http_client.get(urljoin(self.base_url, f"/jobs/")) 66 | body = res.json() 67 | 68 | if res.status_code != 200: 69 | return self.__handle_error(res) 70 | 71 | return JsonSerdes.deserialize(body) 72 | 73 | def list_scheduled_jobs(self): 74 | res = self.http_client.get(urljoin(self.base_url, f"/scheduled-jobs/")) 75 | body = res.json() 76 | 77 | if res.status_code != 200: 78 | return self.__handle_error(res) 79 | 80 | return JsonSerdes.deserialize(body) 81 | 82 | def create_job( 83 | self, payload: JobCreateFromConfigPayload | JobCreateFromCommandPayload 84 | ): 85 | res = requests.post( 86 | urljoin(self.base_url, "/jobs/"), 87 | json=self.__filter_none(payload), 88 | ) 89 | 90 | if res.status_code != 200: 91 | return self.__handle_error(res) 92 | 93 | return JsonSerdes.deserialize(res.json()) 94 | 95 | def create_scheduled_job(self, payload: JobCreateScheduledPayload): 96 | res = requests.post( 97 | urljoin(self.base_url, "/scheduled-jobs/"), 98 | json=self.__filter_none(payload), 99 | ) 100 | if res.status_code != 200: 101 | return self.__handle_error(res) 102 | 103 | return JsonSerdes.deserialize(res.json()) 104 | 105 | def cancel_job(self, job_id: str | int): 106 | res = self.http_client.delete(urljoin(self.base_url, f"/jobs/{job_id}")) 107 | body = res.json() 108 | 109 | if res.status_code != 200: 110 | return self.__handle_error(res) 111 | 112 | return JsonSerdes.deserialize(body) 113 | 114 | def cancel_scheduled_job(self, job_id: str | int): 115 | res = self.http_client.delete( 116 | urljoin(self.base_url, f"/scheduled-jobs/{job_id}") 117 | ) 118 | body = res.json() 119 | 120 | if res.status_code != 200: 121 | return self.__handle_error(res) 122 | 123 | return JsonSerdes.deserialize(body) 124 | 125 | def get_config_groups(self): 126 | res = self.http_client.get(urljoin(self.base_url, f"/config/groups/")) 127 | body = res.json() 128 | 129 | if res.status_code != 200: 130 | return self.__handle_error(res) 131 | 132 | return 
JsonSerdes.deserialize(body) 133 | 134 | def get_config_jobs(self): 135 | res = self.http_client.get(urljoin(self.base_url, f"/config/jobs/")) 136 | body = res.json() 137 | 138 | if res.status_code != 200: 139 | return self.__handle_error(res) 140 | 141 | return JsonSerdes.deserialize(body) 142 | -------------------------------------------------------------------------------- /zapusk/client/__main__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from type_docopt import docopt 3 | import importlib.metadata 4 | 5 | from zapusk.client.command_manager import CommandManager 6 | from zapusk.models.job import JOB_STATE_ENUM 7 | 8 | 9 | doc = """zapusk 10 | 11 | 12 | Usage: 13 | zapusk -h | --help 14 | zapusk --version 15 | zapusk run [--colors|--no-colors] [--tail] 16 | zapusk exec [--name=] [--group=] [--tail] [--schedule=] [--colors|--no-colors] 17 | zapusk cancel [--scheduled] [--colors|--no-colors] 18 | zapusk tail 19 | zapusk list [--filter=|--scheduled] [--colors|--no-colors] 20 | zapusk config_jobs [--colors|--no-colors] 21 | zapusk config_groups [--colors|--no-colors] 22 | zapusk waybar 23 | 24 | 25 | Options: 26 | -h --help Show this screen 27 | --version Show version. 
28 | --colors Enable colors 29 | --no-colors Disable colors 30 | --filter= Filter running jobs by status [type: JobState] 31 | -n --name= Name for a command 32 | -g --group= Job group to run command in 33 | -t --tail Tail logfile immediately 34 | 35 | Examples: 36 | 37 | # Execute npm i in background 38 | zapusk exec "npm i" 39 | 40 | # Execute pytest and tail its log 41 | zapusk exec "pytest -v" -t 42 | 43 | # Schedule command to run every minute 44 | zapusk exec "pung -c4 google.com" --schedule "*/1 * * * *" 45 | 46 | # Run some job defined in ~/.config/zapusk/config.yaml 47 | zapusk run youtube_dl 48 | 49 | # Cancel some job with id 50 | zapusk cancel 42 51 | 52 | # See logs with id of a job 53 | zapusk tail 42 54 | """ 55 | 56 | version = importlib.metadata.version("zapusk") 57 | 58 | 59 | class JobState: 60 | STATES = [e.value for e in JOB_STATE_ENUM] 61 | 62 | def __init__(self, state): 63 | try: 64 | assert state in self.STATES 65 | self.state = state 66 | except AssertionError as e: 67 | print( 68 | f"Status filter has wrong value. 
Possible values are {', '.join(self.STATES)}", 69 | ) 70 | exit(1) 71 | 72 | 73 | def main(): 74 | args = docopt(doc, version=version, types={"JobStatus": JobState}) 75 | 76 | colors = None 77 | 78 | if args["--colors"] == True: 79 | colors = True 80 | 81 | if args["--no-colors"] == True: 82 | colors = False 83 | 84 | command_manager = CommandManager(colors=colors) 85 | 86 | if args["run"] == True: 87 | command_manager.run.run( 88 | job_config_id=str(args[""]), 89 | ) 90 | return 91 | 92 | if args["exec"] == True: 93 | command_manager.exec.run( 94 | command=str(args[""]), 95 | group_id=str(args["--group"]) if args["--group"] else None, 96 | name=str(args["--name"]) if args["--name"] else None, 97 | schedule=str(args["--schedule"]) if args["--schedule"] else None, 98 | tail=bool(args["--tail"]), 99 | cwd=os.getcwd(), 100 | ) 101 | return 102 | 103 | if args["cancel"] == True: 104 | command_manager.cancel.run( 105 | job_id=str(args[""]), 106 | scheduled=bool(args["--scheduled"]), 107 | ) 108 | return 109 | 110 | if args["list"] == True: 111 | command_manager.list.run( 112 | scheduled=bool(args["--scheduled"]), 113 | filter=args["--filter"].state if args["--filter"] else None, 114 | ) 115 | return 116 | 117 | if args["list"] == True: 118 | command_manager.list.run( 119 | scheduled=bool(args["--scheduled"]), 120 | filter=args["--filter"], 121 | ) 122 | return 123 | 124 | if args["config_groups"] == True: 125 | command_manager.config_groups.run() 126 | return 127 | 128 | if args["config_jobs"] == True: 129 | command_manager.config_jobs.run() 130 | return 131 | 132 | if args["waybar"] == True: 133 | command_manager.waybar.run() 134 | return 135 | 136 | if args["tail"] == True: 137 | command_manager.tail.run( 138 | job_id=str(args[""]), 139 | ) 140 | return 141 | 142 | command_manager.output.json({"error": "Command not found"}) 143 | 144 | 145 | if __name__ == "__main__": 146 | main() 147 | -------------------------------------------------------------------------------- 
/zapusk/server/controller_scheduled_jobs_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | from unittest.mock import ANY, patch 3 | 4 | from .controller_testcase import ControllerTestCase 5 | 6 | CONFIG_DATA = """ 7 | jobs: 8 | - name: Echo 9 | id: scheduled_echo 10 | command: echo 1 11 | schedule: "0 0 * 1 *" 12 | """ 13 | 14 | 15 | class TestSchedulerJobController(ControllerTestCase): 16 | def before_create_services(self): 17 | self.write_config(CONFIG_DATA) 18 | self.replace_in_environ("HOME", self.temp_dir.path) 19 | 20 | def test_controller_scheduled_jobs_list(self): 21 | res = self.test_client.get("/scheduled-jobs/") 22 | data = json.loads(res.data) 23 | 24 | self.assertEqual( 25 | data, 26 | { 27 | "data": [ 28 | { 29 | "args_command": None, 30 | "command": "echo 1", 31 | "cwd": self.temp_dir.path, 32 | "group": "default", 33 | "id": "scheduled_echo", 34 | "name": "Echo", 35 | "on_fail": None, 36 | "on_finish": None, 37 | "schedule": "0 0 * 1 *", 38 | } 39 | ] 40 | }, 41 | ) 42 | 43 | def test_controller_scheduled_jobs_create(self): 44 | res = self.test_client.post( 45 | "/scheduled-jobs/", 46 | json={ 47 | "command": "echo 42", 48 | "name": "echo", 49 | "schedule": "1 * * * *", 50 | }, 51 | ) 52 | data = json.loads(res.data) 53 | 54 | self.assertEqual( 55 | data, 56 | { 57 | "data": { 58 | "args_command": None, 59 | "cwd": self.temp_dir.path, 60 | "command": "echo 42", 61 | "group": "default", 62 | "id": "scheduled.1", 63 | "name": "echo", 64 | "on_fail": None, 65 | "on_finish": None, 66 | "schedule": "1 * * * *", 67 | } 68 | }, 69 | ) 70 | 71 | def test_controller_scheduled_jobs_cancel(self): 72 | res = self.test_client.delete( 73 | "/scheduled-jobs/scheduled_echo", 74 | json={ 75 | "command": "echo 42", 76 | "name": "echo", 77 | "schedule": "1 * * * *", 78 | }, 79 | ) 80 | data = json.loads(res.data) 81 | self.assertEqual(data, {"data": True}) 82 | 83 | res = 
self.test_client.get("/scheduled-jobs/") 84 | data = json.loads(res.data) 85 | 86 | self.assertEqual(data, {"data": []}) 87 | 88 | def test_controller_scheduled_jobs_create_without_command(self): 89 | res = self.test_client.post( 90 | "/scheduled-jobs/", 91 | json={ 92 | "schedule": "1 * * * *", 93 | }, 94 | ) 95 | data = json.loads(res.data) 96 | 97 | self.assertEqual(res.status, "400 BAD REQUEST") 98 | self.assertEqual(data, {"error": "Request body contains no `command`"}) 99 | 100 | def test_controller_scheduled_jobs_create_without_schedule(self): 101 | res = self.test_client.post( 102 | "/scheduled-jobs/", 103 | json={ 104 | "command": "echo 420", 105 | }, 106 | ) 107 | data = json.loads(res.data) 108 | 109 | self.assertEqual(res.status, "400 BAD REQUEST") 110 | self.assertEqual(data, {"error": "Request body contains no `schedule`"}) 111 | 112 | def test_controller_scheduled_jobs_create_with_unknown_group(self): 113 | res = self.test_client.post( 114 | "/scheduled-jobs/", 115 | json={ 116 | "command": "echo 420", 117 | "schedule": "1 * * * *", 118 | "group_id": "unknown", 119 | }, 120 | ) 121 | data = json.loads(res.data) 122 | 123 | self.assertEqual(res.status, "404 NOT FOUND") 124 | self.assertEqual(data, {"error": "Unknown group `unknown`"}) 125 | 126 | def test_controller_scheduled_jobs_create_failed_by_scheduler_service(self): 127 | with patch.object(self.scheduler_service, "add", return_value=False): 128 | res = self.test_client.post( 129 | "/scheduled-jobs/", 130 | json={ 131 | "command": "echo 420", 132 | "schedule": "1 * * * *", 133 | }, 134 | ) 135 | data = json.loads(res.data) 136 | 137 | self.assertEqual(res.status, "500 INTERNAL SERVER ERROR") 138 | self.assertEqual(data, {"error": "Scheduled job hasn't been added"}) 139 | -------------------------------------------------------------------------------- /zapusk/services/config/config_parser_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 
import pytest
from testfixtures import Replacer
import yaml

from zapusk.services.config.constants import DEFAULT_COLORS

from .config_parser import DEFAULT_JOB_GROUPS, DEFAULT_PORT, ConfigParser


@pytest.mark.parametrize(
    "config_yaml,expected_result",
    [
        [
            """
            """,
            {
                "port": DEFAULT_PORT,
                "colors": DEFAULT_COLORS,
                "job_groups": DEFAULT_JOB_GROUPS,
                "jobs": {},
            },
        ],
        [
            """
            jobs:
              - name: Sleep Timer
                id: sleep
                command: sleep 10
            """,
            {
                "port": DEFAULT_PORT,
                "colors": DEFAULT_COLORS,
                "job_groups": DEFAULT_JOB_GROUPS,
                "jobs": {
                    "sleep": {
                        "name": "Sleep Timer",
                        "id": "sleep",
                        "command": "sleep 10",
                        "cwd": "/home/",
                        "group": "default",
                        "args_command": None,
                    }
                },
            },
        ],
        [
            """
            job_groups:
              - id: awesome_group
                parallel: 4200
            jobs:
              - name: Sleep Timer
                group: awesome_group
                id: sleep
                command: sleep 10
            """,
            {
                "port": DEFAULT_PORT,
                "colors": DEFAULT_COLORS,
                "job_groups": {
                    **DEFAULT_JOB_GROUPS,
                    **{
                        "awesome_group": {
                            "id": "awesome_group",
                            "parallel": 4200,
                        },
                    },
                },
                "jobs": {
                    "sleep": {
                        "name": "Sleep Timer",
                        "id": "sleep",
                        "command": "sleep 10",
                        "cwd": "/home/",
                        "group": "awesome_group",
                        "args_command": None,
                    }
                },
            },
        ],
        [
            """
            port: 1234
            colors: True
            job_groups:
              - id: default
                parallel: 1
            """,
            {
                "port": 1234,
                "colors": True,
                "job_groups": {"default": {"id": "default", "parallel": 1}},
                "jobs": {},
            },
        ],
        [
            """
            job_groups:
              - id: default
                parallel: 1
                on_fail: echo fail
                on_finish: echo finish

            jobs:
              - name: Sleep Timer
                id: sleep
                command: sleep 10
                on_fail: echo job_fail
                on_finish: echo job_finish
            """,
            {
                "port": DEFAULT_PORT,
                "colors": DEFAULT_COLORS,
                "job_groups": {
                    "default": {
                        "id": "default",
                        "parallel": 1,
                        "on_fail": "echo fail",
                        "on_finish": "echo finish",
                    }
                },
                "jobs": {
                    "sleep": {
                        "name": "Sleep Timer",
                        "id": "sleep",
                        "command": "sleep 10",
                        "cwd": "/home/",
                        "group": "default",
                        "args_command": None,
                        "on_fail": "echo job_fail",
                        "on_finish": "echo job_finish",
                    }
                },
            },
        ],
        [
            """
            job_groups:
              - id: default
                parallel: 1

            jobs:
              - name: Sleep Timer
                id: sleep
                command: sleep 10
                unknown_property: 1
            """,
            {
                "port": DEFAULT_PORT,
                "colors": DEFAULT_COLORS,
                "job_groups": {
                    "default": {
                        "id": "default",
                        "parallel": 1,
                    }
                },
                "jobs": {
                    "sleep": {
                        "name": "Sleep Timer",
                        "id": "sleep",
                        "command": "sleep 10",
                        "cwd": "/home/",
                        "group": "default",
                        "args_command": None,
                    }
                },
            },
        ],
    ],
    ids=[
        "default_config",
        "job_config",
        "jobgroups_and_jobs",
        "port_and_override_default_jobgroup",
        "callbacks",
        "unknown_property",
    ],
)
def test_config_parser_should_parse_config(config_yaml, expected_result):
    """Happy-path parsing: each YAML document yields the expected config dict."""
    replace = Replacer()
    replace.in_environ("HOME", "/home/")
    # try/finally so the environment is restored even when the assert fails;
    # the original leaked the HOME override into subsequent tests on failure.
    try:
        config_parser = ConfigParser()
        config_data = yaml.safe_load(config_yaml)
        res = config_parser.parse(config_data)

        assert res == expected_result
    finally:
        replace.restore()


####################################


@pytest.mark.parametrize(
    "config_yaml,expected_msg",
    [
        [
            """
            # Should fail with unknown group id
            jobs:
              - name: Sleep Timer
                group: awesome_group
                id: sleep
                command: sleep 10
            """,
            "Unknown job_group `awesome_group` in job_config.sleep",
        ],
        [
            """
            # Should fail with parallel config error
            job_groups:
              - id: awesome_group
                parallel: -1
            """,
            "`parallel` must be a positive number",
        ],
    ],
    ids=["unknown_id_fail", "negative_parallel_fail"],
)
def test_job_should_fail_parsing_config(config_yaml, expected_msg):
    """Invalid configs must raise with the documented error message."""
    config_parser = ConfigParser()
    config_data = yaml.safe_load(config_yaml)

    # BUG FIX: the original raised a sentinel Exception("Should fail") inside
    # the same try whose `except Exception` then caught it, so a non-raising
    # parse produced a confusing message mismatch instead of a clear failure.
    # pytest.raises expresses the intent directly.
    with pytest.raises(Exception) as excinfo:
        config_parser.parse(config_data)

    assert excinfo.value.args[0] == expected_msg
self.assertEqual( 47 | jobs[2], 48 | { 49 | "name": "Configurable Sleep", 50 | "id": "sleep", 51 | "group": "sequential", 52 | "command": "sleep $1", 53 | "cwd": "/home/", 54 | "args_command": "zenity --entry --text 'Sleep Time'", 55 | }, 56 | ) 57 | 58 | def test_config_service_should_return_job_groups(self): 59 | config_service = ConfigService(config_path="./config.example.yaml") 60 | job_groups = config_service.list_jobgroups() 61 | 62 | self.assertEqual(len(job_groups), 3) 63 | self.assertEqual( 64 | job_groups[0], 65 | { 66 | "id": "default", 67 | "parallel": 10, 68 | }, 69 | ) 70 | 71 | self.assertEqual( 72 | job_groups[1], 73 | { 74 | "id": "sequential", 75 | "parallel": 1, 76 | }, 77 | ) 78 | 79 | self.assertEqual( 80 | job_groups[2], 81 | { 82 | "id": "parallel", 83 | "parallel": 2, 84 | }, 85 | ) 86 | 87 | def test_config_service_should_return_full_config(self): 88 | config_service = ConfigService(config_path="./config.example.yaml") 89 | config = config_service.get_config() 90 | 91 | self.assertEqual(len(config.job_groups), 3) 92 | self.assertEqual(len(config.jobs), 3) 93 | self.assertEqual(config.port, 9876) 94 | 95 | def test_config_service_should_return_job_group(self): 96 | config_service = ConfigService(config_path="./config.example.yaml") 97 | job_group = config_service.get_job_group("default") 98 | 99 | self.assertEqual( 100 | job_group, 101 | { 102 | "id": "default", 103 | "parallel": 10, 104 | }, 105 | ) 106 | 107 | def test_config_service_should_return_job_group_none(self): 108 | config_service = ConfigService(config_path="./config.example.yaml") 109 | job_group = config_service.get_job_group("unknown") 110 | 111 | self.assertEqual(job_group, None) 112 | 113 | def test_config_service_should_return_job_group_or_default(self): 114 | config_service = ConfigService(config_path="./config.example.yaml") 115 | job_group = config_service.get_job_group_or_default("unknown") 116 | 117 | self.assertEqual( 118 | job_group, 119 | { 120 | "id": "default", 
121 | "parallel": 10, 122 | }, 123 | ) 124 | 125 | def test_config_service_should_return_job(self): 126 | config_service = ConfigService(config_path="./config.example.yaml") 127 | job = config_service.get_job("sleep_10") 128 | 129 | self.assertEqual( 130 | job, 131 | { 132 | "name": "Sleep 10 Seconds", 133 | "id": "sleep_10", 134 | "group": "default", 135 | "command": "sleep 10", 136 | "cwd": "/var/", 137 | "args_command": None, 138 | }, 139 | ) 140 | 141 | def test_config_service_should_return_job_none(self): 142 | config_service = ConfigService(config_path="./config.example.yaml") 143 | job = config_service.get_job("unknown") 144 | 145 | self.assertEqual(job, None) 146 | 147 | def test_config_path_1(self): 148 | with TempDirectory() as d: 149 | with replace_in_environ("APPDATA", d.path): 150 | d.makedir("zapusk") 151 | config_file = d / "zapusk/config.yml" 152 | config_file.write_text("") 153 | 154 | config_service = ConfigService() 155 | self.assertEqual( 156 | config_service.config_path, f"{d.path}/zapusk/config.yml" 157 | ) 158 | 159 | def test_config_path_2(self): 160 | with TempDirectory() as d: 161 | with replace_in_environ("XDG_CONFIG_HOME", d.path): 162 | d.makedir("zapusk") 163 | config_file = d / "zapusk/config.yaml" 164 | config_file.write_text("") 165 | 166 | config_service = ConfigService() 167 | self.assertEqual( 168 | config_service.config_path, f"{d.path}/zapusk/config.yaml" 169 | ) 170 | 171 | def test_config_path_3(self): 172 | with TempDirectory() as d: 173 | with replace_in_environ("HOME", d.path): 174 | with replace_in_environ("XDG_CONFIG_HOME", ""): 175 | d.makedir(".config/zapusk") 176 | config_file = d / ".config/zapusk/config.yaml" 177 | config_file.write_text("") 178 | 179 | config_service = ConfigService() 180 | self.assertEqual( 181 | config_service.config_path, 182 | f"{d.path}/.config/zapusk/config.yaml", 183 | ) 184 | 185 | def test_config_path_fail(self): 186 | with TempDirectory() as d: 187 | with 
replace_in_environ("XDG_CONFIG_HOME", d.path): 188 | try: 189 | ConfigService() 190 | except FileExistsError as ex: 191 | self.assertEqual(ex.args[0], "Config not found") 192 | 193 | def test_config_should_contain_only_defaults_if_config_file_does_not_exist(self): 194 | config_service = ConfigService( 195 | config_path="/home/leonid_brezhnev/plenum/config.yaml" 196 | ) 197 | config = config_service.get_config() 198 | 199 | self.assertEqual(len(config.job_groups), 1) 200 | self.assertEqual( 201 | config.job_groups["default"], 202 | { 203 | "id": "default", 204 | "parallel": 10, 205 | "on_finish": None, 206 | "on_fail": None, 207 | }, 208 | ) 209 | self.assertEqual(len(config.jobs), 0) 210 | self.assertEqual(config.port, 9876) 211 | self.assertEqual(config.colors, DEFAULT_COLORS) 212 | 213 | pass 214 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Zapusk 2 | 3 | ![Zapusk Screenshot](.imgs/zapusk.png) 4 | 5 | Zapusk is a versatile job runner designed for desktop environments. It simplifies the process of managing background tasks by providing robust features such as pre-configured job execution, background shell command execution, cron-like scheduling, log tailing, and notifications. Zapusk's detailed JSON output also enables powerful data manipulation and analysis when paired with tools like jq. 6 | 7 | ## Table of Contents 8 | 9 | - [Key Features](#key-features) 10 | - [Installation](#installation) 11 | - [Usage](#usage) 12 | - [Basic Commands](#basic-commands) 13 | - [Advanced Usage](#advanced-usage) 14 | - [Configuration](#configuration) 15 | - [Examples](#examples) 16 | - [Contributing](#contributing) 17 | - [License](#license) 18 | 19 | ## Key Features 20 | 21 | - **Preconfigured Jobs:** Run jobs defined in your configuration files. 
- **Background Command Execution:** Run shell commands in the background with optional log tailing.
- **Cron-like Scheduling:** Schedule tasks using flexible cron syntax.
- **Log Tailing:** View logs in real-time.
- **Job Management:** Cancel running jobs and check their statuses.
- **Job Groups:** Share settings like callbacks and parallelism between jobs.
- **Colored JSON Output:** Easily readable JSON output.
- **Waybar Integration:** Display job statuses and notifications on Waybar.
- **Custom Working Directory:** Run scripts and callbacks in a specified working directory.

## Installation

Install Zapusk using `pip`:

```sh
pip install zapusk
```

## Usage

Zapusk requires `zapusk-server` to be started. Zapusk offers a command-line interface for managing and executing jobs. Here's a quick reference:

### Basic Commands

```sh
Usage:
  zapusk -h | --help
  zapusk --version
  zapusk run <job_config_id> [--colors|--no-colors] [--tail]
  zapusk exec <command> [--name=<name>] [--group=<group_id>] [--tail] [--schedule=<cron_expression>] [--colors|--no-colors]
  zapusk cancel <job_id> [--scheduled] [--colors|--no-colors]
  zapusk tail <job_id>
  zapusk list [--filter=<status>|--scheduled] [--colors|--no-colors]
  zapusk config_jobs [--colors|--no-colors]
  zapusk config_groups [--colors|--no-colors]
  zapusk waybar

Options:
  -h --help              Show this screen.
  --version              Show version.
  --colors               Enable colors.
  --no-colors            Disable colors.
  --filter=<status>      Filter jobs by status.
  -n --name=<name>       Name for a command.
  -g --group=<group_id>  Job group to run the command in.
  -t --tail              Tail logfile immediately.
67 | ``` 68 | 69 | ### Examples 70 | 71 | ```sh 72 | # Run npm install in the background 73 | zapusk exec "npm i" 74 | 75 | # Run pytest and tail its log 76 | zapusk exec "pytest -v" -t 77 | 78 | # Schedule a command to run every minute 79 | zapusk exec "ping -c4 google.com" --schedule "*/1 * * * *" 80 | 81 | # Run a job defined in ~/.config/zapusk/config.yaml 82 | zapusk run youtube_dl 83 | 84 | # Cancel a job by its ID 85 | zapusk cancel 42 86 | 87 | # See logs for a job by its ID 88 | zapusk tail 42 89 | ``` 90 | 91 | ## Configuration 92 | 93 | Here is an example configuration file for Zapusk. It defines job groups and individual jobs, specifying commands, schedules, notifications, and working directories. 94 | 95 | ```yaml 96 | # The port the server starts on and the client connects to 97 | port: 9876 98 | 99 | # Enable colored JSON output 100 | colors: True 101 | 102 | job_groups: 103 | - id: unsplash 104 | parallel: 1 105 | - id: sleep 106 | parallel: 2 107 | - id: cmd 108 | parallel: 10 109 | on_finish: notify-send -a "zapusk" "Command Finished" "{job.name} has finished" --icon kitty 110 | on_fail: notify-send -a "zapusk" "Command Failed" "{job.name} has failed" --icon kitty 111 | - id: cronie 112 | parallel: 1 113 | on_finish: notify-send -a "zapusk" "Scheduled Job Finished" "{job.name} has finished" --icon kitty 114 | on_fail: notify-send -a "zapusk" "Scheduled Job Failed" "{job.name} has failed" --icon kitty 115 | 116 | jobs: 117 | - name: Unsplash Download 118 | id: unsplash 119 | args_command: "zenity --entry --text 'Collection ID'" 120 | command: ~/.bin/jobs/unsplash_dl.sh 121 | cwd: /path/to/working/directory 122 | 123 | - name: Sleep 124 | id: sleep 125 | group: sleep 126 | args_command: "zenity --entry --text 'Sleep Time'" 127 | command: sleep 128 | on_finish: notify-send -a "zapusk" "Job Finished" "{job.name} has finished" --icon kitty 129 | on_fail: notify-send -a "zapusk" "Job Failed" "{job.name} has failed" --icon kitty 130 | 131 | - name: 
Cronie 132 | id: cronie 133 | group: cronie 134 | schedule: "*/10 * * * *" 135 | command: sleep 2 136 | ``` 137 | 138 | ## Advanced Usage 139 | 140 | ### Running Preconfigured Jobs 141 | 142 | Run jobs defined in your configuration file using their `id`. 143 | 144 | ```yaml 145 | # Job configuration in ~/.config/zapusk/config.yaml 146 | jobs: 147 | - name: Unsplash Download 148 | id: unsplash 149 | args_command: "zenity --entry --text 'Collection ID'" 150 | command: ~/.bin/jobs/unsplash_wallpaper_collection_download.sh 151 | cwd: /path/to/working/directory 152 | on_finish: notify-send -a "Zapusk" "Wallpapers downloaded" --icon kitty 153 | on_fail: notify-send -a "Zapusk" "Wallpaper download failed" --icon kitty 154 | ``` 155 | 156 | ```sh 157 | # Run the `unsplash` job: 158 | zapusk run unsplash 159 | ``` 160 | 161 | ### Background Command Execution 162 | 163 | Run commands in the background with optional log tailing: 164 | 165 | ```sh 166 | zapusk exec "npm i" -t 167 | ``` 168 | 169 | ### Scheduling Commands 170 | 171 | Schedule commands to run at specific intervals using cron syntax: 172 | 173 | ```sh 174 | zapusk exec "ping -c4 google.com" --schedule "*/1 * * * *" 175 | ``` 176 | 177 | Pre-configured jobs can also be scheduled: 178 | 179 | ```yaml 180 | jobs: 181 | - name: Cronie 182 | id: cronie 183 | group: cronie 184 | schedule: "*/10 * * * *" 185 | command: sleep 2 186 | ``` 187 | 188 | ### Managing Jobs 189 | 190 | Cancel a running or scheduled job by its ID: 191 | 192 | ```sh 193 | zapusk cancel 42 194 | ``` 195 | 196 | Tail the logs of a running job by its ID: 197 | 198 | ```sh 199 | zapusk tail 42 200 | ``` 201 | 202 | List all pending, running, and finished jobs: 203 | 204 | ```sh 205 | zapusk list 206 | ``` 207 | 208 | ### Callbacks 209 | 210 | Use `on_finish` and `on_fail` callbacks for notifications. 
211 | 212 | For job group callbacks: 213 | 214 | ```yaml 215 | job_groups: 216 | - id: my_group 217 | parallel: 10 218 | on_finish: notify-send -a "zapusk" "Command Finished" "{job.name} has finished" --icon kitty 219 | on_fail: notify-send -a "zapusk" "Command Failed" "{job.name} has failed" --icon kitty 220 | ``` 221 | 222 | For individual job callbacks: 223 | 224 | ```yaml 225 | jobs: 226 | - name: Sleep 227 | id: sleep 228 | group: sleep 229 | command: ~/.bin/jobs/sleep 230 | cwd: /path/to/working/directory 231 | on_finish: notify-send -a "zapusk" "Job Finished" "{job.name} has finished" --icon kitty 232 | on_fail: notify-send -a "zapusk" "Job Failed" "{job.name} has failed" --icon kitty 233 | ``` 234 | 235 | ## Waybar Integration 236 | 237 | Zapusk integrates with Waybar to display job statuses and notifications directly on your desktop. 238 | 239 | ```json 240 | // Example integration with wofi and jq 241 | "custom/zapusk": { 242 | "exec": "zapusk waybar", 243 | "on-click": "zapusk config_jobs --no-colors | jq -r \".[].id\" | wofi --dmenu | xargs -I{} zapusk run {}", 244 | "tooltip": true, 245 | "return-type": "json", 246 | "format": "{}", 247 | "interval": 1 248 | } 249 | ``` 250 | 251 | 252 | ## License 253 | 254 | Zapusk is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for more information. 
255 | -------------------------------------------------------------------------------- /zapusk/services/scheduler_service/service_test.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | from unittest import TestCase 3 | from unittest.mock import patch 4 | from testfixtures import Replacer, TempDirectory, mock_datetime, Replace 5 | 6 | from zapusk.models import Job 7 | from zapusk.models.job_config import JobConfig 8 | from zapusk.services import ConfigService 9 | 10 | from .service import SchedulerService 11 | 12 | CONFIG_DATA = """ 13 | jobs: 14 | - name: Echo 15 | id: echo 16 | command: echo 1 17 | schedule: "30 * * * *" 18 | """ 19 | 20 | 21 | class MockExecutorManager: 22 | def add(self): 23 | pass 24 | 25 | 26 | class TestSchedulerService(TestCase): 27 | maxDiff = None 28 | 29 | def setUp(self) -> None: 30 | self.temp_dir = TempDirectory() 31 | self.config_file = self.temp_dir / "config.yml" 32 | self.config_file.write_text(CONFIG_DATA) 33 | self.config_service = ConfigService( 34 | config_path=f"{self.temp_dir.path}/config.yml" 35 | ) 36 | 37 | self.executor_manager_service = MockExecutorManager() 38 | self.d = mock_datetime(1970, 1, 1, 8, 0, 0, delta=0) 39 | 40 | self.r = Replacer() 41 | self.r.replace("zapusk.services.scheduler_service.service.datetime", self.d) 42 | self.r.replace("zapusk.models.scheduled_job.datetime", self.d) 43 | self.r.in_environ("HOME", self.temp_dir.path) 44 | 45 | def tearDown(self) -> None: 46 | self.temp_dir.cleanup() 47 | self.r.restore() 48 | 49 | def test_scheduler_service_should_work(self): 50 | scheduler_service = SchedulerService( 51 | config_service=self.config_service, 52 | executor_manager_service=self.executor_manager_service, # type: ignore 53 | interval=0.1, 54 | ) 55 | 56 | with patch.object(self.executor_manager_service, "add") as mock: 57 | self.d.set(1970, 1, 1, 8, 11, 5) 58 | scheduler_service.start() 59 | sleep(1) 60 | self.d.set(1970, 1, 1, 8, 30, 
10) 61 | sleep(1) 62 | scheduler_service.terminate() 63 | 64 | args = mock.call_args.args 65 | 66 | scheduled_job = args[0] 67 | self.assertEqual(type(scheduled_job), Job) 68 | self.assertEqual(scheduled_job.name, "Echo") 69 | 70 | def test_scheduler_service_should_not_add_jobs_without_schedule(self): 71 | scheduler_service = SchedulerService( 72 | config_service=self.config_service, 73 | executor_manager_service=self.executor_manager_service, # type: ignore 74 | interval=0.1, 75 | ) 76 | 77 | with patch.object(self.executor_manager_service, "add") as mock: 78 | scheduler_service.start() 79 | res = scheduler_service.add( 80 | JobConfig(id="no_schedule", name="No Schedule", command="echo 1") 81 | ) 82 | scheduler_service.terminate() 83 | self.assertEqual(res, False) 84 | 85 | def test_scheduler_service_should_list_all_scheduled_jobs(self): 86 | scheduler_service = SchedulerService( 87 | config_service=self.config_service, 88 | executor_manager_service=self.executor_manager_service, # type: ignore 89 | ) 90 | 91 | scheduler_service.add( 92 | JobConfig( 93 | id="1", 94 | name="1", 95 | command="echo 1", 96 | schedule="1 * * * *", 97 | ) 98 | ) 99 | scheduler_service.add( 100 | JobConfig( 101 | id="2", 102 | name="2", 103 | command="echo 2", 104 | schedule="1 * * * *", 105 | ) 106 | ) 107 | 108 | res = scheduler_service.list() 109 | 110 | self.assertEqual( 111 | res, 112 | [ 113 | { 114 | "id": "1", 115 | "name": "1", 116 | "command": "echo 1", 117 | "cwd": self.temp_dir.path, 118 | "group": "default", 119 | "args_command": None, 120 | "on_finish": None, 121 | "on_fail": None, 122 | "schedule": "1 * * * *", 123 | }, 124 | { 125 | "id": "2", 126 | "name": "2", 127 | "command": "echo 2", 128 | "cwd": self.temp_dir.path, 129 | "group": "default", 130 | "args_command": None, 131 | "on_finish": None, 132 | "on_fail": None, 133 | "schedule": "1 * * * *", 134 | }, 135 | ], 136 | ) 137 | 138 | def test_scheduler_service_should_delete_scheduled_jobs(self): 139 | 
scheduler_service = SchedulerService( 140 | config_service=self.config_service, 141 | executor_manager_service=self.executor_manager_service, # type: ignore 142 | ) 143 | 144 | scheduler_service.add( 145 | JobConfig( 146 | id="1", 147 | name="1", 148 | command="echo 1", 149 | schedule="1 * * * *", 150 | ) 151 | ) 152 | scheduler_service.add( 153 | JobConfig( 154 | id="2", 155 | name="2", 156 | command="echo 2", 157 | schedule="1 * * * *", 158 | ) 159 | ) 160 | 161 | scheduler_service.delete("1") 162 | res = scheduler_service.list() 163 | 164 | self.assertEqual( 165 | res, 166 | [ 167 | { 168 | "id": "2", 169 | "name": "2", 170 | "command": "echo 2", 171 | "cwd": self.temp_dir.path, 172 | "group": "default", 173 | "args_command": None, 174 | "on_finish": None, 175 | "on_fail": None, 176 | "schedule": "1 * * * *", 177 | }, 178 | ], 179 | ) 180 | 181 | def test_scheduler_service_delete_should_ignore_unknown_jobs(self): 182 | scheduler_service = SchedulerService( 183 | config_service=self.config_service, 184 | executor_manager_service=self.executor_manager_service, # type: ignore 185 | ) 186 | 187 | scheduler_service.add( 188 | JobConfig( 189 | id="1", 190 | name="1", 191 | command="echo 1", 192 | schedule="1 * * * *", 193 | ) 194 | ) 195 | scheduler_service.add( 196 | JobConfig( 197 | id="2", 198 | name="2", 199 | command="echo 2", 200 | schedule="1 * * * *", 201 | ) 202 | ) 203 | 204 | res = scheduler_service.delete("3") 205 | self.assertEqual(res, False) 206 | 207 | res = scheduler_service.list() 208 | 209 | self.assertEqual( 210 | res, 211 | [ 212 | { 213 | "id": "1", 214 | "name": "1", 215 | "command": "echo 1", 216 | "cwd": self.temp_dir.path, 217 | "group": "default", 218 | "args_command": None, 219 | "on_finish": None, 220 | "on_fail": None, 221 | "schedule": "1 * * * *", 222 | }, 223 | { 224 | "id": "2", 225 | "name": "2", 226 | "command": "echo 2", 227 | "cwd": self.temp_dir.path, 228 | "group": "default", 229 | "args_command": None, 230 | "on_finish": None, 
231 | "on_fail": None, 232 | "schedule": "1 * * * *", 233 | }, 234 | ], 235 | ) 236 | -------------------------------------------------------------------------------- /zapusk/client/api_client_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import pytest 3 | import responses 4 | from responses import matchers 5 | 6 | from zapusk.client.api_client import DEFAULT_ERROR_MESSAGE, ApiClient, ApiClientError 7 | 8 | 9 | BASE_URL = "http://localhost:4000" 10 | api_client = ApiClient(base_url=BASE_URL) 11 | 12 | 13 | @pytest.fixture(autouse=True) 14 | def setUp(): 15 | api_client = ApiClient(base_url=BASE_URL) 16 | 17 | 18 | @responses.activate 19 | @pytest.mark.parametrize( 20 | ",".join( 21 | [ 22 | "method", 23 | "args", 24 | "uri", 25 | "status", 26 | "http_method", 27 | "matchers", 28 | "mocked_json", 29 | "expected_response", 30 | "expected_exception_message", 31 | ] 32 | ), 33 | [ 34 | ( 35 | "get_job", 36 | [1], 37 | "/jobs/1", 38 | 200, 39 | "get", 40 | [], 41 | {"data": "OK"}, 42 | "OK", 43 | None, 44 | ), 45 | ( 46 | "get_job", 47 | [1], 48 | "/jobs/1", 49 | 400, 50 | "get", 51 | [], 52 | {"error": "Error"}, 53 | None, 54 | "Error", 55 | ), 56 | ( 57 | "list_jobs", 58 | [], 59 | "/jobs/", 60 | 200, 61 | "get", 62 | [], 63 | {"data": "OK"}, 64 | "OK", 65 | None, 66 | ), 67 | ( 68 | "list_jobs", 69 | [], 70 | "/jobs/", 71 | 400, 72 | "get", 73 | [], 74 | {"error": "ERROR"}, 75 | None, 76 | "ERROR", 77 | ), 78 | ( 79 | "list_scheduled_jobs", 80 | [], 81 | "/scheduled-jobs/", 82 | 200, 83 | "get", 84 | [], 85 | {"data": "OK"}, 86 | "OK", 87 | None, 88 | ), 89 | ( 90 | "list_scheduled_jobs", 91 | [], 92 | "/scheduled-jobs/", 93 | 400, 94 | "get", 95 | [], 96 | {"error": "ERROR"}, 97 | None, 98 | "ERROR", 99 | ), 100 | ( 101 | "cancel_job", 102 | [1], 103 | "/jobs/1", 104 | 200, 105 | "delete", 106 | [], 107 | {"data": "OK"}, 108 | "OK", 109 | None, 110 | ), 111 | ( 112 | "cancel_job", 113 
| [1], 114 | "/jobs/1", 115 | 400, 116 | "delete", 117 | [], 118 | {"error": "ERROR"}, 119 | None, 120 | "ERROR", 121 | ), 122 | ( 123 | "cancel_scheduled_job", 124 | [1], 125 | "/scheduled-jobs/1", 126 | 200, 127 | "delete", 128 | [], 129 | {"data": "OK"}, 130 | "OK", 131 | None, 132 | ), 133 | ( 134 | "cancel_scheduled_job", 135 | [1], 136 | "/scheduled-jobs/1", 137 | 400, 138 | "delete", 139 | [], 140 | {"error": "ERROR"}, 141 | None, 142 | "ERROR", 143 | ), 144 | ( 145 | "get_config_groups", 146 | [], 147 | "/config/groups/", 148 | 200, 149 | "get", 150 | [], 151 | {"data": "OK"}, 152 | "OK", 153 | None, 154 | ), 155 | ( 156 | "get_config_groups", 157 | [], 158 | "/config/groups/", 159 | 400, 160 | "get", 161 | [], 162 | {"error": "Error"}, 163 | None, 164 | "Error", 165 | ), 166 | ( 167 | "get_config_jobs", 168 | [], 169 | "/config/jobs/", 170 | 200, 171 | "get", 172 | [], 173 | {"data": "OK"}, 174 | "OK", 175 | None, 176 | ), 177 | ( 178 | "get_config_jobs", 179 | [], 180 | "/config/jobs/", 181 | 400, 182 | "get", 183 | [], 184 | {"error": "Error"}, 185 | None, 186 | "Error", 187 | ), 188 | ( 189 | "create_job", 190 | [ 191 | { 192 | "job_config_id": "echo", 193 | } 194 | ], 195 | "/jobs/", 196 | 200, 197 | "post", 198 | [ 199 | matchers.json_params_matcher( 200 | { 201 | "job_config_id": "echo", 202 | } 203 | ) 204 | ], 205 | {"data": "OK"}, 206 | "OK", 207 | None, 208 | ), 209 | ( 210 | "create_job", 211 | [ 212 | { 213 | "command": "echo 1", 214 | "name": "Echo", 215 | "group_id": "group", 216 | }, 217 | ], 218 | "/jobs/", 219 | 200, 220 | "post", 221 | [ 222 | matchers.json_params_matcher( 223 | { 224 | "command": "echo 1", 225 | "name": "Echo", 226 | "group_id": "group", 227 | } 228 | ) 229 | ], 230 | {"data": "OK"}, 231 | "OK", 232 | None, 233 | ), 234 | ( 235 | "create_job", 236 | [ 237 | { 238 | "command": "echo 1", 239 | "name": "Echo", 240 | "group_id": "group", 241 | }, 242 | ], 243 | "/jobs/", 244 | 400, 245 | "post", 246 | [ 247 | 
matchers.json_params_matcher( 248 | { 249 | "command": "echo 1", 250 | "name": "Echo", 251 | "group_id": "group", 252 | } 253 | ) 254 | ], 255 | {"error": "ERROR"}, 256 | None, 257 | "ERROR", 258 | ), 259 | ( 260 | "create_job", 261 | [ 262 | { 263 | "command": "echo 1", 264 | }, 265 | ], 266 | "/jobs/", 267 | 400, 268 | "post", 269 | [], 270 | {}, 271 | None, 272 | DEFAULT_ERROR_MESSAGE, 273 | ), 274 | ( 275 | "create_scheduled_job", 276 | [ 277 | { 278 | "command": "echo 1", 279 | "name": "Echo", 280 | "group_id": "group", 281 | "schedule": "*/1 * * * *", 282 | } 283 | ], 284 | "/scheduled-jobs/", 285 | 200, 286 | "post", 287 | [ 288 | matchers.json_params_matcher( 289 | { 290 | "command": "echo 1", 291 | "name": "Echo", 292 | "group_id": "group", 293 | "schedule": "*/1 * * * *", 294 | } 295 | ) 296 | ], 297 | {"data": "OK"}, 298 | "OK", 299 | None, 300 | ), 301 | ( 302 | "create_scheduled_job", 303 | [ 304 | { 305 | "command": "echo 1", 306 | "schedule": "*/1 * * * *", 307 | } 308 | ], 309 | "/scheduled-jobs/", 310 | 400, 311 | "post", 312 | [], 313 | {"error": "ERROR"}, 314 | None, 315 | "ERROR", 316 | ), 317 | ], 318 | ids=[ 319 | "get_job", 320 | "get_job_non_200", 321 | "list_jobs", 322 | "list_jobs_non_200", 323 | "list_scheduled_jobs", 324 | "list_scheduled_jobs_non_200", 325 | "cancel_job", 326 | "cancel_job_non_200", 327 | "cancel_scheduled_job", 328 | "cancel_scheduled_job_non_200", 329 | "get_config_groups", 330 | "get_config_groups_non_200", 331 | "get_config_jobs", 332 | "get_config_jobs_non_200", 333 | "create_job_from_config", 334 | "create_job_from_command", 335 | "create_job_non_200", 336 | "create_job_non_200_without_error_body", 337 | "create_scheduled_job", 338 | "create_scheduled_job_non_200", 339 | ], 340 | ) 341 | def test_get_job( 342 | method, 343 | args, 344 | uri, 345 | status, 346 | http_method, 347 | matchers, 348 | mocked_json, 349 | expected_response, 350 | expected_exception_message, 351 | ): 352 | try: 353 | mocked_http_method = 
getattr(responses, http_method) 354 | mocked_http_method( 355 | url=f"{BASE_URL}{uri}", 356 | status=status, 357 | json=mocked_json, 358 | match=matchers, 359 | ) 360 | 361 | mocked_method = getattr(api_client, method) 362 | res = mocked_method(*args) 363 | assert res == expected_response 364 | except ApiClientError as ex: 365 | assert ex.message == expected_exception_message 366 | 367 | 368 | def test_exception_str(): 369 | ex = ApiClientError("test") 370 | assert "test" == f"{ex}" 371 | -------------------------------------------------------------------------------- /zapusk/server/controller_jobs_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | from unittest import TestCase 3 | from unittest.mock import ANY 4 | 5 | from testfixtures import TempDirectory 6 | 7 | from zapusk.server.controller_testcase import ControllerTestCase 8 | from zapusk.services import ( 9 | ConfigService, 10 | SchedulerService, 11 | ExecutorManagerService, 12 | ExecutorManagerKawkaBackend, 13 | ) 14 | 15 | from .api import create_app 16 | 17 | CONFIG_DATA = """ 18 | job_groups: 19 | - id: default 20 | parallel: 10 21 | - id: cmd 22 | parallel: 2 23 | 24 | jobs: 25 | - name: Echo 26 | id: echo 27 | command: echo 1 28 | """ 29 | 30 | 31 | class TestJobController(ControllerTestCase): 32 | def before_create_services(self): 33 | self.write_config(CONFIG_DATA) 34 | self.replace_in_environ("HOME", self.temp_dir.path) 35 | 36 | def test_controller_jobs_create_job(self): 37 | res = self.test_client.post("/jobs/", json={"job_config_id": "echo"}) 38 | data = json.loads(res.data) 39 | 40 | self.assertEqual( 41 | data, 42 | { 43 | "data": { 44 | "args": [], 45 | "args_command": None, 46 | "command": "echo 1", 47 | "cwd": self.temp_dir.path, 48 | "consumed_by": None, 49 | "created_at": ANY, 50 | "exit_code": None, 51 | "group": "default", 52 | "group_config": { 53 | "id": "default", 54 | "on_fail": None, 55 | "on_finish": None, 56 | "parallel": 
10, 57 | }, 58 | "id": ANY, 59 | "job_config_id": "echo", 60 | "log": None, 61 | "name": "Echo", 62 | "on_fail": None, 63 | "on_finish": None, 64 | "pid": None, 65 | "state": "PENDING", 66 | "updated_at": ANY, 67 | } 68 | }, 69 | ) 70 | 71 | def test_controller_jobs_create_command(self): 72 | res = self.test_client.post( 73 | "/jobs/", 74 | json={ 75 | "command": "echo 42", 76 | "group_id": "cmd", 77 | "name": "test_command", 78 | }, 79 | ) 80 | data = json.loads(res.data) 81 | 82 | self.assertEqual( 83 | data, 84 | { 85 | "data": { 86 | "args": [], 87 | "args_command": None, 88 | "command": "echo 42", 89 | "cwd": self.temp_dir.path, 90 | "consumed_by": None, 91 | "created_at": ANY, 92 | "exit_code": None, 93 | "group": "default", 94 | "group_config": { 95 | "id": "cmd", 96 | "on_fail": None, 97 | "on_finish": None, 98 | "parallel": 2, 99 | }, 100 | "id": ANY, 101 | "job_config_id": ANY, 102 | "log": None, 103 | "name": "test_command", 104 | "on_fail": None, 105 | "on_finish": None, 106 | "pid": None, 107 | "state": "PENDING", 108 | "updated_at": ANY, 109 | } 110 | }, 111 | ) 112 | 113 | def test_controller_jobs_get_job(self): 114 | res = self.test_client.post("/jobs/", json={"job_config_id": "echo"}) 115 | data = json.loads(res.data) 116 | 117 | job_id = data["data"]["id"] 118 | res = self.test_client.get(f"/jobs/{job_id}") 119 | data = json.loads(res.data) 120 | 121 | self.assertEqual( 122 | data, 123 | { 124 | "data": { 125 | "args": [], 126 | "args_command": None, 127 | "command": "echo 1", 128 | "cwd": self.temp_dir.path, 129 | "consumed_by": None, 130 | "created_at": ANY, 131 | "exit_code": None, 132 | "group": "default", 133 | "group_config": { 134 | "id": "default", 135 | "on_fail": None, 136 | "on_finish": None, 137 | "parallel": 10, 138 | }, 139 | "id": ANY, 140 | "job_config_id": "echo", 141 | "log": None, 142 | "name": "Echo", 143 | "on_fail": None, 144 | "on_finish": None, 145 | "pid": None, 146 | "state": "PENDING", 147 | "updated_at": ANY, 148 | } 
149 | }, 150 | ) 151 | 152 | def test_controller_jobs_list_job(self): 153 | res = self.test_client.post("/jobs/", json={"job_config_id": "echo"}) 154 | data = json.loads(res.data) 155 | 156 | job_id = data["data"]["id"] 157 | res = self.test_client.get("/jobs/") 158 | data = json.loads(res.data) 159 | 160 | self.assertEqual( 161 | data, 162 | { 163 | "data": [ 164 | { 165 | "args": [], 166 | "args_command": None, 167 | "command": "echo 1", 168 | "cwd": self.temp_dir.path, 169 | "consumed_by": None, 170 | "created_at": ANY, 171 | "exit_code": None, 172 | "group": "default", 173 | "group_config": { 174 | "id": "default", 175 | "on_fail": None, 176 | "on_finish": None, 177 | "parallel": 10, 178 | }, 179 | "id": job_id, 180 | "job_config_id": "echo", 181 | "log": None, 182 | "name": "Echo", 183 | "on_fail": None, 184 | "on_finish": None, 185 | "pid": None, 186 | "state": "PENDING", 187 | "updated_at": ANY, 188 | } 189 | ] 190 | }, 191 | ) 192 | 193 | def test_controller_jobs_cancel_job(self): 194 | res = self.test_client.post( 195 | "/jobs/", json={"command": "sleep 60", "name": "test_command"} 196 | ) 197 | data = json.loads(res.data) 198 | 199 | job_id = data["data"]["id"] 200 | res = self.test_client.delete(f"/jobs/{job_id}") 201 | data = json.loads(res.data) 202 | 203 | self.assertEqual( 204 | data, 205 | { 206 | "data": { 207 | "args": [], 208 | "args_command": None, 209 | "command": "sleep 60", 210 | "cwd": self.temp_dir.path, 211 | "consumed_by": ANY, 212 | "created_at": ANY, 213 | "exit_code": None, 214 | "group": "default", 215 | "group_config": { 216 | "id": "default", 217 | "on_fail": None, 218 | "on_finish": None, 219 | "parallel": 10, 220 | }, 221 | "id": ANY, 222 | "job_config_id": ANY, 223 | "log": ANY, 224 | "name": "test_command", 225 | "on_fail": None, 226 | "on_finish": None, 227 | "pid": None, 228 | "state": "CANCELLED", 229 | "updated_at": ANY, 230 | } 231 | }, 232 | ) 233 | 234 | def test_controller_jobs_get_unknown(self): 235 | res = 
self.test_client.get(f"/jobs/420") 236 | data = json.loads(res.data) 237 | 238 | self.assertEqual(res.status, "404 NOT FOUND") 239 | self.assertEqual(data, {"error": "Job with id 420 not found"}) 240 | 241 | def test_create_without_body(self): 242 | res = self.test_client.post(f"/jobs/", json={}) 243 | data = json.loads(res.data) 244 | 245 | self.assertEqual(res.status, "400 BAD REQUEST") 246 | self.assertEqual( 247 | data, {"error": "Request body contains no `command` or `job_config_id`"} 248 | ) 249 | 250 | def test_controller_jobs_create_with_unknown_jobgroup(self): 251 | res = self.test_client.post( 252 | f"/jobs/", 253 | json={ 254 | "command": "echo 1", 255 | "group_id": "unknown", 256 | }, 257 | ) 258 | data = json.loads(res.data) 259 | 260 | self.assertEqual(res.status, "404 NOT FOUND") 261 | self.assertEqual(data, {"error": 'group_id "unknown" not found'}) 262 | 263 | def test_controller_jobs_create_with_unknown_jobconfig_id(self): 264 | res = self.test_client.post( 265 | f"/jobs/", 266 | json={ 267 | "job_config_id": "unknown", 268 | }, 269 | ) 270 | data = json.loads(res.data) 271 | 272 | self.assertEqual(res.status, "404 NOT FOUND") 273 | self.assertEqual(data, {"error": "Job with id `unknown` not found"}) 274 | 275 | def test_controller_jobs_cancel_unknown_job(self): 276 | res = self.test_client.delete("/jobs/420") 277 | data = json.loads(res.data) 278 | 279 | self.assertEqual(res.status, "404 NOT FOUND") 280 | self.assertEqual(data, {"error": "Job with id `420` not found"}) 281 | -------------------------------------------------------------------------------- /zapusk/services/executor_manager/backends/kawka/executor_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest import TestCase, mock 3 | from testfixtures.mock import call 4 | from testfixtures import Replacer 5 | from testfixtures.popen import MockPopen 6 | 7 | from zapusk.kawka import Producer 8 | from 
class ExecutorTest(TestCase):
    """Tests for `Executor`.

    The executor consumes job items from a `Producer`, runs each item's
    command through `subprocess.Popen` (mocked here with `MockPopen`), and
    fires job-level on_finish/on_fail callbacks — job-level callbacks take
    precedence over group-level ones.
    """

    def setUp(self):
        # Script subprocess.Popen and pin $HOME so the executor's working
        # directory is deterministic across environments.
        self.Popen = MockPopen()
        self.r = Replacer()
        self.r.replace("subprocess.Popen", self.Popen)
        self.r.in_environ("HOME", "/home/")
        self.addCleanup(self.r.restore)

    def _start_executor(self):
        """Create a producer feeding a freshly started executor thread."""
        producer = Producer("input_producer")
        executor = Executor(name="run_consumer", producer=producer)
        executor.start()
        return producer, executor

    def _run_to_completion(self, producer, executor, item):
        """Enqueue `item`, close the stream, and wait for the executor.

        Always joins with a 2-second timeout so a hung executor thread
        cannot deadlock the whole suite (previously some tests joined
        without a timeout).
        """
        producer.add(item)
        producer.add(Producer.End)
        executor.join(2)

    def test_consumer_should_run_command(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("echo 1", stdout=b"1", stderr=b"")
        item = create_jobitem(command="echo 1")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "echo 1",
                shell=True,
                env={**os.environ},
                cwd="/home/",
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )
        self.assertEqual(item.state, Job.JOB_STATE_ENUM.FINISHED)

    def test_consumer_should_run_on_finish_callback(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("echo 1", stdout=b"1", stderr=b"")
        self.Popen.set_command("echo finish", stdout=b"finish", stderr=b"")

        item = create_jobitem(command="echo 1", on_finish="echo finish")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "echo 1",
                shell=True,
                env={**os.environ},
                cwd="/home/",
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        # all_calls[1] is the communicate() on the first process; the
        # callback's Popen is therefore at index 2.
        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo finish",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_on_finish_group_callback(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("echo 1", stdout=b"1", stderr=b"")
        self.Popen.set_command("echo finish", stdout=b"finish", stderr=b"")

        item = create_jobitem(command="echo 1", group_on_finish="echo finish")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "echo 1",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo finish",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_on_finish_job_callback_if_both_job_and_group_are_defined(
        self,
    ):
        producer, executor = self._start_executor()

        self.Popen.set_command("echo 1", stdout=b"1", stderr=b"")
        self.Popen.set_command("echo finish", stdout=b"finish", stderr=b"")

        item = create_jobitem(
            command="echo 1",
            on_finish="echo finish",
            group_on_finish="echo finish_group",
        )
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "echo 1",
                shell=True,
                env={**os.environ},
                cwd="/home/",
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        # Job-level callback wins over the group-level one.
        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo finish",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_on_fail_callback(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("exit 1", stdout=b"", stderr=b"1", returncode=1)
        self.Popen.set_command("echo fail", stdout=b"fail", stderr=b"")

        item = create_jobitem(command="exit 1", on_fail="echo fail")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "exit 1",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo fail",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_group_on_fail_callback(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("exit 1", stdout=b"", stderr=b"1", returncode=1)
        self.Popen.set_command("echo fail", stdout=b"fail", stderr=b"")

        item = create_jobitem(command="exit 1", group_on_fail="echo fail")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "exit 1",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo fail",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_on_fail_job_callback_if_both_job_and_group_callbacks_are_defined(
        self,
    ):
        producer, executor = self._start_executor()

        self.Popen.set_command("exit 1", stdout=b"", stderr=b"1", returncode=1)
        self.Popen.set_command("echo fail", stdout=b"fail", stderr=b"")

        item = create_jobitem(
            command="exit 1", on_fail="echo fail", group_on_fail="echo group_fail"
        )
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "exit 1",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )

        # Job-level callback wins over the group-level one.
        self.assertEqual(
            self.Popen.all_calls[2],
            call.Popen(
                "echo fail",
                env={**os.environ},
                cwd="/home/",
                shell=True,
            ),
        )

    def test_consumer_should_run_command_with_args(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("echo 1 2 3", stdout=b"1 2 3", stderr=b"")
        item = create_jobitem(command="echo", args=["1", "2", "3"])
        self._run_to_completion(producer, executor, item)

        # args are appended to the command, space-separated.
        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "echo 1 2 3",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )
        self.assertEqual(item.state, Job.JOB_STATE_ENUM.FINISHED)

    def test_consumer_should_fail_command(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("exit 1", stdout=b"1", stderr=b"", returncode=1)
        item = create_jobitem(command="exit 1")
        self._run_to_completion(producer, executor, item)

        self.assertEqual(
            self.Popen.all_calls[0],
            call.Popen(
                "exit 1",
                env={**os.environ},
                cwd="/home/",
                shell=True,
                stdout=mock.ANY,
                stderr=mock.ANY,
            ),
        )
        self.assertEqual(item.state, Job.JOB_STATE_ENUM.FAILED)

    def test_consumer_should_skip_cancelled(self):
        producer, executor = self._start_executor()

        self.Popen.set_command("exit 1", stdout=b"1", stderr=b"", returncode=1)
        item = create_jobitem(command="exit 1", state=Job.JOB_STATE_ENUM.CANCELLED)
        self._run_to_completion(producer, executor, item)

        # A job already cancelled before consumption must never be executed.
        self.assertEqual(len(self.Popen.all_calls), 0)
        self.assertEqual(item.state, Job.JOB_STATE_ENUM.CANCELLED)