├── .github └── workflows │ └── ci.yml ├── .gitignore ├── MANIFEST.in ├── README.md ├── license.txt ├── requirements.txt ├── rq_orchestrator ├── __init__.py ├── commands.py ├── config │ └── __init__.py ├── frappe_worker_pool.py ├── hooks.py ├── modules.txt ├── patches.txt ├── public │ └── .gitkeep ├── rq_orchestrator │ └── __init__.py ├── templates │ ├── __init__.py │ └── pages │ │ └── __init__.py └── test_frappe_worker_pool.py └── setup.py /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | 2 | name: CI 3 | 4 | on: 5 | push: 6 | branches: 7 | - develop 8 | pull_request: 9 | 10 | concurrency: 11 | group: develop-rq_orchestrator-${{ github.event.number }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | tests: 16 | runs-on: ubuntu-latest 17 | strategy: 18 | fail-fast: false 19 | name: Server 20 | 21 | services: 22 | redis-cache: 23 | image: redis:alpine 24 | ports: 25 | - 13000:6379 26 | redis-queue: 27 | image: redis:alpine 28 | ports: 29 | - 11000:6379 30 | redis-socketio: 31 | image: redis:alpine 32 | ports: 33 | - 12000:6379 34 | mariadb: 35 | image: mariadb:10.6 36 | env: 37 | MYSQL_ROOT_PASSWORD: root 38 | ports: 39 | - 3306:3306 40 | options: --health-cmd="mysqladmin ping" --health-interval=5s --health-timeout=2s --health-retries=3 41 | 42 | steps: 43 | - name: Clone 44 | uses: actions/checkout@v3 45 | 46 | - name: Setup Python 47 | uses: actions/setup-python@v4 48 | with: 49 | python-version: '3.10' 50 | 51 | - name: Setup Node 52 | uses: actions/setup-node@v3 53 | with: 54 | node-version: 16 55 | check-latest: true 56 | 57 | - name: Cache pip 58 | uses: actions/cache@v2 59 | with: 60 | path: ~/.cache/pip 61 | key: ${{ runner.os }}-pip-${{ hashFiles('**/*requirements.txt', '**/pyproject.toml', '**/setup.py', '**/setup.cfg') }} 62 | restore-keys: | 63 | ${{ runner.os }}-pip- 64 | ${{ runner.os }}- 65 | 66 | - name: Get yarn cache directory path 67 | id: yarn-cache-dir-path 68 | run: 'echo 
"dir=$(yarn cache dir)" >> $GITHUB_OUTPUT' 69 | 70 | - uses: actions/cache@v3 71 | id: yarn-cache 72 | with: 73 | path: ${{ steps.yarn-cache-dir-path.outputs.dir }} 74 | key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }} 75 | restore-keys: | 76 | ${{ runner.os }}-yarn- 77 | 78 | - name: Setup 79 | run: | 80 | pip install frappe-bench 81 | bench init --skip-redis-config-generation --skip-assets --python "$(which python)" ~/frappe-bench 82 | mysql --host 127.0.0.1 --port 3306 -u root -proot -e "SET GLOBAL character_set_server = 'utf8mb4'" 83 | mysql --host 127.0.0.1 --port 3306 -u root -proot -e "SET GLOBAL collation_server = 'utf8mb4_unicode_ci'" 84 | 85 | - name: Install 86 | working-directory: /home/runner/frappe-bench 87 | run: | 88 | bench get-app rq_orchestrator $GITHUB_WORKSPACE 89 | bench setup requirements --dev 90 | bench new-site --db-root-password root --admin-password admin test_site 91 | bench --site test_site install-app rq_orchestrator 92 | bench build 93 | env: 94 | CI: 'Yes' 95 | 96 | - name: Run Tests 97 | working-directory: /home/runner/frappe-bench 98 | run: | 99 | bench --site test_site set-config allow_tests true 100 | bench --site test_site run-tests --app rq_orchestrator 101 | env: 102 | TYPE: server 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.pyc 3 | *.egg-info 4 | *.swp 5 | tags 6 | node_modules -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in 2 | include requirements.txt 3 | include *.json 4 | include *.md 5 | include *.py 6 | include *.txt 7 | recursive-include rq_orchestrator *.css 8 | recursive-include rq_orchestrator *.csv 9 | recursive-include rq_orchestrator *.html 10 | recursive-include rq_orchestrator *.ico 11 | 
recursive-include rq_orchestrator *.js 12 | recursive-include rq_orchestrator *.json 13 | recursive-include rq_orchestrator *.md 14 | recursive-include rq_orchestrator *.png 15 | recursive-include rq_orchestrator *.py 16 | recursive-include rq_orchestrator *.svg 17 | recursive-include rq_orchestrator *.txt 18 | recursive-exclude rq_orchestrator *.pyc -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | A simple WorkerPool (without auto-scaling) is merged in Frappe: https://github.com/frappe/frappe/pull/21482 2 | 3 | --- 4 | 5 | ## RQ Orchestrator 6 | 7 | Provides RQ worker pool with dynamic scaling :rocket: 8 | 9 | WARNING: Experimental POC. Don't run this anywhere near production sites. 10 | 11 | ### Concept 12 | 13 | There are roughly two metrics that need to be optimized while deciding on worker count: 14 | 15 | 1. Memory usage - More # of workers => more memory usage. 16 | 2. Average wait time for jobs ~ How responsive the system is. 17 | 18 | Both of these are at odds with each other as a responsive system would require 19 | more workers i.e. more memory usage. We need to dynamically spawn workers while 20 | still control what parameters to optimize for. 21 | 22 | ### Implementation 23 | 24 | This app provides a command to start worker pool instead of single worker. 25 | 26 | Example: 27 | 28 | First remove all `bench worker` processes from process list and supervisor conf and replace them with equivalent commands for worker pool. 29 | 30 | ``` 31 | bench worker-pool --min-workers=2 --max-workers=5 --scaling-period=5 --utilization-threshold=0.5 32 | ``` 33 | 34 | This command will: 35 | 36 | - Spawn two workers and start working 37 | - Every 5 seconds: 38 | - Check how much time workers spent in last 30 seconds 39 | - If it was more than `--utilization-threshold` i.e. 50% then increase 40 | one worker. 
41 | - If it was less than half of threshold i.e. 25% then it will decrease 42 | one worker. 43 | - If it was within 25-50% range worker pool stays as is. 44 | 45 | Test it out by simulating fake workload from bench console: 46 | 47 | ```python 48 | # A function that just sleeps 49 | from frappe.core.doctype.rq_job.test_rq_job import test_func 50 | 51 | while True: 52 | import time 53 | time.sleep(0.5) 54 | frappe.enqueue(test_func, sleep=1) 55 | ``` 56 | 57 | - This will enqueue 2 jobs every second that consume 1 second each 58 | - So roughly we will end up spewing 4-5 workers at which point workload and 59 | workers are balanced according to set parameters. 60 | - If you stop enqueuing new jobs, overtime it will drop back to 2 workers 61 | again. 62 | - To Monitor this in realtime go to `RQ Worker` doctype and setup 63 | auto-refresh: 64 | - `setInterval(() => {cur_list.refresh()}, 1000)` 65 | 66 | 67 | If you visualize this is roughly how it will look: 68 | 69 | ![image](https://github.com/ankush/rq_orchestrator/assets/9079960/650649e2-c359-4f68-99be-e846d7c39978) 70 | 71 | 72 | 73 | ### Memory efficiency 74 | 75 | Because WorkerPool forks workers from the master process it can utilize shared memory much better. A worker pool of 8 workers consumes only 1/3 of memory compared to 8 individual workers. 76 | 77 | ![image](https://github.com/ankush/rq_orchestrator/assets/9079960/b6d02778-0447-4474-bb17-df11054ae671) 78 | 79 | 80 | ### Implementation notes 81 | 82 | - `--utilization-threshold` controls responsiveness vs efficiency. Low 83 | threshold means highly responsive system but very low efficiency and vice 84 | versa. 85 | - Some weird edge cases are handled weirdly. 86 | - Extremely long running jobs which might not increase utilization if they 87 | started before scaling window but didn't end yet. 88 | - Utilization in time bucket is computed by remembering old utilization. This 89 | isn't accurate at all and requires rework. 
import click


@click.command("worker-pool")
@click.option(
    "--queue",
    type=str,
    help="Queue to consume from. Multiple queues can be specified using comma-separated string. If not specified all queues are consumed.",
)
@click.option("--min-workers", type=int, default=1, help="Minimum worker count")
@click.option("--max-workers", type=int, default=8, help="Maximum worker count")
@click.option(
    "--scaling-period",
    type=int,
    default=10,
    help="Time in seconds after which autoscaling should run.",
)
@click.option(
    "--utilization-threshold",
    type=float,
    default=0.5,
    help="Utilization after which workers should be scaled up.",
)
def start_worker_pool(
    queue, min_workers, max_workers, scaling_period, utilization_threshold
):
    """Start a background worker pool with utilization-based autoscaling.

    Thin CLI wrapper around ``rq_orchestrator.frappe_worker_pool.start_worker_pool``;
    the import is deferred so command discovery does not pay the cost of
    importing frappe/rq machinery at bench startup.
    """
    from rq_orchestrator.frappe_worker_pool import start_worker_pool

    start_worker_pool(
        queue,
        max_workers=max_workers,
        min_workers=min_workers,
        scaling_period=scaling_period,
        utilization_threshold=utilization_threshold,
    )


# Picked up by frappe/bench command discovery.
commands = [
    start_worker_pool,
]
import time

import frappe
from rq import Worker
from rq.command import send_shutdown_command
from rq.worker import WorkerStatus
from rq.worker_pool import WorkerPool


class FrappeWorkerPool(WorkerPool):
    """RQ WorkerPool that scales worker count based on recent utilization.

    Scaling heuristic, evaluated every ``scaling_period`` seconds:

    - average utilization above ``utilization_threshold`` -> add one worker
    - average utilization below half of the threshold     -> remove one idle worker
    - otherwise                                           -> no change

    The half-threshold scale-down margin avoids repeated up/down flapping
    (same idea as the growth/shrink hysteresis of resizable arrays).
    """

    def __init__(
        self,
        *args,
        min_workers: int = 1,
        max_workers: int = 8,
        utilization_threshold: float = 0.5,
        scaling_period: int = 10,  # seconds
        **kwargs,
    ):
        self._min_workers = min_workers
        # Pool starts at the minimum size; apply_scaling adjusts num_workers later.
        kwargs["num_workers"] = min_workers
        self._max_workers = max_workers
        self._utilization_threshold = utilization_threshold
        self._scaling_period = scaling_period
        # Workers that were already sent a shutdown command; each worker must
        # be signalled at most once (see kill_worker).
        self._workers_killed = set()

        # Map of worker name -> last known total_working_time, used to compute
        # per-period utilization deltas.
        self._utilization_history = {}

        super().__init__(*args, **kwargs)

    def check_workers(self, *args, **kwargs):
        """Piggyback autoscaling on WorkerPool's periodic worker health check."""
        super().check_workers(*args, **kwargs)
        self.apply_scaling()
        time.sleep(self._scaling_period)

    def get_average_utilization(self, workers: list[Worker]) -> float:
        """Return the average fraction of the last period workers spent busy.

        Workers without a baseline yet (spawned during the last period) are
        excluded. Returns 0.0 when no worker has usable history.
        """
        utilizations = []
        for worker in workers:
            # Only skip `None` (no baseline). A plain truthiness check would
            # also drop valid 0.0 readings from idle workers, inflating the
            # average and causing spurious scale-ups.
            if (recent_utilization := self.get_worker_utilization(worker)) is not None:
                utilizations.append(recent_utilization)

        if not utilizations:
            return 0.0

        return sum(utilizations) / len(utilizations)

    def _get_workers_in_pool(self) -> list[Worker]:
        """Return Worker records (read back from Redis) that belong to this pool."""
        return [
            w for w in Worker.all(connection=self.connection) if w.name in self.worker_dict
        ]

    def get_worker_utilization(self, worker: Worker) -> float | None:
        """Get worker's utilization in previous scaling period.

        Returns ``None`` if the worker was spawned in the last period, i.e.
        there is no previous reading to diff against.
        """
        current_working_time = worker.total_working_time
        last_known_working_time = self._utilization_history.get(worker.name)
        self._utilization_history[worker.name] = current_working_time

        if last_known_working_time is None:
            return None

        # HACK/XXX: total_working_time only advances when a job finishes, so a
        # long-running job would show no progress across the window; treat a
        # BUSY worker with an unchanged counter as 100% utilized.
        if (
            worker.get_state() == WorkerStatus.BUSY
            and current_working_time == last_known_working_time
        ):
            return 1.0

        return (current_working_time - last_known_working_time) / self._scaling_period

    def apply_scaling(self):
        """Adjust the pool size by at most one worker, based on average utilization."""
        workers = self._get_workers_in_pool()
        utilization = self.get_average_utilization(workers)

        if utilization > self._utilization_threshold and self.num_workers < self._max_workers:
            # WorkerPool spawns missing workers on its own check cycle, so just
            # bumping num_workers is enough (not designed for this, but "works").
            self.num_workers += 1
            self.log.info(
                f"Utilization at {utilization}, increased worker count to {self.num_workers}"
            )
        # Scale down only occurs at 1/2 of threshold to avoid continuously scaling up/down
        elif (
            utilization < self._utilization_threshold / 2
            and self.num_workers > self._min_workers
        ):
            self.attempt_scale_down(workers)
            self.log.info(
                f"Utilization at {utilization}, decreased worker count to {self.num_workers}"
            )
        else:
            self.log.debug(f"Utilization {utilization} within limits, scaling not applied.")

    def attempt_scale_down(self, workers: list[Worker]):
        """Shut down the first idle worker that has done some work, if any."""
        for worker in workers:
            if worker.get_state() == WorkerStatus.IDLE and worker.total_working_time:
                self.kill_worker(worker)
                return

    def kill_worker(self, worker: Worker):
        """Request graceful shutdown of `worker` exactly once.

        Repeatedly sending SIGINT would result in forceful termination, hence
        the signal is only sent once per worker.
        """
        if worker.name not in self._workers_killed:
            self.num_workers -= 1
            send_shutdown_command(self.connection, worker.name)
            self._utilization_history.pop(worker.name, None)
            self._workers_killed.add(worker.name)


def start_worker_pool(
    queue: str | None = None,
    min_workers: int = 1,
    max_workers: int = 8,
    scaling_period: int = 10,
    utilization_threshold: float = 0.5,
):
    """Start a FrappeWorkerPool consuming the given queue(s) and block forever.

    Args:
        queue: comma-separated queue names; all queues are consumed when None.
        min_workers: lower bound on pool size.
        max_workers: upper bound on pool size.
        scaling_period: seconds between autoscaling evaluations.
        utilization_threshold: utilization above which the pool scales up
            (scale-down happens below half of this value).
    """
    from frappe.utils.background_jobs import get_queue_list, get_redis_conn

    # Freeze long-lived objects so forked workers share memory pages (CoW).
    # getattr: be robust against frappe versions that lack this flag.
    if getattr(frappe, "_tune_gc", False):
        import gc

        gc.collect()
        gc.freeze()

    with frappe.init_site():
        # empty init is required to get redis_queue from common_site_config.json
        redis_connection = get_redis_conn()

        if queue:
            queue = [q.strip() for q in queue.split(",")]
        queues = get_queue_list(queue, build_queue_name=True)

        pool = FrappeWorkerPool(
            queues=queues,
            connection=redis_connection,
            min_workers=min_workers,
            max_workers=max_workers,
            scaling_period=scaling_period,
            utilization_threshold=utilization_threshold,
        )
        pool.start()
from . import __version__ as app_version

app_name = "rq_orchestrator"
app_title = "RQ Orchestrator"
app_publisher = "Ankush Menat"
# Fixed typo: "Providers" -> "Provides"
app_description = "Provides RQ worker pool with dynamic scaling"
app_email = "ankush@frappe.io"
app_license = "MIT"

# Includes in <head>
# ------------------

# include js, css files in header of desk.html
# app_include_css = "/assets/rq_orchestrator/css/rq_orchestrator.css"
# app_include_js = "/assets/rq_orchestrator/js/rq_orchestrator.js"

# include js, css files in header of web template
# web_include_css = "/assets/rq_orchestrator/css/rq_orchestrator.css"
# web_include_js = "/assets/rq_orchestrator/js/rq_orchestrator.js"

# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "rq_orchestrator/public/scss/website"

# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}

# include js in page
# page_js = {"page" : "public/js/file.js"}

# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}

# Home Pages
# ----------

# application home page (will override Website Settings)
# home_page = "login"

# website user home page (by Role)
# role_home_page = {
# 	"Role": "home_page"
# }

# Generators
# ----------

# automatically create page for each record of this doctype
# website_generators = ["Web Page"]

# Jinja
# ----------

# add methods and filters to jinja environment
# jinja = {
# 	"methods": "rq_orchestrator.utils.jinja_methods",
# 	"filters": "rq_orchestrator.utils.jinja_filters"
# }

# Installation
# ------------

# before_install = "rq_orchestrator.install.before_install"
# after_install = "rq_orchestrator.install.after_install"

# Uninstallation
# ------------

# before_uninstall = "rq_orchestrator.uninstall.before_uninstall"
# after_uninstall = "rq_orchestrator.uninstall.after_uninstall"

# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config

# notification_config = "rq_orchestrator.notifications.get_notification_config"

# Permissions
# -----------
# Permissions evaluated in scripted ways

# permission_query_conditions = {
# 	"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# 	"Event": "frappe.desk.doctype.event.event.has_permission",
# }

# DocType Class
# ---------------
# Override standard doctype classes

# override_doctype_class = {
# 	"ToDo": "custom_app.overrides.CustomToDo"
# }

# Document Events
# ---------------
# Hook on document methods and events

# doc_events = {
# 	"*": {
# 		"on_update": "method",
# 		"on_cancel": "method",
# 		"on_trash": "method"
# 	}
# }

# Scheduled Tasks
# ---------------

# scheduler_events = {
# 	"all": [
# 		"rq_orchestrator.tasks.all"
# 	],
# 	"daily": [
# 		"rq_orchestrator.tasks.daily"
# 	],
# 	"hourly": [
# 		"rq_orchestrator.tasks.hourly"
# 	],
# 	"weekly": [
# 		"rq_orchestrator.tasks.weekly"
# 	],
# 	"monthly": [
# 		"rq_orchestrator.tasks.monthly"
# 	],
# }

# Testing
# -------

# before_tests = "rq_orchestrator.install.before_tests"

# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# 	"frappe.desk.doctype.event.event.get_events": "rq_orchestrator.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# 	"Task": "rq_orchestrator.task.get_dashboard_data"
# }

# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]

# Ignore links to specified DocTypes when deleting documents
# -----------------------------------------------------------

# ignore_links_on_delete = ["Communication", "ToDo"]

# Request Events
# ----------------
# before_request = ["rq_orchestrator.utils.before_request"]
# after_request = ["rq_orchestrator.utils.after_request"]

# Job Events
# ----------
# before_job = ["rq_orchestrator.utils.before_job"]
# after_job = ["rq_orchestrator.utils.after_job"]

# User Data Protection
# --------------------

# user_data_fields = [
# 	{
# 		"doctype": "{doctype_1}",
# 		"filter_by": "{filter_by}",
# 		"redact_fields": ["{field_1}", "{field_2}"],
# 		"partial": 1,
# 	},
# 	{
# 		"doctype": "{doctype_2}",
# 		"filter_by": "{filter_by}",
# 		"partial": 1,
# 	},
# 	{
# 		"doctype": "{doctype_3}",
# 		"strict": False,
# 	},
# 	{
# 		"doctype": "{doctype_4}"
# 	}
# ]

# Authentication and authorization
# --------------------------------

# auth_hooks = [
# 	"rq_orchestrator.auth.validate"
# ]
-------------------------------------------------------------------------------- /rq_orchestrator/modules.txt: -------------------------------------------------------------------------------- 1 | RQ Orchestrator -------------------------------------------------------------------------------- /rq_orchestrator/patches.txt: -------------------------------------------------------------------------------- 1 | [pre_model_sync] 2 | # Patches added in this section will be executed before doctypes are migrated 3 | # Read docs to understand patches: https://frappeframework.com/docs/v14/user/en/database-migrations 4 | 5 | [post_model_sync] 6 | # Patches added in this section will be executed after doctypes are migrated -------------------------------------------------------------------------------- /rq_orchestrator/public/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankush/rq_orchestrator/7fac3285ace4e1a4fbd8bc2d6a6cc4bec58abc8c/rq_orchestrator/public/.gitkeep -------------------------------------------------------------------------------- /rq_orchestrator/rq_orchestrator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankush/rq_orchestrator/7fac3285ace4e1a4fbd8bc2d6a6cc4bec58abc8c/rq_orchestrator/rq_orchestrator/__init__.py -------------------------------------------------------------------------------- /rq_orchestrator/templates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankush/rq_orchestrator/7fac3285ace4e1a4fbd8bc2d6a6cc4bec58abc8c/rq_orchestrator/templates/__init__.py -------------------------------------------------------------------------------- /rq_orchestrator/templates/pages/__init__.py: -------------------------------------------------------------------------------- 
# Not actual test but demo using testing system
# Spawn worker pool before running the tests.
import time

import frappe
from frappe.tests.utils import FrappeTestCase
from frappe.utils.background_jobs import get_queue, get_workers


def sleepy(duration=1):
    """Fake workload: a job that just sleeps for `duration` seconds."""
    time.sleep(duration)


class TestOrchestrator(FrappeTestCase):
    """Interactive demo of pool autoscaling.

    Run with a worker pool already started; it enqueues a steady stream of
    sleepy jobs and prints worker/job counts so scale-up and scale-down can
    be observed. It intentionally never terminates — interrupt manually.
    """

    def log_status(self):
        """Print current worker count and pending + running job count."""
        q = get_queue("default")
        workers = get_workers(q)
        worker_count = len(workers)
        jobs_count = len(q.get_job_ids())
        # Queued job ids exclude jobs currently executing; count those too.
        for w in workers:
            if w.get_current_job_id():
                jobs_count += 1
        print(f"{worker_count} | {jobs_count:03} ")

    def test_demo(self):
        # Plain string: the original used an f-string with no placeholders.
        print("# of Workers | # of Jobs")

        self.log_status()

        # Enqueue 2 jobs/second, each consuming ~1s: sustained overload that
        # should drive the pool towards its upper worker bound.
        for _ in range(100):
            time.sleep(0.5)
            frappe.enqueue(sleepy)
            self.log_status()

        # Keep logging forever so scale-down can be observed once the queue
        # drains; stop with Ctrl-C.
        while True:
            time.sleep(0.5)
            self.log_status()
from setuptools import setup, find_packages

with open("requirements.txt") as f:
    install_requires = f.read().strip().split("\n")

# get version from __version__ variable in rq_orchestrator/__init__.py
from rq_orchestrator import __version__ as version

setup(
    name="rq_orchestrator",
    version=version,
    # Fixed typo: "Providers" -> "Provides"
    description="Provides RQ worker pool with dynamic scaling",
    author="Ankush Menat",
    author_email="ankush@frappe.io",
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires,
)