├── requirements.txt ├── addons_external ├── queue_job │ ├── controllers │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── main.cpython-310.pyc │ │ │ └── __init__.cpython-310.pyc │ │ └── main.py │ ├── readme │ │ ├── INSTALL.md │ │ ├── HISTORY.md │ │ ├── CONTRIBUTORS.md │ │ ├── ROADMAP.md │ │ ├── CONFIGURE.md │ │ ├── DESCRIPTION.md │ │ └── USAGE.md │ ├── pyproject.toml │ ├── wizards │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── queue_jobs_to_done.cpython-310.pyc │ │ │ ├── queue_requeue_job.cpython-310.pyc │ │ │ └── queue_jobs_to_cancelled.cpython-310.pyc │ │ ├── queue_jobs_to_done.py │ │ ├── queue_jobs_to_cancelled.py │ │ ├── queue_requeue_job.py │ │ ├── queue_requeue_job_views.xml │ │ ├── queue_jobs_to_cancelled_views.xml │ │ └── queue_jobs_to_done_views.xml │ ├── static │ │ ├── description │ │ │ ├── icon.png │ │ │ └── icon.svg │ │ └── src │ │ │ └── views │ │ │ └── fields │ │ │ └── job_direct_graph │ │ │ ├── job_direct_graph.scss │ │ │ ├── job_direct_graph.xml │ │ │ └── job_direct_graph.esm.js │ ├── __pycache__ │ │ ├── job.cpython-310.pyc │ │ ├── delay.cpython-310.pyc │ │ ├── fields.cpython-310.pyc │ │ ├── utils.cpython-310.pyc │ │ ├── __init__.cpython-310.pyc │ │ ├── exception.cpython-310.pyc │ │ ├── post_load.cpython-310.pyc │ │ └── post_init_hook.cpython-310.pyc │ ├── models │ │ ├── __pycache__ │ │ │ ├── base.cpython-310.pyc │ │ │ ├── __init__.cpython-310.pyc │ │ │ ├── queue_job.cpython-310.pyc │ │ │ ├── ir_model_fields.cpython-310.pyc │ │ │ ├── queue_job_channel.cpython-310.pyc │ │ │ └── queue_job_function.cpython-310.pyc │ │ ├── __init__.py │ │ ├── ir_model_fields.py │ │ ├── queue_job_channel.py │ │ ├── queue_job_function.py │ │ └── base.py │ ├── jobrunner │ │ ├── __pycache__ │ │ │ ├── runner.cpython-310.pyc │ │ │ ├── __init__.cpython-310.pyc │ │ │ └── channels.cpython-310.pyc │ │ ├── __main__.py │ │ └── __init__.py │ ├── data │ │ ├── queue_job_function_data.xml │ │ └── queue_data.xml │ ├── __init__.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_runner_runner.py │ │ ├── test_runner_channels.py │ │ ├── test_queue_job_protected_write.py │ │ ├── test_wizards.py │ │ ├── test_model_job_function.py │ │ ├── test_model_job_channel.py │ │ ├── test_json_field.py │ │ ├── test_delayable.py │ │ └── common.py │ ├── post_load.py │ ├── security │ │ ├── ir.model.access.csv │ │ └── security.xml │ ├── views │ │ ├── queue_job_menus.xml │ │ ├── queue_job_channel_views.xml │ │ ├── queue_job_function_views.xml │ │ └── queue_job_views.xml │ ├── post_init_hook.py │ ├── __manifest__.py │ ├── exception.py │ ├── utils.py │ └── fields.py ├── session_redis │ ├── __init__.py │ ├── __pycache__ │ │ ├── http.cpython-310.pyc │ │ ├── session.cpython-310.pyc │ │ ├── __init__.cpython-310.pyc │ │ ├── strtobool.cpython-310.pyc │ │ └── json_encoding.cpython-310.pyc │ ├── strtobool.py │ ├── __manifest__.py │ ├── json_encoding.py │ ├── README.rst │ ├── http.py │ └── session.py └── README.md ├── addons └── README.md ├── readme ├── odoo_logo.png ├── heimdall-data-logo-aws.png ├── odoo-heimdall-features.png ├── odoo-heimdall-metrics.png ├── odoo-heimdall-status.png ├── odoo-heimdall-proxy-diagram.png └── odoo-heimdall-docker-desktop.png ├── addons_customer └── README.md ├── heimdall ├── files │ ├── opt │ │ └── heimdall │ │ │ └── static │ │ │ └── drivers │ │ │ └── PostgreSQL │ │ │ └── postgresql-42.4.1-streaming.jar │ └── docker-entrypoint.sh └── Dockerfile ├── bootstrap.sh ├── config ├── odoo.conf ├── odoo.conf.j2 └── Mountrix-vdb.json ├── README.md ├── Dockerfile ├── entrypoint.sh └── 
docker-compose.yml /requirements.txt: -------------------------------------------------------------------------------- 1 | openpyxl 2 | redis 3 | python-jose 4 | -------------------------------------------------------------------------------- /addons_external/queue_job/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | from . import main 2 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/INSTALL.md: -------------------------------------------------------------------------------- 1 | Be sure to have the `requests` library installed. 2 | -------------------------------------------------------------------------------- /addons_external/session_redis/__init__.py: -------------------------------------------------------------------------------- 1 | from . import http 2 | from . import session 3 | -------------------------------------------------------------------------------- /addons/README.md: -------------------------------------------------------------------------------- 1 | # Odoo 1 2 | 3 | Place all the custom-developed addons inside this folder 4 | 5 | -------------------------------------------------------------------------------- /readme/odoo_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo_logo.png -------------------------------------------------------------------------------- /addons_external/README.md: -------------------------------------------------------------------------------- 1 | # mountrix External Addons 2 | 3 | Place all the third-party addons inside this folder -------------------------------------------------------------------------------- /addons_customer/README.md: -------------------------------------------------------------------------------- 1 | # mountrix Customer Addons 2 | 3 | Place all the customer-specific custom addons inside this folder -------------------------------------------------------------------------------- /addons_external/queue_job/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["whool"] 3 | build-backend = "whool.buildapi" 4 | -------------------------------------------------------------------------------- /readme/heimdall-data-logo-aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/heimdall-data-logo-aws.png -------------------------------------------------------------------------------- /readme/odoo-heimdall-features.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo-heimdall-features.png -------------------------------------------------------------------------------- /readme/odoo-heimdall-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo-heimdall-metrics.png -------------------------------------------------------------------------------- /readme/odoo-heimdall-status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo-heimdall-status.png
-------------------------------------------------------------------------------- /readme/odoo-heimdall-proxy-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo-heimdall-proxy-diagram.png -------------------------------------------------------------------------------- /readme/odoo-heimdall-docker-desktop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/readme/odoo-heimdall-docker-desktop.png -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/__init__.py: -------------------------------------------------------------------------------- 1 | from . import queue_requeue_job 2 | from . import queue_jobs_to_done 3 | from . import queue_jobs_to_cancelled 4 | -------------------------------------------------------------------------------- /addons_external/queue_job/static/description/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/static/description/icon.png -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/job.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/job.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/delay.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/delay.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/fields.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/fields.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/utils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/utils.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/exception.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/exception.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/post_load.cpython-310.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/post_load.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/session_redis/__pycache__/http.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/session_redis/__pycache__/http.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/models/__pycache__/base.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/base.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/session_redis/__pycache__/session.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/session_redis/__pycache__/session.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/__pycache__/post_init_hook.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/__pycache__/post_init_hook.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/session_redis/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/session_redis/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/session_redis/__pycache__/strtobool.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/session_redis/__pycache__/strtobool.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/controllers/__pycache__/main.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/controllers/__pycache__/main.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/jobrunner/__pycache__/runner.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/jobrunner/__pycache__/runner.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/models/__init__.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | from . import ir_model_fields 3 | from . import queue_job 4 | from . import queue_job_channel 5 | from . 
import queue_job_function 6 | -------------------------------------------------------------------------------- /addons_external/queue_job/models/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/models/__pycache__/queue_job.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/queue_job.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/wizards/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/jobrunner/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/jobrunner/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/jobrunner/__pycache__/channels.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/jobrunner/__pycache__/channels.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/session_redis/__pycache__/json_encoding.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/session_redis/__pycache__/json_encoding.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/controllers/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/controllers/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/models/__pycache__/ir_model_fields.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/ir_model_fields.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/models/__pycache__/queue_job_channel.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/queue_job_channel.cpython-310.pyc -------------------------------------------------------------------------------- 
/addons_external/queue_job/models/__pycache__/queue_job_function.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/models/__pycache__/queue_job_function.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/__pycache__/queue_jobs_to_done.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/wizards/__pycache__/queue_jobs_to_done.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/__pycache__/queue_requeue_job.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/wizards/__pycache__/queue_requeue_job.cpython-310.pyc -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/__pycache__/queue_jobs_to_cancelled.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/addons_external/queue_job/wizards/__pycache__/queue_jobs_to_cancelled.cpython-310.pyc -------------------------------------------------------------------------------- /heimdall/files/opt/heimdall/static/drivers/PostgreSQL/postgresql-42.4.1-streaming.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awisky/odoo-heimdall-proxy/HEAD/heimdall/files/opt/heimdall/static/drivers/PostgreSQL/postgresql-42.4.1-streaming.jar -------------------------------------------------------------------------------- /addons_external/queue_job/static/src/views/fields/job_direct_graph/job_direct_graph.scss: -------------------------------------------------------------------------------- 1 | .o_field_job_directed_graph { 2 | width: 600px; 3 | height: 400px; 4 | border: 1px solid lightgray; 5 | 6 | div.root_vis { 7 | height: 100%; 8 | width: 100%; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /addons_external/queue_job/data/queue_job_function_data.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | _test_job 5 | 6 | 7 | -------------------------------------------------------------------------------- /addons_external/queue_job/static/src/views/fields/job_direct_graph/job_direct_graph.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |
6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /addons_external/queue_job/__init__.py: -------------------------------------------------------------------------------- 1 | from . import controllers 2 | from . import fields 3 | from . import models 4 | from . import wizards 5 | from . import jobrunner 6 | from .post_init_hook import post_init_hook 7 | from .post_load import post_load 8 | 9 | # shortcuts 10 | from .job import identity_exact 11 | -------------------------------------------------------------------------------- /addons_external/queue_job/jobrunner/__main__.py: -------------------------------------------------------------------------------- 1 | import odoo 2 | 3 | from .runner import QueueJobRunner 4 | 5 | 6 | def main(): 7 | odoo.tools.config.parse_config() 8 | runner = QueueJobRunner.from_environ_or_config() 9 | runner.run() 10 | 11 | 12 | if __name__ == "__main__": 13 | main() 14 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/__init__.py: -------------------------------------------------------------------------------- 1 | from . import test_runner_channels 2 | from . import test_runner_runner 3 | from . import test_delayable 4 | from . import test_json_field 5 | from . import test_model_job_channel 6 | from . import test_model_job_function 7 | from . import test_queue_job_protected_write 8 | from . import test_wizards 9 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/HISTORY.md: -------------------------------------------------------------------------------- 1 | ## Next 2 | 3 | - \[ADD\] Run jobrunner as a worker process instead of a thread in the 4 | main process (when running with --workers \> 0) 5 | - \[REF\] `@job` and `@related_action` deprecated, any method can be 6 | delayed, and configured using `queue.job.function` records 7 | - \[MIGRATION\] from 13.0 branched at rev. 
e24ff4b 8 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_runner_runner.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | # pylint: disable=odoo-addons-relative-import 5 | # we are testing, we want to test as if we were an external consumer of the API 6 | from odoo.addons.queue_job.jobrunner import runner 7 | 8 | from .common import load_doctests 9 | 10 | load_tests = load_doctests(runner) 11 | -------------------------------------------------------------------------------- /addons_external/queue_job/models/ir_model_fields.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Camptocamp 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import fields, models 5 | 6 | 7 | class IrModelFields(models.Model): 8 | _inherit = "ir.model.fields" 9 | 10 | ttype = fields.Selection( 11 | selection_add=[("job_serialized", "Job Serialized")], 12 | ondelete={"job_serialized": "cascade"}, 13 | ) 14 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_runner_channels.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2016 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | # pylint: disable=odoo-addons-relative-import 5 | # we are testing, we want to test as if we were an external consumer of the API 6 | from odoo.addons.queue_job.jobrunner import channels 7 | 8 | from .common import load_doctests 9 | 10 | load_tests = load_doctests(channels) 11 | -------------------------------------------------------------------------------- /heimdall/files/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ~ Copyright © 2024 Mountrix.com Company Limited. All Rights Reserved. 4 | # 5 | 6 | set -e 7 | 8 | DOCKER_ENTRYPOINT_SLEEP=${DOCKER_ENTRYPOINT_SLEEP:-2} 9 | 10 | 11 | sleep "${DOCKER_ENTRYPOINT_SLEEP}" 2>/dev/null || true 12 | 13 | eval "echo \"$0 ${action} (${HOSTNAME})\"" 14 | 15 | echo "Heimdall configuration must already be done."
16 | 17 | 18 | /bin/bash -c "eval /opt/heimdall/heimdall-entrypoint.sh" 19 | -------------------------------------------------------------------------------- /addons_external/session_redis/strtobool.py: -------------------------------------------------------------------------------- 1 | _MAP = { 2 | "y": True, 3 | "yes": True, 4 | "t": True, 5 | "true": True, 6 | "on": True, 7 | "1": True, 8 | "n": False, 9 | "no": False, 10 | "f": False, 11 | "false": False, 12 | "off": False, 13 | "0": False, 14 | } 15 | 16 | 17 | def strtobool(value): 18 | try: 19 | return _MAP[str(value).lower()] 20 | except KeyError as error: 21 | raise ValueError('"{}" is not a valid bool value'.format(value)) from error 22 | -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_jobs_to_done.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013-2020 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import models 5 | 6 | 7 | class SetJobsToDone(models.TransientModel): 8 | _inherit = "queue.requeue.job" 9 | _name = "queue.jobs.to.done" 10 | _description = "Set all selected jobs to done" 11 | 12 | def set_done(self): 13 | jobs = self.job_ids 14 | jobs.button_done() 15 | return {"type": "ir.actions.act_window_close"} 16 | -------------------------------------------------------------------------------- /bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "HOSTNAME:" $HOSTNAME 3 | echo "DB HOST:" ${DB_PORT_5432_TCP_ADDR} 4 | echo "Override default user admin password" 5 | echo "ODOO_USER_ADMIN_DEFAULT_PASSWORD:" $ODOO_USER_ADMIN_DEFAULT_PASSWORD 6 | 7 | ODOO_PATH=/usr/lib/python3/dist-packages/odoo 8 | xmlstarlet edit --inplace --update '//odoo/data/record[@id="user_admin"]/field[@name="password"]/text()' --value "${ODOO_USER_ADMIN_DEFAULT_PASSWORD}" ${ODOO_PATH}/addons/base/data/res_users_data.xml 9 | 10 | 11 | if [ ! 
-z "$TEST" ]; then 12 | /entrypoint.sh odoo 13 | else 14 | /entrypoint.sh odoo 15 | fi -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_jobs_to_cancelled.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013-2020 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import models 5 | 6 | 7 | class SetJobsToCancelled(models.TransientModel): 8 | _inherit = "queue.requeue.job" 9 | _name = "queue.jobs.to.cancelled" 10 | _description = "Cancel all selected jobs" 11 | 12 | def set_cancelled(self): 13 | jobs = self.job_ids.filtered( 14 | lambda x: x.state in ("pending", "failed", "enqueued") 15 | ) 16 | jobs.button_cancelled() 17 | return {"type": "ir.actions.act_window_close"} 18 | -------------------------------------------------------------------------------- /addons_external/session_redis/__manifest__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2021 Camptocamp SA 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html) 3 | 4 | 5 | { 6 | "name": "Sessions in Redis", 7 | "summary": "Store web sessions in Redis", 8 | "version": "17.0.1.0", 9 | "author": "Camptocamp,Odoo Community Association (OCA)", 10 | "license": "AGPL-3", 11 | "category": "Extra Tools", 12 | "depends": ["base"], 13 | "external_dependencies": { 14 | "python": ["redis"], 15 | }, 16 | "website": "https://github.com/camptocamp/odoo-cloud-platform", 17 | "data": [], 18 | "installable": True, 19 | } 20 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | - Guewen Baconnier \<\> 2 | - Stéphane Bidoul \<\> 3 | - Matthieu Dietrich \<\> 4 | - Jos De Graeve \<\> 5 | - David Lefever \<\> 6 | - Laurent Mignon \<\> 7 | - Laetitia Gangloff \<\> 8 | - Cédric Pigeon \<\> 9 | - Tatiana Deribina \<\> 10 | - Souheil Bejaoui \<\> 11 | - Eric Antones \<\> 12 | - Simone Orsi \<\> 13 | - Nguyen Minh Chien \<\> 14 | -------------------------------------------------------------------------------- /addons_external/queue_job/post_load.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from odoo import http 4 | 5 | _logger = logging.getLogger(__name__) 6 | 7 | 8 | def post_load(): 9 | _logger.info( 10 | "Apply Request._get_session_and_dbname monkey patch to capture db" 11 | " from request with multiple databases" 12 | ) 13 | _get_session_and_dbname_orig = http.Request._get_session_and_dbname 14 | 15 | def _get_session_and_dbname(self): 16 | session, dbname = _get_session_and_dbname_orig(self) 17 | if ( 18 | not dbname 19 | and self.httprequest.path == "/queue_job/runjob" 20 | and self.httprequest.args.get("db") 21 | ): 22 | dbname = self.httprequest.args["db"] 23 | return session, dbname 24 | 25 | http.Request._get_session_and_dbname = _get_session_and_dbname 26 | -------------------------------------------------------------------------------- /heimdall/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # ~ Copyright © 2024 Mountrix.com Company Limited. All Rights Reserved. 
3 | # 4 | FROM ubuntu:23.10 5 | 6 | # for the Heimdall UI, only needed for the management server 7 | EXPOSE 8087 8 | 9 | # Postgres Port 10 | EXPOSE 5432 11 | 12 | ENV PYTHON_J2CLI_VER=0.3.10 13 | 14 | RUN apt-get update && \ 15 | apt-get install -y --no-install-recommends \ 16 | curl \ 17 | wget \ 18 | vim \ 19 | htop \ 20 | python3-pip && \ 21 | apt-get clean && \ 22 | rm -rf /var/lib/apt/lists/* 23 | 24 | RUN set -x && \ 25 | pip3 install --no-cache-dir \ 26 | j2cli==${PYTHON_J2CLI_VER} --break-system-packages 27 | 28 | RUN bash -c 'bash <(curl https://s3.amazonaws.com/s3.heimdalldata.com/hdinstall.sh) server' 29 | 30 | COPY files / 31 | 32 | ENTRYPOINT ["/docker-entrypoint.sh"] 33 | -------------------------------------------------------------------------------- /addons_external/queue_job/security/ir.model.access.csv: -------------------------------------------------------------------------------- 1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink 2 | access_queue_job_manager,queue job manager,queue_job.model_queue_job,queue_job.group_queue_job_manager,1,1,1,1 3 | access_queue_job_function_manager,queue job functions manager,queue_job.model_queue_job_function,queue_job.group_queue_job_manager,1,1,1,1 4 | access_queue_job_channel_manager,queue job channel manager,queue_job.model_queue_job_channel,queue_job.group_queue_job_manager,1,1,1,1 5 | access_queue_requeue_job,queue requeue job manager,queue_job.model_queue_requeue_job,queue_job.group_queue_job_manager,1,1,1,1 6 | access_queue_jobs_to_done,queue jobs to done manager,queue_job.model_queue_jobs_to_done,queue_job.group_queue_job_manager,1,1,1,1 7 | access_queue_jobs_to_cancelled,queue jobs to cancelled manager,queue_job.model_queue_jobs_to_cancelled,queue_job.group_queue_job_manager,1,1,1,1 8 | -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_requeue_job.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013-2020 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import fields, models 5 | 6 | 7 | class QueueRequeueJob(models.TransientModel): 8 | _name = "queue.requeue.job" 9 | _description = "Wizard to requeue a selection of jobs" 10 | 11 | def _default_job_ids(self): 12 | res = False 13 | context = self.env.context 14 | if context.get("active_model") == "queue.job" and context.get("active_ids"): 15 | res = context["active_ids"] 16 | return res 17 | 18 | job_ids = fields.Many2many( 19 | comodel_name="queue.job", string="Jobs", default=lambda r: r._default_job_ids() 20 | ) 21 | 22 | def requeue(self): 23 | jobs = self.job_ids 24 | jobs.requeue() 25 | return {"type": "ir.actions.act_window_close"} 26 | -------------------------------------------------------------------------------- /addons_external/queue_job/views/queue_job_menus.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 12 | 13 | 19 | 20 | 26 | 27 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/ROADMAP.md: -------------------------------------------------------------------------------- 1 | - After creating a new database or installing `queue_job` on an existing 2 | database, Odoo must be restarted for the runner to detect it. 3 | - When Odoo shuts down normally, it waits for running jobs to finish. 4 | However, when the Odoo server crashes or is otherwise force-stopped, 5 | running jobs are interrupted while the runner has no chance to know 6 | they have been aborted. In such situations, jobs may remain in 7 | `started` or `enqueued` state after the Odoo server is halted. Since 8 | the runner has no way to know if they are actually running or not, and 9 | does not know for sure if it is safe to restart the jobs, it does not 10 | attempt to restart them automatically. Such stale jobs therefore fill 11 | the running queue and prevent other jobs from starting. You must therefore 12 | requeue them manually, either from the Jobs view, or by running the 13 | following SQL statement *before starting Odoo*: 14 | 15 | ``` sql 16 | update queue_job set state='pending' where state in ('started', 'enqueued') 17 | ``` 18 |
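The same cleanup can also be done from an `odoo shell` session instead of raw SQL. This is only a sketch, not part of the addon; it uses the `requeue()` method that the Requeue Jobs wizard calls on `queue.job` records:

``` python
# Run in an `odoo shell` on the affected database, before starting the server.
stale = env["queue.job"].search([("state", "in", ("started", "enqueued"))])
stale.requeue()  # same method the "Requeue Jobs" wizard uses; jobs go back to 'pending'
env.cr.commit()
```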
-------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_queue_job_protected_write.py: -------------------------------------------------------------------------------- 1 | # copyright 2020 Camptocamp 2 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import exceptions 5 | from odoo.tests import common 6 | 7 | 8 | class TestJobWriteProtected(common.TransactionCase): 9 | def test_create_error(self): 10 | with self.assertRaises(exceptions.AccessError): 11 | self.env["queue.job"].create( 12 | {"uuid": "test", "model_name": "res.partner", "method_name": "write"} 13 | ) 14 | 15 | def test_write_protected_field_error(self): 16 | job_ = self.env["res.partner"].with_delay().create({"name": "test"}) 17 | db_job = job_.db_record() 18 | with self.assertRaises(exceptions.AccessError): 19 | db_job.method_name = "unlink" 20 | 21 | def test_write_allow_no_protected_field_error(self): 22 | job_ = self.env["res.partner"].with_delay().create({"name": "test"}) 23 | db_job = job_.db_record() 24 | db_job.priority = 30 25 | self.assertEqual(db_job.priority, 30) 26 | -------------------------------------------------------------------------------- /addons_external/queue_job/security/security.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Job Queue 6 | 20 7 | 8 | 9 | Job Queue Manager 10 | 11 | 15 | 16 | 17 | 18 | 19 | Job Queue multi-company 20 | 21 | 22 | ['|', ('company_id', '=', False), ('company_id', 'in', company_ids)] 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /addons_external/queue_job/post_init_hook.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 ACSONE SA/NV 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 
3 | 4 | import logging 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def post_init_hook(env): 10 | # this is the trigger that sends notifications when jobs change 11 | logger.info("Create queue_job_notify trigger") 12 | env.cr.execute( 13 | """ 14 | DROP TRIGGER IF EXISTS queue_job_notify ON queue_job; 15 | CREATE OR REPLACE 16 | FUNCTION queue_job_notify() RETURNS trigger AS $$ 17 | BEGIN 18 | IF TG_OP = 'DELETE' THEN 19 | IF OLD.state != 'done' THEN 20 | PERFORM pg_notify('queue_job', OLD.uuid); 21 | END IF; 22 | ELSE 23 | PERFORM pg_notify('queue_job', NEW.uuid); 24 | END IF; 25 | RETURN NULL; 26 | END; 27 | $$ LANGUAGE plpgsql; 28 | CREATE TRIGGER queue_job_notify 29 | AFTER INSERT OR UPDATE OR DELETE 30 | ON queue_job 31 | FOR EACH ROW EXECUTE PROCEDURE queue_job_notify(); 32 | """ 33 | ) 34 | -------------------------------------------------------------------------------- /addons_external/queue_job/__manifest__.py: -------------------------------------------------------------------------------- 1 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 2 | 3 | { 4 | "name": "Job Queue", 5 | "version": "17.0.1.0.0", 6 | "author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)", 7 | "website": "https://github.com/OCA/queue", 8 | "license": "LGPL-3", 9 | "category": "Generic Modules", 10 | "depends": ["mail", "base_sparse_field", "web"], 11 | "external_dependencies": {"python": ["requests"]}, 12 | "data": [ 13 | "security/security.xml", 14 | "security/ir.model.access.csv", 15 | "views/queue_job_views.xml", 16 | "views/queue_job_channel_views.xml", 17 | "views/queue_job_function_views.xml", 18 | "wizards/queue_jobs_to_done_views.xml", 19 | "wizards/queue_jobs_to_cancelled_views.xml", 20 | "wizards/queue_requeue_job_views.xml", 21 | "views/queue_job_menus.xml", 22 | "data/queue_data.xml", 23 | "data/queue_job_function_data.xml", 24 | ], 25 | "assets": { 26 | "web.assets_backend": [ 27 | "/queue_job/static/src/views/**/*", 28 | ], 29 | }, 30 | "installable": True, 31 | "development_status": "Mature", 32 | "maintainers": ["guewen"], 33 | "post_init_hook": "post_init_hook", 34 | "post_load": "post_load", 35 | } 36 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/CONFIGURE.md: -------------------------------------------------------------------------------- 1 | - Using environment variables and command line: 2 | - Adjust environment variables (optional): 3 | - `ODOO_QUEUE_JOB_CHANNELS=root:4` or any other channels 4 | configuration. The default is `root:1` 5 | - if `xmlrpc_port` is not set: `ODOO_QUEUE_JOB_PORT=8069` 6 | - Start Odoo with `--load=web,queue_job` and `--workers` greater than 7 | 1.[^1] 8 | - Using the Odoo configuration file: 9 | 10 | ``` ini 11 | [options] 12 | (...) 13 | workers = 6 14 | server_wide_modules = web,queue_job 15 | 16 | (...) 17 | [queue_job] 18 | channels = root:2 19 | ``` 20 | 21 | - Confirm the runner is starting correctly by checking the odoo log 22 | file: 23 | 24 | ``` 25 | ...INFO...queue_job.jobrunner.runner: starting 26 | ...INFO...queue_job.jobrunner.runner: initializing database connections 27 | ...INFO...queue_job.jobrunner.runner: queue job runner ready for db 28 | ...INFO...queue_job.jobrunner.runner: database connections ready 29 | ``` 30 | 31 | - Create jobs (eg using `base_import_async`) and observe they start 32 | immediately and in parallel. 
33 | - Tip: to enable debug logging for the queue job, use 34 | `--log-handler=odoo.addons.queue_job:DEBUG` 35 | 36 | [^1]: It works with the threaded Odoo server too, although this way of 37 | running Odoo is obviously not for production purposes. 38 | -------------------------------------------------------------------------------- /addons_external/queue_job/exception.py: -------------------------------------------------------------------------------- 1 | # Copyright 2012-2016 Camptocamp 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | 5 | class BaseQueueJobError(Exception): 6 | """Base queue job error""" 7 | 8 | 9 | class JobError(BaseQueueJobError): 10 | """A job had an error""" 11 | 12 | 13 | class NoSuchJobError(JobError): 14 | """The job does not exist.""" 15 | 16 | 17 | class FailedJobError(JobError): 18 | """A job had an error that has to be resolved.""" 19 | 20 | 21 | class RetryableJobError(JobError): 22 | """A job had an error but can be retried. 23 | 24 | The job will be retried after the given number of seconds. If seconds is 25 | empty, it will be retried according to the ``retry_pattern`` of the job or 26 | by :const:`odoo.addons.queue_job.job.RETRY_INTERVAL` if nothing is defined. 27 | 28 | If ``ignore_retry`` is True, the retry counter will not be increased. 29 | """ 30 | 31 | def __init__(self, msg, seconds=None, ignore_retry=False): 32 | super().__init__(msg) 33 | self.seconds = seconds 34 | self.ignore_retry = ignore_retry 35 | 36 | 37 | # TODO: remove support of NothingToDo: too dangerous 38 | class NothingToDoJob(JobError): 39 | """The Job has nothing to do.""" 40 | 41 | 42 | class ChannelNotFound(BaseQueueJobError): 43 | """A channel could not be found""" 44 | -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_requeue_job_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Requeue Jobs 6 | queue.requeue.job 7 | 8 |
9 | 10 | 11 | 12 |
13 |
21 |
22 |
23 |
24 | 25 | 26 | Requeue Jobs 27 | queue.requeue.job 28 | form 29 | 30 | new 31 | 32 | 33 | 34 |
35 | -------------------------------------------------------------------------------- /addons_external/queue_job/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Camptocamp 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | import logging 5 | import os 6 | 7 | _logger = logging.getLogger(__name__) 8 | 9 | 10 | def must_run_without_delay(env): 11 | """Return True if jobs have to run immediately. 12 | 13 | :param env: `odoo.api.Environment` instance 14 | """ 15 | # TODO: drop in v17 16 | if os.getenv("TEST_QUEUE_JOB_NO_DELAY"): 17 | _logger.warning( 18 | "`TEST_QUEUE_JOB_NO_DELAY` env var found. NO JOB scheduled. " 19 | "Note that this key is deprecated: please use `QUEUE_JOB__NO_DELAY`" 20 | ) 21 | return True 22 | 23 | if os.getenv("QUEUE_JOB__NO_DELAY"): 24 | _logger.warning("`QUEUE_JOB__NO_DELAY` env var found. NO JOB scheduled.") 25 | return True 26 | 27 | # TODO: drop in v17 28 | deprecated_keys = ("_job_force_sync", "test_queue_job_no_delay") 29 | for key in deprecated_keys: 30 | if env.context.get(key): 31 | _logger.warning( 32 | "`%s` ctx key found. NO JOB scheduled. " 33 | "Note that this key is deprecated: please use `queue_job__no_delay`", 34 | key, 35 | ) 36 | return True 37 | 38 | if env.context.get("queue_job__no_delay"): 39 | _logger.warning("`queue_job__no_delay` ctx key found. NO JOB scheduled.") 40 | return True 41 | -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_jobs_to_cancelled_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Cancel Jobs 6 | queue.jobs.to.cancelled 7 | 8 |
9 | 10 | 11 | 12 |
13 |
21 |
22 |
23 |
24 | 25 | 26 | Cancel jobs 27 | queue.jobs.to.cancelled 28 | form 29 | 30 | new 31 | 32 | 33 | 34 |
35 | -------------------------------------------------------------------------------- /addons_external/queue_job/wizards/queue_jobs_to_done_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Set Jobs to Done 6 | queue.jobs.to.done 7 | 8 |
9 | 10 | 11 | 12 |
13 |
21 |
22 |
23 |
24 | 25 | 26 | Set jobs to done 27 | queue.jobs.to.done 28 | form 29 | 30 | new 31 | 32 | 33 | 34 |
35 | -------------------------------------------------------------------------------- /addons_external/session_redis/json_encoding.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2020 Camptocamp SA 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html) 3 | 4 | import json 5 | from datetime import date, datetime 6 | 7 | import dateutil 8 | 9 | 10 | class SessionEncoder(json.JSONEncoder): 11 | """Encode date/datetime objects 12 | 13 | So that we can later recompose them if they were stored in the session 14 | """ 15 | 16 | def default(self, obj): 17 | if isinstance(obj, datetime): 18 | return {"_type": "datetime_isoformat", "value": obj.isoformat()} 19 | elif isinstance(obj, date): 20 | return {"_type": "date_isoformat", "value": obj.isoformat()} 21 | elif isinstance(obj, set): 22 | return {"_type": "set", "value": tuple(obj)} 23 | return json.JSONEncoder.default(self, obj) 24 | 25 | 26 | class SessionDecoder(json.JSONDecoder): 27 | """Decode json, recomposing recordsets and date/datetime""" 28 | 29 | def __init__(self, *args, **kwargs): 30 | super().__init__(object_hook=self.object_hook, *args, **kwargs) 31 | 32 | def object_hook(self, obj): 33 | if "_type" not in obj: 34 | return obj 35 | type_ = obj["_type"] 36 | if type_ == "datetime_isoformat": 37 | return dateutil.parser.parse(obj["value"]) 38 | elif type_ == "date_isoformat": 39 | return dateutil.parser.parse(obj["value"]).date() 40 | elif type_ == "set": 41 | return set(obj["value"]) 42 | return obj 43 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_wizards.py: -------------------------------------------------------------------------------- 1 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 2 | from odoo.tests import common 3 | 4 | 5 | class TestWizards(common.TransactionCase): 6 | def setUp(self): 7 | super().setUp() 8 | self.job = ( 9 | self.env["queue.job"] 10 | .with_context( 11 | _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL, 12 | ) 13 | .create( 14 | { 15 | "uuid": "test", 16 | "user_id": self.env.user.id, 17 | "state": "failed", 18 | "model_name": "queue.job", 19 | "method_name": "write", 20 | "args": (), 21 | } 22 | ) 23 | ) 24 | 25 | def _wizard(self, model_name): 26 | return ( 27 | self.env[model_name] 28 | .with_context( 29 | active_model=self.job._name, 30 | active_ids=self.job.ids, 31 | ) 32 | .create({}) 33 | ) 34 | 35 | def test_01_requeue(self): 36 | wizard = self._wizard("queue.requeue.job") 37 | wizard.requeue() 38 | self.assertEqual(self.job.state, "pending") 39 | 40 | def test_02_cancel(self): 41 | wizard = self._wizard("queue.jobs.to.cancelled") 42 | wizard.set_cancelled() 43 | self.assertEqual(self.job.state, "cancelled") 44 | 45 | def test_03_done(self): 46 | wizard = self._wizard("queue.jobs.to.done") 47 | wizard.set_done() 48 | self.assertEqual(self.job.state, "done") 49 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/DESCRIPTION.md: -------------------------------------------------------------------------------- 1 | This addon adds an integrated Job Queue to Odoo. 2 | 3 | It allows postponing method calls so that they are executed asynchronously. 4 | 5 | Jobs are executed in the background by a `Jobrunner`, in their own 6 | transaction. 7 | 8 | Example: 9 | 10 | ``` python 11 | from odoo import models, fields, api 12 | 13 | class MyModel(models.Model): 14 | _name = 'my.model' 15 | 16 | def my_method(self, a, k=None): 17 | _logger.info('executed with a: %s and k: %s', a, k) 18 | 19 | 20 | class MyOtherModel(models.Model): 21 | _name = 'my.other.model' 22 | 23 | def button_do_stuff(self): 24 | self.env['my.model'].with_delay().my_method('a', k=2) 25 | ``` 26 | 27 | In the snippet of code above, when we call `button_do_stuff`, a job 28 | **capturing the method and arguments** will be postponed. It will be 29 | executed as soon as the Jobrunner has a free bucket, which can be 30 | instantaneous if no other job is running. 31 | 32 | Features: 33 | 34 | - Views for jobs, jobs are stored in PostgreSQL 35 | - Jobrunner: execute the jobs, highly efficient thanks to PostgreSQL's 36 | NOTIFY 37 | - Channels: give a capacity for the root channel and its sub-channels 38 | and segregate jobs in them. This allows, for instance, restricting heavy 39 | jobs to run one at a time while small ones run 40 | 4 at a time. 41 | - Retries: ability to retry jobs by raising a specific type of exception (see the sketch after this list) 42 | - Retry Pattern: for the first 3 tries, retry after 10 seconds; for the next 5 43 | tries, retry after 1 minute; ... 44 | - Job properties: priorities, estimated time of arrival (ETA), custom 45 | description, number of retries 46 | - Related Actions: link an action on the job view, such as open the 47 | record concerned by the job 48 |
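For the retry feature, a job method raises `RetryableJobError` (defined in this module's `exception.py`) when it hits a transient failure. A minimal sketch, not taken from the addon itself; `_call_remote_api` is a hypothetical helper:

``` python
from odoo import models

from odoo.addons.queue_job.exception import RetryableJobError


class MyModel(models.Model):
    _inherit = "my.model"

    def sync_with_remote(self):
        try:
            self._call_remote_api()  # hypothetical flaky external call
        except ConnectionError as exc:
            # Ask the jobrunner to retry this job in 60 seconds
            # instead of marking it as failed.
            raise RetryableJobError(str(exc), seconds=60) from exc
```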
-------------------------------------------------------------------------------- /addons_external/session_redis/README.rst: -------------------------------------------------------------------------------- 1 | Sessions in Redis 2 | ================= 3 | 4 | This addon allows storing the web sessions in Redis. 5 | 6 | Configuration 7 | ------------- 8 | 9 | The storage of sessions in Redis is activated using environment variables. 10 | 11 | * ``ODOO_SESSION_REDIS`` has to be ``1`` or ``true`` 12 | * ``ODOO_SESSION_REDIS_HOST`` is the redis hostname (default is ``localhost``) 13 | * ``ODOO_SESSION_REDIS_PORT`` is the redis port (default is ``6379``) 14 | * ``ODOO_SESSION_REDIS_PASSWORD`` is the password for the AUTH command 15 | (optional) 16 | * ``ODOO_SESSION_REDIS_URL`` is an alternative way to define the Redis server 17 | address. It's the preferred way when you're using the ``rediss://`` protocol. 18 | * ``ODOO_SESSION_REDIS_PREFIX`` is the prefix for the session keys (optional) 19 | * ``ODOO_SESSION_REDIS_EXPIRATION`` is the time in seconds before expiration of 20 | the sessions (default is 7 days) 21 | * ``ODOO_SESSION_REDIS_EXPIRATION_ANONYMOUS`` is the time in seconds before expiration of 22 | the anonymous sessions (default is 3 hours) 23 | 24 | 25 | The keys are set to ``session:<sid>``. 26 | When a prefix is defined, the keys are ``session:<prefix>:<sid>`` 27 | 28 | This addon must be added to the server-wide addons (``--load`` option): 29 | 30 | ``--load=web,session_redis`` 31 | 32 | Limitations 33 | ----------- 34 | 35 | * The server has to be restarted in order for the sessions to be stored in 36 | Redis. 37 | * All the users will have to log in again as their previous sessions will be 38 | dropped. 39 | * The addon monkey-patches ``odoo.http.Root.session_store`` with a custom 40 | method when the Redis mode is active, so incompatibilities with other addons 41 | are possible if they do the same.
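For reference, sessions are serialized with the JSON helpers from ``json_encoding.py`` shown above, so ``datetime``, ``date`` and ``set`` values survive the round trip through Redis. A minimal sketch (assuming the addon is importable as ``odoo.addons.session_redis``)::

    import json
    from datetime import datetime

    from odoo.addons.session_redis.json_encoding import (
        SessionDecoder,
        SessionEncoder,
    )

    payload = {"login_time": datetime(2024, 1, 1, 12, 0), "uids": {1, 2}}
    raw = json.dumps(payload, cls=SessionEncoder)   # the string stored in Redis
    restored = json.loads(raw, cls=SessionDecoder)  # types are recomposed
    assert restored == payload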
42 | -------------------------------------------------------------------------------- /config/odoo.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2022 Patagon IO SAS. All rights reserved. 3 | # 4 | 5 | [options] 6 | addons_path = /mnt/mountrix/addons/,/mnt/odoo/addons,/mnt/odoo/addons_customer,/mnt/odoo/addons_external 7 | admin_passwd = admin 8 | csv_internal_sep = , 9 | data_dir = /var/lib/odoo 10 | db_host = heimdallproxy1 11 | db_maxconn = 64 12 | db_name = odoo-17 13 | db_password = odoo 14 | db_port = 5432 15 | db_sslmode = prefer 16 | db_template = template0 17 | db_user = odoo 18 | dbfilter = 19 | demo = {} 20 | email_from = False 21 | geoip_database = /usr/share/GeoIP/GeoLite2-City.mmdb 22 | http_enable = True 23 | http_interface = 24 | http_port = 8069 25 | import_partial = 26 | limit_memory_hard = 2684354560 27 | limit_memory_soft = 2147483648 28 | limit_request = 8192 29 | limit_time_cpu = 4000 30 | limit_time_real = 6000 31 | limit_time_real_cron = -1 32 | list_db = True 33 | log_db = False 34 | log_db_level = warning 35 | log_handler = :INFO 36 | log_level = info 37 | logfile = None 38 | gevent_port = 8072 39 | max_cron_threads = 0 40 | osv_memory_age_limit = False 41 | osv_memory_count_limit = False 42 | pg_path = 43 | pidfile = 44 | proxy_mode = True 45 | reportgz = False 46 | screencasts = 47 | screenshots = /tmp/odoo_tests 48 | server_wide_modules = base,web,session_redis,queue_job 49 | smtp_password = False 50 | smtp_port = 25 51 | smtp_server = smtp 52 | smtp_ssl = False 53 | smtp_user = False 54 | syslog = False 55 | test_enable = False 56 | test_file = False 57 | test_tags = None 58 | transient_age_limit = 1.0 59 | translate_modules = ['all'] 60 | unaccent = False 61 | upgrade_path = 62 | without_demo = 63 | workers = 4 64 | running_env=default 65 | 66 | [queue_job] 67 | channels = root:2 68 | 69 | [domain_alias] 70 | external_email_server_default = true 71 | alias_domain = False 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /addons_external/queue_job/data/queue_data.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Jobs Garbage Collector 6 | 5 7 | minutes 8 | -1 9 | 10 | code 11 | model.requeue_stuck_jobs() 12 | 13 | 14 | 15 | Job failed 16 | queue.job 17 | 18 | 19 | 20 | AutoVacuum Job Queue 21 | 22 | 23 | 24 | 1 25 | days 26 | -1 27 | 28 | code 29 | model.autovacuum() 30 | 31 | 32 | 33 | 34 | root 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /addons_external/queue_job/static/description/icon.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /addons_external/queue_job/views/queue_job_channel_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | queue.job.channel.form 6 | queue.job.channel 7 | 8 |
9 | 10 | 15 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 |
28 |
29 | 30 | 31 | queue.job.channel.tree 32 | queue.job.channel 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | queue.job.channel.search 42 | queue.job.channel 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | Channels 54 | queue.job.channel 55 | tree,form 56 | {} 57 | 58 | 59 | 60 |
61 | -------------------------------------------------------------------------------- /addons_external/queue_job/views/queue_job_function_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | queue.job.function.form 6 | queue.job.function 7 | 8 |
9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 |
18 |
19 |
20 | 21 | 22 | queue.job.function.tree 23 | queue.job.function 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | queue.job.function.search 34 | queue.job.function 35 | 36 | 37 | 38 | 39 | 40 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | Job Functions 52 | queue.job.function 53 | tree,form 54 | {} 55 | 56 | 57 | 58 |
59 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Odoo Heimdall Proxy Development Environment 2 | 3 | Odoo 4 | 5 | [Heimdall](https://www.heimdalldata.com/odoo/) is a database proxy specially designed for [Odoo](https://www.odoo.com). 6 | 7 | ## Heimdall Architecture with Odoo, Redis and Postgres 8 | 9 | ![](./readme/odoo-heimdall-proxy-diagram.png) 10 | 11 | ![](./readme/odoo-heimdall-features.png) 12 | 13 | ### Heimdall Manager Dashboard 14 | 15 | - Cache hit rates 16 | - Replica Usage 17 | - Average Response Time 18 | 19 | ![](./readme/odoo-heimdall-metrics.png) 20 | 21 | ### Heimdall Manager status overview 22 | 23 | ![](./readme/odoo-heimdall-status.png) 24 | 25 | ### Docker Desktop containers overview 26 | 27 | ![](./readme/odoo-heimdall-docker-desktop.png) 28 | 29 | ### Project Architecture 30 | 31 | . 32 | ├── .vscode # Visual Studio Debug configuration 33 | ├── addons # Odoo Project custom addons folder 34 | ├── addons_customer # Odoo Project customer addons 35 | ├── addons_external # Odoo Project third-party addons 36 | ├── config # Odoo config 37 | ├── heimdall # Heimdall 38 | ├── .gitignore 39 | ├── Dockerfile # Odoo Dockerfile 40 | ├── docker-compose.yml # setup for local docker-compose run 41 | ├── requirements.txt # Odoo external python libraries 42 | └── README.md 43 | 44 | ## Development workflow 45 | 46 | - Local environment 47 | - Ideal local development workflow 48 | 49 | ## Local environment 50 | 51 | To set up your local environment you need the following tools: 52 | 53 | - Docker installed on your machine 54 | - An IDE like Visual Studio Code or PyCharm 55 | 56 | #### Docker & docker-compose 57 | 58 | Check the official Docker page to install Docker and docker-compose on your machine; the exact steps depend on your OS. 59 | 60 | #### Quick Instructions 61 | 62 | ```bash 63 | docker-compose up 64 | ``` 65 | 66 | Access Heimdall Manager 67 | http://127.0.0.1:8087 68 | 69 | user: admin 70 | password: admin 71 | 72 | Go to Virtual DB, import Mountrix-vdb.json, and commit. 73 | 74 | Stop docker-compose with Ctrl+C and restart it: 75 | 76 | ```bash 77 | docker-compose up 78 | ``` 79 | 80 | Access Odoo 81 | http://127.0.0.1/ 82 | 83 | user: admin 84 | password: admin 85 | 86 | Install any app and use Odoo. You'll see the Heimdall proxy cache hit rates and replica usage. 87 |
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM odoo:17.0 2 | LABEL maintainer="Agustin Wisky. " 3 | 4 | USER root 5 | # Mount Customize /mnt/"addons" folders for users addons 6 | RUN apt-get update && apt-get install --no-install-recommends -y \ 7 | # openssh-server \ 8 | git && \ 9 | apt-get clean && \ 10 | rm -rf /var/lib/apt/lists/* 11 | 12 | # RUN mkdir /var/run/sshd 13 | 14 | WORKDIR / 15 | 16 | # #allow remote access 17 | # COPY ./rsa/id_rsa_remote.pub /root/.ssh/id_rsa_remote.pub 18 | # RUN cat /root/.ssh/id_rsa_remote.pub > /root/.ssh/authorized_keys 19 | 20 | # # Create known_hosts 21 | # RUN touch /root/.ssh/known_hosts && ssh-keyscan github.com >> /root/.ssh/known_hosts 22 | # # Create known_hosts and add github key 23 | # RUN printf "Host github.com\n\tStrictHostKeyChecking no\n" >> /root/.ssh/config \ 24 | # && chmod -R 600 /root/.ssh/ 25 | 26 | 27 | RUN mkdir -p /mnt/mountrix/addons 28 | 29 | ARG ODOO_USER_ADMIN_DEFAULT_PASSWORD 30 | 31 | RUN mkdir -p /mnt/odoo 32 | 33 | # Update aptitude with new repo 34 | RUN apt-get update \ 35 | && apt-get install --no-install-recommends -y \ 36 | procps\ 37 | vim\ 38 | xmlstarlet && \ 39 | apt-get clean && \ 40 | rm -rf /var/lib/apt/lists/* 41 | 42 | # Install Python basics 43 | RUN apt-get update && apt-get install -y --no-install-recommends \ 44 | apt-utils\ 45 | python3-dev\ 46 | python3-wheel\ 47 | wget\ 48 | less\ 49 | j2cli &&\ 50 | apt-get clean && \ 51 | rm -rf /var/lib/apt/lists/* 52 | 53 | # Install debugpy if you want to debug python code 54 | RUN pip3 install --no-cache-dir debugpy 55 | 56 | #install ohmybash 57 | RUN bash -c "$(wget --progress=dot:giga https://raw.githubusercontent.com/ohmybash/oh-my-bash/master/tools/install.sh -O -)" 58 | 59 | COPY ./requirements.txt /mnt/odoo/ 60 | 61 | RUN pip3 install --no-cache-dir -r /mnt/odoo/requirements.txt 62 | 63 | COPY ./addons /mnt/odoo/addons 64 | COPY bootstrap.sh /etc/bootstrap.sh 65 | RUN chmod a+x /etc/bootstrap.sh 66 | 67 | COPY ./entrypoint.sh / 68 | RUN chmod a+x /entrypoint.sh 69 | 70 | COPY ./addons_external /mnt/odoo/addons_external 71 | COPY ./addons_customer /mnt/odoo/addons_customer 72 | 73 | RUN chown -R odoo /mnt/* && \ 74 | chown -R odoo /var/lib/odoo 75 | 76 | RUN mkdir -p /run/sshd && chmod 0755 /run/sshd 77 | COPY ./config/odoo.conf.j2 /etc/odoo/odoo.conf.j2 78 | 79 | EXPOSE 22 80 | EXPOSE 8888 81 | 82 | ENTRYPOINT ["/bin/sh","-c"] 83 | CMD ["/etc/bootstrap.sh"] 84 | 85 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_model_job_function.py: -------------------------------------------------------------------------------- 1 | # copyright 2020 Camptocamp 2 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from odoo import exceptions 5 | from odoo.tests import common 6 | 7 | 8 | class TestJobFunction(common.TransactionCase): 9 | def test_function_name_compute(self): 10 | function = self.env["queue.job.function"].create( 11 | {"model_id": self.env.ref("base.model_res_users").id, "method": "read"} 12 | ) 13 | self.assertEqual(function.name, "<res.users>.read") 14 | 15 | def test_function_name_inverse(self): 16 | function = self.env["queue.job.function"].create({"name": "<res.users>.read"}) 17 | self.assertEqual(function.model_id.model, "res.users") 18 | self.assertEqual(function.method, "read") 19 | 20 | def test_function_name_inverse_invalid_regex(self): 21 | with self.assertRaises(exceptions.UserError): 22 | self.env["queue.job.function"].create({"name": ".read"} 28 | ) 29 | 30 | def test_function_job_config(self): 31 | channel = self.env["queue.job.channel"].create( 32
| {"name": "foo", "parent_id": self.env.ref("queue_job.channel_root").id} 33 | ) 34 | job_function = self.env["queue.job.function"].create( 35 | { 36 | "model_id": self.env.ref("base.model_res_users").id, 37 | "method": "read", 38 | "channel_id": channel.id, 39 | "edit_retry_pattern": "{1: 2, 3: 4}", 40 | "edit_related_action": ( 41 | '{"enable": True,' 42 | ' "func_name": "related_action_foo",' 43 | ' "kwargs": {"b": 1}}' 44 | ), 45 | } 46 | ) 47 | self.assertEqual( 48 | self.env["queue.job.function"].job_config(".read"), 49 | self.env["queue.job.function"].JobConfig( 50 | channel="root.foo", 51 | retry_pattern={1: 2, 3: 4}, 52 | related_action_enable=True, 53 | related_action_func_name="related_action_foo", 54 | related_action_kwargs={"b": 1}, 55 | job_function_id=job_function.id, 56 | ), 57 | ) 58 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_model_job_channel.py: -------------------------------------------------------------------------------- 1 | # copyright 2018 Camptocamp 2 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | from psycopg2 import IntegrityError 5 | 6 | import odoo 7 | from odoo.tests import common 8 | 9 | 10 | class TestJobChannel(common.TransactionCase): 11 | def setUp(self): 12 | super().setUp() 13 | self.Channel = self.env["queue.job.channel"] 14 | self.root_channel = self.Channel.search([("name", "=", "root")]) 15 | 16 | def test_channel_new(self): 17 | channel = self.Channel.new() 18 | self.assertFalse(channel.name) 19 | self.assertFalse(channel.complete_name) 20 | 21 | def test_channel_create(self): 22 | channel = self.Channel.create( 23 | {"name": "test", "parent_id": self.root_channel.id} 24 | ) 25 | self.assertEqual(channel.name, "test") 26 | self.assertEqual(channel.complete_name, "root.test") 27 | channel2 = self.Channel.create({"name": "test", "parent_id": channel.id}) 28 | self.assertEqual(channel2.name, "test") 29 | self.assertEqual(channel2.complete_name, "root.test.test") 30 | 31 | @odoo.tools.mute_logger("odoo.sql_db") 32 | def test_channel_complete_name_uniq(self): 33 | channel = self.Channel.create( 34 | {"name": "test", "parent_id": self.root_channel.id} 35 | ) 36 | self.assertEqual(channel.name, "test") 37 | self.assertEqual(channel.complete_name, "root.test") 38 | 39 | self.Channel.create({"name": "test", "parent_id": self.root_channel.id}) 40 | 41 | # Flush process all the pending recomputations (or at least the 42 | # given field and flush the pending updates to the database. 43 | # It is normally called on commit. 44 | 45 | # The context manager 'with self.assertRaises(IntegrityError)' purposefully 46 | # not uses here due to its 'flush_all()' method inside it and exception raises 47 | # before the line 'self.env.flush_all()'. So, we are expecting an IntegrityError. 
48 | try: 49 | self.env.flush_all() 50 | except IntegrityError as ex: 51 | self.assertIn("queue_job_channel_name_uniq", ex.pgerror) 52 | else: 53 | self.fail("expected an IntegrityError on flush") 54 | 55 | def test_channel_display_name(self): 56 | channel = self.Channel.create( 57 | {"name": "test", "parent_id": self.root_channel.id} 58 | ) 59 | self.assertEqual(channel.display_name, channel.complete_name) 60 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Render the Odoo configuration template 4 | #export MODULES=`ls -ld /mnt/mountrix/addons/* |awk '{print $9}'| sed 's/$/,/g' | xargs echo | sed 's/, /,/g'` 5 | j2 --undefined /etc/odoo/odoo.conf.j2 -o /etc/odoo/odoo.conf 6 | 7 | set -e 8 | 9 | if [ -v PASSWORD_FILE ]; then 10 | PASSWORD="$(< $PASSWORD_FILE)" 11 | fi 12 | 13 | # set the postgres database host, port, user and password according to the environment 14 | # and pass them as arguments to the odoo process if not present in the config file 15 | : ${HOST:=${DB_PORT_5432_TCP_ADDR:='db'}} 16 | : ${PORT:=${DB_PORT_5432_TCP_PORT:=5432}} 17 | : ${USER:=${DB_ENV_POSTGRES_USER:=${POSTGRES_USER:='odoo'}}} 18 | : ${PASSWORD:=${DB_ENV_POSTGRES_PASSWORD:=${POSTGRES_PASSWORD:='odoo'}}}
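# Each ': ${VAR:=default}' expansion assigns the default only when the
# variable is unset, so values provided by docker-compose (for example
# DB_PORT_5432_TCP_ADDR=heimdallproxy1) take precedence over the fallbacks.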
-z "$TEST" ]; then 56 | test_modules 57 | else 58 | exec odoo "$@" "${DB_ARGS[@]}" #--log-handler=odoo.addons.queue_job:DEBUG 59 | fi 60 | } 61 | 62 | case "$1" in 63 | -- | odoo) 64 | shift 65 | if [[ "$1" == "scaffold" ]] ; then 66 | check 67 | else 68 | wait-for-psql.py ${DB_ARGS[@]} --timeout=30 69 | check 70 | fi 71 | ;; 72 | -*) 73 | wait-for-psql.py ${DB_ARGS[@]} --timeout=30 74 | check 75 | ;; 76 | *) 77 | exec "$@" 78 | esac 79 | 80 | exit 1 -------------------------------------------------------------------------------- /addons_external/session_redis/http.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2019 Camptocamp SA 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html) 3 | 4 | import logging 5 | import os 6 | 7 | from odoo import http 8 | from odoo.tools import config 9 | from odoo.tools.func import lazy_property 10 | 11 | from .session import RedisSessionStore 12 | from .strtobool import strtobool 13 | 14 | _logger = logging.getLogger(__name__) 15 | 16 | try: 17 | import redis 18 | from redis.sentinel import Sentinel 19 | except ImportError: 20 | redis = None # noqa 21 | _logger.debug("Cannot 'import redis'.") 22 | 23 | 24 | def is_true(strval): 25 | return bool(strtobool(strval or "0".lower())) 26 | 27 | 28 | sentinel_host = os.environ.get("ODOO_SESSION_REDIS_SENTINEL_HOST") 29 | sentinel_master_name = os.environ.get("ODOO_SESSION_REDIS_SENTINEL_MASTER_NAME") 30 | if sentinel_host and not sentinel_master_name: 31 | raise Exception( 32 | "ODOO_SESSION_REDIS_SENTINEL_MASTER_NAME must be defined " 33 | "when using session_redis" 34 | ) 35 | sentinel_port = int(os.environ.get("ODOO_SESSION_REDIS_SENTINEL_PORT", 26379)) 36 | host = os.environ.get("ODOO_SESSION_REDIS_HOST", "localhost") 37 | port = int(os.environ.get("ODOO_SESSION_REDIS_PORT", 6379)) 38 | prefix = os.environ.get("ODOO_SESSION_REDIS_PREFIX") 39 | url = os.environ.get("ODOO_SESSION_REDIS_URL") 40 | password = os.environ.get("ODOO_SESSION_REDIS_PASSWORD") 41 | expiration = os.environ.get("ODOO_SESSION_REDIS_EXPIRATION") 42 | anon_expiration = os.environ.get("ODOO_SESSION_REDIS_EXPIRATION_ANONYMOUS") 43 | 44 | 45 | @lazy_property 46 | def session_store(self): 47 | if sentinel_host: 48 | sentinel = Sentinel([(sentinel_host, sentinel_port)], password=password) 49 | redis_client = sentinel.master_for(sentinel_master_name) 50 | elif url: 51 | redis_client = redis.from_url(url) 52 | else: 53 | redis_client = redis.Redis(host=host, port=port, password=password) 54 | return RedisSessionStore( 55 | redis=redis_client, 56 | prefix=prefix, 57 | expiration=expiration, 58 | anon_expiration=anon_expiration, 59 | session_class=http.Session, 60 | ) 61 | 62 | 63 | def purge_fs_sessions(path): 64 | for fname in os.listdir(path): 65 | path = os.path.join(path, fname) 66 | try: 67 | os.unlink(path) 68 | except OSError: 69 | _logger.warning("OS Error during purge of redis sessions.") 70 | 71 | 72 | if is_true(os.environ.get("ODOO_SESSION_REDIS")): 73 | if sentinel_host: 74 | _logger.debug( 75 | "HTTP sessions stored in Redis with prefix '%s'. 
" 76 | "Using Sentinel on %s:%s", 77 | prefix or "", 78 | sentinel_host, 79 | sentinel_port, 80 | ) 81 | else: 82 | _logger.debug( 83 | "HTTP sessions stored in Redis with prefix '%s' on " "%s:%s", 84 | prefix or "", 85 | host, 86 | port, 87 | ) 88 | http.Application.session_store = session_store 89 | # clean the existing sessions on the file system 90 | purge_fs_sessions(config.session_dir) 91 | -------------------------------------------------------------------------------- /config/odoo.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2022 Patagon IO SAS. All rights reserved. 3 | # 4 | 5 | [options] 6 | addons_path = {{ MODULES or "/mnt/mountrix/addons/," }}/mnt/odoo/addons,/mnt/odoo/addons_customer,/mnt/odoo/addons_external 7 | admin_passwd = {{ ODOO_DBMASTER_PASSWORD or "Noh3quoun9wahgoo6eix2Iethea3asho" }} 8 | csv_internal_sep = , 9 | data_dir = {{ ODOO_DATA_DIR or "/var/lib/odoo" }} 10 | db_host = {{ DB_PORT_5432_TCP_ADDR or "db" }} 11 | db_maxconn = {{ ODOO_CONF_DB_MAXCONN or 64 }} 12 | db_name = {{ ODOO_DBNAME or "" }} 13 | db_password = {{ DB_ENV_POSTGRES_PASSWORD or "odoo" }} 14 | db_port = {{ DB_PORT_5432_TCP_PORT or 5432 }} 15 | db_sslmode = prefer 16 | db_template = template0 17 | db_user = {{ DB_ENV_POSTGRES_USER or "odoo" }} 18 | dbfilter = {{ DB_FILTER or "" }} 19 | demo = {} 20 | email_from = {{ POSTFIX_SMTP_LOGIN or "False" }} 21 | geoip_database = /usr/share/GeoIP/GeoLite2-City.mmdb 22 | http_enable = {{ ODOO_CONF_HTTP_ENABLE or "True" }} 23 | http_interface = 24 | http_port = {{ ODOO_CONF_HTTP_PORT or "8069" }} 25 | import_partial = {{ ODOO_CONF_IMPORT_PARTIAL or "" }} 26 | limit_memory_hard = {{ ODOO_CONF_LIMIT_MEMORY_HARD or "2684354560" }} 27 | limit_memory_soft = {{ ODOO_CONF_LIMIT_MEMORY_SOFT or "2147483648" }} 28 | limit_request = {{ ODOO_CONF_LIMIT_REQUEST or "8192" }} 29 | limit_time_cpu = {{ ODOO_CONF_LIMIT_TIME_CPU or "4000" }} 30 | limit_time_real = {{ ODOO_CONF_LIMIT_TIME_REAL or "6000" }} 31 | limit_time_real_cron = {{ ODOO_CONF_LIMIT_TIME_REAL_CRON or "-1" }} 32 | list_db = {{ ODOO_CONF_LIST_DB or True }} 33 | log_db = {{ ODOO_CONF_LOG_DB or "False" }} 34 | log_db_level = {{ ODOO_CONF_LOG_DB_LEVEL or "warning" }} 35 | log_handler = {{ ODOO_CONF_LOG_HANDLER or ":INFO" }} 36 | log_level = {{ LOG_LEVEL or "info" }} 37 | logfile = None 38 | gevent_port = {{ ODOO_CONF_LONGPOLLING_PORT or "8072" }} 39 | max_cron_threads = {{ ODOO_CONF_MAX_CRON_THREADS or "0" }} 40 | osv_memory_age_limit = {{ ODOO_CONF_OSV_MEMORY_AGE_LIMIT or "False" }} 41 | osv_memory_count_limit = {{ ODOO_CONF_OSV_MEMORY_COUNT_LIMIT or "False" }} 42 | pg_path = 43 | pidfile = 44 | proxy_mode = {{ ODOO_CONF_PROXY_MODE or "True" }} 45 | reportgz = False 46 | screencasts = 47 | screenshots = /tmp/odoo_tests 48 | server_wide_modules = {{ ODOO_CONF_SERVER_WIDE_MODULES or "base,web,session_redis,queue_job" }} 49 | smtp_password = {{ ODOO_CONF_SMTP_PASSWORD or "False" }} 50 | smtp_port = {{ ODOO_CONF_SMTP_PORT or "25" }} 51 | smtp_server = {{ ODOO_CONF_SMTP_SERVER or "smtp" }} 52 | smtp_ssl = {{ ODOO_CONF_SMTP_SSL or "False" }} 53 | smtp_user = {{ ODOO_CONF_SMTP_USER or "False" }} 54 | syslog = {% if DOCKER_LOG_TYPE == 'json' %}True{% else %}False{% endif %} 55 | test_enable = False 56 | test_file = False 57 | test_tags = None 58 | transient_age_limit = 1.0 59 | translate_modules = ['all'] 60 | unaccent = False 61 | upgrade_path = {{ ODOO_CONF_UPGRADE_PATH or "" }} 62 | without_demo = {{ ODOO_WITHOUT_DEMO or "" }} 63 | workers = {{ 
ODOO_CONF_WORKERS or "0" }} 64 | running_env=default 65 | 66 | [queue_job] 67 | channels = {{ ODOO_QUEUE_JOB_CHANNELS or "root:2" }} 68 | 69 | [domain_alias] 70 | external_email_server_default = true 71 | alias_domain = {{ POSTFIX_SMTP_DOMAIN or "False" }} 72 | 73 | {% include '/etc/odoo.d/extra_incoming_mail.j2' ignore missing %} 74 | 75 | {% include '/etc/odoo_files.d/extra_odoo_conf.j2' ignore missing -%} 76 | -------------------------------------------------------------------------------- /addons_external/queue_job/models/queue_job_channel.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013-2020 Camptocamp SA 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | 5 | from odoo import _, api, exceptions, fields, models 6 | 7 | 8 | class QueueJobChannel(models.Model): 9 | _name = "queue.job.channel" 10 | _description = "Job Channels" 11 | _rec_name = "complete_name" 12 | 13 | name = fields.Char() 14 | complete_name = fields.Char( 15 | compute="_compute_complete_name", store=True, readonly=True, recursive=True 16 | ) 17 | parent_id = fields.Many2one( 18 | comodel_name="queue.job.channel", string="Parent Channel", ondelete="restrict" 19 | ) 20 | job_function_ids = fields.One2many( 21 | comodel_name="queue.job.function", 22 | inverse_name="channel_id", 23 | string="Job Functions", 24 | ) 25 | removal_interval = fields.Integer( 26 | default=lambda self: self.env["queue.job"]._removal_interval, required=True 27 | ) 28 | 29 | _sql_constraints = [ 30 | ("name_uniq", "unique(complete_name)", "Channel complete name must be unique") 31 | ] 32 | 33 | @api.depends("name", "parent_id.complete_name") 34 | def _compute_complete_name(self): 35 | for record in self: 36 | if not record.name: 37 | complete_name = "" # new record 38 | elif record.parent_id: 39 | complete_name = ".".join([record.parent_id.complete_name, record.name]) 40 | else: 41 | complete_name = record.name 42 | record.complete_name = complete_name 43 | 44 | @api.constrains("parent_id", "name") 45 | def parent_required(self): 46 | for record in self: 47 | if record.name != "root" and not record.parent_id: 48 | raise exceptions.ValidationError(_("Parent channel required.")) 49 | 50 | @api.model_create_multi 51 | def create(self, vals_list): 52 | records = self.browse() 53 | if self.env.context.get("install_mode"): 54 | # installing a module that creates a channel: rebinds the channel 55 | # to an existing one (likely we already had the channel created by 56 | # the @job decorator previously) 57 | new_vals_list = [] 58 | for vals in vals_list: 59 | name = vals.get("name") 60 | parent_id = vals.get("parent_id") 61 | if name and parent_id: 62 | existing = self.search( 63 | [("name", "=", name), ("parent_id", "=", parent_id)] 64 | ) 65 | if existing: 66 | if not existing.get_metadata()[0].get("noupdate"): 67 | existing.write(vals) 68 | records |= existing 69 | continue 70 | new_vals_list.append(vals) 71 | vals_list = new_vals_list 72 | records |= super().create(vals_list) 73 | return records 74 | 75 | def write(self, values): 76 | for channel in self: 77 | if ( 78 | not self.env.context.get("install_mode") 79 | and channel.name == "root" 80 | and ("name" in values or "parent_id" in values) 81 | ): 82 | raise exceptions.UserError(_("Cannot change the root channel")) 83 | return super().write(values) 84 | 85 | def unlink(self): 86 | for channel in self: 87 | if channel.name == "root": 88 | raise exceptions.UserError(_("Cannot remove the root channel")) 89 | return 
super().unlink() 90 | -------------------------------------------------------------------------------- /addons_external/session_redis/session.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016-2019 Camptocamp SA 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html) 3 | 4 | import json 5 | import logging 6 | 7 | from odoo.service import security 8 | from odoo.tools._vendor.sessions import SessionStore 9 | 10 | from . import json_encoding 11 | 12 | # this is equal to the duration of the session garbage collector in 13 | # odoo.http.session_gc() 14 | DEFAULT_SESSION_TIMEOUT = 60 * 60 * 24 * 7 # 7 days in seconds 15 | DEFAULT_SESSION_TIMEOUT_ANONYMOUS = 60 * 60 * 3 # 3 hours in seconds 16 | 17 | _logger = logging.getLogger(__name__) 18 | 19 | 20 | class RedisSessionStore(SessionStore): 21 | """SessionStore that saves session to redis""" 22 | 23 | def __init__( 24 | self, 25 | redis, 26 | session_class=None, 27 | prefix="", 28 | expiration=None, 29 | anon_expiration=None, 30 | ): 31 | super().__init__(session_class=session_class) 32 | self.redis = redis 33 | if expiration is None: 34 | self.expiration = DEFAULT_SESSION_TIMEOUT 35 | else: 36 | self.expiration = expiration 37 | if anon_expiration is None: 38 | self.anon_expiration = DEFAULT_SESSION_TIMEOUT_ANONYMOUS 39 | else: 40 | self.anon_expiration = anon_expiration 41 | self.prefix = "session:" 42 | if prefix: 43 | self.prefix = "%s:%s:" % (self.prefix, prefix) 44 | 45 | def build_key(self, sid): 46 | return "%s%s" % (self.prefix, sid) 47 | 48 | def save(self, session): 49 | key = self.build_key(session.sid) 50 | 51 | # allow to set a custom expiration for a session 52 | # such as a very short one for monitoring requests 53 | if session.uid: 54 | expiration = session.expiration or self.expiration 55 | else: 56 | expiration = session.expiration or self.anon_expiration 57 | if _logger.isEnabledFor(logging.DEBUG): 58 | if session.uid: 59 | user_msg = "user '%s' (id: %s)" % (session.login, session.uid) 60 | else: 61 | user_msg = "anonymous user" 62 | _logger.debug( 63 | "saving session with key '%s' and " "expiration of %s seconds for %s", 64 | key, 65 | expiration, 66 | user_msg, 67 | ) 68 | 69 | data = json.dumps(dict(session), cls=json_encoding.SessionEncoder).encode( 70 | "utf-8" 71 | ) 72 | if self.redis.set(key, data): 73 | return self.redis.expire(key, expiration) 74 | 75 | def delete(self, session): 76 | key = self.build_key(session.sid) 77 | _logger.debug("deleting session with key %s", key) 78 | return self.redis.delete(key) 79 | 80 | def get(self, sid): 81 | if not self.is_valid_key(sid): 82 | _logger.debug( 83 | "session with invalid sid '%s' has been asked, " "returning a new one", 84 | sid, 85 | ) 86 | return self.new() 87 | 88 | key = self.build_key(sid) 89 | saved = self.redis.get(key) 90 | if not saved: 91 | _logger.debug( 92 | "session with non-existent key '%s' has been asked, " 93 | "returning a new one", 94 | key, 95 | ) 96 | return self.new() 97 | try: 98 | data = json.loads(saved.decode("utf-8"), cls=json_encoding.SessionDecoder) 99 | except ValueError: 100 | _logger.debug( 101 | "session for key '%s' has been asked but its json " 102 | "content could not be read, it has been reset", 103 | key, 104 | ) 105 | data = {} 106 | return self.session_class(data, sid, False) 107 | 108 | def list(self): 109 | keys = self.redis.keys("%s*" % self.prefix) 110 | _logger.debug("a listing redis keys has been called") 111 | return [key[len(self.prefix) :] for key in 
keys] 112 | 113 | def rotate(self, session, env): 114 | self.delete(session) 115 | session.sid = self.generate_key() 116 | if session.uid and env: 117 | session.session_token = security.compute_session_token(session, env) 118 | self.save(session) 119 | 120 | def vacuum(self): 121 | """Do not garbage collect the sessions 122 | 123 | Redis keys are automatically cleaned at the end of their 124 | expiration. 125 | """ 126 | return None 127 | -------------------------------------------------------------------------------- /addons_external/queue_job/fields.py: -------------------------------------------------------------------------------- 1 | # copyright 2016 Camptocamp 2 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | import json 5 | from datetime import date, datetime 6 | 7 | import dateutil 8 | import lxml 9 | 10 | from odoo import fields, models 11 | from odoo.tools.func import lazy 12 | 13 | 14 | class JobSerialized(fields.Field): 15 | """Provide the storage for job fields stored as json 16 | 17 | A base_type must be set, it must be dict, list or tuple. 18 | When the field is not set, the json will be the corresponding 19 | json string ("{}" or "[]"). 20 | 21 | Support for some custom types has been added to the json decoder/encoder 22 | (see JobEncoder and JobDecoder). 23 | """ 24 | 25 | type = "job_serialized" 26 | column_type = ("text", "text") 27 | 28 | _base_type = None 29 | 30 | # these are the default values when we convert an empty value 31 | _default_json_mapping = { 32 | dict: "{}", 33 | list: "[]", 34 | tuple: "[]", 35 | models.BaseModel: lambda env: json.dumps( 36 | {"_type": "odoo_recordset", "model": "base", "ids": [], "uid": env.uid} 37 | ), 38 | } 39 | 40 | def __init__(self, string=fields.Default, base_type=fields.Default, **kwargs): 41 | super().__init__(string=string, _base_type=base_type, **kwargs) 42 | 43 | def _setup_attrs(self, model, name): # pylint: disable=missing-return 44 | super()._setup_attrs(model, name) 45 | if self._base_type not in self._default_json_mapping: 46 | raise ValueError("%s is not a supported base type" % (self._base_type)) 47 | 48 | def _base_type_default_json(self, env): 49 | default_json = self._default_json_mapping.get(self._base_type) 50 | if not isinstance(default_json, str): 51 | default_json = default_json(env) 52 | return default_json 53 | 54 | def convert_to_column(self, value, record, values=None, validate=True): 55 | return self.convert_to_cache(value, record, validate=validate) 56 | 57 | def convert_to_cache(self, value, record, validate=True): 58 | # cache format: json.dumps(value) or None 59 | if isinstance(value, self._base_type): 60 | return json.dumps(value, cls=JobEncoder) 61 | else: 62 | return value or None 63 | 64 | def convert_to_record(self, value, record): 65 | default = self._base_type_default_json(record.env) 66 | return json.loads(value or default, cls=JobDecoder, env=record.env) 67 | 68 | 69 | class JobEncoder(json.JSONEncoder): 70 | """Encode Odoo recordsets so that we can later recompose them""" 71 | 72 | def _get_record_context(self, obj): 73 | return obj._job_prepare_context_before_enqueue() 74 | 75 | def default(self, obj): 76 | if isinstance(obj, models.BaseModel): 77 | return { 78 | "_type": "odoo_recordset", 79 | "model": obj._name, 80 | "ids": obj.ids, 81 | "uid": obj.env.uid, 82 | "su": obj.env.su, 83 | "context": self._get_record_context(obj), 84 | } 85 | elif isinstance(obj, datetime): 86 | return {"_type": "datetime_isoformat", "value": obj.isoformat()} 87 | elif 
isinstance(obj, date): 88 | return {"_type": "date_isoformat", "value": obj.isoformat()} 89 | elif isinstance(obj, lxml.etree._Element): 90 | return { 91 | "_type": "etree_element", 92 | "value": lxml.etree.tostring(obj, encoding=str), 93 | } 94 | elif isinstance(obj, lazy): 95 | return obj._value 96 | return json.JSONEncoder.default(self, obj) 97 | 98 | 99 | class JobDecoder(json.JSONDecoder): 100 | """Decode json, recomposing recordsets""" 101 | 102 | def __init__(self, *args, **kwargs): 103 | env = kwargs.pop("env") 104 | super().__init__(*args, object_hook=self.object_hook, **kwargs) 105 | assert env 106 | self.env = env 107 | 108 | def object_hook(self, obj): 109 | if "_type" not in obj: 110 | return obj 111 | type_ = obj["_type"] 112 | if type_ == "odoo_recordset": 113 | model = self.env(user=obj.get("uid"), su=obj.get("su"))[obj["model"]] 114 | if obj.get("context"): 115 | model = model.with_context(**obj.get("context")) 116 | return model.browse(obj["ids"]) 117 | elif type_ == "datetime_isoformat": 118 | return dateutil.parser.parse(obj["value"]) 119 | elif type_ == "date_isoformat": 120 | return dateutil.parser.parse(obj["value"]).date() 121 | elif type_ == "etree_element": 122 | return lxml.etree.fromstring(obj["value"]) 123 | return obj 124 | -------------------------------------------------------------------------------- /addons_external/queue_job/static/src/views/fields/job_direct_graph/job_direct_graph.esm.js: -------------------------------------------------------------------------------- 1 | /* @odoo-module */ 2 | /* global vis */ 3 | 4 | import {loadCSS, loadJS} from "@web/core/assets"; 5 | import {registry} from "@web/core/registry"; 6 | import {standardFieldProps} from "@web/views/fields/standard_field_props"; 7 | import {useService} from "@web/core/utils/hooks"; 8 | 9 | const {Component, onWillStart, useEffect, useRef} = owl; 10 | 11 | export class JobDirectGraph extends Component { 12 | setup() { 13 | this.orm = useService("orm"); 14 | this.action = useService("action"); 15 | this.rootRef = useRef("root_vis"); 16 | this.network = null; 17 | onWillStart(async () => { 18 | await loadJS("/queue_job/static/lib/vis/vis-network.min.js"); 19 | loadCSS("/queue_job/static/lib/vis/vis-network.min.css"); 20 | }); 21 | useEffect(() => { 22 | this.renderNetwork(); 23 | this._fitNetwork(); 24 | return () => { 25 | if (this.network) { 26 | this.$el.empty(); 27 | } 28 | return this.rootRef.el; 29 | }; 30 | }); 31 | } 32 | 33 | get $el() { 34 | return $(this.rootRef.el); 35 | } 36 | 37 | get resId() { 38 | return this.props.record.data.id; 39 | } 40 | 41 | get context() { 42 | return this.props.record.getFieldContext(this.props.name); 43 | } 44 | 45 | get model() { 46 | return this.props.record.resModel; 47 | } 48 | 49 | htmlTitle(html) { 50 | const container = document.createElement("div"); 51 | container.innerHTML = html; 52 | return container; 53 | } 54 | 55 | renderNetwork() { 56 | if (this.network) { 57 | this.$el.empty(); 58 | } 59 | let nodes = this.props.value.nodes || []; 60 | if (!nodes.length) { 61 | return; 62 | } 63 | nodes = nodes.map((node) => { 64 | node.title = this.htmlTitle(node.title || ""); 65 | return node; 66 | }); 67 | 68 | const edges = []; 69 | _.each(this.props.value.edges || [], function (edge) { 70 | const edgeFrom = edge[0]; 71 | const edgeTo = edge[1]; 72 | edges.push({ 73 | from: edgeFrom, 74 | to: edgeTo, 75 | arrows: "to", 76 | }); 77 | }); 78 | 79 | const data = { 80 | nodes: new vis.DataSet(nodes), 81 | edges: new vis.DataSet(edges), 82 | }; 83 | 
const options = { 84 | // Fix the seed to have always the same result for the same graph 85 | layout: {randomSeed: 1}, 86 | }; 87 | // Arbitrary threshold, generation becomes very slow at some 88 | // point, and disabling the stabilization helps to have a fast result. 89 | // Actually, it stabilizes, but is displayed while stabilizing, rather 90 | // than showing a blank canvas. 91 | if (nodes.length > 100) { 92 | options.physics = {stabilization: false}; 93 | } 94 | const network = new vis.Network(this.$el[0], data, options); 95 | network.selectNodes([this.resId]); 96 | var self = this; 97 | network.on("dragging", function () { 98 | // By default, dragging changes the selected node 99 | // to the dragged one, we want to keep the current 100 | // job selected 101 | network.selectNodes([self.resId]); 102 | }); 103 | network.on("click", function (params) { 104 | if (params.nodes.length > 0) { 105 | var resId = params.nodes[0]; 106 | if (resId !== self.resId) { 107 | self.openDependencyJob(resId); 108 | } 109 | } else { 110 | // Clicked outside of the nodes, we want to 111 | // keep the current job selected 112 | network.selectNodes([self.resId]); 113 | } 114 | }); 115 | this.network = network; 116 | } 117 | 118 | async openDependencyJob(resId) { 119 | const action = await this.orm.call( 120 | this.model, 121 | "get_formview_action", 122 | [[resId]], 123 | { 124 | context: this.context, 125 | } 126 | ); 127 | await this.action.doAction(action); 128 | } 129 | 130 | _fitNetwork() { 131 | if (this.network) { 132 | this.network.fit(this.network.body.nodeIndices); 133 | } 134 | } 135 | } 136 | 137 | JobDirectGraph.props = { 138 | ...standardFieldProps, 139 | }; 140 | 141 | JobDirectGraph.template = "queue.JobDirectGraph"; 142 | 143 | registry.category("fields").add("job_directed_graph", JobDirectGraph); 144 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | db-master: 4 | image: postgres:latest 5 | environment: 6 | POSTGRES_DB: postgres 7 | POSTGRES_USER: odoo 8 | POSTGRES_PASSWORD: odoo 9 | volumes: 10 | - master-data:/var/lib/postgresql/data 11 | ports: 12 | - "5432:5432" 13 | restart: unless-stopped 14 | 15 | db-replica: 16 | image: postgres:latest 17 | depends_on: 18 | - db-master 19 | environment: 20 | POSTGRES_DB: postgres 21 | POSTGRES_USER: odoo 22 | POSTGRES_PASSWORD: odoo 23 | POSTGRES_REPLICATION_MODE: replica 24 | POSTGRES_REPLICATION_USER: odoo 25 | POSTGRES_REPLICATION_PASSWORD: odoo 26 | volumes: 27 | - replica-data:/var/lib/postgresql/data 28 | command: > 29 | bash -c " 30 | while ! pg_isready -h db-master -p 5432 -U odoo; do 31 | echo 'Waiting for master to be ready...' 32 | sleep 2 33 | done; 34 | pg_basebackup -h db-master -D /var/lib/postgresql/data -U odoo -vP -W --create-slot --slot=replica_slot --write-recovery-conf; 35 | exec docker-entrypoint.sh postgres 36 | " 37 | ports: 38 | - "5433:5432" 39 | restart: unless-stopped 40 | redis: 41 | image: redis:6.2-alpine 42 | restart: unless-stopped 43 | ports: 44 | - "6379:6379" 45 | command: redis-server --requirepass eiSa7so4Oochae4B #--loglevel debug 46 | volumes: 47 | - redis-data:/data 48 | web: 49 | build: 50 | context: . 
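      # The Odoo container is pointed at heimdallproxy1 instead of db-master,
      # so all SQL connections flow through the Heimdall proxy (which provides
      # the query caching and replica routing shown in the manager dashboard).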
51 | environment: 52 | - DB_PORT_5432_TCP_ADDR=heimdallproxy1 53 | - DB_PORT_5432_TCP_PORT=5432 54 | - DB_ENV_POSTGRES_USER=odoo 55 | - DB_ENV_POSTGRES_PASSWORD=odoo 56 | - PGHOST=heimdallproxy1 57 | - PGPORT=5432 58 | - PGUSER=odoo 59 | - PGPASSWORD=odoo 60 | - PGDATABASE=odoo-17 61 | - ODOO_USER_ADMIN_DEFAULT_PASSWORD=admin 62 | - ODOO_DBMASTER_PASSWORD=admin 63 | - ODOO_CONF_WORKERS=4 64 | - ODOO_QUEUE_JOB_PORT=8069 65 | - ODOO_DBNAME=odoo-17 66 | - SHELL=/bin/bash 67 | - ODOO_SESSION_REDIS=true 68 | - ODOO_SESSION_REDIS_HOST=redis 69 | - ODOO_SESSION_REDIS_PASSWORD=eiSa7so4Oochae4B 70 | - ODOO_SESSION_REDIS_PORT=6379 71 | # - DEBUG=true to debug with VS Code in your local environment 72 | # - TEST=true to execute the unit tests in your local environment 73 | depends_on: 74 | - heimdallproxy1 75 | - redis 76 | ports: 77 | # - "8069:8069" 78 | # - "8072:8072" 79 | - "8888:8888" 80 | tty: true 81 | volumes: 82 | - odoo-web-data:/var/lib/odoo 83 | - ./config:/etc/odoo 84 | - ./:/mnt/odoo 85 | restart: unless-stopped # run as a service 86 | nginx: 87 | image: nginx:latest 88 | depends_on: 89 | - web 90 | command: > 91 | /bin/bash -c "echo $$' 92 | upstream odoo { 93 | server web:8069; 94 | } 95 | upstream odoo-chat { 96 | server web:8072; 97 | } 98 | 99 | server { 100 | listen 80; 101 | client_max_body_size 0; 102 | 103 | access_log /var/log/nginx/odoo-access.log; 104 | error_log /var/log/nginx/odoo-error.log; 105 | 106 | proxy_set_header Host $$host:$$server_port; 107 | proxy_set_header X-Real-IP $$remote_addr; 108 | proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for; 109 | proxy_set_header X-Forwarded-Proto $$scheme; 110 | proxy_set_header X-Forwarded-Host $$host:$$server_port; 111 | proxy_set_header X-Forwarded-Server $$host; 112 | 113 | location / { 114 | proxy_redirect off; 115 | proxy_pass http://odoo; 116 | } 117 | 118 | location /longpolling { 119 | proxy_pass http://odoo-chat; 120 | } 121 | 122 | location ~* /web/static/ { 123 | proxy_cache_valid 200 90m; 124 | proxy_buffering on; 125 | expires 864000; 126 | proxy_pass http://odoo; 127 | } 128 | 129 | }' > /etc/nginx/conf.d/default.conf && nginx -g 'daemon off;'" 130 | ports: 131 | - "80:80" 132 | restart: unless-stopped 133 | heimdallmanager: 134 | build: 135 | context: ./heimdall 136 | dockerfile: Dockerfile 137 | depends_on: 138 | - db-master 139 | environment: 140 | - hduser=admin 141 | - hdpassword=admin 142 | - vdbName=Mountrix-vdb 143 | ports: 144 | - "8087:8087" 145 | tty: true 146 | volumes: 147 | - heimdall-cache:/opt/heimdall/config 148 | 149 | restart: unless-stopped 150 | 151 | heimdallproxy1: 152 | build: 153 | context: ./heimdall 154 | dockerfile: Dockerfile 155 | depends_on: 156 | - heimdallmanager 157 | environment: 158 | - hduser=admin 159 | - hdpassword=admin 160 | - hdRole=proxy 161 | - vdbName=Mountrix-vdb 162 | - configcache=/opt/heimdall/configcache 163 | ports: 164 | - "5434:5432" 165 | tty: true 166 | volumes: 167 | - heimdall-cache:/opt/heimdall/configcache 168 | restart: unless-stopped 169 | volumes: 170 | odoo-web-data: 171 | redis-data: 172 | heimdall-cache: 173 | master-data: 174 | name: odoo-heimdall-master-data 175 | replica-data: 176 | name: odoo-heimdall-replica-data 177 | -------------------------------------------------------------------------------- /addons_external/queue_job/jobrunner/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015-2016 ACSONE SA/NV () 2 | # Copyright 2016 Camptocamp SA 3 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 4 | 5 | import logging 6 | from threading import Thread 7 | import time 8 | 9 | from odoo.service import server 10 | from odoo.tools import config 11 | 12 | try: 13 | from odoo.addons.server_environment import serv_config 14 | 15 | if serv_config.has_section("queue_job"): 16 | queue_job_config = serv_config["queue_job"] 17 | else: 18 | queue_job_config = {} 19 | except ImportError: 20 | queue_job_config = config.misc.get("queue_job", {}) 21 | 
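# For reference, this repository's config/odoo.conf.j2 renders the matching
# configuration section read above:
#   [queue_job]
#   channels = {{ ODOO_QUEUE_JOB_CHANNELS or "root:2" }}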
22 | 23 | from .runner import QueueJobRunner, _channels 24 | 25 | _logger = logging.getLogger(__name__) 26 | 27 | START_DELAY = 5 28 | 29 | 30 | # Here we monkey patch the Odoo server to start the job runner thread 31 | # in the main server process (and not in forked workers). This is 32 | # very easy to deploy as we don't need another startup script. 33 | 34 | 35 | class QueueJobRunnerThread(Thread): 36 | def __init__(self): 37 | Thread.__init__(self) 38 | self.daemon = True 39 | self.runner = QueueJobRunner.from_environ_or_config() 40 | 41 | def run(self): 42 | # sleep a bit to let the workers start at ease 43 | time.sleep(START_DELAY) 44 | self.runner.run() 45 | 46 | def stop(self): 47 | self.runner.stop() 48 | 49 | 50 | class WorkerJobRunner(server.Worker): 51 | """Jobrunner workers""" 52 | 53 | def __init__(self, multi): 54 | super().__init__(multi) 55 | self.watchdog_timeout = None 56 | self.runner = QueueJobRunner.from_environ_or_config() 57 | self._recover = False 58 | 59 | def sleep(self): 60 | pass 61 | 62 | def signal_handler(self, sig, frame): # pylint: disable=missing-return 63 | _logger.debug("WorkerJobRunner (%s) received signal %s", self.pid, sig) 64 | super().signal_handler(sig, frame) 65 | self.runner.stop() 66 | 67 | def process_work(self): 68 | if self._recover: 69 | _logger.info("WorkerJobRunner (%s) runner is reinitialized", self.pid) 70 | self.runner = QueueJobRunner.from_environ_or_config() 71 | self._recover = False 72 | _logger.debug("WorkerJobRunner (%s) starting up", self.pid) 73 | time.sleep(START_DELAY) 74 | self.runner.run() 75 | 76 | def signal_time_expired_handler(self, n, stack): 77 | _logger.info( 78 | "Worker (%d) CPU time limit (%s) reached. Stop gracefully and recover", 79 | self.pid, 80 | config["limit_time_cpu"], 81 | ) 82 | self._recover = True 83 | self.runner.stop() 84 | 85 | 86 | runner_thread = None 87 | 88 | 89 | def _is_runner_enabled(): 90 | return not _channels().strip().startswith("root:0") 91 | 92 | 93 | def _start_runner_thread(server_type): 94 | global runner_thread 95 | if not config["stop_after_init"]: 96 | if _is_runner_enabled(): 97 | _logger.info("starting jobrunner thread (in %s)", server_type) 98 | runner_thread = QueueJobRunnerThread() 99 | runner_thread.start() 100 | else: 101 | _logger.info( 102 | "jobrunner thread (in %s) NOT started, " 103 | "because the root channel's capacity is set to 0", 104 | server_type, 105 | ) 106 | 107 | 108 | orig_prefork__init__ = server.PreforkServer.__init__ 109 | orig_prefork_process_spawn = server.PreforkServer.process_spawn 110 | orig_prefork_worker_pop = server.PreforkServer.worker_pop 111 | orig_threaded_start = server.ThreadedServer.start 112 | orig_threaded_stop = server.ThreadedServer.stop 113 | 114 | 115 | def prefork__init__(server, app): 116 | res = orig_prefork__init__(server, app) 117 | server.jobrunner = {} 118 | return res 119 | 120 | 121 | def prefork_process_spawn(server): 122 | orig_prefork_process_spawn(server) 123 | if not hasattr(server, "jobrunner"): 124 | # if 'queue_job' is not in server wide
modules, PreforkServer is 125 | # not initialized with a 'jobrunner' attribute, skip this 126 | return 127 | if not server.jobrunner and _is_runner_enabled(): 128 | server.worker_spawn(WorkerJobRunner, server.jobrunner) 129 | 130 | 131 | def prefork_worker_pop(server, pid): 132 | res = orig_prefork_worker_pop(server, pid) 133 | if not hasattr(server, "jobrunner"): 134 | # if 'queue_job' is not in server wide modules, PreforkServer is 135 | # not initialized with a 'jobrunner' attribute, skip this 136 | return res 137 | if pid in server.jobrunner: 138 | server.jobrunner.pop(pid) 139 | return res 140 | 141 | 142 | def threaded_start(server, *args, **kwargs): 143 | res = orig_threaded_start(server, *args, **kwargs) 144 | _start_runner_thread("threaded server") 145 | return res 146 | 147 | 148 | def threaded_stop(server): 149 | global runner_thread 150 | if runner_thread: 151 | runner_thread.stop() 152 | res = orig_threaded_stop(server) 153 | if runner_thread: 154 | runner_thread.join() 155 | runner_thread = None 156 | return res 157 | 158 | 159 | server.PreforkServer.__init__ = prefork__init__ 160 | server.PreforkServer.process_spawn = prefork_process_spawn 161 | server.PreforkServer.worker_pop = prefork_worker_pop 162 | server.ThreadedServer.start = threaded_start 163 | server.ThreadedServer.stop = threaded_stop 164 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_json_field.py: -------------------------------------------------------------------------------- 1 | # copyright 2016 Camptocamp 2 | # license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | import json 5 | from datetime import date, datetime 6 | 7 | from lxml import etree 8 | 9 | from odoo.tests import common 10 | 11 | # pylint: disable=odoo-addons-relative-import 12 | # we are testing, we want to test as we were an external consumer of the API 13 | from odoo.addons.queue_job.fields import JobDecoder, JobEncoder 14 | 15 | 16 | class TestJson(common.TransactionCase): 17 | def test_encoder_recordset(self): 18 | demo_user = self.env.ref("base.user_demo") 19 | context = demo_user.context_get() 20 | partner = self.env(user=demo_user, context=context).ref("base.main_partner") 21 | value = partner 22 | value_json = json.dumps(value, cls=JobEncoder) 23 | expected_context = context.copy() 24 | expected_context.pop("uid") 25 | expected = { 26 | "uid": demo_user.id, 27 | "_type": "odoo_recordset", 28 | "model": "res.partner", 29 | "ids": [partner.id], 30 | "su": False, 31 | "context": expected_context, 32 | } 33 | self.assertEqual(json.loads(value_json), expected) 34 | 35 | def test_encoder_recordset_list(self): 36 | demo_user = self.env.ref("base.user_demo") 37 | context = demo_user.context_get() 38 | partner = self.env(user=demo_user, context=context).ref("base.main_partner") 39 | value = ["a", 1, partner] 40 | value_json = json.dumps(value, cls=JobEncoder) 41 | expected_context = context.copy() 42 | expected_context.pop("uid") 43 | expected = [ 44 | "a", 45 | 1, 46 | { 47 | "uid": demo_user.id, 48 | "_type": "odoo_recordset", 49 | "model": "res.partner", 50 | "ids": [partner.id], 51 | "su": False, 52 | "context": expected_context, 53 | }, 54 | ] 55 | self.assertEqual(json.loads(value_json), expected) 56 | 57 | def test_decoder_recordset(self): 58 | demo_user = self.env.ref("base.user_demo") 59 | context = demo_user.context_get() 60 | partner = self.env(user=demo_user).ref("base.main_partner") 61 | 62 | value_json = json.dumps( 63 | { 64 | "_type": 
"odoo_recordset", 65 | "model": "res.partner", 66 | "su": False, 67 | "ids": partner.ids, 68 | "uid": demo_user.id, 69 | "context": {"tz": context["tz"], "lang": context["lang"]}, 70 | } 71 | ) 72 | 73 | expected = partner 74 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 75 | self.assertEqual(value, expected) 76 | self.assertEqual(demo_user, expected.env.user) 77 | 78 | def test_decoder_recordset_list(self): 79 | demo_user = self.env.ref("base.user_demo") 80 | context = demo_user.context_get() 81 | partner = self.env(user=demo_user).ref("base.main_partner") 82 | value_json = json.dumps( 83 | [ 84 | "a", 85 | 1, 86 | { 87 | "_type": "odoo_recordset", 88 | "model": "res.partner", 89 | "su": False, 90 | "ids": partner.ids, 91 | "uid": demo_user.id, 92 | "context": {"tz": context["tz"], "lang": context["lang"]}, 93 | }, 94 | ] 95 | ) 96 | expected = ["a", 1, partner] 97 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 98 | self.assertEqual(value, expected) 99 | self.assertEqual(demo_user, expected[2].env.user) 100 | 101 | def test_decoder_recordset_list_without_user(self): 102 | value_json = ( 103 | '["a", 1, {"_type": "odoo_recordset",' '"model": "res.users", "ids": [1]}]' 104 | ) 105 | expected = ["a", 1, self.env.ref("base.user_root")] 106 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 107 | self.assertEqual(value, expected) 108 | 109 | def test_encoder_datetime(self): 110 | value = ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)] 111 | value_json = json.dumps(value, cls=JobEncoder) 112 | expected = [ 113 | "a", 114 | 1, 115 | {"_type": "datetime_isoformat", "value": "2017-04-19T08:48:50.000001"}, 116 | ] 117 | self.assertEqual(json.loads(value_json), expected) 118 | 119 | def test_decoder_datetime(self): 120 | value_json = ( 121 | '["a", 1, {"_type": "datetime_isoformat",' 122 | '"value": "2017-04-19T08:48:50.000001"}]' 123 | ) 124 | expected = ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)] 125 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 126 | self.assertEqual(value, expected) 127 | 128 | def test_encoder_date(self): 129 | value = ["a", 1, date(2017, 4, 19)] 130 | value_json = json.dumps(value, cls=JobEncoder) 131 | expected = ["a", 1, {"_type": "date_isoformat", "value": "2017-04-19"}] 132 | self.assertEqual(json.loads(value_json), expected) 133 | 134 | def test_decoder_date(self): 135 | value_json = '["a", 1, {"_type": "date_isoformat",' '"value": "2017-04-19"}]' 136 | expected = ["a", 1, date(2017, 4, 19)] 137 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 138 | self.assertEqual(value, expected) 139 | 140 | def test_encoder_etree(self): 141 | etree_el = etree.Element("root", attr="val") 142 | etree_el.append(etree.Element("child", attr="val")) 143 | value = ["a", 1, etree_el] 144 | value_json = json.dumps(value, cls=JobEncoder) 145 | expected = [ 146 | "a", 147 | 1, 148 | { 149 | "_type": "etree_element", 150 | "value": '', 151 | }, 152 | ] 153 | self.assertEqual(json.loads(value_json), expected) 154 | 155 | def test_decoder_etree(self): 156 | value_json = '["a", 1, {"_type": "etree_element", "value": \ 157 | ""}]' 158 | etree_el = etree.Element("root", attr="val") 159 | etree_el.append(etree.Element("child", attr="val")) 160 | expected = ["a", 1, etree.tostring(etree_el)] 161 | value = json.loads(value_json, cls=JobDecoder, env=self.env) 162 | value[2] = etree.tostring(value[2]) 163 | self.assertEqual(value, expected) 164 | -------------------------------------------------------------------------------- 
17 | 18 | 19 | class QueueJobFunction(models.Model): 20 | _name = "queue.job.function" 21 | _description = "Job Functions" 22 | _log_access = False 23 | 24 | JobConfig = namedtuple( 25 | "JobConfig", 26 | "channel " 27 | "retry_pattern " 28 | "related_action_enable " 29 | "related_action_func_name " 30 | "related_action_kwargs " 31 | "job_function_id ", 32 | ) 33 | 34 | def _default_channel(self): 35 | return self.env.ref("queue_job.channel_root") 36 | 37 | name = fields.Char( 38 | compute="_compute_name", 39 | inverse="_inverse_name", 40 | index=True, 41 | store=True, 42 | ) 43 | 44 | # model and method should be required, but the required flag wouldn't 45 | # give _inverse_name a chance to be executed 46 | model_id = fields.Many2one( 47 | comodel_name="ir.model", string="Model", ondelete="cascade" 48 | ) 49 | method = fields.Char() 50 | 51 | channel_id = fields.Many2one( 52 | comodel_name="queue.job.channel", 53 | string="Channel", 54 | required=True, 55 | default=lambda r: r._default_channel(), 56 | ) 57 | channel = fields.Char(related="channel_id.complete_name", store=True, readonly=True) 58 | retry_pattern = JobSerialized(string="Retry Pattern (serialized)", base_type=dict) 59 | edit_retry_pattern = fields.Text( 60 | string="Retry Pattern", 61 | compute="_compute_edit_retry_pattern", 62 | inverse="_inverse_edit_retry_pattern", 63 | help="Pattern expressing, from the count of retries on retryable errors," 64 | " the number of seconds to postpone the next execution. Setting the " 65 | "number of seconds to a 2-element tuple or list will randomize the " 66 | "retry interval between the 2 values.\n" 67 | "Example: {1: 10, 5: 20, 10: 30, 15: 300}.\n" 68 | "Example: {1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}.\n" 69 | "See the module description for details.", 70 | ) 71 | related_action = JobSerialized(string="Related Action (serialized)", base_type=dict) 72 | edit_related_action = fields.Text( 73 | string="Related Action", 74 | compute="_compute_edit_related_action", 75 | inverse="_inverse_edit_related_action", 76 | help="The action when the button *Related Action* is used on a job. " 77 | "The default action is to open the view of the record related " 78 | "to the job. 
Configured as a dictionary with optional keys: " 79 | "enable, func_name, kwargs.\n" 80 | "See the module description for details.", 81 | ) 82 | 83 | @api.depends("model_id.model", "method") 84 | def _compute_name(self): 85 | for record in self: 86 | if not (record.model_id and record.method): 87 | record.name = "" 88 | continue 89 | record.name = self.job_function_name(record.model_id.model, record.method) 90 | 91 | def _inverse_name(self): 92 | groups = regex_job_function_name.match(self.name) 93 | if not groups: 94 | raise exceptions.UserError(_("Invalid job function: {}").format(self.name)) 95 | model_name = groups[1] 96 | method = groups[2] 97 | model = ( 98 | self.env["ir.model"].sudo().search([("model", "=", model_name)], limit=1) 99 | ) 100 | if not model: 101 | raise exceptions.UserError(_("Model {} not found").format(model_name)) 102 | self.model_id = model.id 103 | self.method = method 104 | 105 | @api.depends("retry_pattern") 106 | def _compute_edit_retry_pattern(self): 107 | for record in self: 108 | retry_pattern = record._parse_retry_pattern() 109 | record.edit_retry_pattern = str(retry_pattern) 110 | 111 | def _inverse_edit_retry_pattern(self): 112 | try: 113 | edited = (self.edit_retry_pattern or "").strip() 114 | if edited: 115 | self.retry_pattern = ast.literal_eval(edited) 116 | else: 117 | self.retry_pattern = {} 118 | except (ValueError, TypeError, SyntaxError) as ex: 119 | raise exceptions.UserError( 120 | self._retry_pattern_format_error_message() 121 | ) from ex 122 | 123 | @api.depends("related_action") 124 | def _compute_edit_related_action(self): 125 | for record in self: 126 | record.edit_related_action = str(record.related_action) 127 | 128 | def _inverse_edit_related_action(self): 129 | try: 130 | edited = (self.edit_related_action or "").strip() 131 | if edited: 132 | self.related_action = ast.literal_eval(edited) 133 | else: 134 | self.related_action = {} 135 | except (ValueError, TypeError, SyntaxError) as ex: 136 | raise exceptions.UserError( 137 | self._related_action_format_error_message() 138 | ) from ex 139 | 140 | @staticmethod 141 | def job_function_name(model_name, method_name): 142 | return f"<{model_name}>.{method_name}" 143 | 144 | def job_default_config(self): 145 | return self.JobConfig( 146 | channel="root", 147 | retry_pattern={}, 148 | related_action_enable=True, 149 | related_action_func_name=None, 150 | related_action_kwargs={}, 151 | job_function_id=None, 152 | ) 153 | 154 | def _parse_retry_pattern(self): 155 | try: 156 | # as json can't have integers as keys and the field is stored 157 | # as json, convert back to int 158 | retry_pattern = { 159 | int(try_count): postpone_seconds 160 | for try_count, postpone_seconds in self.retry_pattern.items() 161 | } 162 | except ValueError: 163 | _logger.error( 164 | "Invalid retry pattern for job function %s," 165 | " keys could not be parsed as integers, fallback" 166 | " to the default retry pattern.", 167 | self.name, 168 | ) 169 | retry_pattern = {} 170 | return retry_pattern 171 | 172 | @tools.ormcache("name") 173 | def job_config(self, name): 174 | config = self.search([("name", "=", name)], limit=1) 175 | if not config: 176 | return self.job_default_config() 177 | retry_pattern = config._parse_retry_pattern() 178 | return self.JobConfig( 179 | channel=config.channel, 180 | retry_pattern=retry_pattern, 181 | related_action_enable=config.related_action.get("enable", True), 182 | related_action_func_name=config.related_action.get("func_name"), 183 | 
related_action_kwargs=config.related_action.get("kwargs", {}), 184 | job_function_id=config.id, 185 | ) 186 | 187 | def _retry_pattern_format_error_message(self): 188 | return _( 189 | "Unexpected format of Retry Pattern for {}.\n" 190 | "Example of valid format:\n" 191 | "{{1: 300, 5: 600, 10: 1200, 15: 3000}}" 192 | ).format(self.name) 193 | 194 | @api.constrains("retry_pattern") 195 | def _check_retry_pattern(self): 196 | for record in self: 197 | retry_pattern = record.retry_pattern 198 | if not retry_pattern: 199 | continue 200 | 201 | all_values = list(retry_pattern) + list(retry_pattern.values()) 202 | for value in all_values: 203 | try: 204 | int(value) 205 | except ValueError as ex: 206 | raise exceptions.UserError( 207 | record._retry_pattern_format_error_message() 208 | ) from ex 209 | 210 | def _related_action_format_error_message(self): 211 | return _( 212 | "Unexpected format of Related Action for {}.\n" 213 | "Example of valid format:\n" 214 | '{{"enable": True, "func_name": "related_action_foo",' 215 | ' "kwargs" {{"limit": 10}}}}' 216 | ).format(self.name) 217 | 218 | @api.constrains("related_action") 219 | def _check_related_action(self): 220 | valid_keys = ("enable", "func_name", "kwargs") 221 | for record in self: 222 | related_action = record.related_action 223 | if not related_action: 224 | continue 225 | 226 | if any(key not in valid_keys for key in related_action): 227 | raise exceptions.UserError( 228 | record._related_action_format_error_message() 229 | ) 230 | 231 | @api.model_create_multi 232 | def create(self, vals_list): 233 | records = self.browse() 234 | if self.env.context.get("install_mode"): 235 | # installing a module that creates a job function: rebinds the record 236 | # to an existing one (likely we already had the job function created by 237 | # the @job decorator previously) 238 | new_vals_list = [] 239 | for vals in vals_list: 240 | name = vals.get("name") 241 | if name: 242 | existing = self.search([("name", "=", name)], limit=1) 243 | if existing: 244 | if not existing.get_metadata()[0].get("noupdate"): 245 | existing.write(vals) 246 | records |= existing 247 | continue 248 | new_vals_list.append(vals) 249 | vals_list = new_vals_list 250 | records |= super().create(vals_list) 251 | self.env.registry.clear_cache() 252 | return records 253 | 254 | def write(self, values): 255 | res = super().write(values) 256 | self.env.registry.clear_cache() 257 | return res 258 | 259 | def unlink(self): 260 | res = super().unlink() 261 | self.env.registry.clear_cache() 262 | return res 263 | -------------------------------------------------------------------------------- /addons_external/queue_job/controllers/main.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015-2016 ACSONE SA/NV () 2 | # Copyright 2013-2016 Camptocamp SA 3 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 4 | 5 | import logging 6 | import random 7 | import time 8 | import traceback 9 | from io import StringIO 10 | 11 | from psycopg2 import OperationalError, errorcodes 12 | from werkzeug.exceptions import BadRequest, Forbidden 13 | 14 | from odoo import SUPERUSER_ID, _, api, http, registry, tools 15 | from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY 16 | 17 | from ..delay import chain, group 18 | from ..exception import FailedJobError, NothingToDoJob, RetryableJobError 19 | from ..job import ENQUEUED, Job 20 | 21 | _logger = logging.getLogger(__name__) 22 | 23 | PG_RETRY = 5 # seconds 24 | 25 | 
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5 26 | 27 | 28 | class RunJobController(http.Controller): 29 | def _try_perform_job(self, env, job): 30 | """Try to perform the job.""" 31 | job.set_started() 32 | job.store() 33 | env.cr.commit() 34 | _logger.debug("%s started", job) 35 | 36 | job.perform() 37 | job.set_done() 38 | job.store() 39 | env.flush_all() 40 | env.cr.commit() 41 | _logger.debug("%s done", job) 42 | 43 | def _enqueue_dependent_jobs(self, env, job): 44 | tries = 0 45 | while True: 46 | try: 47 | job.enqueue_waiting() 48 | except OperationalError as err: 49 | # Automatically retry the typical transaction serialization 50 | # errors 51 | if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY: 52 | raise 53 | if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE: 54 | _logger.info( 55 | "%s, maximum number of tries reached to update dependencies", 56 | errorcodes.lookup(err.pgcode), 57 | ) 58 | raise 59 | wait_time = random.uniform(0.0, 2**tries) 60 | tries += 1 61 | _logger.info( 62 | "%s, retry %d/%d in %.04f sec...", 63 | errorcodes.lookup(err.pgcode), 64 | tries, 65 | DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE, 66 | wait_time, 67 | ) 68 | time.sleep(wait_time) 69 | else: 70 | break 71 | 72 | @http.route("/queue_job/runjob", type="http", auth="none", save_session=False) 73 | def runjob(self, db, job_uuid, **kw): 74 | http.request.session.db = db 75 | env = http.request.env(user=SUPERUSER_ID) 76 | 77 | def retry_postpone(job, message, seconds=None): 78 | job.env.clear() 79 | with registry(job.env.cr.dbname).cursor() as new_cr: 80 | job.env = api.Environment(new_cr, SUPERUSER_ID, {}) 81 | job.postpone(result=message, seconds=seconds) 82 | job.set_pending(reset_retry=False) 83 | job.store() 84 | 85 | # ensure the job to run is in the correct state and lock the record 86 | env.cr.execute( 87 | "SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE", 88 | (job_uuid, ENQUEUED), 89 | ) 90 | if not env.cr.fetchone(): 91 | _logger.warning( 92 | "was requested to run job %s, but it does not exist, " 93 | "or is not in state %s", 94 | job_uuid, 95 | ENQUEUED, 96 | ) 97 | return "" 98 | 99 | job = Job.load(env, job_uuid) 100 | assert job and job.state == ENQUEUED 101 | 102 | try: 103 | try: 104 | self._try_perform_job(env, job) 105 | except OperationalError as err: 106 | # Automatically retry the typical transaction serialization 107 | # errors 108 | if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY: 109 | raise 110 | 111 | _logger.debug("%s OperationalError, postponed", job) 112 | raise RetryableJobError( 113 | tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY 114 | ) from err 115 | 116 | except NothingToDoJob as err: 117 | if str(err): 118 | msg = str(err) 119 | else: 120 | msg = _("Job interrupted and set to Done: nothing to do.") 121 | job.set_done(msg) 122 | job.store() 123 | env.cr.commit() 124 | 125 | except RetryableJobError as err: 126 | # delay the job later, requeue 127 | retry_postpone(job, str(err), seconds=err.seconds) 128 | _logger.debug("%s postponed", job) 129 | # Do not trigger the error up because we don't want an exception 130 | # traceback in the logs we should have the traceback when all 131 | # retries are exhausted 132 | env.cr.rollback() 133 | return "" 134 | 135 | except (FailedJobError, Exception) as orig_exception: 136 | buff = StringIO() 137 | traceback.print_exc(file=buff) 138 | traceback_txt = buff.getvalue() 139 | _logger.error(traceback_txt) 140 | job.env.clear() 141 | with registry(job.env.cr.dbname).cursor() as new_cr: 142 | job.env 
= job.env(cr=new_cr) 143 | vals = self._get_failure_values(job, traceback_txt, orig_exception) 144 | job.set_failed(**vals) 145 | job.store() 146 | buff.close() 147 | raise 148 | 149 | _logger.debug("%s enqueue depends started", job) 150 | self._enqueue_dependent_jobs(env, job) 151 | _logger.debug("%s enqueue depends done", job) 152 | 153 | return "" 154 | 155 | def _get_failure_values(self, job, traceback_txt, orig_exception): 156 | """Collect relevant data from exception.""" 157 | exception_name = orig_exception.__class__.__name__ 158 | if hasattr(orig_exception, "__module__"): 159 | exception_name = orig_exception.__module__ + "." + exception_name 160 | exc_message = getattr(orig_exception, "name", str(orig_exception)) 161 | return { 162 | "exc_info": traceback_txt, 163 | "exc_name": exception_name, 164 | "exc_message": exc_message, 165 | } 166 | 167 | # flake8: noqa: C901 168 | @http.route("/queue_job/create_test_job", type="http", auth="user") 169 | def create_test_job( 170 | self, 171 | priority=None, 172 | max_retries=None, 173 | channel=None, 174 | description="Test job", 175 | size=1, 176 | failure_rate=0, 177 | ): 178 | if not http.request.env.user.has_group("base.group_erp_manager"): 179 | raise Forbidden(_("Access Denied")) 180 | 181 | if failure_rate is not None: 182 | try: 183 | failure_rate = float(failure_rate) 184 | except (ValueError, TypeError): 185 | failure_rate = 0 186 | 187 | if not (0 <= failure_rate <= 1): 188 | raise BadRequest("failure_rate must be between 0 and 1") 189 | 190 | if size is not None: 191 | try: 192 | size = int(size) 193 | except (ValueError, TypeError): 194 | size = 1 195 | 196 | if priority is not None: 197 | try: 198 | priority = int(priority) 199 | except ValueError: 200 | priority = None 201 | 202 | if max_retries is not None: 203 | try: 204 | max_retries = int(max_retries) 205 | except ValueError: 206 | max_retries = None 207 | 208 | if size == 1: 209 | return self._create_single_test_job( 210 | priority=priority, 211 | max_retries=max_retries, 212 | channel=channel, 213 | description=description, 214 | failure_rate=failure_rate, 215 | ) 216 | 217 | if size > 1: 218 | return self._create_graph_test_jobs( 219 | size, 220 | priority=priority, 221 | max_retries=max_retries, 222 | channel=channel, 223 | description=description, 224 | failure_rate=failure_rate, 225 | ) 226 | return "" 227 | 228 | def _create_single_test_job( 229 | self, 230 | priority=None, 231 | max_retries=None, 232 | channel=None, 233 | description="Test job", 234 | size=1, 235 | failure_rate=0, 236 | ): 237 | delayed = ( 238 | http.request.env["queue.job"] 239 | .with_delay( 240 | priority=priority, 241 | max_retries=max_retries, 242 | channel=channel, 243 | description=description, 244 | ) 245 | ._test_job(failure_rate=failure_rate) 246 | ) 247 | return f"job uuid: {delayed.db_record().uuid}" 248 | 249 | TEST_GRAPH_MAX_PER_GROUP = 5 250 | 251 | def _create_graph_test_jobs( 252 | self, 253 | size, 254 | priority=None, 255 | max_retries=None, 256 | channel=None, 257 | description="Test job", 258 | failure_rate=0, 259 | ): 260 | model = http.request.env["queue.job"] 261 | current_count = 0 262 | 263 | possible_grouping_methods = (chain, group) 264 | 265 | tails = [] # we can connect new graph chains/groups to tails 266 | root_delayable = None 267 | while current_count < size: 268 | jobs_count = min( 269 | size - current_count, random.randint(1, self.TEST_GRAPH_MAX_PER_GROUP) 270 | ) 271 | 272 | jobs = [] 273 | for __ in range(jobs_count): 274 | current_count += 1 275 | 
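# Note (added comment): each loop iteration below builds one delayable test
# job; descriptions are numbered ("<description> #N") so the generated jobs
# can be told apart in the UI.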
jobs.append( 276 | model.delayable( 277 | priority=priority, 278 | max_retries=max_retries, 279 | channel=channel, 280 | description="%s #%d" % (description, current_count), 281 | )._test_job(failure_rate=failure_rate) 282 | ) 283 | 284 | grouping = random.choice(possible_grouping_methods) 285 | delayable = grouping(*jobs) 286 | if not root_delayable: 287 | root_delayable = delayable 288 | else: 289 | tail_delayable = random.choice(tails) 290 | tail_delayable.on_done(delayable) 291 | tails.append(delayable) 292 | 293 | root_delayable.delay() 294 | 295 | return "graph uuid: {}".format( 296 | list(root_delayable._head())[0]._generated_job.graph_uuid 297 | ) 298 | -------------------------------------------------------------------------------- /config/Mountrix-vdb.json: -------------------------------------------------------------------------------- 1 | {"vdbs":[ 2 | { 3 | "inProduction": false, 4 | "acCaseSensitiveFields": false, 5 | "acCaseSensitiveTables": false, 6 | "accessType": "postgres", 7 | "cacheEnabled": true, 8 | "pciHipaaCache": false, 9 | "cacheSize": 5.0E7, 10 | "cacheSoftValues": true, 11 | "cacheMaxExpiry": 10.0, 12 | "cacheObjectCount": 50000.0, 13 | "cacheObjectSize": 4000000.0, 14 | "preCacheTracking": true, 15 | "cacheSizeRate": 1.0, 16 | "cacheType": "redis", 17 | "clusterCacheManager": false, 18 | "customKey": false, 19 | "debugMode": false, 20 | "gridPassword": "eiSa7so4Oochae4B", 21 | "gridPort": 6379.0, 22 | "databaseNumber": 0.0, 23 | "gridServer": "redis", 24 | "id": 1.714263662452E12, 25 | "jdbcClass": "com.heimdalldata.HeimdallDriver", 26 | "jdbcUrl": "jdbc:heimdall://\u003cHeimdall Server IP:port\u003e/Mountrix-vdb?hduser\u003d\u003cuser\u003e\u0026hdpassword\u003d\u003cpassword\u003e", 27 | "excludeKeyCatalog": false, 28 | "excludeKeySchema": false, 29 | "excludeKeySqlComments": false, 30 | "excludeKeyUser": false, 31 | "excludeKeyVdb": false, 32 | "localCacheEnable": true, 33 | "localhostOnly": false, 34 | "logConnections": false, 35 | "loggingEnabled": true, 36 | "logMethods": false, 37 | "liteDebug": false, 38 | "logResultSetMethods": false, 39 | "logSql": false, 40 | "mgmtProxyEnable": false, 41 | "runAsService": true, 42 | "tlsEnable": true, 43 | "tlsRequired": false, 44 | "tlsLegacy": false, 45 | "kerberosAuthEnabled": false, 46 | "ldapAuthEnabled": false, 47 | "delayedTransaction": true, 48 | "paranoia": false, 49 | "multiplex": true, 50 | "multiplexTimeout": 0.0, 51 | "healthCheckTokenAuthorizationEnabled": false, 52 | "spModeEnabled": false, 53 | "proxyBalanceMode": "load", 54 | "redirectBy": "redirectPrivateIp", 55 | "awsAZRedirect": false, 56 | "customLdapConfig": false, 57 | "synchAuthentication": false, 58 | "tokenizedAuthentication": false, 59 | "proxyauthEnabled": false, 60 | "authMode": "passthrough", 61 | "proxyAddress": "0.0.0.0", 62 | "proxyPort": "5432", 63 | "xmx": 600.0, 64 | "revalidateCache": false, 65 | "shipConsoleLogs": true, 66 | "writeLogsToFile": true, 67 | "logAuthentications": false, 68 | "sqlDrivenAuthEnabled": false, 69 | "authorizationQuery": "select * from heimdall.pg_hba where enabled \u003d true order by line_number asc", 70 | "certificateAlias": "global_use_certificate", 71 | "certificate": "-----BEGIN 
CERTIFICATE-----\nMIICpzCCAY+gAwIBAgIJANVlc3oXuP+3MA0GCSqGSIb3DQEBCwUAMBMxETAPBgNVBAMMCEhlaW1kYWxsMB4XDTI0MDQyNzAwMTc1MVoXDTM0MDQyNzAwMTc1MVowEzERMA8GA1UEAwwISGVpbWRhbGwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtRkTM1d0IhY1qM0whHbNbPWzhxSBNd3O8e9unvhQtPSInweQBn6t+vEXA3DNKVdSEIpQr5Pe3vmQ5r6m0VNyDKG8n1cb8DAQnoYd22hF0h216JbT2t4fdP1r5o73Yni9zIHEJ06jIrHHvymyn6dfRUunT5kcnP8xm43qDmFHp+DolpAgcTWwMurz3d5fl/ZmQkFWjHhe4etuWpP7xs9dBNrDYaW+zLcxt/AsQ6ZRsc+B8WIKNxpptMQARJXS48vc5nIQOFNuuvcs+c6k0tw6UZEtgc76p37sjfp+6BhJIM73GOFywZkYf7JHlqU+pUfcyPD5CqTC2v+H+dZvsFKrbAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJ8Vr/bo+iWvb0+DwfCBLa25A6smqcjJZ6KZGk1FE0SAPx8OpAumma30KTa8jg7pRccmkeAOuTs7gfOGgX0eQvflfap4RdZJJVwRUmdLmLnqEag32Rkw6A87iOTbSLdeUSa1TmRyrKmYrFVmYuVfGW5Kj/+iCIkB5/Twijvoeues/9ttpg1+By+leuQPSLERARY0U/oY1nGedjn+45h+HsJuQqgq7tfycw1KOZESUybOiVnfsnciRTaaR6i7pfuDbLQhw4G9jnBkZqE3fQ+DXw7aCIw/phfNv45nu1Y3wE8jj2GSc3IOm1akAMInNtU7CL5SCILCymwh6NupwHVOG5M\u003d\n-----END CERTIFICATE-----", 72 | "privateKey": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtRkTM1d0IhY1qM0whHbNbPWzhxSBNd3O8e9unvhQtPSInweQBn6t+vEXA3DNKVdSEIpQr5Pe3vmQ5r6m0VNyDKG8n1cb8DAQnoYd22hF0h216JbT2t4fdP1r5o73Yni9zIHEJ06jIrHHvymyn6dfRUunT5kcnP8xm43qDmFHp+DolpAgcTWwMurz3d5fl/ZmQkFWjHhe4etuWpP7xs9dBNrDYaW+zLcxt/AsQ6ZRsc+B8WIKNxpptMQARJXS48vc5nIQOFNuuvcs+c6k0tw6UZEtgc76p37sjfp+6BhJIM73GOFywZkYf7JHlqU+pUfcyPD5CqTC2v+H+dZvsFKrbAgMBAAECggEASC0kDXrsNleDP4CcrpKzmkbs6uJ1AS/HyvSA28+mke2MI6l1M2PySLleCObwin9UJbJNrDcVJLmvAAR3GUHcFNor6xj2aRkMRgHX7p8JdFPRO3cneTmb44I3gBM/ylKiCz+xNlmvTd/fGfZk6lDS8EOsWPLsIqJFn3oUF2rEfEOVIel/ZDgT7CyrVKYbjAnBIYJYgD/w0bl88JgDJvSlSBq0wFzCxokqs0C6RBAY0zgfu0yFVFhxxNDs+HHOUZiOobifqQJAbICNL1nmYWRwF7wdBIXOnnrFlsbgfaSN0WQCo1h/w1QH3QTdGDXh64qBbIE/KW5VcYLe/WjH/fnQ+QKBgQDel6OaPu8bo9Uxd9fllAU4RosnOmj6ELVCxvBSu/asgA1FzamRIe/O4WiW2NuxDLl8Nh+qUK3G+YwQXV4A7oY2/nFuJyb8v0LOXlebxgKksmXa60OoNHUbSeCy2Y/E7DrdciYrBbeS4gRRgFzLUXUtQJxXnfRHfIeoA7+gyTqLdwKBgQDHR8HePVNI4Iy5gJEczUYeeJXIxyxyEQrTZIUVZi0iO4yk64uY4R8R9dX+KBJEwff5nldfUyCNVS3hQdozTmzvsrZDyk85sctpzpKWmKl3IbtTbZMpPUdAV9XhDl1+eG9Vktf6BX9Sk6PeaHpmhxpc2o8IoYXkjKfL/u7VdYbsvQKBgHxSvbHdDLenXd/vS8qSTggMfpQ+b3ub8WGGs5vCGBhovQELyXWiOsqV41C9dpcAwGi74hvCTA0iPDS3sVVb4+K7YqdffssUGEAjOVNr2PYcHf+T/4nnIrFHJhU+pqoaNxaQNvk4jDuhuT+8LhfxOz03JRtawaFb0Hjq/xDS07wpAoGAdvWdA1+0YeK53tqixYC8hOAS+gH7xDPsNHjjAP03oCHQOXrqoF5yv7PzBH9DOIxZzUb6fUsQOY5JjxhSeyIr4YLbTuhsFtLNzOGlnASR1S3rt0fH+2w7JmvIn0qGAUM/ouO/zd6OPOspfVD9EEtCL3/AtrRFS0OzvxD13bME0jkCgYEAjFcHqvnHPokQDIXuMzkKbsm894iGyHi3Fsk4GJszJrr3s8uS6Tp7vy7yFREWPF8OTlHN0Xk7H5p7qYANPz/6QbHgyIcbmMJWk9pvSAVR0Mf40QwvJEqIwyXA7k/4k6A7T17HgmCRIA5NAWhZ9gAA5x4i8RwfJuQJtWPF9wKTEys\u003d\n-----END PRIVATE KEY-----", 73 | "privateKeyAlgorithm": "RSA", 74 | "accessKey": "EmggBc1eJfmjkAhf", 75 | "secretKey": "m80IdoASKwKu51RS", 76 | "useSsl": false, 77 | "verifyPeer": false, 78 | "jmxHostname": "${hostname}", 79 | "jmxPort": 0.0, 80 | "dnsPort": 0.0, 81 | "userCaseSensitive": false, 82 | "enabled": true, 83 | "file": "config/Mountrix-vdb_1.conf", 84 | "name": "Mountrix-vdb", 85 | "version": 1.0, 86 | "dataSources": [ 87 | { 88 | "enabled": true, 89 | "file": "config/Mountrix-source_1.conf", 90 | "name": "Mountrix-source", 91 | "version": 1 92 | } 93 | ], 94 | "rules": [ 95 | { 96 | "enabled": true, 97 | "file": "config/Mountrix-rules_1.conf", 98 | "name": "Mountrix-rules", 99 | "version": 1 100 | } 101 | ], 102 | "properties": [ 103 | { 104 | "key": "nativePrepareHandleId", 105 | "value": "true" 106 | } 107 | ], 108 | "users": [ 109 | { 110 | "user": "odoo", 111 | "password": "odoo" 
112 | } 113 | ], 114 | "pinnedClients": [], 115 | "modules": [ 116 | "modules/heimdallloadbalancer-1.0-23.11.06.1.jar", 117 | "modules/heimdallextractplan-1.0-23.11.06.1.jar", 118 | "modules/heimdallfirewall-1.0-23.11.06.1.jar", 119 | "modules/heimdallforward-1.0-23.11.06.1.jar", 120 | "modules/heimdallasync-1.0-23.11.06.1.jar", 121 | "modules/heimdallredis-6.2.5-23.11.06.1.jar", 122 | "modules/heimdallnotification-23.11.06.1.jar", 123 | "modules/heimdalltrigger-1.0-23.11.06.1.jar", 124 | "modules/heimdallhazelcast-4.2.8-23.11.06.1.jar", 125 | "modules/heimdallredshift-1.0-23.11.06.1.jar", 126 | "modules/heimdallpostgres-1.0-23.11.06.1.jar", 127 | "modules/heimdallmysql-1.5-23.11.06.1.jar", 128 | "modules/heimdallpgsqlprotocol-1.0-23.11.06.1.jar", 129 | "modules/heimdallsqlserver-1.0-23.11.06.1.jar" 130 | ], 131 | "cloudServices": { 132 | "cloudLogging": false, 133 | "cloudMetrics": false 134 | } 135 | }] 136 | ,"sources":[ 137 | { 138 | "url": "jdbc:postgresql://db-master:5432/${catalog}", 139 | "driverConfig": { 140 | "enabled": true, 141 | "file": "config/PostgreSQL_1.conf", 142 | "name": "PostgreSQL", 143 | "version": 1 144 | }, 145 | "driverClass": "org.postgresql.Driver", 146 | "driverUrl": "/drivers/PostgreSQL/postgresql-42.4.1-streaming.jar", 147 | "dataSourceClass": "org.postgresql.ds.PGSimpleDataSource", 148 | "properties": { 149 | "sslmode": "disable", 150 | "password": "odoo", 151 | "testQuery": "SELECT 1", 152 | "extraFloatDigits": "false", 153 | "defaultCatalog": "postgres", 154 | "user": "odoo", 155 | "preferQueryMode": "extended", 156 | "url": "jdbc:postgresql://db-master:5432/${catalog}" 157 | }, 158 | "usepool": true, 159 | "poolProperties": {}, 160 | "servers": [ 161 | { 162 | "name": "Primary", 163 | "url": "jdbc:postgresql://db-master:5432/${catalog}", 164 | "enabled": true, 165 | "writeable": false, 166 | "weight": 1, 167 | "readWeight": 0, 168 | "writeWeight": 0, 169 | "active": true, 170 | "failed": false 171 | }, 172 | { 173 | "name": "Replica1", 174 | "url": "jdbc:postgresql://db-replica:5432/${catalog}", 175 | "enabled": true, 176 | "writeable": true, 177 | "weight": 0, 178 | "readWeight": 0, 179 | "writeWeight": 0, 180 | "active": true, 181 | "failed": false 182 | } 183 | ], 184 | "useLoadBalancing": true, 185 | "useResponseMetrics": false, 186 | "trackClusterChanges": false, 187 | "useAwsSecret": false, 188 | "desiredWriteCapacity": 1, 189 | "desiredReadCapacity": 10, 190 | "trackReplicationLags": false, 191 | "timeWindow": 10000, 192 | "alertThreshold": 0, 193 | "responseMetricMultiplier": 2, 194 | "holdTime": 30000, 195 | "obscure": { 196 | "secretKey": "Odrv3jRI9sliFSZUFXwbynSf68Llbi" 197 | }, 198 | "lastDatabaseLookup": 0, 199 | "enabled": true, 200 | "file": "config/Mountrix-source_1.conf", 201 | "name": "Mountrix-source", 202 | "version": 1 203 | }] 204 | ,"drivers":[ 205 | { 206 | "driverVersion": "42.4.1-streaming", 207 | "exampleUrl": "jdbc:postgresql:[\u003c//host\u003e[:\u003c5432\u003e/]]\u003cdatabase\u003e", 208 | "writerUrl": "jdbc:postgresql://${writer}/${database}", 209 | "readerUrl": "jdbc:postgresql://${replica}/${database}", 210 | "websiteUrl": "http://jdbc.postgresql.org", 211 | "notes": "JDBC 4.0, can also be used for PG compatible databases, such as Greenplum", 212 | "jdbcClass": "org.postgresql.Driver", 213 | "dataSourceClass": "org.postgresql.ds.PGSimpleDataSource", 214 | "xaDataSourceClass": "org.postgresql.xa.PGXADataSource", 215 | "uploadedDrivers": [ 216 | "/drivers/PostgreSQL/postgresql-42.4.1-streaming.jar" 217 | ], 218 | 
"autoUpdate": true, 219 | "enabled": true, 220 | "file": "config/PostgreSQL_1.conf", 221 | "name": "PostgreSQL", 222 | "version": 1 223 | }] 224 | ,"rules":[ 225 | { 226 | "rules": [ 227 | { 228 | "enabled": true, 229 | "type": "L", 230 | "patterns": [ 231 | "" 232 | ], 233 | "rowPatterns": [ 234 | "" 235 | ], 236 | "operator": "AND", 237 | "columnNameOperator": "AND", 238 | "intrans": true, 239 | "properties": {} 240 | }, 241 | { 242 | "enabled": true, 243 | "type": "C", 244 | "patterns": [ 245 | "" 246 | ], 247 | "rowPatterns": [ 248 | "" 249 | ], 250 | "operator": "AND", 251 | "columnNameOperator": "AND", 252 | "intrans": false, 253 | "properties": { 254 | "ttl": "5*60000" 255 | } 256 | }, 257 | { 258 | "enabled": true, 259 | "type": "N", 260 | "patterns": [ 261 | "SET TRANSACTION ISOLATION LEVEL REPEATABLE READ" 262 | ], 263 | "rowPatterns": [ 264 | "" 265 | ], 266 | "operator": "AND", 267 | "columnNameOperator": "AND", 268 | "intrans": false, 269 | "properties": { 270 | "responseInt": "1" 271 | } 272 | } 273 | ], 274 | "responseRules": [], 275 | "enabled": true, 276 | "file": "config/Mountrix-rules_1.conf", 277 | "name": "Mountrix-rules", 278 | "version": 1 279 | }]} -------------------------------------------------------------------------------- /addons_external/queue_job/tests/test_delayable.py: -------------------------------------------------------------------------------- 1 | # copyright 2019 Camptocamp 2 | # license agpl-3.0 or later (http://www.gnu.org/licenses/agpl.html) 3 | 4 | import unittest 5 | from unittest import mock 6 | 7 | from odoo.addons.queue_job.delay import Delayable, DelayableGraph 8 | 9 | 10 | class TestDelayable(unittest.TestCase): 11 | def setUp(self): 12 | super().setUp() 13 | self.recordset = mock.MagicMock(name="recordset") 14 | 15 | def test_delayable_set(self): 16 | dl = Delayable(self.recordset) 17 | dl.set(priority=15) 18 | self.assertEqual(dl.priority, 15) 19 | dl.set({"priority": 20, "description": "test"}) 20 | self.assertEqual(dl.priority, 20) 21 | self.assertEqual(dl.description, "test") 22 | 23 | def test_delayable_set_unknown(self): 24 | dl = Delayable(self.recordset) 25 | with self.assertRaises(ValueError): 26 | dl.set(foo=15) 27 | 28 | def test_graph_add_vertex_edge(self): 29 | graph = DelayableGraph() 30 | graph.add_vertex("a") 31 | self.assertEqual(graph._graph, {"a": set()}) 32 | graph.add_edge("a", "b") 33 | self.assertEqual(graph._graph, {"a": {"b"}, "b": set()}) 34 | graph.add_edge("b", "c") 35 | self.assertEqual(graph._graph, {"a": {"b"}, "b": {"c"}, "c": set()}) 36 | 37 | def test_graph_vertices(self): 38 | graph = DelayableGraph({"a": {"b"}, "b": {"c"}, "c": set()}) 39 | self.assertEqual(graph.vertices(), {"a", "b", "c"}) 40 | 41 | def test_graph_edges(self): 42 | graph = DelayableGraph( 43 | {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()} 44 | ) 45 | self.assertEqual( 46 | sorted(graph.edges()), 47 | sorted( 48 | [ 49 | ("a", "b"), 50 | ("b", "c"), 51 | ("b", "d"), 52 | ("c", "e"), 53 | ] 54 | ), 55 | ) 56 | 57 | def test_graph_connect(self): 58 | node_tail = Delayable(self.recordset) 59 | node_tail2 = Delayable(self.recordset) 60 | node_middle = Delayable(self.recordset) 61 | node_top = Delayable(self.recordset) 62 | node_middle.on_done(node_tail) 63 | node_middle.on_done(node_tail2) 64 | node_top.on_done(node_middle) 65 | collected = node_top._graph._connect_graphs() 66 | self.assertEqual( 67 | collected._graph, 68 | { 69 | node_tail: set(), 70 | node_tail2: set(), 71 | node_middle: {node_tail, node_tail2}, 72 | 
node_top: {node_middle}, 73 | }, 74 | ) 75 | 76 | def test_graph_paths(self): 77 | graph = DelayableGraph( 78 | {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()} 79 | ) 80 | paths = list(graph.paths("a")) 81 | self.assertEqual(sorted(paths), sorted([["a", "b", "d"], ["a", "b", "c", "e"]])) 82 | paths = list(graph.paths("b")) 83 | self.assertEqual(sorted(paths), sorted([["b", "d"], ["b", "c", "e"]])) 84 | paths = list(graph.paths("c")) 85 | self.assertEqual(paths, [["c", "e"]]) 86 | paths = list(graph.paths("d")) 87 | self.assertEqual(paths, [["d"]]) 88 | paths = list(graph.paths("e")) 89 | self.assertEqual(paths, [["e"]]) 90 | 91 | def test_graph_repr(self): 92 | graph = DelayableGraph( 93 | {"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()} 94 | ) 95 | actual = repr(graph) 96 | expected = ["'a' → 'b' → 'c' → 'e'", "'a' → 'b' → 'd'"] 97 | self.assertEqual(sorted(actual.split("\n")), expected) 98 | 99 | def test_graph_topological_sort(self): 100 | # the graph is an example from 101 | # https://en.wikipedia.org/wiki/Topological_sorting 102 | # if you want a visual representation 103 | graph = DelayableGraph( 104 | { 105 | 5: {11}, 106 | 7: {11, 8}, 107 | 3: {8, 10}, 108 | 11: {2, 9, 10}, 109 | 2: set(), 110 | 8: {9}, 111 | 9: set(), 112 | 10: set(), 113 | } 114 | ) 115 | 116 | # these are all the pre-computed combinations that 117 | # respect the dependencies order 118 | valid_solutions = [ 119 | [3, 5, 7, 8, 11, 2, 9, 10], 120 | [3, 5, 7, 8, 11, 2, 10, 9], 121 | [3, 5, 7, 8, 11, 9, 2, 10], 122 | [3, 5, 7, 8, 11, 9, 10, 2], 123 | [3, 5, 7, 8, 11, 10, 2, 9], 124 | [3, 5, 7, 8, 11, 10, 9, 2], 125 | [3, 5, 7, 11, 2, 8, 9, 10], 126 | [3, 5, 7, 11, 2, 8, 10, 9], 127 | [3, 5, 7, 11, 2, 10, 8, 9], 128 | [3, 5, 7, 11, 8, 2, 9, 10], 129 | [3, 5, 7, 11, 8, 2, 10, 9], 130 | [3, 5, 7, 11, 8, 9, 2, 10], 131 | [3, 5, 7, 11, 8, 9, 10, 2], 132 | [3, 5, 7, 11, 8, 10, 2, 9], 133 | [3, 5, 7, 11, 8, 10, 9, 2], 134 | [3, 5, 7, 11, 10, 2, 8, 9], 135 | [3, 5, 7, 11, 10, 8, 2, 9], 136 | [3, 5, 7, 11, 10, 8, 9, 2], 137 | [3, 7, 5, 8, 11, 2, 9, 10], 138 | [3, 7, 5, 8, 11, 2, 10, 9], 139 | [3, 7, 5, 8, 11, 9, 2, 10], 140 | [3, 7, 5, 8, 11, 9, 10, 2], 141 | [3, 7, 5, 8, 11, 10, 2, 9], 142 | [3, 7, 5, 8, 11, 10, 9, 2], 143 | [3, 7, 5, 11, 2, 8, 9, 10], 144 | [3, 7, 5, 11, 2, 8, 10, 9], 145 | [3, 7, 5, 11, 2, 10, 8, 9], 146 | [3, 7, 5, 11, 8, 2, 9, 10], 147 | [3, 7, 5, 11, 8, 2, 10, 9], 148 | [3, 7, 5, 11, 8, 9, 2, 10], 149 | [3, 7, 5, 11, 8, 9, 10, 2], 150 | [3, 7, 5, 11, 8, 10, 2, 9], 151 | [3, 7, 5, 11, 8, 10, 9, 2], 152 | [3, 7, 5, 11, 10, 2, 8, 9], 153 | [3, 7, 5, 11, 10, 8, 2, 9], 154 | [3, 7, 5, 11, 10, 8, 9, 2], 155 | [3, 7, 8, 5, 11, 2, 9, 10], 156 | [3, 7, 8, 5, 11, 2, 10, 9], 157 | [3, 7, 8, 5, 11, 9, 2, 10], 158 | [3, 7, 8, 5, 11, 9, 10, 2], 159 | [3, 7, 8, 5, 11, 10, 2, 9], 160 | [3, 7, 8, 5, 11, 10, 9, 2], 161 | [5, 3, 7, 8, 11, 2, 9, 10], 162 | [5, 3, 7, 8, 11, 2, 10, 9], 163 | [5, 3, 7, 8, 11, 9, 2, 10], 164 | [5, 3, 7, 8, 11, 9, 10, 2], 165 | [5, 3, 7, 8, 11, 10, 2, 9], 166 | [5, 3, 7, 8, 11, 10, 9, 2], 167 | [5, 3, 7, 11, 2, 8, 9, 10], 168 | [5, 3, 7, 11, 2, 8, 10, 9], 169 | [5, 3, 7, 11, 2, 10, 8, 9], 170 | [5, 3, 7, 11, 8, 2, 9, 10], 171 | [5, 3, 7, 11, 8, 2, 10, 9], 172 | [5, 3, 7, 11, 8, 9, 2, 10], 173 | [5, 3, 7, 11, 8, 9, 10, 2], 174 | [5, 3, 7, 11, 8, 10, 2, 9], 175 | [5, 3, 7, 11, 8, 10, 9, 2], 176 | [5, 3, 7, 11, 10, 2, 8, 9], 177 | [5, 3, 7, 11, 10, 8, 2, 9], 178 | [5, 3, 7, 11, 10, 8, 9, 2], 179 | [5, 7, 3, 8, 11, 2, 9, 10], 180 | [5, 7, 3, 8, 11, 2, 10, 9], 181 
| [5, 7, 3, 8, 11, 9, 2, 10], 182 | [5, 7, 3, 8, 11, 9, 10, 2], 183 | [5, 7, 3, 8, 11, 10, 2, 9], 184 | [5, 7, 3, 8, 11, 10, 9, 2], 185 | [5, 7, 3, 11, 2, 8, 9, 10], 186 | [5, 7, 3, 11, 2, 8, 10, 9], 187 | [5, 7, 3, 11, 2, 10, 8, 9], 188 | [5, 7, 3, 11, 8, 2, 9, 10], 189 | [5, 7, 3, 11, 8, 2, 10, 9], 190 | [5, 7, 3, 11, 8, 9, 2, 10], 191 | [5, 7, 3, 11, 8, 9, 10, 2], 192 | [5, 7, 3, 11, 8, 10, 2, 9], 193 | [5, 7, 3, 11, 8, 10, 9, 2], 194 | [5, 7, 3, 11, 10, 2, 8, 9], 195 | [5, 7, 3, 11, 10, 8, 2, 9], 196 | [5, 7, 3, 11, 10, 8, 9, 2], 197 | [5, 7, 11, 2, 3, 8, 9, 10], 198 | [5, 7, 11, 2, 3, 8, 10, 9], 199 | [5, 7, 11, 2, 3, 10, 8, 9], 200 | [5, 7, 11, 3, 2, 8, 9, 10], 201 | [5, 7, 11, 3, 2, 8, 10, 9], 202 | [5, 7, 11, 3, 2, 10, 8, 9], 203 | [5, 7, 11, 3, 8, 2, 9, 10], 204 | [5, 7, 11, 3, 8, 2, 10, 9], 205 | [5, 7, 11, 3, 8, 9, 2, 10], 206 | [5, 7, 11, 3, 8, 9, 10, 2], 207 | [5, 7, 11, 3, 8, 10, 2, 9], 208 | [5, 7, 11, 3, 8, 10, 9, 2], 209 | [5, 7, 11, 3, 10, 2, 8, 9], 210 | [5, 7, 11, 3, 10, 8, 2, 9], 211 | [5, 7, 11, 3, 10, 8, 9, 2], 212 | [7, 3, 5, 8, 11, 2, 9, 10], 213 | [7, 3, 5, 8, 11, 2, 10, 9], 214 | [7, 3, 5, 8, 11, 9, 2, 10], 215 | [7, 3, 5, 8, 11, 9, 10, 2], 216 | [7, 3, 5, 8, 11, 10, 2, 9], 217 | [7, 3, 5, 8, 11, 10, 9, 2], 218 | [7, 3, 5, 11, 2, 8, 9, 10], 219 | [7, 3, 5, 11, 2, 8, 10, 9], 220 | [7, 3, 5, 11, 2, 10, 8, 9], 221 | [7, 3, 5, 11, 8, 2, 9, 10], 222 | [7, 3, 5, 11, 8, 2, 10, 9], 223 | [7, 3, 5, 11, 8, 9, 2, 10], 224 | [7, 3, 5, 11, 8, 9, 10, 2], 225 | [7, 3, 5, 11, 8, 10, 2, 9], 226 | [7, 3, 5, 11, 8, 10, 9, 2], 227 | [7, 3, 5, 11, 10, 2, 8, 9], 228 | [7, 3, 5, 11, 10, 8, 2, 9], 229 | [7, 3, 5, 11, 10, 8, 9, 2], 230 | [7, 3, 8, 5, 11, 2, 9, 10], 231 | [7, 3, 8, 5, 11, 2, 10, 9], 232 | [7, 3, 8, 5, 11, 9, 2, 10], 233 | [7, 3, 8, 5, 11, 9, 10, 2], 234 | [7, 3, 8, 5, 11, 10, 2, 9], 235 | [7, 3, 8, 5, 11, 10, 9, 2], 236 | [7, 5, 3, 8, 11, 2, 9, 10], 237 | [7, 5, 3, 8, 11, 2, 10, 9], 238 | [7, 5, 3, 8, 11, 9, 2, 10], 239 | [7, 5, 3, 8, 11, 9, 10, 2], 240 | [7, 5, 3, 8, 11, 10, 2, 9], 241 | [7, 5, 3, 8, 11, 10, 9, 2], 242 | [7, 5, 3, 11, 2, 8, 9, 10], 243 | [7, 5, 3, 11, 2, 8, 10, 9], 244 | [7, 5, 3, 11, 2, 10, 8, 9], 245 | [7, 5, 3, 11, 8, 2, 9, 10], 246 | [7, 5, 3, 11, 8, 2, 10, 9], 247 | [7, 5, 3, 11, 8, 9, 2, 10], 248 | [7, 5, 3, 11, 8, 9, 10, 2], 249 | [7, 5, 3, 11, 8, 10, 2, 9], 250 | [7, 5, 3, 11, 8, 10, 9, 2], 251 | [7, 5, 3, 11, 10, 2, 8, 9], 252 | [7, 5, 3, 11, 10, 8, 2, 9], 253 | [7, 5, 3, 11, 10, 8, 9, 2], 254 | [7, 5, 11, 2, 3, 8, 9, 10], 255 | [7, 5, 11, 2, 3, 8, 10, 9], 256 | [7, 5, 11, 2, 3, 10, 8, 9], 257 | [7, 5, 11, 3, 2, 8, 9, 10], 258 | [7, 5, 11, 3, 2, 8, 10, 9], 259 | [7, 5, 11, 3, 2, 10, 8, 9], 260 | [7, 5, 11, 3, 8, 2, 9, 10], 261 | [7, 5, 11, 3, 8, 2, 10, 9], 262 | [7, 5, 11, 3, 8, 9, 2, 10], 263 | [7, 5, 11, 3, 8, 9, 10, 2], 264 | [7, 5, 11, 3, 8, 10, 2, 9], 265 | [7, 5, 11, 3, 8, 10, 9, 2], 266 | [7, 5, 11, 3, 10, 2, 8, 9], 267 | [7, 5, 11, 3, 10, 8, 2, 9], 268 | [7, 5, 11, 3, 10, 8, 9, 2], 269 | ] 270 | 271 | self.assertIn(list(graph.topological_sort()), valid_solutions) 272 | -------------------------------------------------------------------------------- /addons_external/queue_job/models/base.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Camptocamp 2 | # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) 3 | 4 | import functools 5 | 6 | from odoo import api, models 7 | 8 | from ..delay import Delayable 9 | from ..job import DelayableRecordset 10 | from ..utils import 
must_run_without_delay 11 | 12 | 13 | class Base(models.AbstractModel): 14 | """The base model, which is implicitly inherited by all models. 15 | 16 | A new :meth:`~with_delay` method is added on all Odoo Models, allowing to 17 | postpone the execution of a job method in an asynchronous process. 18 | """ 19 | 20 | _inherit = "base" 21 | 22 | def with_delay( 23 | self, 24 | priority=None, 25 | eta=None, 26 | max_retries=None, 27 | description=None, 28 | channel=None, 29 | identity_key=None, 30 | ): 31 | """Return a ``DelayableRecordset`` 32 | 33 | It is a shortcut for the longer form as shown below:: 34 | 35 | self.with_delay(priority=20).action_done() 36 | # is equivalent to: 37 | self.delayable().set(priority=20).action_done().delay() 38 | 39 | ``with_delay()`` accepts job properties which specify how the job will 40 | be executed. 41 | 42 | Usage with job properties:: 43 | 44 | delayable = env['a.model'].with_delay(priority=30, eta=60*60*5) 45 | delayable.export_one_thing(the_thing_to_export) 46 | # => the job will be executed with a low priority and not before a 47 | # delay of 5 hours from now 48 | 49 | When using :meth:``with_delay``, the final ``delay()`` is implicit. 50 | See the documentation of :meth:``delayable`` for more details. 51 | 52 | :return: instance of a DelayableRecordset 53 | :rtype: :class:`odoo.addons.queue_job.job.DelayableRecordset` 54 | """ 55 | return DelayableRecordset( 56 | self, 57 | priority=priority, 58 | eta=eta, 59 | max_retries=max_retries, 60 | description=description, 61 | channel=channel, 62 | identity_key=identity_key, 63 | ) 64 | 65 | def delayable( 66 | self, 67 | priority=None, 68 | eta=None, 69 | max_retries=None, 70 | description=None, 71 | channel=None, 72 | identity_key=None, 73 | ): 74 | """Return a ``Delayable`` 75 | 76 | The returned instance allows to enqueue any method of the recordset's 77 | Model. 78 | 79 | Usage:: 80 | 81 | delayable = self.env["res.users"].browse(10).delayable(priority=20) 82 | delayable.do_work(name="test").delay() 83 | 84 | In this example, the ``do_work`` method will not be executed directly. 85 | It will be executed in an asynchronous job. 86 | 87 | Method calls on a Delayable generally return themselves, so calls can 88 | be chained together:: 89 | 90 | delayable.set(priority=15).do_work(name="test").delay() 91 | 92 | The order of the calls that build the job is not relevant, besides 93 | the call to ``delay()`` that must happen at the very end. This is 94 | equivalent to the example above:: 95 | 96 | delayable.do_work(name="test").set(priority=15).delay() 97 | 98 | Very importantly, ``delay()`` must be called on the top-most parent 99 | of a chain of jobs, so if you have this:: 100 | 101 | job1 = record1.delayable().do_work() 102 | job2 = record2.delayable().do_work() 103 | job1.on_done(job2) 104 | 105 | The ``delay()`` call must be made on ``job1``, otherwise ``job2`` will 106 | be delayed, but ``job1`` will never be. When done on ``job1``, the 107 | ``delay()`` call will traverse the graph of jobs and delay all of 108 | them:: 109 | 110 | job1.delay() 111 | 112 | For more details on the graph dependencies, read the documentation of 113 | :module:`~odoo.addons.queue_job.delay`. 114 | 115 | :param priority: Priority of the job, 0 being the highest priority. 116 | Default is 10. 117 | :param eta: Estimated Time of Arrival of the job. It will not be 118 | executed before this date/time. 119 | :param max_retries: maximum number of retries before giving up and 120 | setting the job state to 'failed'.
A value of 0 means 121 | infinite retries. Default is 5. 122 | :param description: human description of the job. If None, the description 123 | is computed from the function doc or name 124 | :param channel: the complete name of the channel to use to process 125 | the function. If specified it overrides the one 126 | defined on the function 127 | :param identity_key: key uniquely identifying the job; if specified 128 | and a job with the same key has not yet been run, 129 | the new job will not be added. It is either a 130 | string or a function that takes the job as 131 | argument (see :py:func:`..job.identity_exact`). 132 | 133 | :return: instance of a Delayable 134 | :rtype: :class:`odoo.addons.queue_job.job.Delayable` 135 | """ 136 | return Delayable( 137 | self, 138 | priority=priority, 139 | eta=eta, 140 | max_retries=max_retries, 141 | description=description, 142 | channel=channel, 143 | identity_key=identity_key, 144 | ) 145 | 146 | def _patch_job_auto_delay(self, method_name, context_key=None): 147 | """Patch a method to be automatically delayed as job method when called 148 | 149 | This patch method has to be called in ``_register_hook`` (example 150 | below). 151 | 152 | When a method is patched, any call to the method will not directly 153 | execute the method's body, but will instead enqueue a job. 154 | 155 | When a ``context_key`` is set when calling ``_patch_job_auto_delay``, 156 | the patched method is automatically delayed only when this key is 157 | ``True`` in the caller's context. It is advised to patch the method 158 | with a ``context_key``, because making the automatic delay *in any 159 | case* can produce nasty and unexpected side effects (e.g. another 160 | module calls the method and expects it to be computed before doing 161 | something else, expecting a result, ...). 162 | 163 | A typical use case is when a method in a module we don't control is 164 | called synchronously in the middle of another method, and we'd like all 165 | the calls to this method to become asynchronous. 166 | 167 | The options of the job usually passed to ``with_delay()`` (priority, 168 | description, identity_key, ...) can be returned in a dictionary by a 169 | method named after the name of the method suffixed by ``_job_options`` 170 | which takes the same parameters as the initial method. 171 | 172 | It is still possible to force synchronous execution of the method by 173 | setting a key ``_job_force_sync`` to True in the environment context. 174 | 175 | Example patching the "foo" method to be automatically delayed as job 176 | (the job options method is optional): 177 | 178 | .. code-block:: python 179 | 180 | # original method: 181 | def foo(self, arg1): 182 | print("hello", arg1) 183 | 184 | def large_method(self): 185 | # doing a lot of things 186 | self.foo("world") 187 | # doing a lot of other things 188 | 189 | def button_x(self): 190 | self.with_context(auto_delay_foo=True).large_method() 191 | 192 | # auto delay patch: 193 | def foo_job_options(self, arg1): 194 | return { 195 | "priority": 100, 196 | "description": "Saying hello to {}".format(arg1) 197 | } 198 | 199 | def _register_hook(self): 200 | self._patch_method( 201 | "foo", 202 | self._patch_job_auto_delay("foo", context_key="auto_delay_foo") 203 | ) 204 | return super()._register_hook() 205 | 206 | The result when ``button_x`` is called is that a new job for ``foo`` 207 | is delayed.
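(Implementation note: the wrapper installed by this patch skips the automatic delay when the call already runs inside a job, i.e. ``job_uuid`` is in the context, when the ``context_key`` is not truthy in the caller's context, or when ``must_run_without_delay()`` requires a synchronous run.)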
208 | """ 209 | 210 | def auto_delay_wrapper(self, *args, **kwargs): 211 | # when no context_key is set, we delay in any case (warning, can be 212 | # dangerous) 213 | context_delay = self.env.context.get(context_key) if context_key else True 214 | if ( 215 | self.env.context.get("job_uuid") 216 | or not context_delay 217 | or must_run_without_delay(self.env) 218 | ): 219 | # we are in the job execution 220 | return auto_delay_wrapper.origin(self, *args, **kwargs) 221 | else: 222 | # replace the synchronous call by a job on itself 223 | method_name = auto_delay_wrapper.origin.__name__ 224 | job_options_method = getattr(self, f"{method_name}_job_options", None) 225 | job_options = {} 226 | if job_options_method: 227 | job_options.update(job_options_method(*args, **kwargs)) 228 | delayed = self.with_delay(**job_options) 229 | return getattr(delayed, method_name)(*args, **kwargs) 230 | 231 | origin = getattr(self, method_name) 232 | return functools.update_wrapper(auto_delay_wrapper, origin) 233 | 234 | @api.model 235 | def _job_store_values(self, job): 236 | """Hook for manipulating job stored values. 237 | 238 | You can define a more specific hook for a job function 239 | by defining a method name with this pattern: 240 | 241 | `_queue_job_store_values_${func_name}` 242 | 243 | NOTE: values will be stored only if they match stored fields on `queue.job`. 244 | 245 | :param job: current queue_job.job.Job instance. 246 | :return: dictionary for setting job values. 247 | """ 248 | return {} 249 | 250 | @api.model 251 | def _job_prepare_context_before_enqueue_keys(self): 252 | """Keys to keep in context of stored jobs 253 | Empty by default for backward compatibility. 254 | """ 255 | return ("tz", "lang", "allowed_company_ids", "force_company", "active_test") 256 | 257 | def _job_prepare_context_before_enqueue(self): 258 | """Return the context to store in the jobs 259 | Can be used to keep only safe keys. 260 | """ 261 | return { 262 | key: value 263 | for key, value in self.env.context.items() 264 | if key in self._job_prepare_context_before_enqueue_keys() 265 | } 266 | 267 | @classmethod 268 | def _patch_method(cls, name, method): 269 | origin = getattr(cls, name) 270 | method.origin = origin 271 | # propagate decorators from origin to method, and apply api decorator 272 | wrapped = api.propagate(origin, method) 273 | wrapped.origin = origin 274 | setattr(cls, name, wrapped) 275 | -------------------------------------------------------------------------------- /addons_external/queue_job/views/queue_job_views.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | queue.job.form 6 | queue.job 7 | 8 |
[Extraction artifact: the XML markup of this file (lines 9-302) was stripped, leaving only bare line numbers and scattered text nodes. The surviving fragments show that the file defines the queue.job form view ("queue.job.form"), tree view ("queue.job.tree"), pivot view ("queue.job.pivot"), graph view ("queue.job.graph") and search view ("queue.job.search"), followed by a "Jobs" window action on queue.job with view modes tree,form,pivot,graph and default search filters wait_dependencies, pending, enqueued, started and failed.]
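A hedged partial reconstruction of the window action whose text content survives above (the record id and field layout are assumptions based on standard Odoo view XML, not the original file):

<record id="action_queue_job" model="ir.actions.act_window">
    <field name="name">Jobs</field>
    <field name="res_model">queue.job</field>
    <field name="view_mode">tree,form,pivot,graph</field>
    <field name="context">{'search_default_wait_dependencies': 1,
        'search_default_pending': 1,
        'search_default_enqueued': 1,
        'search_default_started': 1,
        'search_default_failed': 1}</field>
</record>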
303 | -------------------------------------------------------------------------------- /addons_external/queue_job/readme/USAGE.md: -------------------------------------------------------------------------------- 1 | To use this module, you need to: 2 | 3 | 1. Go to `Job Queue` menu 4 | 5 | ## Developers 6 | 7 | ### Delaying jobs 8 | 9 | The fast way to enqueue a job for a method is to use `with_delay()` on a 10 | record or model: 11 | 12 | ``` python 13 | def button_done(self): 14 | self.with_delay().print_confirmation_document(self.state) 15 | self.write({"state": "done"}) 16 | return True 17 | ``` 18 | 19 | Here, the method `print_confirmation_document()` will be executed 20 | asynchronously as a job. `with_delay()` can take several parameters to 21 | define more precisely how the job is executed (priority, ...). 22 | 23 | All the arguments passed to the method being delayed are stored in the 24 | job and passed to the method when it is executed asynchronously, 25 | including `self`, so the current record is maintained during the job 26 | execution (warning: the context is not kept). 27 | 28 | Dependencies can be expressed between jobs. To start a graph of jobs, 29 | use `delayable()` on a record or model. The following is the equivalent 30 | of `with_delay()` but using the long form: 31 | 32 | ``` python 33 | def button_done(self): 34 | delayable = self.delayable() 35 | delayable.print_confirmation_document(self.state) 36 | delayable.delay() 37 | self.write({"state": "done"}) 38 | return True 39 | ``` 40 | 41 | Methods of Delayable objects return itself, so it can be used as a 42 | builder pattern, which in some cases allow to build the jobs 43 | dynamically: 44 | 45 | ``` python 46 | def button_generate_simple_with_delayable(self): 47 | self.ensure_one() 48 | # Introduction of a delayable object, using a builder pattern 49 | # allowing to chain jobs or set properties. The delay() method 50 | # on the delayable object actually stores the delayable objects 51 | # in the queue_job table 52 | ( 53 | self.delayable() 54 | .generate_thumbnail((50, 50)) 55 | .set(priority=30) 56 | .set(description=_("generate xxx")) 57 | .delay() 58 | ) 59 | ``` 60 | 61 | The simplest way to define a dependency is to use `.on_done(job)` on a 62 | Delayable: 63 | 64 | ``` python 65 | def button_chain_done(self): 66 | self.ensure_one() 67 | job1 = self.browse(1).delayable().generate_thumbnail((50, 50)) 68 | job2 = self.browse(1).delayable().generate_thumbnail((50, 50)) 69 | job3 = self.browse(1).delayable().generate_thumbnail((50, 50)) 70 | # job 3 is executed when job 2 is done which is executed when job 1 is done 71 | job1.on_done(job2.on_done(job3)).delay() 72 | ``` 73 | 74 | Delayables can be chained to form more complex graphs using the 75 | `chain()` and `group()` primitives. A chain represents a sequence of 76 | jobs to execute in order, a group represents jobs which can be executed 77 | in parallel. Using `chain()` has the same effect as using several nested 78 | `on_done()` but is more readable. Both can be combined to form a graph, 79 | for instance we can group \[A\] of jobs, which blocks another group 80 | \[B\] of jobs. When and only when all the jobs of the group \[A\] are 81 | executed, the jobs of the group \[B\] are executed. 
The code would look 82 | like: 83 | 84 | ``` python 85 | from odoo.addons.queue_job.delay import group, chain 86 | 87 | def button_done(self): 88 | group_a = group(self.delayable().method_foo(), self.delayable().method_bar()) 89 | group_b = group(self.delayable().method_baz(1), self.delayable().method_baz(2)) 90 | chain(group_a, group_b).delay() 91 | self.write({"state": "done"}) 92 | return True 93 | ``` 94 | 95 | When a failure happens in a graph of jobs, the execution of the jobs 96 | that depend on the failed job stops. They remain in the state 97 | `wait_dependencies` until their "parent" job is successful. This can 98 | happen in two ways: either the parent job retries and is successful on a 99 | second try, or the parent job is manually "set to done" by a user. 100 | In these two cases, the dependency is resolved and the graph will 101 | continue to be processed. Alternatively, the failed job and all its 102 | dependent jobs can be canceled by a user. The other jobs of the graph 103 | that do not depend on the failed job continue their execution in any 104 | case. 105 | 106 | Note: `delay()` must be called on the delayable, chain, or group which 107 | is at the top of the graph. In the example above, if it was called on 108 | `group_a`, then `group_b` would never be delayed (but a warning would be 109 | shown). 110 | 111 | ### Enqueuing Job Options 112 | 113 | - priority: default is 10; the closer it is to 0, the sooner the job will be 114 | executed 115 | - eta: Estimated Time of Arrival of the job. It will not be executed 116 | before this date/time 117 | - max_retries: default is 5, the maximum number of retries before giving up 118 | and setting the job state to 'failed'. A value of 0 means infinite 119 | retries. 120 | - description: human description of the job. If not set, the description is 121 | computed from the function doc or method name 122 | - channel: the complete name of the channel to use to process the 123 | function. If specified it overrides the one defined on the function 124 | - identity_key: key uniquely identifying the job; if specified and a job 125 | with the same key has not yet been run, the new job will not be 126 | created 127 | 128 | ### Configure default options for jobs 129 | 130 | In earlier versions, jobs could be configured using the `@job` 131 | decorator. This is now obsolete; they can be configured using optional 132 | `queue.job.function` and `queue.job.channel` XML records. 133 | 134 | Example of channel: 135 | 136 | ``` XML 137 | <record id="channel_sale" model="queue.job.channel"> 138 | <field name="name">sale</field> 139 | <field name="parent_id" ref="queue_job.channel_root" /> 140 | </record> 141 | ``` 142 | 143 | Example of job function: 144 | 145 | ``` XML 146 | <record id="job_function_sale_order_action_done" model="queue.job.function"> 147 | <field name="model_id" ref="sale.model_sale_order" /> 148 | <field name="method">action_done</field> 149 | <field name="channel_id" ref="channel_sale" /> 150 | <field name="related_action" eval='{"func_name": "related_action_foo"}' /> 151 | <field name="retry_pattern" eval="{1: 60, 2: 180, 3: 10, 5: 300}" /> 152 | </record> 153 | ``` 154 | 155 | The general form for the `name` is: `<model.name>.method`. 156 | 157 | The channel, related action and retry pattern options are optional; they 158 | are documented below. 159 | 160 | When writing modules, if 2+ modules add a job function or channel with 161 | the same name (and parent for channels), they'll be merged in the same 162 | record, even if they have different xmlids. On uninstall, the merged 163 | record is deleted when all the modules using it are uninstalled. 164 | 165 | **Job function: model** 166 | 167 | If the function is defined in an abstract model, you cannot reference the 168 | abstract model in `model_id`; 169 | you have to define a job function record for each model that inherits from 170 | the abstract model. 171 | 172 | **Job function: channel** 173 | 174 | The channel where the job will be delayed. The default channel is 175 | `root`.
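For instance, a job can be sent to a specific channel at delay time; a minimal sketch, assuming the `sale` channel declared above and a hypothetical `action_done` job method:

``` python
# run this particular job in root.sale instead of the channel configured
# on the job function
self.with_delay(channel="root.sale").action_done()
```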
176 | 177 | **Job function: related action** 178 | 179 | The *Related Action* appears as a button on the Job's view. The button 180 | will execute the defined action. 181 | 182 | The default one is to open the view of the record related to the job 183 | (form view when there is a single record, list view for several 184 | records). In many cases, the default related action is enough and 185 | doesn't need customization, but it can be customized by providing a 186 | dictionary on the job function: 187 | 188 | ``` python 189 | { 190 | "enable": False, 191 | "func_name": "related_action_partner", 192 | "kwargs": {"name": "Partner"}, 193 | } 194 | ``` 195 | 196 | - `enable`: when `False`, the button has no effect (default: `True`) 197 | - `func_name`: name of the method on `queue.job` that returns an action 198 | - `kwargs`: extra arguments to pass to the related action method 199 | 200 | Example of related action code: 201 | 202 | ``` python 203 | class QueueJob(models.Model): 204 | _inherit = 'queue.job' 205 | 206 | def related_action_partner(self, name): 207 | self.ensure_one() 208 | model = self.model_name 209 | partner = self.records 210 | action = { 211 | 'name': name, 212 | 'type': 'ir.actions.act_window', 213 | 'res_model': model, 214 | 'view_type': 'form', 215 | 'view_mode': 'form', 216 | 'res_id': partner.id, 217 | } 218 | return action 219 | ``` 220 | 221 | **Job function: retry pattern** 222 | 223 | When a job fails with a retryable error type, it is automatically 224 | retried later. By default, the retry is always 10 minutes later. 225 | 226 | A retry pattern can be configured on the job function. What a pattern 227 | represents is "from X tries, postpone to Y seconds". It is expressed as 228 | a dictionary where keys are tries and values are seconds to postpone as 229 | integers: 230 | 231 | ``` python 232 | { 233 | 1: 10, 234 | 5: 20, 235 | 10: 30, 236 | 15: 300, 237 | } 238 | ``` 239 | 240 | Based on this configuration, we can tell that: 241 | 242 | - retries 1 to 4 are postponed by 10 seconds 243 | - retries 5 to 9 are postponed by 20 seconds 244 | - retries 10 to 14 are postponed by 30 seconds 245 | - retry 15 and all subsequent retries are postponed by 5 minutes 246 | 247 | **Job Context** 248 | 249 | The context of the recordset of the job, or of any recordset passed in 250 | the arguments of a job, is transferred to the job according to an 251 | allow-list. 252 | 253 | The default allow-list is ("tz", "lang", "allowed_company_ids", 254 | "force_company", "active_test"). It can be customized in 255 | `Base._job_prepare_context_before_enqueue_keys`. 256 | 257 | **Bypass jobs on running Odoo** 258 | 259 | When you are developing (i.e. connector modules) you might want to bypass 260 | the queue job and run your code immediately. 261 | 262 | To do so you can set QUEUE_JOB\_\_NO_DELAY=1 in your environment. 263 | 264 | **Bypass jobs in tests** 265 | 266 | When writing tests on job-related methods, it is always tricky to deal with 267 | delayed recordsets. To make your testing life easier you can set 268 | queue_job\_\_no_delay=True in the context. 269 | 270 | Tip: you can do this at test case level like this: 271 | 272 | ``` python 273 | @classmethod 274 | def setUpClass(cls): 275 | super().setUpClass() 276 | cls.env = cls.env(context=dict( 277 | cls.env.context, 278 | queue_job__no_delay=True,  # no jobs thanks 279 | )) 280 | ``` 281 | 282 | Then all your tests execute the job methods synchronously without delaying any jobs.
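Relatedly, the allow-list described under **Job Context** above can be extended by overriding the hook on the `base` model; a minimal sketch, where `my_custom_key` is a hypothetical extra key to propagate:

``` python
from odoo import api, models


class Base(models.AbstractModel):
    _inherit = "base"

    @api.model
    def _job_prepare_context_before_enqueue_keys(self):
        # keep the default allowed keys and propagate one more context key
        return super()._job_prepare_context_before_enqueue_keys() + ("my_custom_key",)
```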
283 | 284 | ### Testing 285 | 286 | **Asserting enqueued jobs** 287 | 288 | The recommended way to test jobs, rather than running them directly and 289 | synchronously, is to split the tests in two parts: 290 | 291 | > - one test where the job is mocked (trap jobs with `trap_jobs()`) and 292 | >   the test only verifies that the job has been delayed with the 293 | >   expected arguments 294 | > - one test that only calls the method of the job synchronously, to 295 | >   validate the proper behavior of this method only 296 | 297 | Proceeding this way means that you can prove that jobs will be enqueued 298 | properly at runtime, and it ensures your code does not have a different 299 | behavior in tests and in production (because running your jobs 300 | synchronously may have a different behavior as they are in the same 301 | transaction / in the middle of the method). Additionally, it gives more 302 | control on the arguments you want to pass when calling the job's method 303 | (synchronously, this time, in the second type of tests), and it makes 304 | tests smaller. 305 | 306 | The best way to run such assertions on the enqueued jobs is to use 307 | `odoo.addons.queue_job.tests.common.trap_jobs()`. 308 | 309 | A very small example (more details in `tests/common.py`): 310 | 311 | ``` python 312 | # code 313 | def my_job_method(self, name, count): 314 | self.write({"name": " ".join([name] * count)}) 315 | 316 | def method_to_test(self): 317 | count = self.env["other.model"].search_count([]) 318 | self.with_delay(priority=15).my_job_method("Hi!", count=count) 319 | return count 320 | 321 | # tests 322 | from odoo.addons.queue_job.tests.common import trap_jobs 323 | 324 | # the first test only checks the expected behavior of the method and the proper 325 | # enqueuing of jobs 326 | def test_method_to_test(self): 327 | with trap_jobs() as trap: 328 | result = self.env["model"].method_to_test() 329 | expected_count = 12 330 | 331 | trap.assert_jobs_count(1, only=self.env["model"].my_job_method) 332 | trap.assert_enqueued_job( 333 | self.env["model"].my_job_method, 334 | args=("Hi!",), 335 | kwargs=dict(count=expected_count), 336 | properties=dict(priority=15) 337 | ) 338 | self.assertEqual(result, expected_count) 339 | 340 | 341 | # second test to validate the behavior of the job unitarily 342 | def test_my_job_method(self): 343 | record = self.env["model"].browse(1) 344 | record.my_job_method("Hi!", count=12) 345 | self.assertEqual(record.name, "Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi!") 346 | ``` 347 | 348 | If you prefer, you can still test the whole thing in a single test, by 349 | calling `jobs_tester.perform_enqueued_jobs()` in your test. 350 | 351 | ``` python 352 | def test_method_to_test(self): 353 | with trap_jobs() as trap: 354 | result = self.env["model"].method_to_test() 355 | expected_count = 12 356 | 357 | trap.assert_jobs_count(1, only=self.env["model"].my_job_method) 358 | trap.assert_enqueued_job( 359 | self.env["model"].my_job_method, 360 | args=("Hi!",), 361 | kwargs=dict(count=expected_count), 362 | properties=dict(priority=15) 363 | ) 364 | self.assertEqual(result, expected_count) 365 | 366 | trap.perform_enqueued_jobs() 367 | 368 | record = self.env["model"].browse(1) 369 | record.my_job_method("Hi!", count=12) 370 | self.assertEqual(record.name, "Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi! Hi!")
Hi!") 371 | ``` 372 | 373 | **Execute jobs synchronously when running Odoo** 374 | 375 | When you are developing (ie: connector modules) you might want to bypass 376 | the queue job and run your code immediately. 377 | 378 | To do so you can set `QUEUE_JOB__NO_DELAY=1` in your environment. 379 | 380 | Warning 381 | 382 | Do not do this in production 383 | 384 | **Execute jobs synchronously in tests** 385 | 386 | You should use `trap_jobs`, really, but if for any reason you could not 387 | use it, and still need to have job methods executed synchronously in 388 | your tests, you can do so by setting `queue_job__no_delay=True` in the 389 | context. 390 | 391 | Tip: you can do this at test case level like this 392 | 393 | ``` python 394 | @classmethod 395 | def setUpClass(cls): 396 | super().setUpClass() 397 | cls.env = cls.env(context=dict( 398 | cls.env.context, 399 | queue_job__no_delay=True, # no jobs thanks 400 | )) 401 | ``` 402 | 403 | Then all your tests execute the job methods synchronously without 404 | delaying any jobs. 405 | 406 | In tests you'll have to mute the logger like: 407 | 408 | > @mute_logger('odoo.addons.queue_job.models.base') 409 | 410 | Note 411 | 412 | in graphs of jobs, the `queue_job__no_delay` context key must be in at 413 | least one job's env of the graph for the whole graph to be executed 414 | synchronously 415 | 416 | ### Tips and tricks 417 | 418 | - **Idempotency** 419 | (): The 420 | queue_job should be idempotent so they can be retried several times 421 | without impact on the data. 422 | - **The job should test at the very beginning its relevance**: the 423 | moment the job will be executed is unknown by design. So the first 424 | task of a job should be to check if the related work is still relevant 425 | at the moment of the execution. 426 | 427 | ### Patterns 428 | 429 | Through the time, two main patterns emerged: 430 | 431 | 1. For data exposed to users, a model should store the data and the 432 | model should be the creator of the job. The job is kept hidden from 433 | the users 434 | 2. For technical data, that are not exposed to the users, it is 435 | generally alright to create directly jobs with data passed as 436 | arguments to the job, without intermediary models. 437 | -------------------------------------------------------------------------------- /addons_external/queue_job/tests/common.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Camptocamp 2 | # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 3 | import doctest 4 | import logging 5 | import typing 6 | from contextlib import contextmanager 7 | from itertools import groupby 8 | from operator import attrgetter 9 | from unittest import TestCase, mock 10 | 11 | from odoo.tests.case import TestCase as _TestCase 12 | from odoo.tests.common import MetaCase 13 | 14 | from odoo.addons.queue_job.delay import Graph 15 | 16 | # pylint: disable=odoo-addons-relative-import 17 | from odoo.addons.queue_job.job import Job 18 | 19 | 20 | @contextmanager 21 | def trap_jobs(): 22 | """Context Manager used to test enqueuing of jobs 23 | 24 | Trapping jobs allows to split the tests in: 25 | 26 | * the part that delays the job with the expected arguments in one test 27 | * the execution of the job itself in a second test 28 | 29 | When the jobs are trapped, they are not executed at all, however, we 30 | can verify they have been enqueued with the correct arguments and 31 | properties. 
32 | 33 | Then in a second test, we can call the job method directly with the 34 | arguments to test. 35 | 36 | The context manager yields an instance of ``JobsTrap``, which provides 37 | utilities and assert methods. 38 | 39 | Example of method to test:: 40 | 41 | def button_that_uses_delayable_chain(self): 42 | delayables = chain( 43 | self.delayable( 44 | channel="root.test", 45 | description="Test", 46 | eta=15, 47 | identity_key=identity_exact, 48 | max_retries=1, 49 | priority=15, 50 | ).testing_method(1, foo=2), 51 | self.delayable().testing_method('x', foo='y'), 52 | self.delayable().no_description(), 53 | ) 54 | delayables.delay() 55 | 56 | Example of usage in a test:: 57 | 58 | with trap_jobs() as trap: 59 | self.env['test.queue.job'].button_that_uses_delayable_chain() 60 | 61 | trap.assert_jobs_count(3) 62 | trap.assert_jobs_count( 63 | 2, only=self.env['test.queue.job'].testing_method 64 | ) 65 | 66 | trap.assert_jobs_count( 67 | 1, only=self.env['test.queue.job'].no_description 68 | ) 69 | 70 | trap.assert_enqueued_job( 71 | self.env['test.queue.job'].testing_method, 72 | args=(1,), 73 | kwargs={"foo": 2}, 74 | properties=dict( 75 | channel="root.test", 76 | description="Test", 77 | eta=15, 78 | identity_key=identity_exact, 79 | max_retries=1, 80 | priority=15, 81 | ) 82 | ) 83 | trap.assert_enqueued_job( 84 | self.env['test.queue.job'].testing_method, 85 | args=("x",), 86 | kwargs={"foo": "y"}, 87 | ) 88 | trap.assert_enqueued_job( 89 | self.env['test.queue.job'].no_description, 90 | ) 91 | 92 | # optionally, you can perform the jobs synchronously (without going 93 | # to the database) 94 | jobs_tester.perform_enqueued_jobs() 95 | """ 96 | with mock.patch( 97 | "odoo.addons.queue_job.delay.Job", 98 | name="Job Class", 99 | auto_spec=True, 100 | unsafe=True, 101 | ) as job_cls_mock: 102 | with JobsTrap(job_cls_mock) as trap: 103 | yield trap 104 | 105 | 106 | class JobCall(typing.NamedTuple): 107 | method: typing.Callable 108 | args: tuple 109 | kwargs: dict 110 | properties: dict 111 | 112 | def __eq__(self, other): 113 | if not isinstance(other, JobCall): 114 | return NotImplemented 115 | return ( 116 | self.method.__self__ == other.method.__self__ 117 | and self.method.__func__ == other.method.__func__ 118 | and self.args == other.args 119 | and self.kwargs == other.kwargs 120 | and self.properties == other.properties 121 | ) 122 | 123 | 124 | class JobsTrap: 125 | """Used by ``trap_jobs()``, provides assert methods on the trapped jobs 126 | 127 | Look at the documentation of ``trap_jobs()`` for a usage example. 128 | 129 | The ``store`` method of the Job instances is mocked so they are never 130 | saved in the database. 131 | 132 | Helpers for tests: 133 | 134 | * ``jobs_count`` 135 | * ``assert_jobs_count`` 136 | * ``assert_enqueued_job`` 137 | * ``perform_enqueued_jobs`` 138 | 139 | You can also access the list of calls that were made to enqueue the jobs in 140 | the ``calls`` attribute, and the generated jobs in the ``enqueued_jobs``.
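(Note: ``perform_enqueued_jobs`` runs standalone jobs in priority order and jobs belonging to a graph in dependency order; see the implementation below.)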
### Tips and tricks

- **Idempotency**: jobs should be idempotent so that they can be retried
  several times without impact on the data.
- **A job should test its relevance at the very beginning**: the moment
  the job will be executed is unknown by design, so the first task of a
  job should be to check whether the related work is still relevant at
  the moment of execution (see the sketch after this list).
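
A minimal sketch applying both tips (the model, the `state` field, and
the helper method are hypothetical):

``` python
def export_record(self):
    self.ensure_one()
    # relevance check first: the record may have changed or been
    # deleted between enqueue time and execution time
    if not self.exists() or self.state != 'to_export':
        return 'nothing to do: record no longer relevant'
    # idempotent body: overwrite the previous export instead of
    # appending, so a retry after a failure does not duplicate data
    self._push_to_external_system()
```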
### Patterns

Over time, two main patterns have emerged:

1. For data exposed to users, a model should store the data, and that
   model should be the creator of the job. The job is kept hidden from
   the users (see the sketch below).
2. For technical data that is not exposed to users, it is generally
   fine to create jobs directly, with the data passed as arguments to
   the job, without an intermediary model.
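
A sketch of the first pattern (all names hypothetical): the model stores
the user-visible data and is itself the creator of the job.

``` python
from odoo import fields, models


class ExportBatch(models.Model):
    _name = 'my.export.batch'
    _description = 'Export Batch'

    state = fields.Selection(
        [('draft', 'Draft'), ('queued', 'Queued'), ('done', 'Done')],
        default='draft',
    )

    def action_enqueue(self):
        # the model creates the job; users interact with the batch
        # record, never with the queue.job record itself
        for batch in self:
            batch.with_delay().process()
            batch.state = 'queued'

    def process(self):
        self.ensure_one()
        # ... do the actual work, then:
        self.state = 'done'
```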
--------------------------------------------------------------------------------
/addons_external/queue_job/tests/common.py:
--------------------------------------------------------------------------------
# Copyright 2019 Camptocamp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import doctest
import logging
import typing
from contextlib import contextmanager
from itertools import groupby
from operator import attrgetter
from unittest import TestCase, mock

from odoo.tests.case import TestCase as _TestCase
from odoo.tests.common import MetaCase

from odoo.addons.queue_job.delay import Graph

# pylint: disable=odoo-addons-relative-import
from odoo.addons.queue_job.job import Job


@contextmanager
def trap_jobs():
    """Context Manager used to test enqueuing of jobs

    Trapping jobs allows splitting the tests into:

    * the part that delays the job with the expected arguments in one test
    * the execution of the job itself in a second test

    When the jobs are trapped, they are not executed at all; however, we
    can verify they have been enqueued with the correct arguments and
    properties.

    Then in a second test, we can call the job method directly with the
    arguments to test.

    The context manager yields an instance of ``JobsTrap``, which provides
    utilities and assert methods.

    Example of method to test::

        def button_that_uses_delayable_chain(self):
            delayables = chain(
                self.delayable(
                    channel="root.test",
                    description="Test",
                    eta=15,
                    identity_key=identity_exact,
                    max_retries=1,
                    priority=15,
                ).testing_method(1, foo=2),
                self.delayable().testing_method('x', foo='y'),
                self.delayable().no_description(),
            )
            delayables.delay()

    Example of usage in a test::

        with trap_jobs() as trap:
            self.env['test.queue.job'].button_that_uses_delayable_chain()

            trap.assert_jobs_count(3)
            trap.assert_jobs_count(
                2, only=self.env['test.queue.job'].testing_method
            )
            trap.assert_jobs_count(
                1, only=self.env['test.queue.job'].no_description
            )

            trap.assert_enqueued_job(
                self.env['test.queue.job'].testing_method,
                args=(1,),
                kwargs={"foo": 2},
                properties=dict(
                    channel="root.test",
                    description="Test",
                    eta=15,
                    identity_key=identity_exact,
                    max_retries=1,
                    priority=15,
                )
            )
            trap.assert_enqueued_job(
                self.env['test.queue.job'].testing_method,
                args=("x",),
                kwargs={"foo": "y"},
            )
            trap.assert_enqueued_job(
                self.env['test.queue.job'].no_description,
            )

            # optionally, you can perform the jobs synchronously (without
            # going to the database)
            trap.perform_enqueued_jobs()
    """
    with mock.patch(
        "odoo.addons.queue_job.delay.Job",
        name="Job Class",
        auto_spec=True,
        unsafe=True,
    ) as job_cls_mock:
        with JobsTrap(job_cls_mock) as trap:
            yield trap


class JobCall(typing.NamedTuple):
    method: typing.Callable
    args: tuple
    kwargs: dict
    properties: dict

    def __eq__(self, other):
        if not isinstance(other, JobCall):
            return NotImplemented
        return (
            self.method.__self__ == other.method.__self__
            and self.method.__func__ == other.method.__func__
            and self.args == other.args
            and self.kwargs == other.kwargs
            and self.properties == other.properties
        )


class JobsTrap:
    """Used by ``trap_jobs()``, provides assert methods on the trapped jobs

    See the documentation of ``trap_jobs()`` for a usage example.

    The ``store`` method of the Job instances is mocked so they are never
    saved in the database.

    Helpers for tests:

    * ``jobs_count``
    * ``assert_jobs_count``
    * ``assert_enqueued_job``
    * ``perform_enqueued_jobs``

    You can also access the list of calls that were made to enqueue the jobs in
    the ``calls`` attribute, and the generated jobs in ``enqueued_jobs``.
    """

    def __init__(self, job_mock):
        self.job_mock = job_mock
        self.job_mock.side_effect = self._add_job
        # 1 call == 1 job, they share the same position in the lists
        self.calls = []
        self.enqueued_jobs = []
        self._store_patchers = []
        self._test_case = TestCase()

    def jobs_count(self, only=None):
        """Return the count of enqueued jobs

        ``only`` is an optional method on which the count is filtered
        """
        if only:
            return len(self._filtered_enqueued_jobs(only))
        return len(self.enqueued_jobs)

    def assert_jobs_count(self, expected, only=None):
        """Raise an assertion error if the count of enqueued jobs does not match

        ``only`` is an optional method on which the count is filtered
        """
        self._test_case.assertEqual(self.jobs_count(only=only), expected)

    def assert_enqueued_job(self, method, args=None, kwargs=None, properties=None):
        """Raise an assertion error if the expected method has not been enqueued

        * ``method`` is the method (as method object) delayed as job
        * ``args`` is a tuple of arguments passed to the job method
        * ``kwargs`` is a dict of keyword arguments passed to the job method
        * ``properties`` is a dict of job properties (priority, eta, ...)

        The args and the kwargs *must* match exactly what has been enqueued
        in the job method. The properties are optional: if the job has been
        enqueued with a custom description but the assert method is not called
        with ``description`` in the properties, it still matches the call.
        However, if a ``description`` is passed in the assert's properties, it
        must match.
        """
        if properties is None:
            properties = {}
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        expected_call = JobCall(
            method=method,
            args=args,
            kwargs=kwargs,
            properties=properties,
        )
        actual_calls = []
        for call in self.calls:
            # build a copy of the calls with only the properties that we
            # want to check
            checked_properties = {
                key: value
                for key, value in call.properties.items()
                if key in properties
            }
            actual_calls.append(
                JobCall(
                    method=call.method,
                    args=call.args,
                    kwargs=call.kwargs,
                    properties=checked_properties,
                )
            )

        if expected_call not in actual_calls:
            raise AssertionError(
                "Job {} was not enqueued.\nActual enqueued jobs:\n{}".format(
                    self._format_job_call(expected_call),
                    "\n".join(
                        f" * {self._format_job_call(call)}" for call in actual_calls
                    ),
                )
            )

    def perform_enqueued_jobs(self):
        """Perform the enqueued jobs synchronously"""

        def by_graph(job):
            return job.graph_uuid or ""

        sorted_jobs = sorted(self.enqueued_jobs, key=by_graph)
        for graph_uuid, jobs in groupby(sorted_jobs, key=by_graph):
            if graph_uuid:
                self._perform_graph_jobs(jobs)
            else:
                self._perform_single_jobs(jobs)
        self.enqueued_jobs = []

    def _perform_single_jobs(self, jobs):
        # we probably don't want to replicate a perfect order here, but at
        # least respect the priority
        for job in sorted(jobs, key=attrgetter("priority")):
            job.perform()

    def _perform_graph_jobs(self, jobs):
        graph = Graph()
        for job in jobs:
            graph.add_vertex(job)
            for parent in job.depends_on:
                graph.add_edge(parent, job)

        for job in graph.topological_sort():
            job.perform()

    def _add_job(self, *args, **kwargs):
        job = Job(*args, **kwargs)
        if not job.identity_key or all(
            j.identity_key != job.identity_key for j in self.enqueued_jobs
        ):
            self.enqueued_jobs.append(job)

        patcher = mock.patch.object(job, "store")
        self._store_patchers.append(patcher)
        patcher.start()

        job_args = kwargs.pop("args", None) or ()
        job_kwargs = kwargs.pop("kwargs", None) or {}
        self.calls.append(
            JobCall(
                method=args[0],
                args=job_args,
                kwargs=job_kwargs,
                properties=kwargs,
            )
        )
        return job

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        for patcher in self._store_patchers:
            patcher.stop()

    def _filtered_enqueued_jobs(self, job_method):
        enqueued_jobs = [
            job
            for job in self.enqueued_jobs
            if job.func.__self__ == job_method.__self__
            and job.func.__func__ == job_method.__func__
        ]
        return enqueued_jobs

    def _format_job_call(self, call):
        method_all_args = []
        if call.args:
            method_all_args.append(", ".join(f"{arg}" for arg in call.args))
        if call.kwargs:
            method_all_args.append(
                ", ".join(f"{key}={value}" for key, value in call.kwargs.items())
            )
        return "<{}>.{}({}) with properties ({})".format(
            call.method.__self__,
            call.method.__name__,
            ", ".join(method_all_args),
            ", ".join(f"{key}={value}" for key, value in call.properties.items()),
        )

    def __repr__(self):
        return repr(self.calls)


class JobCounter:
    def __init__(self, env):
        super().__init__()
        self.env = env
        self.existing = self.search_all()

    def count_all(self):
        return len(self.search_all())

    def count_created(self):
        return len(self.search_created())

    def count_existing(self):
        return len(self.existing)

    def search_created(self):
        return self.search_all() - self.existing

    def search_all(self):
        return self.env["queue.job"].search([])


class JobMixin:
    def job_counter(self):
        return JobCounter(self.env)

    def perform_jobs(self, jobs):
        for job in jobs.search_created():
            Job.load(self.env, job.uuid).perform()

    @contextmanager
    def trap_jobs(self):
        with trap_jobs() as trap:
            yield trap


@contextmanager
def mock_with_delay():
    """Context Manager mocking ``with_delay()``

    DEPRECATED: use ``trap_jobs()``.

    Mocking this method means we can decouple the tests into:

    * the part that delays the job with the expected arguments
    * the execution of the job itself

    The first kind of test does not need to actually create the jobs in the
    database, as we can inspect how the mocks were called.

    The second kind of test directly calls the method decorated by ``@job``
    with the arguments that we want to test.

    The context manager returns 2 mocks:

    * the first allows to check that with_delay() was called and with which
      arguments
    * the second allows to check which job method was called and with which
      arguments

    Example of test::

        def test_export(self):
            with mock_with_delay() as (delayable_cls, delayable):
                # inside this method, there is a call
                # partner.with_delay(priority=15).export_record('test')
                self.record.run_export()

                # check 'with_delay()' part:
                self.assertEqual(delayable_cls.call_count, 1)
                # arguments passed in 'with_delay()'
                delay_args, delay_kwargs = delayable_cls.call_args
                self.assertEqual(
                    delay_args, (self.env['res.partner'],)
                )
                self.assertDictEqual(delay_kwargs, {"priority": 15})

                # check what's passed to the job method 'export_record'
                self.assertEqual(delayable.export_record.call_count, 1)
                delay_args, delay_kwargs = delayable.export_record.call_args
                self.assertEqual(delay_args, ('test',))
                self.assertDictEqual(delay_kwargs, {})

    An example of the first kind of test:
    https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_batch_timestamp_import.py#L43-L76 # noqa
    And the second kind:
    https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_import_task.py#L34-L46 # noqa

    """
    with mock.patch(
        "odoo.addons.queue_job.models.base.DelayableRecordset",
        name="DelayableRecordset",
        spec=True,
    ) as delayable_cls:
        # prepare the mocks
        delayable = mock.MagicMock(name="DelayableBinding")
        delayable_cls.return_value = delayable
        yield delayable_cls, delayable


class OdooDocTestCase(doctest.DocTestCase, _TestCase, MetaCase("DummyCase", (), {})):
    """
    We need a custom DocTestCase class in order to:
    - define test_tags to run as part of standard tests
    - output a more meaningful test name than the default "DocTestCase.runTest"
    """

    def __init__(
        self, doctest, optionflags=0, setUp=None, tearDown=None, checker=None, seq=0
    ):
        super().__init__(
            doctest._dt_test,
            optionflags=optionflags,
            setUp=setUp,
            tearDown=tearDown,
            checker=checker,
        )
        self.test_sequence = seq

    def setUp(self):
        """Log an extra statement indicating which test has started."""
        super().setUp()
        logging.getLogger(__name__).info("Running tests for %s", self._dt_test.name)


def load_doctests(module):
    """
    Generate a test-loading method for the doctests of the given module
    https://docs.python.org/3/library/unittest.html#load-tests-protocol
    """

    def load_tests(loader, tests, ignore):
        """
        Apply the 'test_tags' attribute to each DocTestCase found by the
        DocTestSuite. Also extend the DocTestCase class trivially to fit the
        class teardown that Odoo backported for its own test classes from
        Python 3.8.
        """
        # UP036 Version block is outdated for minimum Python version
        # if sys.version_info < (3, 8):
        #     doctest.DocTestCase.doClassCleanups = lambda: None
        #     doctest.DocTestCase.tearDown_exceptions = []

        for idx, test in enumerate(doctest.DocTestSuite(module)):
            odoo_test = OdooDocTestCase(test, seq=idx)
            odoo_test.test_tags = {"standard", "at_install", "queue_job", "doctest"}
            tests.addTest(odoo_test)

        return tests

    return load_tests
--------------------------------------------------------------------------------