├── .gitignore ├── Dockerfile ├── LICENSE ├── README.rst ├── _images ├── celery-connectors-json-to-celery-relay-with-existing-ecomm-celery-app.gif ├── flower-jtoc-relay-results.png ├── jupyterhub-step-1-login-as-admin-admin.png ├── jupyterhub-step-2-start-server.png ├── jupyterhub-step-3-browse-ipython-notebooks.png ├── jupyterhub-step-4-run-all-notebook-cells.png ├── jupyterhub-step-5-confirm-notebook-was-saved.png ├── jupyterhub-step-6-stop-server.png ├── jupyterhub-step-7-jupyterhub-user-notebook-persistence.png └── runnning-jupyter-hub-with-ssl.png ├── ansible ├── .gitignore ├── ansible.cfg ├── configs │ ├── ca.srl │ ├── cert_openssl.cnf │ ├── extfile.cnf │ └── openssl.cnf ├── create-x509s.yml └── inventory_dev ├── celery_connectors ├── __init__.py ├── build_ssl_options.py ├── kombu_subscriber.py ├── log │ ├── __init__.py │ ├── logging.json │ └── setup_logging.py ├── message_processor.py ├── mixin_send_task_msg.py ├── publisher.py ├── rabbitmq │ ├── list-bindings.sh │ ├── list-channels.sh │ ├── list-connections.sh │ ├── list-consumers.sh │ ├── list-exchanges.sh │ ├── list-queues.sh │ ├── rabbitmqadmin.py │ ├── rmq-close-all-connections.sh │ ├── rmq-status.sh │ ├── rmq-trace-off.sh │ ├── rmq-trace-on.sh │ └── watch-queues.sh ├── redis │ ├── __init__.py │ ├── base_redis_application.py │ ├── redis-publish-messages.py │ ├── redis-subscribe-and-read-messages.py │ ├── redis_json_application.py │ └── redis_wrapper.py ├── relay_json_to_celery_worker.py ├── relay_worker.py ├── run_consumer_relay.py ├── run_jtoc_relay.py ├── run_publisher.py ├── scripts │ ├── start-container.sh │ ├── subscribe-to-rabbitmq.sh │ └── subscribe-to-redis.sh ├── subscriber.py └── utils.py ├── clean-persistence-data.sh ├── compose ├── compose-jupyter.yml ├── compose-kombu-message-processor-rabbitmq.yml ├── compose-kombu-message-processor-redis.yml ├── compose-kombu-mixin-subscriber.yml ├── compose-kombu-rabbitmq-subscriber.yml ├── compose-kombu-redis-subscriber.yml ├── compose-kombu-sqs-publisher.yml ├── compose-kombu-sqs-subscriber.yml ├── compose-publish-user-conversion-events-rabbitmq.yml ├── compose-publish-user-conversion-events-redis.yml ├── compose-run-celery-rabbitmq-subscriber.yml ├── compose-run-celery-redis-subscriber.yml ├── compose-run-rabbitmq-publisher.yml ├── compose-run-rabbitmq-subscriber.yml ├── compose-run-redis-publisher.yml ├── compose-run-redis-subscriber.yml ├── compose-start-ecomm-relay.yml ├── compose-start-ecomm-worker.yml ├── compose-start-load-test-rabbitmq.yml ├── compose-start-load-test-redis.yml ├── compose-start-mixin-celery-relay.yml ├── compose-start-mixin-json-relay.yml ├── compose-start-mixin-load-test.yml ├── compose-start-mixin-publisher.yml ├── compose-start-subscriptions-rabbitmq-test.yml ├── data │ ├── jupyter │ │ ├── .gitignore │ │ └── jupyterhub_config.py │ ├── notebooks │ │ └── .gitignore │ └── postgres │ │ └── .gitignore ├── env │ ├── common-celery-connectors.env │ ├── oauth.env │ └── postgres-dev.env ├── files │ ├── jupyter_userlist │ └── start-jupyter.sh ├── ssl │ └── .gitignore └── starter.yml ├── dev-build.sh ├── docker ├── bashrc ├── data │ ├── rabbitmq │ │ └── .gitignore │ └── redis │ │ └── .gitignore ├── dev │ ├── env │ │ └── common-celery-connectors.env │ ├── rabbitmq-celery-only-consume.yml │ └── sleep-for-validation.yml ├── env │ ├── flowerrabbit-dev.env │ ├── flowerredis-dev.env │ ├── rabbit1-dev.env │ └── redis1-dev.env ├── logs │ ├── rabbitmq │ │ └── .gitignore │ └── redis │ │ └── .gitignore ├── persistence_redis_and_rabbitmq.yml ├── rabbitmq │ ├── 
autocluster-0.4.1.ez │ ├── enabled_plugins │ ├── erlang.cookie │ └── rabbitmq.config └── redis_and_rabbitmq.yml ├── ecomm_app ├── __init__.py ├── ecommerce │ ├── __init__.py │ ├── celeryconfig_pub_sub.py │ └── tasks.py ├── job_worker.py └── publish_task.py ├── kombu_mixin_subscriber.py ├── kombu_rabbitmq_subscriber.py ├── kombu_redis_subscriber.py ├── kombu_sqs_publisher.py ├── kombu_sqs_subscriber.py ├── publish-user-conversion-events-rabbitmq.py ├── publish-user-conversion-events-redis.py ├── run_rabbitmq_publisher.py ├── run_rabbitmq_subscriber.py ├── run_redis_publisher.py ├── run_redis_subscriber.py ├── setup.cfg ├── setup.py ├── ssh.sh ├── start-dev.sh ├── start-ecomm-relay.py ├── start-ecomm-worker.sh ├── start-kombu-message-processor-rabbitmq.py ├── start-kombu-message-processor-redis.py ├── start-load-test-rabbitmq.py ├── start-load-test-redis.py ├── start-mixin-celery-relay.py ├── start-mixin-json-relay.py ├── start-mixin-load-test.py ├── start-mixin-publisher.py ├── start-persistence-containers.sh ├── start-redis-and-rabbitmq.sh ├── start-subscriptions-rabbitmq-test.py ├── stop-dev.sh ├── stop-redis-and-rabbitmq.sh ├── tests ├── __init__.py ├── base_test.py ├── load_test_message_processor_rabbitmq.py ├── load_test_relay_rabbitmq.py ├── load_test_subscriber_rabbitmq.py ├── load_test_worker_rabbitmq.py ├── mixin_pub_sub.py ├── test_consume_large_number_of_messages.py └── test_functional.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | ### OSX ### 2 | .DS_Store 3 | .AppleDouble 4 | .LSOverride 5 | 6 | # Icon must end with two \r 7 | Icon 8 | 9 | 10 | # Thumbnails 11 | ._* 12 | 13 | # Files that might appear on external disk 14 | .Spotlight-V100 15 | .Trashes 16 | 17 | # Directories potentially created on remote AFP share 18 | .AppleDB 19 | .AppleDesktop 20 | Network Trash Folder 21 | Temporary Items 22 | .apdisk 23 | 24 | 25 | ### Python ### 26 | # Byte-compiled / optimized / DLL files 27 | __pycache__/ 28 | *.py[cod] 29 | 30 | # C extensions 31 | *.so 32 | 33 | # Distribution / packaging 34 | .Python 35 | env/ 36 | build/ 37 | develop-eggs/ 38 | dist/ 39 | downloads/ 40 | eggs/ 41 | lib/ 42 | lib64/ 43 | parts/ 44 | sdist/ 45 | var/ 46 | *.egg-info/ 47 | .installed.cfg 48 | *.egg 49 | 50 | # PyInstaller 51 | # Usually these files are written by a python script from a template 52 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
53 | *.manifest 54 | *.spec 55 | 56 | # Installer logs 57 | pip-log.txt 58 | pip-delete-this-directory.txt 59 | 60 | # Unit test / coverage reports 61 | htmlcov/ 62 | .tox/ 63 | .coverage 64 | .cache 65 | nosetests.xml 66 | coverage.xml 67 | 68 | # Translations 69 | *.mo 70 | *.pot 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | 79 | ### Django ### 80 | *.log 81 | *.pot 82 | *.pyc 83 | __pycache__/ 84 | local_settings.py 85 | migrations 86 | 87 | .env 88 | db.sqlite3 89 | 90 | *.pyc 91 | .idea 92 | .eggs 93 | .tox 94 | *.swp 95 | *.swo 96 | *.o 97 | *.pickle 98 | *.zlib 99 | *.gz 100 | *.tar 101 | *.doctree 102 | *.bin 103 | *.retry 104 | *.egg-info 105 | *.tgz 106 | Error 107 | 108 | build 109 | dist 110 | venv 111 | venv2 112 | py2venv 113 | py3venv 114 | 115 | *.log 116 | *.log.1* 117 | *.log.2* 118 | *.log.3* 119 | *.log.4* 120 | *.log.5* 121 | *.log.6* 122 | *.log.7* 123 | *.log.8* 124 | *.log.9* 125 | 126 | *.jpg 127 | *.jpeg 128 | *.tiff 129 | *.gif 130 | 131 | nohup.out 132 | .pytest_cache 133 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.6-alpine 2 | 3 | RUN apk add --update \ 4 | python \ 5 | python-dev \ 6 | py-pip \ 7 | build-base \ 8 | curl \ 9 | curl-dev \ 10 | bash \ 11 | libffi-dev \ 12 | net-tools \ 13 | heimdal-telnet \ 14 | openssl \ 15 | openssl-dev \ 16 | vim \ 17 | && pip install virtualenv 18 | 19 | RUN mkdir -p -m 777 /opt/celery_connectors /opt/shared /opt/logs /opt/data /opt/configs 20 | WORKDIR /opt/celery_connectors 21 | 22 | COPY celery_connectors-latest.tgz /opt/celery_connectors 23 | COPY ./docker/bashrc /root/.bashrc 24 | 25 | RUN cd /opt/celery_connectors && tar xvf celery_connectors-latest.tgz && ls /opt/celery_connectors 26 | 27 | RUN echo "Starting Build" 28 | 29 | RUN cd /opt/celery_connectors \ 30 | && virtualenv -p python3 /opt/celery_connectors/venv \ 31 | && source /opt/celery_connectors/venv/bin/activate \ 32 | && pip install -e . 33 | 34 | ENV START_SCRIPT /opt/celery_connectors/celery_connectors/scripts/start-container.sh 35 | ENV LOG_DIR /opt/logs 36 | ENV CONFIG_DIR /opt/logs 37 | ENV DATA_DIR /opt/logs 38 | 39 | ENTRYPOINT bash /opt/celery_connectors/celery_connectors/scripts/start-container.sh 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright [2018] [Jay Johnson] 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /_images/celery-connectors-json-to-celery-relay-with-existing-ecomm-celery-app.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/celery-connectors-json-to-celery-relay-with-existing-ecomm-celery-app.gif -------------------------------------------------------------------------------- /_images/flower-jtoc-relay-results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/flower-jtoc-relay-results.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-1-login-as-admin-admin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-1-login-as-admin-admin.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-2-start-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-2-start-server.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-3-browse-ipython-notebooks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-3-browse-ipython-notebooks.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-4-run-all-notebook-cells.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-4-run-all-notebook-cells.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-5-confirm-notebook-was-saved.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-5-confirm-notebook-was-saved.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-6-stop-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-6-stop-server.png -------------------------------------------------------------------------------- /_images/jupyterhub-step-7-jupyterhub-user-notebook-persistence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/jupyterhub-step-7-jupyterhub-user-notebook-persistence.png -------------------------------------------------------------------------------- /_images/runnning-jupyter-hub-with-ssl.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/_images/runnning-jupyter-hub-with-ssl.png -------------------------------------------------------------------------------- /ansible/.gitignore: -------------------------------------------------------------------------------- 1 | *.srl 2 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | ask_pass=False 3 | host_key_checking=False 4 | display_skipped_hosts=True 5 | retry_files_save_path=/tmp 6 | transport=paramiko 7 | log_path="/tmp/ansible-cc.log" 8 | [ssh_connection] 9 | pipelining = True 10 | control_path = /tmp/ansible-cc-ssh-%%h-%%p-%%r 11 | -------------------------------------------------------------------------------- /ansible/configs/ca.srl: -------------------------------------------------------------------------------- 1 | 01 2 | -------------------------------------------------------------------------------- /ansible/configs/cert_openssl.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | days = 2000 3 | serial = 1 4 | distinguished_name = req_distinguished_name 5 | x509_extensions = v3_ca 6 | 7 | 8 | [req_distinguished_name] 9 | countryName = US 10 | stateOrProvinceName = WA 11 | localityName = Redmond 12 | organizationName = SecureEverything 13 | organizationalUnitName = SecureEverythingOrgUnit 14 | commonName = SecureEverything 15 | 16 | [ v3_ca ] 17 | subjectKeyIdentifier = hash 18 | authorityKeyIdentifier = keyid:always,issuer:always 19 | basicConstraints = CA:TRUE 20 | keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment, keyAgreement, keyCertSign 21 | subjectAltName = DNS:*.localdev.com, DNS:redis.localdev.com, DNS:rabbitmq.localdev.com, DNS:jupyter.localdev.com, DNS:jenkins.localdev.com, DNS:www.localdev.com, DNS:api.localdev.com, DNS:db.localdev.com, DNS:pgadmin.localdev.com, DNS:phpmyadmin.localdev.com, DNS:kibana.localdev.com, DNS:lb.localdev.com, DNS:docker.localdev.com, email:admin@localdev.com 22 | issuerAltName = issuer:copy 23 | -------------------------------------------------------------------------------- /ansible/configs/extfile.cnf: -------------------------------------------------------------------------------- 1 | subjectAltName = DNS:*.localdev.com, DNS:rabbitmq.localdev.com, DNS:redis.localdev.com, DNS:jupyter.localdev.com, DNS:jenkins.localdev.com, DNS:www.localdev.com, DNS:api.localdev.com, DNS:db.localdev.com, DNS:pgadmin.localdev.com, DNS:phpmyadmin.localdev.com, DNS:kibana.localdev.com, DNS:lb.localdev.com, DNS:docker.localdev.com, IP:127.0.0.1 2 | extendedKeyUsage = serverAuth 3 | -------------------------------------------------------------------------------- /ansible/configs/openssl.cnf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | prompt = no 3 | default_bits = 2048 4 | distinguished_name = req_distinguished_name # where to get DN for reqs 5 | 6 | [ req_distinguished_name ] 7 | C = US 8 | ST = WA 9 | L = Redmond 10 | O = SecureEverything 11 | OU = SecureEverythingOrgUnit 12 | CN = LocalDev 13 | -------------------------------------------------------------------------------- /ansible/inventory_dev: -------------------------------------------------------------------------------- 1 | [local] 
2 | localhost ansible_python_interpreter="../venv/bin/python" 3 | -------------------------------------------------------------------------------- /celery_connectors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/celery_connectors/__init__.py -------------------------------------------------------------------------------- /celery_connectors/build_ssl_options.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | from celery_connectors.utils import ev 3 | 4 | 5 | def build_ssl_options(ca_cert="", 6 | keyfile="", 7 | certfile="", 8 | ssl_required="0"): 9 | 10 | use_ca_certs = ev("SSL_CA_CERT", ca_cert) 11 | use_keyfile = ev("SSL_KEYFILE", keyfile) 12 | use_certfile = ev("SSL_CERTFILE", certfile) 13 | use_ssl_required = ev("SSL_REQUIRED", ssl_required) == "1" 14 | 15 | ssl_options = {} 16 | if use_ca_certs: 17 | ssl_options["ca_certs"] = use_ca_certs 18 | if use_keyfile: 19 | ssl_options["keyfile"] = use_keyfile 20 | if use_certfile: 21 | ssl_options["certfile"] = use_certfile 22 | if use_ssl_required: 23 | ssl_options["cert_reqs"] = ssl.CERT_REQUIRED 24 | 25 | return ssl_options 26 | # end of build_ssl_options 27 | -------------------------------------------------------------------------------- /celery_connectors/log/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/celery_connectors/log/__init__.py -------------------------------------------------------------------------------- /celery_connectors/log/logging.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "disable_existing_loggers": false, 4 | "formatters": { 5 | "simple": { 6 | "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s" 7 | } 8 | }, 9 | "handlers": { 10 | "console": { 11 | "class": "logging.StreamHandler", 12 | "level": "INFO", 13 | "formatter": "simple", 14 | "stream": "ext://sys.stdout" 15 | }, 16 | "info_file_handler": { 17 | "class": "logging.handlers.RotatingFileHandler", 18 | "level": "INFO", 19 | "formatter": "simple", 20 | "filename": "latest.log", 21 | "maxBytes": 10485760, 22 | "backupCount": 20, 23 | "encoding": "utf8" 24 | } 25 | }, 26 | "loggers": { 27 | "my_module": { 28 | "level": "ERROR", 29 | "handlers": ["console"], 30 | "propagate": "no" 31 | } 32 | }, 33 | "root": { 34 | "level": "INFO", 35 | "handlers": ["console", "info_file_handler"] 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /celery_connectors/log/setup_logging.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import logging.config 4 | 5 | 6 | def setup_logging(default_level=logging.INFO, 7 | default_path="{}/logging.json".format( 8 | os.getenv("LOG_DIR", 9 | os.path.dirname(os.path.realpath(__file__))).strip().lstrip()), 10 | env_key='LOG_CFG'): 11 | 12 | """ 13 | Setup logging configuration 14 | """ 15 | path = default_path 16 | value = os.getenv(env_key, None) 17 | if value: 18 | path = value 19 | if os.path.exists(path): 20 | with open(path, 'rt') as f: 21 | config = json.load(f) 22 | logging.config.dictConfig(config) 23 | else: 24 | logging.basicConfig(level=default_level) 25 | # end of setup_logging 26 | 
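A minimal usage sketch for build_ssl_options above: it reads SSL_CA_CERT, SSL_KEYFILE, SSL_CERTFILE and SSL_REQUIRED from the environment (falling back to the arguments passed in) and returns a dict that can be handed to the ssl_options parameters used by the publishers and subscribers in this package. The certificate paths below are illustrative assumptions, not files shipped with the repository.

import os
from celery_connectors.build_ssl_options import build_ssl_options
from celery_connectors.message_processor import MessageProcessor

# hypothetical certificate locations - adjust to the target deployment
os.environ.setdefault("SSL_CA_CERT", "/opt/certs/ca.pem")
os.environ.setdefault("SSL_KEYFILE", "/opt/certs/client_key.pem")
os.environ.setdefault("SSL_CERTFILE", "/opt/certs/client_cert.pem")
os.environ.setdefault("SSL_REQUIRED", "1")

# returns ca_certs/keyfile/certfile entries plus cert_reqs=ssl.CERT_REQUIRED
ssl_options = build_ssl_options()

# the same dict can be reused for both the subscriber and publisher sides
proc = MessageProcessor(name="ssl-msg-proc",
                        sub_ssl_options=ssl_options,
                        pub_ssl_options=ssl_options)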
-------------------------------------------------------------------------------- /celery_connectors/message_processor.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | from celery_connectors.utils import ev 4 | from celery_connectors.kombu_subscriber import KombuSubscriber 5 | from celery_connectors.publisher import Publisher 6 | 7 | log = logging.getLogger("message-processor") 8 | 9 | 10 | class MessageProcessor: 11 | 12 | def __init__(self, 13 | name="message-processor", 14 | sub_auth_url=ev("SUB_BROKER_URL", "redis://localhost:6379/0"), 15 | sub_ssl_options={}, 16 | sub_serializer="application/json", 17 | sub_silent=False, 18 | pub_auth_url=ev("PUB_BROKER_URL", "redis://localhost:6379/0"), 19 | pub_ssl_options={}, 20 | pub_serializer="json", 21 | pub_silent=False): 22 | 23 | self.name = name 24 | self.recv_msgs = [] 25 | self.sub_auth_url = sub_auth_url 26 | self.pub_auth_url = pub_auth_url 27 | self.sub_ssl_options = sub_ssl_options 28 | self.pub_ssl_options = pub_ssl_options 29 | self.sub_serializer = sub_serializer 30 | self.pub_serializer = pub_serializer 31 | self.pub_queue_name = None 32 | 33 | self.sub = None 34 | self.pub = None 35 | 36 | self.exchange = None 37 | self.exchange_name = "" 38 | self.queue = None 39 | self.queue_name = "" 40 | self.routing_key = None 41 | self.pub_routing_key = None 42 | self.pub_hook_version = 1 43 | 44 | self.sub_verbose = not sub_silent 45 | self.pub_verbose = not pub_silent 46 | 47 | # end of __init__ 48 | 49 | def build_publish_node(self, body, data): 50 | publish_hook_body = { 51 | "org_msg": body, 52 | "data": data, 53 | "hook_created": datetime.datetime.now().isoformat(), 54 | "source": self.name, 55 | "version": self.pub_hook_version 56 | } 57 | return publish_hook_body 58 | # end of build_publish_node 59 | 60 | def process_message(self, body, message): 61 | log.info(("{} proc start - msg body={}") 62 | .format(self.name, body)) 63 | 64 | self.recv_msgs.append(body) 65 | 66 | if self.exchange_name: 67 | 68 | processing_data = {} 69 | 70 | if self.pub_verbose: 71 | log.info(("{} pub-hook - build - " 72 | "hook msg body") 73 | .format(self.name)) 74 | 75 | publish_hook_body = self.build_publish_node(body, 76 | data=processing_data) 77 | 78 | if self.pub_verbose: 79 | log.info(("{} pub-hook - send - " 80 | "exchange={} rk={} sz={}") 81 | .format(self.name, 82 | self.exchange_name, 83 | self.routing_key, 84 | self.pub_serializer)) 85 | 86 | try: 87 | publish_hook_result = self.get_pub().publish(body=publish_hook_body, 88 | exchange=self.exchange_name, 89 | routing_key=self.routing_key, 90 | queue=self.routing_key, 91 | serializer=self.pub_serializer, 92 | retry=True) 93 | if self.pub_verbose: 94 | log.info(("{} pub-hook - send - done " 95 | "exchange={} rk={} res={}") 96 | .format(self.name, 97 | self.exchange_name, 98 | self.routing_key, 99 | publish_hook_result)) 100 | 101 | except Exception as hookfailed: 102 | log.info(("{} Non-fatal - publish hook failed " + 103 | "body={} exchange={} rk={} sz={} ex={}") 104 | .format(self.name, 105 | body, 106 | self.exchange_name, 107 | self.routing_key, 108 | self.pub_serializer, 109 | hookfailed)) 110 | else: 111 | log.info(("No auto-caching or pub-hook " 112 | "set exchange={}") 113 | .format(self.exchange)) 114 | # end of send to publisher 115 | 116 | message.ack() 117 | 118 | log.info(("{} proc done - msg") 119 | .format(self.name)) 120 | # end of process_message 121 | 122 | def get_pub(self): 123 | if not self.pub: 124 | 
self.pub = Publisher("msg-pub", 125 | self.pub_auth_url, 126 | self.pub_ssl_options) 127 | return self.pub 128 | # end of get_pub 129 | 130 | def get_sub(self): 131 | if not self.sub: 132 | self.sub = KombuSubscriber("msg-sub", 133 | self.sub_auth_url, 134 | self.sub_ssl_options) 135 | return self.sub 136 | # end of get_sub 137 | 138 | def consume_queue(self, 139 | queue, 140 | exchange, 141 | routing_key=None, 142 | heartbeat=60, 143 | expiration=None, 144 | pub_serializer="application/json", 145 | sub_serializer="application/json", 146 | pub_queue_name=None, 147 | seconds_to_consume=1.0, 148 | forever=True, 149 | silent=False, 150 | callback=None): 151 | 152 | self.queue_name = queue 153 | self.exchange_name = exchange 154 | self.routing_key = routing_key 155 | self.pub_queue_name = pub_queue_name 156 | self.pub_serializer = pub_serializer 157 | self.sub_serializer = sub_serializer 158 | sub_silent = silent 159 | 160 | use_callback = self.process_message 161 | if callback: 162 | use_callback = callback 163 | 164 | log.info(("{} START - consume_queue={} " 165 | "rk={} callback={}") 166 | .format(self.name, 167 | self.queue_name, 168 | self.routing_key, 169 | use_callback.__name__)) 170 | 171 | not_done = True 172 | while not_done: 173 | 174 | self.get_sub().consume(callback=use_callback, 175 | queue=self.queue_name, 176 | exchange=None, 177 | routing_key=None, 178 | serializer=self.sub_serializer, 179 | heartbeat=heartbeat, 180 | forever=forever, 181 | time_to_wait=seconds_to_consume, 182 | silent=sub_silent) 183 | 184 | if not forever: 185 | not_done = False 186 | # if not forever 187 | 188 | # end of while loop 189 | 190 | log.info(("{} DONE - consume_queue={} rk={}") 191 | .format(self.name, 192 | self.queue_name, 193 | self.routing_key)) 194 | 195 | # end of consume_queue 196 | 197 | # end of MessageProcessor 198 | -------------------------------------------------------------------------------- /celery_connectors/mixin_send_task_msg.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from kombu.common import maybe_declare 3 | from kombu.pools import producers 4 | from celery_connectors.utils import SUCCESS 5 | from celery_connectors.utils import FAILED 6 | from celery_connectors.utils import ERROR 7 | 8 | 9 | log = logging.getLogger("pub") 10 | 11 | 12 | def mixin_send_task_msg(conn=None, 13 | data={}, 14 | exchange=None, # kombu.Exchange object 15 | routing_key=None, # string 16 | priority="high", 17 | priority_routing={}, 18 | serializer="json", 19 | silent=False, 20 | log_label="relay", 21 | **kwargs): 22 | 23 | """ 24 | This was built for ProducerConsumerMixins 25 | to publish messages using the kombu.Producer 26 | https://github.com/celery/kombu/blob/81e52b1a9a6d5e59aa64a26bd6a6021a6d082e1c/kombu/mixins.py#L250 27 | """ 28 | 29 | verbose = not silent 30 | 31 | res = {"status": ERROR, # non-zero is failure 32 | "error": ""} 33 | 34 | use_routing_key = routing_key 35 | if not use_routing_key: 36 | if priority in priority_routing: 37 | use_routing_key = priority_routing[priority] 38 | # end of finding the routing key 39 | 40 | payload = data 41 | if len(payload) == 0: 42 | res["status"] = ERROR 43 | res["error"] = "Please set a data argument to a dict " + \ 44 | "to publish messages" 45 | return res 46 | 47 | if not conn: 48 | res["status"] = ERROR 49 | res["error"] = "Please set a valid connection (conn) " + \ 50 | "to publish messages" 51 | return res 52 | 53 | if not exchange: 54 | res["status"] = ERROR 55 | res["error"] = 
"Please set an exchange to publish" 56 | return res 57 | 58 | if not use_routing_key: 59 | res["status"] = ERROR 60 | res["error"] = "Please set pass in a routing_key " + \ 61 | "or a valid priority_routing with an" + \ 62 | "entry to a routing_key string to " + \ 63 | "send a task message" 64 | return res 65 | 66 | if verbose: 67 | log.debug(("{} publish - " 68 | "ex={} rk={} sz={}") 69 | .format(log_label, 70 | exchange, 71 | use_routing_key, 72 | serializer)) 73 | 74 | last_step = "try" 75 | try: 76 | with producers[conn].acquire(block=True) as producer: 77 | 78 | # if you throw here, please pass in a kombu.Exchange 79 | # because the type of Exchange should not be handled in 80 | # the send method 81 | last_step = "Please set an exchange to publish" 82 | last_step = "maybe declare={}".format(exchange.name) 83 | maybe_declare(exchange, 84 | producer.channel) 85 | 86 | if verbose: 87 | if "org_msg" in payload["data"]: 88 | log.info(("{} - ex={} rk={} msg={} r_id={}") 89 | .format(log_label, 90 | exchange.name, 91 | use_routing_key, 92 | payload["data"]["org_msg"]["msg_id"], 93 | payload["msg_id"])) 94 | elif "msg_id" in payload: 95 | log.info(("ex={} rk={} msg={}") 96 | .format(exchange.name, 97 | use_routing_key, 98 | payload["msg_id"])) 99 | else: 100 | log.info(("ex={} rk={} body={}") 101 | .format(exchange.name, 102 | use_routing_key, 103 | str(payload)[0:30])) 104 | # end of verbose 105 | 106 | last_step = "publish rk={}".format(routing_key) 107 | producer.publish(payload, 108 | serializer=serializer, 109 | exchange=exchange, 110 | routing_key=routing_key) 111 | 112 | res["status"] = SUCCESS 113 | res["error"] = "" 114 | 115 | except Exception as e: 116 | res["status"] = FAILED 117 | res["error"] = ("{} producer threw " 118 | "exception={} ex={} rk={} " 119 | "last_step={}").format( 120 | log_label, 121 | e, 122 | exchange, 123 | routing_key, 124 | last_step) 125 | 126 | log.error(res["error"]) 127 | # end of try to send 128 | 129 | return res 130 | # end of mixin_send_task_msg 131 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-bindings.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Bindings broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list bindings source routing_key destination 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-channels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Channels broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list channels name connection number confirm consumer_count messages messages_unacknowledged messages_uncommitted messages_unconfirmed acks_uncommitted prefetch_count global_prefetch_count 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-connections.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo 
"Listing Connections broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list connections name state channels timeout 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-consumers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Consumers broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list consumers 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-exchanges.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Exchanges broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list exchanges name type durable auto_delete 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/list-queues.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Queues broker=${host}:${port}" 10 | echo "" 11 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list queues name durable auto_delete consumers messages messages_ready messages_unacknowledged 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/rmq-close-all-connections.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | container_name=celrabbit1 8 | 9 | echo "" 10 | echo "Closing all connections for broker=${host}:${port}" 11 | echo "" 12 | if [[ -f /tmp/.all-rabbit-connections.txt ]]; then 13 | rm -f /tmp/.all-rabbit-connections.txt 14 | fi 15 | rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} -f tsv -q list connections name > /tmp/.all-rabbit-connections.txt 16 | while read -r name; do rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} -q close connection name="${name}"; done < /tmp/.all-rabbit-connections.txt 17 | if [[ -f /tmp/.all-rabbit-connections.txt ]]; then 18 | rm -f /tmp/.all-rabbit-connections.txt 19 | fi 20 | echo "" 21 | echo "" 22 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/rmq-status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | container_name=celrabbit1 8 | 9 | echo "" 10 | echo "Getting Status for broker=${host}:${port}" 11 | echo "" 12 | docker exec -it ${container_name} rabbitmqctl status 13 | echo "" 14 | echo "Generating Report for broker=${host}:${port}" 15 | echo "" 16 | docker exec -it ${container_name} rabbitmqctl report 17 | echo "" 18 | echo "Getting Environment for broker=${host}:${port}" 19 | echo "" 20 | docker exec -it 
${container_name} rabbitmqctl environment 21 | echo "" 22 | echo "" 23 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/rmq-trace-off.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | container_name=celrabbit1 8 | 9 | echo "" 10 | echo "Turning off tracing for broker=${host}:${port}" 11 | echo "" 12 | docker exec -it ${container_name} rabbitmqctl trace_off 13 | echo "" 14 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/rmq-trace-on.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | container_name=celrabbit1 8 | 9 | echo "" 10 | echo "Turning on tracing for broker=${host}:${port}" 11 | echo "" 12 | docker exec -it ${container_name} rabbitmqctl trace_on 13 | docker logs -f ${container_name} 14 | echo "" 15 | -------------------------------------------------------------------------------- /celery_connectors/rabbitmq/watch-queues.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host="localhost" 4 | port=15672 5 | user=rabbitmq 6 | pw=rabbitmq 7 | 8 | echo "" 9 | echo "Listing Queues broker=${host}:${port}" 10 | echo "" 11 | watch -n1 rabbitmqadmin.py --host=${host} --port=${port} --username=${user} --password=${pw} list queues name durable auto_delete consumers messages messages_ready messages_unacknowledged 12 | echo "" 13 | -------------------------------------------------------------------------------- /celery_connectors/redis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/celery_connectors/redis/__init__.py -------------------------------------------------------------------------------- /celery_connectors/redis/base_redis_application.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | 5 | class BaseRedisApplication: 6 | 7 | def __init__(self, name, redis_address, port, redis_queue, logger, request_key_name=None, response_key_name=None, debug=False): 8 | 9 | self.m_name = name 10 | self.m_host_address = redis_address 11 | self.m_port = port 12 | self.m_queue_name = redis_queue 13 | self.m_log = logger 14 | self.m_debug = debug 15 | 16 | self.m_db = int(os.getenv("ENV_REDIS_DB_ID", 0)) 17 | # if set to empty string use password=None 18 | self.m_redis_password = os.getenv("ENV_REDIS_PASSWORD", "") 19 | 20 | if str(self.m_redis_password) == "": 21 | self.m_redis_password = None 22 | 23 | self.m_rw = None 24 | self.m_put_count = 0 25 | self.m_get_count = 0 26 | self.m_failed_get_count = 0 27 | self.m_fetch_timeout = 60 28 | self.m_overflowed = False 29 | self.m_max_count_on_arch = sys.maxsize - 1 30 | self.m_sleep_for_connection_outage = 1 31 | 32 | self.m_request_key = request_key_name 33 | self.m_response_key = response_key_name 34 | # end of __init__ 35 | 36 | def enable_debug(self): 37 | self.m_debug = True 38 | return None 39 | # end of enable_debug 40 | 41 | def disable_debug(self): 42 | self.m_debug = False 43 | return None 44 | # end of enable_debug 45 | 46 | def reset_counts(self): 47 | self.m_get_count = 0 48 | self.m_put_count = 0 
49 | self.m_failed_get_count = 0 50 | self.m_overflowed = False 51 | return None 52 | # end of reset_counts 53 | 54 | def update_failed_get_count(self): 55 | if self.m_failed_get_count == self.m_max_count_on_arch: 56 | self.m_overflowed = True 57 | self.m_failed_get_count = 0 58 | else: 59 | self.m_failed_get_count += 1 60 | 61 | return None 62 | # end of update_failed_get_count 63 | 64 | def update_get_count(self): 65 | if self.m_get_count == self.m_max_count_on_arch: 66 | self.m_overflowed = True 67 | self.m_get_count = 0 68 | else: 69 | self.m_get_count += 1 70 | 71 | return None 72 | # end of update_get_count 73 | 74 | def update_put_count(self): 75 | if self.m_put_count == self.m_max_count_on_arch: 76 | self.m_overflowed = True 77 | self.m_put_count = 0 78 | else: 79 | self.m_put_count += 1 80 | 81 | return None 82 | # end of update_put_count 83 | 84 | def lg(self, msg, level=6): 85 | 86 | if self.m_log: 87 | full_msg = self.m_name + ": " + msg 88 | 89 | if self.m_debug: 90 | print(full_msg) 91 | 92 | self.m_log.log(full_msg, level) 93 | 94 | return None 95 | # end of lg 96 | 97 | # Force ALL Derived clients to Disconnect correctly 98 | def disconnect(self): 99 | return None 100 | # end of disconnect 101 | 102 | # Force ALL Derived clients to Connect correctly 103 | def connect(self): 104 | return None 105 | # end of connect 106 | 107 | def get_message(self): 108 | self.lg("get_message timeout={}".format(self.m_fetch_timeout), 7) 109 | 110 | # By default RedisWrapper returns None when the timeout is hit 111 | msg = self.m_rw.get(False, self.m_fetch_timeout) 112 | 113 | self.update_get_count() 114 | 115 | return msg 116 | # end of get_message 117 | 118 | def put_message(self, msg_object): 119 | self.lg("putting msg={}".format(msg_object), 7) 120 | 121 | self.m_rw.put(msg_object) 122 | 123 | self.update_put_count() 124 | return None 125 | # end of put_message 126 | 127 | # end of BaseRedisApplication 128 | -------------------------------------------------------------------------------- /celery_connectors/redis/redis-publish-messages.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import uuid 5 | from spylunking.log.setup_logging import build_colorized_logger 6 | from celery_connectors.redis.redis_json_application import RedisJSONApplication 7 | 8 | port = 6379 9 | host = os.getenv("ENV_REDIS_HOST", "localhost").strip().lstrip() 10 | db = int(os.getenv("ENV_REDIS_DB_ID", 0)) 11 | # if set to empty string use password=None 12 | redis_pw = os.getenv("ENV_REDIS_PASSWORD", "") 13 | queue_name = os.getenv("Q_1", "reporting.accounts").strip().lstrip() 14 | name = "redis-producer" 15 | 16 | log = build_colorized_logger( 17 | name='redis-publish') 18 | 19 | log.info("START - {} - Sending messages to redis={}:{}/{} queue={}".format(name, 20 | host, 21 | port, 22 | db, 23 | queue_name)) 24 | 25 | if str(redis_pw) == "": 26 | redis_pw = None 27 | 28 | app = RedisJSONApplication(name, redis_address=host, redis_port=port, redis_queue=queue_name, logger=log) 29 | app.connect() 30 | 31 | max_msgs = 1000 32 | msgs = [] 33 | for x in range(0, max_msgs): 34 | msgs.append(str(x) + "=" + str(uuid.uuid4()).replace("-", "")) 35 | 36 | for msg in msgs: 37 | payload = {"body": msg, 38 | "properties": {"delivery_tag": 1, 39 | "delivery_info": {"routing_key": queue_name, "exchange": queue_name}}} 40 | app.put_into_key(queue_name, payload) 41 | # end of for all to send 42 | 43 | log.info("END - {} - Sending messages={} to 
redis={}:{}/{} queue={}".format(max_msgs, 44 | name, 45 | host, 46 | port, 47 | db, 48 | queue_name)) 49 | -------------------------------------------------------------------------------- /celery_connectors/redis/redis-subscribe-and-read-messages.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.redis.redis_json_application import RedisJSONApplication 6 | 7 | port = 6379 8 | host = os.getenv("ENV_REDIS_HOST", "localhost").strip().lstrip() 9 | db = int(os.getenv("ENV_REDIS_DB_ID", 0)) 10 | # if set to empty string use password=None 11 | redis_pw = os.getenv("ENV_REDIS_PASSWORD", "") 12 | queue_name = os.getenv("Q_1", "reporting.accounts").strip().lstrip() 13 | 14 | name = "redis-subscriber" 15 | log = build_colorized_logger( 16 | name=name) 17 | 18 | log.info("START - {} - Sending messages to redis={}:{}/{} queue={}".format(name, 19 | host, 20 | port, 21 | db, 22 | queue_name)) 23 | 24 | 25 | if str(redis_pw) == "": 26 | redis_pw = None 27 | 28 | app = RedisJSONApplication(name, redis_address=host, redis_port=port, redis_queue=queue_name, logger=log) 29 | app.connect() 30 | 31 | while True: 32 | msg = app.wait_for_message_on_key(num_seconds=1, key=queue_name) 33 | if msg: 34 | log.info("Received msg: " + str(msg)) 35 | # end of infinite while loop - stop with ctrl + c 36 | -------------------------------------------------------------------------------- /celery_connectors/run_consumer_relay.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from kombu import Connection 3 | from celery_connectors.utils import ev 4 | from celery_connectors.relay_worker import RelayWorker 5 | 6 | 7 | # Credits and inspirations from these great sources: 8 | # 9 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 10 | # https://gist.github.com/oubiwann/3843016 11 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 12 | # https://github.com/Skablam/kombu-examples 13 | # https://gist.github.com/mlavin/6671079 14 | 15 | log = logging.getLogger(ev("APP_NAME", "relay")) 16 | 17 | 18 | def run_consumer_relay(broker_url, 19 | ssl_options={}, 20 | transport_options={}, 21 | task_queues=[], 22 | callback=None, 23 | prefetch_count=1, 24 | relay_broker_url=None, 25 | relay_exchange=None, 26 | relay_routing_key=None, 27 | relay_handler=None, 28 | *args, 29 | **kwargs): 30 | 31 | if len(broker_url) == 0: 32 | log.error(("Please pass in a valid broker_url " 33 | "to consume")) 34 | return 35 | 36 | if len(task_queues) == 0: 37 | log.error(("Please pass in a list of task_queues to " 38 | "consume")) 39 | return 40 | 41 | with Connection(broker_url, 42 | ssl=ssl_options, 43 | transport_options=transport_options) as conn: 44 | try: 45 | 46 | log.info(("consuming queues={}") 47 | .format(task_queues)) 48 | 49 | RelayWorker( 50 | "json-to-json-relay", 51 | conn=conn, 52 | task_queues=task_queues, 53 | callback=callback, 54 | prefetch_count=prefetch_count, 55 | relay_broker_url=relay_broker_url, 56 | relay_exchange=relay_exchange, 57 | relay_routing_key=relay_routing_key, 58 | relay_handler=relay_handler, 59 | **kwargs).run() 60 | 61 | except KeyboardInterrupt: 62 | log.info("Received Interrupt - Shutting down") 63 | # end of with kombu.Connection 64 | 65 | # end of run_consumer_relay 66 | 
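A hedged sketch of how run_consumer_relay above might be wired up to relay JSON messages from a RabbitMQ queue toward a Redis broker. The broker URLs and queue/exchange names are taken from the compose environment settings elsewhere in this repository, while the use of kombu.Queue objects for task_queues and the callback signature are assumptions; relay_worker.py (not reproduced in this section) defines what RelayWorker actually expects.

from kombu import Exchange, Queue
from celery_connectors.run_consumer_relay import run_consumer_relay

# assumed local dev brokers: RabbitMQ as the source, Redis as the relay target
broker_url = "pyamqp://rabbitmq:rabbitmq@localhost:5672//"
relay_broker_url = "redis://localhost:6379/0"

# assumed source exchange and queue
source_exchange = Exchange("user.events", type="topic")
task_queues = [Queue("user.events.conversions",
                     exchange=source_exchange,
                     routing_key="user.events.conversions")]


def handle_message(body, message):
    # inspect or transform the JSON payload, then ack it off the source queue
    print("relaying body={}".format(str(body)[0:30]))
    message.ack()


run_consumer_relay(broker_url=broker_url,
                   task_queues=task_queues,
                   callback=handle_message,
                   relay_broker_url=relay_broker_url,
                   relay_exchange="reporting.accounts",
                   relay_routing_key="reporting.accounts")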
-------------------------------------------------------------------------------- /celery_connectors/run_jtoc_relay.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from kombu import Connection 3 | from celery_connectors.utils import ev 4 | from celery_connectors.relay_json_to_celery_worker import RelayJSONtoCeleryWorker 5 | 6 | 7 | # Credits and inspirations from these great sources: 8 | # 9 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 10 | # https://gist.github.com/oubiwann/3843016 11 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 12 | # https://github.com/Skablam/kombu-examples 13 | # https://gist.github.com/mlavin/6671079 14 | 15 | log = logging.getLogger(ev("APP_NAME", "jtoc")) 16 | 17 | 18 | def run_jtoc_relay(broker_url, 19 | ssl_options={}, 20 | transport_options={}, 21 | task_queues=[], 22 | callback=None, 23 | prefetch_count=1, 24 | relay_broker_url=None, 25 | relay_backend_url=None, 26 | relay_exchange=None, 27 | relay_routing_key=None, 28 | relay_handler=None, 29 | celery_app=None, 30 | *args, 31 | **kwargs): 32 | 33 | if len(broker_url) == 0: 34 | log.error(("Please pass in a valid broker_url " 35 | "to consume")) 36 | return 37 | 38 | if len(task_queues) == 0: 39 | log.error(("Please pass in a list of task_queues to " 40 | "consume")) 41 | return 42 | 43 | with Connection(broker_url, 44 | ssl=ssl_options, 45 | transport_options=transport_options) as conn: 46 | try: 47 | 48 | log.info(("consuming queues={}") 49 | .format(task_queues)) 50 | 51 | RelayJSONtoCeleryWorker( 52 | "json-to-celery-relay", 53 | conn=conn, 54 | task_queues=task_queues, 55 | callback=callback, 56 | prefetch_count=prefetch_count, 57 | relay_broker_url=relay_broker_url, 58 | relay_backend_url=relay_backend_url, 59 | relay_exchange=relay_exchange, 60 | relay_routing_key=relay_routing_key, 61 | relay_handler=relay_handler, 62 | celery_app=celery_app, 63 | **kwargs).run() 64 | 65 | except KeyboardInterrupt: 66 | log.info("Received Interrupt - Shutting down") 67 | # end of with kombu.Connection 68 | 69 | # end of run_jtoc_relay 70 | -------------------------------------------------------------------------------- /celery_connectors/run_publisher.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from kombu import Connection 4 | from celery_connectors.utils import SUCCESS 5 | from celery_connectors.utils import calc_backoff_timer 6 | from celery_connectors.utils import get_percent_done 7 | import celery_connectors.mixin_send_task_msg 8 | 9 | 10 | log = logging.getLogger("pub") 11 | 12 | 13 | def run_publisher(broker_url, 14 | exchange=None, # kombu.Exchange object 15 | routing_key=None, # string 16 | msgs=[], 17 | num_per_batch=-1, 18 | priority="high", 19 | priority_routing={}, 20 | serializer="json", 21 | ssl_options={}, 22 | transport_options={}, 23 | send_method=None, 24 | silent=True, 25 | publish_silent=False, 26 | log_label="pub", 27 | *args, 28 | **kwargs): 29 | 30 | verbose = not silent 31 | 32 | if verbose: 33 | log.debug("connecting") 34 | 35 | with Connection(broker_url, 36 | ssl=ssl_options, 37 | transport_options=transport_options) as conn: 38 | 39 | num_to_send = len(msgs) 40 | 41 | if num_to_send == 0: 42 | log.info(("no msgs={} to publish") 43 | .format(num_to_send)) 44 | return 45 | 46 | use_send_method = send_method 47 | # use the default method for sending if one is not passed in 
48 | if not use_send_method: 49 | use_send_method = celery_connectors.mixin_send_task_msg.mixin_send_task_msg 50 | 51 | if verbose: 52 | log.debug(("publishing ex={} rk={} " 53 | "msgs={} send_method={}") 54 | .format(exchange, 55 | routing_key, 56 | num_to_send, 57 | use_send_method.__name__)) 58 | 59 | num_sent = 0 60 | not_done = True 61 | num_fails = 0 62 | 63 | while not_done: 64 | 65 | cur_msg = msgs[num_sent] 66 | 67 | hide_logs = publish_silent 68 | if num_sent > 1 and num_sent % 200 == 0: 69 | hide_logs = False 70 | log.info(("{} send done " 71 | "msg={}/{} ex={} rk={}") 72 | .format(get_percent_done( 73 | num_sent, 74 | num_to_send), 75 | num_sent, 76 | num_to_send, 77 | exchange.name, 78 | routing_key)) 79 | 80 | send_res = use_send_method(conn=conn, 81 | data=cur_msg, 82 | exchange=exchange, 83 | routing_key=routing_key, 84 | priority=priority, 85 | priority_routing=priority_routing, 86 | serializer=serializer, 87 | silent=hide_logs, 88 | log_label=log_label) 89 | 90 | if send_res["status"] == SUCCESS: 91 | num_fails = 0 92 | num_sent += 1 93 | if num_sent >= num_to_send: 94 | not_done = False 95 | else: 96 | num_fails += 1 97 | sleep_duration = calc_backoff_timer(num_fails) 98 | log.info(("publish failed - {} - exch={} rk={} " 99 | "sleep={} seconds retry={}") 100 | .format(send_res["error"], 101 | exchange, 102 | routing_key, 103 | sleep_duration, 104 | num_fails)) 105 | 106 | if num_fails > 100000: 107 | num_fails = 1 108 | 109 | time.sleep(sleep_duration) 110 | # end of if done 111 | 112 | # end of sending all messages 113 | 114 | # end of with kombu.Connection 115 | 116 | # end of run_publisher 117 | -------------------------------------------------------------------------------- /celery_connectors/scripts/start-container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "" 4 | date 5 | 6 | cd /opt/celery_connectors 7 | source /opt/celery_connectors/venv/bin/activate 8 | 9 | echo "Starting Celery=${APP_NAME} loglevel=${LOG_LEVEL}" 10 | celery worker -A run_rabbitmq_subscriber -c 3 --loglevel=${LOG_LEVEL} -n ${APP_NAME} -Ofair 11 | -------------------------------------------------------------------------------- /celery_connectors/scripts/subscribe-to-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "" 4 | date 5 | echo "Starting rabbitmq subscriber" 6 | 7 | # this assumes the current directory is the repository's home dir 8 | export LOG_LEVEL=DEBUG 9 | export LOG_CFG=./celery_connectors/log/logging.json 10 | export APP_NAME="rabbitmq_subscriber_$(date +"%Y-%m-%d-%H-%M-%S")" 11 | 12 | celery worker -A run_rabbitmq_subscriber -c 3 --loglevel=${LOG_LEVEL} -n ${APP_NAME} -Ofair 13 | -------------------------------------------------------------------------------- /celery_connectors/scripts/subscribe-to-redis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "" 4 | date 5 | echo "Starting redis subscriber" 6 | 7 | # this assumes the current directory is the repository's home dir 8 | export LOG_LEVEL=DEBUG 9 | export LOG_CFG=./celery_connectors/log/logging.json 10 | export APP_NAME="redis_subscriber_$(date +"%Y-%m-%d-%H-%M-%S")" 11 | 12 | celery worker -A run_redis_subscriber -c 3 --loglevel=${LOG_LEVEL} -n ${APP_NAME} -Ofair 13 | -------------------------------------------------------------------------------- /celery_connectors/subscriber.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | from celery import Celery 3 | from celery import bootsteps 4 | from kombu import Queue, Exchange, Consumer 5 | from celery_connectors.utils import ev 6 | 7 | log = logging.getLogger("celery-subscriber") 8 | 9 | 10 | class Subscriber: 11 | 12 | def __init__(self, 13 | name=ev("SUBSCRIBER_NAME", "celery-subscriber"), 14 | auth_url=ev("BROKER_URL", "redis://localhost:6379/0"), 15 | app=None, 16 | ssl_options={}, 17 | transport_options={}, 18 | worker_log_format="%(asctime)s: %(levelname)s %(message)s", 19 | **kwargs): 20 | 21 | """ 22 | Available Brokers: 23 | http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html 24 | 25 | Redis: 26 | http://docs.celeryproject.org/en/latest/getting-started/brokers/redis.html 27 | 28 | RabbitMQ: 29 | http://docs.celeryproject.org/en/latest/getting-started/brokers/rabbitmq.html 30 | 31 | SQS: 32 | http://docs.celeryproject.org/en/latest/getting-started/brokers/sqs.html 33 | """ 34 | 35 | self.state = "not_ready" 36 | self.name = name 37 | self.auth_url = auth_url 38 | self.ssl_options = ssl_options 39 | self.transport_options = transport_options 40 | 41 | self.subscriber_app = None 42 | 43 | # allow passing in an initialized Celery application 44 | if app: 45 | self.subscriber_app = app 46 | else: 47 | self.subscriber_app = Celery() 48 | 49 | # update the celery configuration from the kwargs dictionary 50 | self.subscriber_app.conf.update(kwargs) 51 | 52 | # make sure to set the broker_url 53 | self.subscriber_app.conf.broker_url = self.auth_url 54 | self.subscriber_app.conf.worker_log_format = worker_log_format 55 | 56 | self.exchange = None 57 | self.consume_from_queues = [] 58 | 59 | # end of __init__ 60 | 61 | def setup_routing(self, ex_name, consume_queue_names, routing_key=None): 62 | 63 | self.exchange = None 64 | if routing_key: 65 | log.debug(("creating Exchange={} topic for rk={}") 66 | .format(ex_name, 67 | routing_key)) 68 | self.exchange = Exchange(ex_name, type="topic") 69 | else: 70 | log.debug(("creating Exchange={} direct") 71 | .format(ex_name, 72 | routing_key)) 73 | self.exchange = Exchange(ex_name, 74 | type="direct") 75 | # end of if/else 76 | 77 | self.consume_from_queues = [] 78 | for queue_name in consume_queue_names: 79 | 80 | new_queue = None 81 | if routing_key: 82 | log.debug(("creating Queue={} topic rk={} from Exchange={}") 83 | .format(queue_name, 84 | routing_key, 85 | ex_name)) 86 | new_queue = Queue(queue_name, 87 | exchange=self.exchange, 88 | routing_key=routing_key) 89 | else: 90 | log.debug(("creating Queue={} direct from Exchange={}") 91 | .format(queue_name, 92 | ex_name)) 93 | new_queue = Queue(queue_name, exchange=self.exchange) 94 | # end of handling queues with direct/topic routing 95 | 96 | self.consume_from_queues.append(new_queue) 97 | # end of building new consume queues 98 | 99 | self.subscriber_app.conf.tasks_queues = self.consume_from_queues 100 | self.subscriber_app.conf.broker_transport_options = self.transport_options 101 | 102 | self.state = "ready" 103 | # end of setup_routing 104 | 105 | def consume(self, 106 | callback, 107 | queue=None, 108 | queues=[], 109 | exchange=None, 110 | routing_key=None, 111 | silent=False, 112 | prefetch_count=1, # only fetch one message off the queue 113 | auto_declare=True): 114 | 115 | """ 116 | Redis does not have an Exchange or Routing Keys, but RabbitMQ does. 
117 | 118 | Redis producers uses only the queue name to both publish and consume messages: 119 | http://docs.celeryproject.org/en/latest/getting-started/brokers/redis.html#configuration 120 | """ 121 | 122 | if not callback: 123 | log.info(("Please pass in a valid callback " 124 | "function or class method")) 125 | return 126 | 127 | if not queue and len(queues) == 0: 128 | log.info(("Please pass in a valid queue name" 129 | "or list of queue names")) 130 | return 131 | 132 | if self.state != "ready": 133 | use_queues = [queue] 134 | if len(queues) > 0: 135 | use_queues = queues 136 | 137 | if exchange and routing_key: 138 | self.setup_routing(exchange, use_queues, routing_key) 139 | else: 140 | self.setup_routing(queue, use_queues) 141 | # end of initializing for the first time 142 | 143 | if not silent: 144 | log.info(("Subscribed to Exchange={} with " 145 | "routes to queues={} with callback={} " 146 | "prefetch_count={}") 147 | .format(self.exchange.name, 148 | len(self.consume_from_queues), 149 | callback.__name__, 150 | prefetch_count)) 151 | 152 | consume_from_queues = self.consume_from_queues 153 | 154 | # http://docs.celeryproject.org/en/latest/userguide/extending.html 155 | class ConnectorConsumer(bootsteps.ConsumerStep): 156 | 157 | def get_consumers(self, channel): 158 | 159 | # http://docs.celeryproject.org/projects/kombu/en/latest/userguide/consumers.html 160 | return [Consumer(channel, 161 | queues=consume_from_queues, 162 | auto_declare=auto_declare, 163 | prefetch_count=prefetch_count, 164 | callbacks=[callback], 165 | accept=["json"])] 166 | # end of get_consumer 167 | 168 | # end of ConnectorConsumer 169 | 170 | self.subscriber_app.steps["consumer"].add(ConnectorConsumer) 171 | 172 | # end of consume 173 | 174 | # end of Subscriber 175 | -------------------------------------------------------------------------------- /celery_connectors/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | import random 4 | import datetime 5 | 6 | 7 | SUCCESS = 0 8 | FAILED = 1 9 | ERROR = 2 10 | READY = 0 11 | NOT_READY = 1 12 | 13 | 14 | def ev(k, v): 15 | return os.getenv(k, v).strip().lstrip() 16 | # end of ev 17 | 18 | 19 | def get_percent_done(progress, total): 20 | if int(total) == 0: 21 | return 0 22 | else: 23 | return "%0.2f" % float(float(progress)/float(total)*100.00) 24 | # end of get_percent_done 25 | 26 | 27 | def calc_backoff_timer(num=0, sleep_secs=2.0): 28 | sleep_duration_in_seconds = sleep_secs 29 | if num > 0: 30 | sleep_duration_in_seconds = float((num * 1.5) * sleep_secs) 31 | if sleep_duration_in_seconds > 60.0: 32 | sleep_duration_in_seconds = 60.0 33 | return sleep_duration_in_seconds 34 | # end of calc_backoff_timer 35 | 36 | 37 | def build_msg_id(max_len=10): 38 | return str(uuid.uuid4()).replace("-", "")[0:max_len] 39 | # end of build_msg_id 40 | 41 | 42 | def build_msg(data={}, version=1, max_id_len=10): 43 | now = datetime.datetime.now().isoformat() 44 | msg_id = "{}_{}".format(build_msg_id(max_id_len), version) 45 | 46 | msg = {"msg_id": msg_id, 47 | "created": now, 48 | "data": data} 49 | return msg 50 | # end of build_msg 51 | 52 | 53 | def build_sample_msgs(num=100, 54 | data={}, 55 | version=1): 56 | 57 | msgs = [] 58 | if num < 1: 59 | return msgs 60 | 61 | num_done = 0 62 | while num_done < num: 63 | new_msg = build_msg(data=data, 64 | version=version) 65 | msgs.append(new_msg) 66 | num_done += 1 67 | # end of building them 68 | 69 | return msgs 70 | # end of build_sample_msgs 71 | 
72 | 73 | def get_exchange_from_msg(msg): 74 | try: 75 | return msg.delivery_info["exchange"] 76 | except Exception: 77 | return "" 78 | # end of get_exchange_from_msg 79 | 80 | 81 | def get_routing_key_from_msg(msg): 82 | try: 83 | return msg.delivery_info["routing_key"] 84 | except Exception: 85 | return "" 86 | # end of get_routing_key_from_msg 87 | 88 | 89 | def get_source_info_from_msg(msg): 90 | src_exchange = get_exchange_from_msg(msg) 91 | src_routing_key = get_routing_key_from_msg(msg) 92 | source_info = {"src_exchange": src_exchange, 93 | "src_routing_key": src_routing_key} 94 | return source_info 95 | # end of get_source_info_from_msg 96 | 97 | 98 | def get_random_float(use_min=1.0, use_max=10.0): 99 | return random.uniform(use_min, use_max) 100 | # end of get_random_float 101 | -------------------------------------------------------------------------------- /clean-persistence-data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "" 4 | echo "Using root to delete persistence directories: ./docker/data/rabbitmq/ ./docker/data/redis and logs: ./docker/logs/rabbitmq ./docker/logs/redis and files: ./docker/data/rabbitmq/.erlang.cookie" 5 | echo "" 6 | 7 | loc="./docker/data/rabbitmq ./docker/logs/rabbitmq ./docker/data/redis ./docker/logs/redis ./docker/data/rabbitmq/.erlang.cookie" 8 | for d in ${loc}; do 9 | if [[ -e ${d} ]]; then 10 | echo " - deleting=${d}" 11 | last_status=1 12 | if [[ -f ${d} ]]; then 13 | rm -rf ${d} 14 | last_status=$? 15 | elif [[ -d ${d} ]]; then 16 | rm -rf ${d}/* 17 | last_status=$? 18 | fi 19 | if [[ "${last_status}" != "0" ]]; then 20 | echo "" 21 | echo "" 22 | echo "Failed to delete: ${d}" 23 | echo "Please make sure to run this tool with sudo or as root." 24 | echo "This is because docker-compose creates files and directories as the host's root user with these vanilla containers." 
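            # An elevated re-run usually clears this up; for example (illustrative):
            #   sudo ./clean-persistence-data.sh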
25 | echo "" 26 | echo "" 27 | exit 1 28 | fi 29 | fi 30 | done 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /compose/compose-jupyter.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | 5 | # PostgreSQL database 6 | postgres: 7 | image: postgres:10-alpine 8 | container_name: "postgres" 9 | hostname: postgres 10 | restart: always 11 | env_file: 12 | - ./env/postgres-dev.env 13 | ports: 14 | - "5432:5432" 15 | volumes: 16 | - ./data/postgres:/var/lib/postgresql/data 17 | - ./data/common:/data/common 18 | logging: 19 | # limit logs retained on host to 25MB 20 | driver: "json-file" 21 | options: 22 | max-size: "500k" 23 | max-file: "50" 24 | 25 | jupyter: 26 | depends_on: 27 | - postgres 28 | container_name: jupyterhub 29 | image: jupyterhub/jupyterhub:latest 30 | hostname: jupyterhub 31 | restart: always 32 | volumes: 33 | # Bind Docker socket on the host so we can connect to the daemon from 34 | # within the container 35 | - "/var/run/docker.sock:/var/run/docker.sock:rw" 36 | # Bind Docker volume on host for JupyterHub database and cookie secrets 37 | - ./data/jupyter:/data 38 | - ./data/common:/data/common 39 | - ./data/notebooks:/home/jovyan/work 40 | - ./ssl:/ssl 41 | - ./files:/files 42 | ports: 43 | - "443:443" 44 | - "8080:8080" 45 | environment: 46 | USERLIST_FILE: /files/jupyter_userlist 47 | DOCKER_NOTEBOOK_DIR: /home/jovyan/work 48 | # All containers will join this network 49 | DOCKER_NETWORK_NAME: jupyterhub-network 50 | # JupyterHub will spawn this Notebook image for users 51 | DOCKER_NOTEBOOK_IMAGE: jupyter/scipy-notebook 52 | # Notebook directory inside user image 53 | # DOCKER_NOTEBOOK_DIR: ${DOCKER_NOTEBOOK_DIR} 54 | # Using this run command (optional) 55 | # DOCKER_SPAWN_CMD: ${DOCKER_SPAWN_CMD} 56 | # Postgres db info 57 | SSL_KEY: /ssl/jupyter_server_key.pem 58 | SSL_CERT: /ssl/jupyter_server_cert.pem 59 | POSTGRES_DB: jupyter 60 | POSTGRES_HOST: postgres 61 | env_file: 62 | - ./env/postgres-dev.env 63 | - ./env/oauth.env 64 | entrypoint: "/files/start-jupyter.sh" 65 | 66 | volumes: 67 | data: 68 | external: 69 | name: jupyterhub-data 70 | 71 | networks: 72 | default: 73 | external: 74 | name: jupyterhub-network 75 | -------------------------------------------------------------------------------- /compose/compose-kombu-message-processor-rabbitmq.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombumsgprocrmq: 7 | hostname: kombumsgprocrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=rmq-msg-proc 12 | - SUBSCRIBER_NAME=rmq-proc 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=user.events.conversions 17 | - PUBLISH_NAME=rmq-proc 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=reporting.accounts 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "kombumsgprocrmq" 24 | container_name: "kombumsgprocrmq" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 
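    # For example (illustrative), uncomment the host mount on the line below to
    # expose /tmp inside the container, then bring the service up with:
    #   docker-compose -f compose-kombu-message-processor-rabbitmq.yml up -d
    # (assumes RabbitMQ and Redis are already listening on localhost, e.g. via
    #  ./start-redis-and-rabbitmq.sh)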
28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-kombu-message-processor-rabbitmq.py" 38 | -------------------------------------------------------------------------------- /compose/compose-kombu-message-processor-redis.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombumsgprocredis: 7 | hostname: kombumsgprocredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=redis-msg-proc 12 | - SUBSCRIBER_NAME=redis-proc 13 | - SUB_BROKER_URL=redis://localhost:6379/0 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=user.events.conversions 17 | - PUBLISH_NAME=redis-proc 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=reporting.accounts 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "kombumsgprocredis" 24 | container_name: "kombumsgprocredis" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-kombu-message-processor-redis.py" 38 | -------------------------------------------------------------------------------- /compose/compose-kombu-mixin-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombumixinsubrmq: 7 | hostname: kombumixinsubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=mixin-rmq-sub 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE=reporting.payments 15 | - CONSUME_ROUTING_KEY=reporting.payments 16 | - CONSUME_QUEUE=reporting.payments 17 | - PUBLISH_NAME= 18 | - PUB_BROKER_URL= 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_ROUTING_KEY= 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "kombumixinsubrmq" 24 | container_name: "kombumixinsubrmq" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 
28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_mixin_subscriber.py" 38 | -------------------------------------------------------------------------------- /compose/compose-kombu-rabbitmq-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombusubrmq: 7 | hostname: kombusubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=kombu-rmq-sub 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE=reporting 15 | - CONSUME_ROUTING_KEY=reporting.accounts 16 | - CONSUME_QUEUE=reporting.accounts 17 | - PUBLISH_NAME= 18 | - PUB_BROKER_URL= 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_ROUTING_KEY= 21 | - PUBLISH_QUEUE= 22 | - TEST_STOP_DONE=1 23 | labels: 24 | NAME: "kombusubrmq" 25 | container_name: "kombusubrmq" 26 | network_mode: "host" 27 | # volumes: 28 | # want to shared files outside the container? 29 | # - /tmp:/opt/shared 30 | logging: 31 | # limit logs retained on host to 25MB 32 | driver: "json-file" 33 | options: 34 | max-size: "500k" 35 | max-file: "50" 36 | # debug containers by sleeping on entrypoint 37 | # entrypoint: "sleep 600" 38 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_rabbitmq_subscriber.py" 39 | -------------------------------------------------------------------------------- /compose/compose-kombu-redis-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombusubredis: 7 | hostname: kombusubredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=kombu-redis-sub 12 | - SUBSCRIBER_NAME=redis-sub 13 | - SUB_BROKER_URL=redis://localhost:6379/0 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=user.events.conversions 17 | - PUBLISH_NAME= 18 | - PUB_BROKER_URL= 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_ROUTING_KEY= 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "kombusubredis" 24 | container_name: "kombusubredis" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 
28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_redis_subscriber.py" 38 | -------------------------------------------------------------------------------- /compose/compose-kombu-sqs-publisher.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombupubsqs: 7 | hostname: kombupubsqs 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - SQS_AWS_ACCESS_KEY= 12 | - SQS_AWS_SECRET_KEY= 13 | - APP_NAME=kombu-sqs-pub 14 | - SUBSCRIBER_NAME= 15 | - CONSUME_EXCHANGE= 16 | - CONSUME_ROUTING_KEY= 17 | - CONSUME_QUEUE= 18 | - PUBLISH_NAME=kombu-sqs-pub 19 | - PUB_BROKER_URL= 20 | - PUBLISH_EXCHANGE=test1 21 | - PUBLISH_ROUTING_KEY=test1 22 | - PUBLISH_QUEUE=test1 23 | labels: 24 | NAME: "kombupubsqs" 25 | container_name: "kombupubsqs" 26 | network_mode: "host" 27 | # volumes: 28 | # want to shared files outside the container? 29 | # - /tmp:/opt/shared 30 | logging: 31 | # limit logs retained on host to 25MB 32 | driver: "json-file" 33 | options: 34 | max-size: "500k" 35 | max-file: "50" 36 | # debug containers by sleeping on entrypoint 37 | # entrypoint: "sleep 600" 38 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_sqs_publisher.py" 39 | -------------------------------------------------------------------------------- /compose/compose-kombu-sqs-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombusqspub: 7 | hostname: kombusqspub 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - SQS_AWS_ACCESS_KEY= 12 | - SQS_AWS_SECRET_KEY= 13 | - APP_NAME=kombu-sqs-sub 14 | - SUBSCRIBER_NAME=sqs-sub 15 | - CONSUME_EXCHANGE=test1 16 | - CONSUME_ROUTING_KEY=test1 17 | - CONSUME_QUEUE=test1 18 | - PUBLISH_NAME= 19 | - PUB_BROKER_URL= 20 | - PUBLISH_EXCHANGE= 21 | - PUBLISH_ROUTING_KEY= 22 | - PUBLISH_QUEUE= 23 | labels: 24 | NAME: "kombusqspub" 25 | container_name: "kombusqspub" 26 | network_mode: "host" 27 | # volumes: 28 | # want to shared files outside the container? 
29 | # - /tmp:/opt/shared 30 | logging: 31 | # limit logs retained on host to 25MB 32 | driver: "json-file" 33 | options: 34 | max-size: "500k" 35 | max-file: "50" 36 | # debug containers by sleeping on entrypoint 37 | # entrypoint: "sleep 600" 38 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_sqs_subscriber.py" 39 | -------------------------------------------------------------------------------- /compose/compose-publish-user-conversion-events-rabbitmq.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | ucepubrmq: 7 | hostname: ucepubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=rmq-pub-uce 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE=user.events 20 | - PUBLISH_ROUTING_KEY=user.events.conversions 21 | - PUBLISH_QUEUE=user.events.conversions 22 | labels: 23 | NAME: "ucepubrmq" 24 | container_name: "ucepubrmq" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/publish-user-conversion-events-rabbitmq.py" 38 | -------------------------------------------------------------------------------- /compose/compose-publish-user-conversion-events-redis.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | ucepubredis: 7 | hostname: ucepubredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=redis-pub-uce 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=user.events 20 | - PUBLISH_ROUTING_KEY=user.events.conversions 21 | - PUBLISH_QUEUE=user.events.conversions 22 | labels: 23 | NAME: "ucepubredis" 24 | container_name: "ucepubredis" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 
28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/publish-user-conversion-events-redis.py" 38 | -------------------------------------------------------------------------------- /compose/compose-run-celery-rabbitmq-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | celeryrabbitmqsubscriber: 7 | hostname: celeryrabbitmqsubscriber 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celeryrmqsub 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_QUEUE=reporting.accounts 15 | - CONSUME_QUEUE2=reporting.subscriptions 16 | - PREFETCH_COUNT=1 17 | labels: 18 | NAME: "celeryrabbitmqsubscriber" 19 | container_name: "celeryrabbitmqsubscriber" 20 | network_mode: "host" 21 | # volumes: 22 | # want to shared files outside the container? 23 | # - /tmp:/opt/shared 24 | logging: 25 | # limit logs retained on host to 25MB 26 | driver: "json-file" 27 | options: 28 | max-size: "500k" 29 | max-file: "50" 30 | # debug containers by sleeping on entrypoint 31 | # entrypoint: "sleep 600" 32 | entrypoint: "/opt/celery_connectors/venv/bin/celery worker -A run_rabbitmq_subscriber -n rabbitmq_bootstep -c 3 --loglevel=INFO -Ofair" 33 | -------------------------------------------------------------------------------- /compose/compose-run-celery-redis-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | celeryredissubscriber: 7 | hostname: celeryredissubscriber 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celeryrmqsub 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=redis://localhost:6379/0 14 | - CONSUME_QUEUE=reporting.accounts 15 | - CONSUME_QUEUE2=reporting.subscriptions 16 | - PREFETCH_COUNT=1 17 | labels: 18 | NAME: "celeryredissubscriber" 19 | container_name: "celeryredissubscriber" 20 | network_mode: "host" 21 | # volumes: 22 | # want to shared files outside the container? 
23 | # - /tmp:/opt/shared 24 | logging: 25 | # limit logs retained on host to 25MB 26 | driver: "json-file" 27 | options: 28 | max-size: "500k" 29 | max-file: "50" 30 | # debug containers by sleeping on entrypoint 31 | # entrypoint: "sleep 600" 32 | entrypoint: "/opt/celery_connectors/venv/bin/celery worker -A run_redis_subscriber -n redis_bootstep -c 3 --loglevel=INFO -Ofair" 33 | -------------------------------------------------------------------------------- /compose/compose-run-rabbitmq-publisher.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombupubrmq: 7 | hostname: kombupubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-rmq-pub 12 | - SUBSCRIBER_NAME=rmq-pub 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE=reporting 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE=reporting.accounts 22 | labels: 23 | NAME: "kombupubrmq" 24 | container_name: "kombupubrmq" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/run_rabbitmq_publisher.py" 38 | -------------------------------------------------------------------------------- /compose/compose-run-rabbitmq-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | celerysubrmq: 7 | hostname: celerysubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-rmq-sub 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=reporting.accounts 17 | - CONSUME_QUEUE2=reporting.subscriptions 18 | - PUBLISH_NAME= 19 | - PUB_BROKER_URL= 20 | - PUBLISH_EXCHANGE= 21 | - PUBLISH_ROUTING_KEY= 22 | - PUBLISH_QUEUE= 23 | labels: 24 | NAME: "celerysubrmq" 25 | container_name: "celerysubrmq" 26 | network_mode: "host" 27 | # volumes: 28 | # want to shared files outside the container? 
29 | # - /tmp:/opt/shared 30 | logging: 31 | # limit logs retained on host to 25MB 32 | driver: "json-file" 33 | options: 34 | max-size: "500k" 35 | max-file: "50" 36 | # debug containers by sleeping on entrypoint 37 | # entrypoint: "sleep 600" 38 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/run_rabbitmq_subscriber.py" 39 | -------------------------------------------------------------------------------- /compose/compose-run-redis-publisher.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | kombupubredis: 7 | hostname: kombupubredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=redis-pub 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=reporting.accounts 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE=reporting.accounts 22 | labels: 23 | NAME: "kombupubredis" 24 | container_name: "kombupubredis" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/run_redis_publisher.py" 38 | -------------------------------------------------------------------------------- /compose/compose-run-redis-subscriber.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | celerysubredis: 7 | hostname: celerysubredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-redis-sub 12 | - SUBSCRIBER_NAME=redis-sub 13 | - SUB_BROKER_URL=redis://localhost:6379/0 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=reporting.accounts 17 | - PUBLISH_NAME= 18 | - PUB_BROKER_URL= 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_ROUTING_KEY= 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "celerysubredis" 24 | container_name: "celerysubredis" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 
28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/run_redis_subscriber.py" 38 | -------------------------------------------------------------------------------- /compose/compose-start-ecomm-relay.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | ecommrelay: 7 | hostname: ecommrelay 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=ecomm-relay 12 | - SUBSCRIBER_NAME=ecomm-relay-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=user.events.conversions 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=reporting.accounts 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - RELAY_NAME=ecomm-relay 22 | - RELAY_WORKER_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 23 | - RELAY_BACKEND_URL=redis://localhost:6379/12 24 | - RELAY_CONFIG_MODULE=ecomm_app.ecommerce.celeryconfig_pub_sub 25 | - RELAY_TASK_NAME=ecomm_app.ecommerce.tasks.handle_user_conversion_events 26 | - PUBLISH_QUEUE= 27 | labels: 28 | NAME: "ecommrelay" 29 | container_name: "ecommrelay" 30 | network_mode: "host" 31 | # volumes: 32 | # want to shared files outside the container? 33 | # - /tmp:/opt/shared 34 | logging: 35 | # limit logs retained on host to 25MB 36 | driver: "json-file" 37 | options: 38 | max-size: "500k" 39 | max-file: "50" 40 | # debug containers by sleeping on entrypoint 41 | # entrypoint: "sleep 600" 42 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-ecomm-relay.py" 43 | -------------------------------------------------------------------------------- /compose/compose-start-ecomm-worker.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | ecommworker: 7 | hostname: ecommworker 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-rabbitmq 12 | - SUBSCRIBER_NAME=rmq-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - PUB_BROKER_URL=redis://localhost:6379/0 15 | - PATH_TO_WORKER_MODULE=ecomm_app.job_worker 16 | - PATH_TO_CELERY=/opt/celery_connectors/venv/bin/celery 17 | - APP_NAME=ecommerce_subscriber 18 | - NUM_WORKERS=3 19 | - LOG_LEVEL=DEBUG 20 | labels: 21 | NAME: "ecommworker" 22 | container_name: "ecommworker" 23 | network_mode: "host" 24 | # volumes: 25 | # want to shared files outside the container? 
26 | # - /tmp:/opt/shared 27 | logging: 28 | # limit logs retained on host to 25MB 29 | driver: "json-file" 30 | options: 31 | max-size: "500k" 32 | max-file: "50" 33 | # debug containers by sleeping on entrypoint 34 | # entrypoint: "sleep 600" 35 | entrypoint: "/opt/celery_connectors/start-ecomm-worker.sh" 36 | -------------------------------------------------------------------------------- /compose/compose-start-load-test-rabbitmq.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | loadtestrmq: 7 | hostname: loadtestrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=load-test-rabbitmq 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_EXCHANGE_TYPE= 21 | - PUBLISH_ROUTING_KEY=reporting.accounts 22 | - PUBLISH_QUEUE=reporting.accounts 23 | - NUM_MSG_TO_PUBLISH=200000 24 | - PREFETCH_COUNT=1 25 | labels: 26 | NAME: "loadtestrmq" 27 | container_name: "loadtestrmq" 28 | network_mode: "host" 29 | # volumes: 30 | # want to shared files outside the container? 31 | # - /tmp:/opt/shared 32 | logging: 33 | # limit logs retained on host to 25MB 34 | driver: "json-file" 35 | options: 36 | max-size: "500k" 37 | max-file: "50" 38 | # debug containers by sleeping on entrypoint 39 | # entrypoint: "sleep 600" 40 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-load-test-rabbitmq.py" 41 | -------------------------------------------------------------------------------- /compose/compose-start-load-test-redis.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | loadtestredis: 7 | hostname: loadtestredis 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=load-test-redis 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE=reporting.accounts 22 | - NUM_MSG_TO_PUBLISH=200000 23 | - PREFETCH_COUNT=1 24 | labels: 25 | NAME: "loadtestredis" 26 | container_name: "loadtestredis" 27 | network_mode: "host" 28 | # volumes: 29 | # want to shared files outside the container? 
30 | # - /tmp:/opt/shared 31 | logging: 32 | # limit logs retained on host to 25MB 33 | driver: "json-file" 34 | options: 35 | max-size: "500k" 36 | max-file: "50" 37 | # debug containers by sleeping on entrypoint 38 | # entrypoint: "sleep 600" 39 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-load-test-redis.py" 40 | -------------------------------------------------------------------------------- /compose/compose-start-mixin-celery-relay.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | jtocrelay: 7 | hostname: jtocrelay 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=jtoc_relay 12 | - SUBSCRIBER_NAME=jtoc-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE=ecomm.api 15 | - CONSUME_EXCHANGE_TYPE=topic 16 | - CONSUME_ROUTING_KEY=ecomm.api.west 17 | - CONSUME_QUEUE=ecomm.api.west 18 | - PUBLISH_NAME=jtoc-pub 19 | - RELAY_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 20 | - RELAY_BACKEND_URL=redis://localhost:6379/10 21 | - RELAY_EXCHANGE=reporting.accounts 22 | - RELAY_EXCHANGE_TYPE=direct 23 | - RELAY_ROUTING_KEY=reporting.payments 24 | - RELAY_QUEUE= 25 | - PREFETCH_COUNT=1 26 | labels: 27 | NAME: "jtocrelay" 28 | container_name: "jtocrelay" 29 | network_mode: "host" 30 | # volumes: 31 | # want to shared files outside the container? 32 | # - /tmp:/opt/shared 33 | logging: 34 | # limit logs retained on host to 25MB 35 | driver: "json-file" 36 | options: 37 | max-size: "500k" 38 | max-file: "50" 39 | # debug containers by sleeping on entrypoint 40 | # entrypoint: "sleep 600" 41 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-mixin-celery-relay.py" 42 | -------------------------------------------------------------------------------- /compose/compose-start-mixin-json-relay.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | jtojrelay: 7 | hostname: jtojrelay 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=jtoj_relay 12 | - SUBSCRIBER_NAME=jtoj-sub 13 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 14 | - CONSUME_EXCHANGE=ecomm.api 15 | - CONSUME_EXCHANGE_TYPE=topic 16 | - CONSUME_ROUTING_KEY=ecomm.api.west 17 | - CONSUME_QUEUE=ecomm.api.west 18 | - PUBLISH_NAME=jtoj-pub 19 | - RELAY_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 20 | - RELAY_BACKEND_URL=redis://localhost:6379/0 21 | - RELAY_EXCHANGE= 22 | - RELAY_EXCHANGE_TYPE=direct 23 | - RELAY_ROUTING_KEY=reporting.payments 24 | - RELAY_QUEUE= 25 | - PREFETCH_COUNT=1 26 | labels: 27 | NAME: "jtojrelay" 28 | container_name: "jtojrelay" 29 | network_mode: "host" 30 | # volumes: 31 | # want to shared files outside the container? 
32 | # - /tmp:/opt/shared 33 | logging: 34 | # limit logs retained on host to 25MB 35 | driver: "json-file" 36 | options: 37 | max-size: "500k" 38 | max-file: "50" 39 | # debug containers by sleeping on entrypoint 40 | # entrypoint: "sleep 600" 41 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-mixin-json-relay.py" 42 | -------------------------------------------------------------------------------- /compose/compose-start-mixin-load-test.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | mixinloadtest: 7 | hostname: mixinloadtest 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=mixin-load-test-ecomm-rabbit 12 | - SUBSCRIBER_NAME=mixin-load-test-sub 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=mixin-load-test-publisher 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE=ecomm.api 20 | - PUBLISH_EXCHANGE_TYPE=topic 21 | - PUBLISH_ROUTING_KEY=ecomm.api.west 22 | - PUBLISH_QUEUE=ecomm.api.west 23 | - NUM_MSG_TO_PUBLISH=20000 24 | labels: 25 | NAME: "mixinloadtest" 26 | container_name: "mixinloadtest" 27 | network_mode: "host" 28 | # volumes: 29 | # want to shared files outside the container? 30 | # - /tmp:/opt/shared 31 | logging: 32 | # limit logs retained on host to 25MB 33 | driver: "json-file" 34 | options: 35 | max-size: "500k" 36 | max-file: "50" 37 | # debug containers by sleeping on entrypoint 38 | # entrypoint: "sleep 600" 39 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-mixin-load-test.py" 40 | -------------------------------------------------------------------------------- /compose/compose-start-mixin-publisher.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | mixinpubrmq: 7 | hostname: mixinpubrmq 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=robopub 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=mixin-pub 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE=ecomm.api 20 | - PUBLISH_EXCHANGE_TYPE=topic 21 | - PUBLISH_ROUTING_KEY=ecomm.api.west 22 | - PUBLISH_QUEUE=ecomm.api.west 23 | labels: 24 | NAME: "mixinpubrmq" 25 | container_name: "mixinpubrmq" 26 | network_mode: "host" 27 | # volumes: 28 | # want to shared files outside the container? 
29 | # - /tmp:/opt/shared 30 | logging: 31 | # limit logs retained on host to 25MB 32 | driver: "json-file" 33 | options: 34 | max-size: "500k" 35 | max-file: "50" 36 | # debug containers by sleeping on entrypoint 37 | # entrypoint: "sleep 600" 38 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-mixin-publisher.py" 39 | -------------------------------------------------------------------------------- /compose/compose-start-subscriptions-rabbitmq-test.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | subsloadtest: 7 | hostname: subsloadtest 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=mixin-load-test-subs-rabbit 12 | - SUBSCRIBER_NAME= 13 | - SUB_BROKER_URL= 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE= 17 | - PUBLISH_NAME=mixin-subs-publisher 18 | - PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 19 | - PUBLISH_EXCHANGE= 20 | - PUBLISH_EXCHANGE_TYPE= 21 | - PUBLISH_ROUTING_KEY=reporting.subscriptions 22 | - PUBLISH_QUEUE=reporting.subscriptions 23 | - NUM_MSG_TO_PUBLISH=200000 24 | labels: 25 | NAME: "subsloadtest" 26 | container_name: "subsloadtest" 27 | network_mode: "host" 28 | # volumes: 29 | # want to shared files outside the container? 30 | # - /tmp:/opt/shared 31 | logging: 32 | # limit logs retained on host to 25MB 33 | driver: "json-file" 34 | options: 35 | max-size: "500k" 36 | max-file: "50" 37 | # debug containers by sleeping on entrypoint 38 | # entrypoint: "sleep 600" 39 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/start-subscriptions-rabbitmq-test.py" 40 | -------------------------------------------------------------------------------- /compose/data/jupyter/.gitignore: -------------------------------------------------------------------------------- 1 | jupyterhub_cookie_secret 2 | -------------------------------------------------------------------------------- /compose/data/jupyter/jupyterhub_config.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Jupyter Development Team. 2 | # Distributed under the terms of the Modified BSD License. 3 | 4 | # Configuration file for JupyterHub 5 | import os 6 | 7 | c = get_config() # noqa 8 | 9 | # We rely on environment variables to configure JupyterHub so that we 10 | # avoid having to rebuild the JupyterHub container every time we change a 11 | # configuration parameter. 12 | 13 | # Spawn single-user servers as Docker containers 14 | c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner' 15 | # Spawn containers from this image 16 | c.DockerSpawner.container_image = os.environ['DOCKER_NOTEBOOK_IMAGE'] 17 | # JupyterHub requires a single-user instance of the Notebook server, so we 18 | # default to using the `start-singleuser.sh` script included in the 19 | # jupyter/docker-stacks *-notebook images as the Docker run command when 20 | # spawning containers. Optionally, you can override the Docker run command 21 | # using the DOCKER_SPAWN_CMD environment variable. 
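# For example (hypothetical override), setting
#   DOCKER_SPAWN_CMD="start-singleuser.sh --debug"
# in the hub container's environment would be picked up by the lookup below.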
22 | spawn_cmd = os.environ.get('DOCKER_SPAWN_CMD', "start-singleuser.sh")
23 | c.DockerSpawner.extra_create_kwargs.update({'command': spawn_cmd})
24 | # Connect containers to this Docker network
25 | network_name = os.environ['DOCKER_NETWORK_NAME'] or 'default'
26 | c.DockerSpawner.use_internal_ip = True
27 | c.DockerSpawner.network_name = network_name
28 | # Pass the network name as argument to spawned containers
29 | c.DockerSpawner.extra_host_config = {'network_mode': network_name}
30 | # Explicitly set notebook directory because we'll be mounting a host volume to
31 | # it. Most jupyter/docker-stacks *-notebook images run the Notebook server as
32 | # user `jovyan`, and set the notebook directory to `/home/jovyan/work`.
33 | # We follow the same convention.
34 | notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/data/notebooks'
35 | c.DockerSpawner.notebook_dir = notebook_dir
36 | # Mount the real user's Docker volume on the host to the notebook user's
37 | # notebook directory in the container
38 | c.DockerSpawner.volumes = {'jupyterhub-user-{username}': notebook_dir}
39 | c.DockerSpawner.extra_create_kwargs.update({'volume_driver': 'local'})
40 | # Remove containers once they are stopped
41 | c.DockerSpawner.remove_containers = True
42 | # For debugging arguments passed to spawned containers
43 | c.DockerSpawner.debug = True
44 | 
45 | # User containers will access hub by container name on the Docker network
46 | # c.JupyterHub.hub_ip = 'jupyterhub'
47 | c.JupyterHub.hub_ip = '0.0.0.0'
48 | c.JupyterHub.hub_port = 8080
49 | 
50 | # TLS config
51 | c.JupyterHub.port = 443
52 | c.JupyterHub.ssl_key = os.environ['SSL_KEY']
53 | c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
54 | 
55 | # Authenticate users with GitHub OAuth
56 | if os.getenv("GITHUB_AUTH_ENABLED", "0") == "1":
57 |     c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
58 |     c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
59 | 
60 | # Persist hub data on volume mounted inside container
61 | data_dir = os.environ.get('DATA_VOLUME_CONTAINER', '/data')
62 | 
63 | c.JupyterHub.cookie_secret_file = os.path.join(data_dir, 'jupyterhub_cookie_secret')
64 | 
65 | c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
66 |     host=os.environ['POSTGRES_HOST'],
67 |     password=os.environ['POSTGRES_PASSWORD'],
68 |     db=os.environ['POSTGRES_DB'],
69 | )
70 | 
71 | # Whitelist users and admins
72 | c.Authenticator.whitelist = whitelist = set()
73 | c.Authenticator.admin_users = admin = set()
74 | c.JupyterHub.admin_access = True
75 | pwd = os.path.dirname(__file__)
76 | 
77 | user_list_file = os.getenv("USERLIST_FILE",
78 |                            "/files/jupyter_userlist")
79 | 
80 | if os.path.exists(user_list_file):
81 |     print("Loading user_list_file={}".format(user_list_file))
82 |     with open(user_list_file) as f:
83 |         for line in f:
84 |             if not line.strip():  # skip blank lines and a trailing newline
85 |                 continue
86 |             parts = line.split()
87 |             name = parts[0]
88 |             whitelist.add(name)
89 |             if len(parts) > 1 and parts[1] == 'admin':
90 |                 admin.add(name)
91 | 
-------------------------------------------------------------------------------- /compose/data/notebooks/.gitignore: --------------------------------------------------------------------------------
1 | *.ipynb_checkpoints
2 | 
-------------------------------------------------------------------------------- /compose/data/postgres/.gitignore: --------------------------------------------------------------------------------
1 | pgdata
2 | 
-------------------------------------------------------------------------------- /compose/env/common-celery-connectors.env: -------------------------------------------------------------------------------- 1 | CELERY_SUBSCRIBER_NAME=celery-subscriber 2 | CONVERSIONS_EXCHANGE=user.events 3 | CONVERSIONS_QUEUE=user.events.conversions 4 | CONVERSIONS_ROUTING_KEY=user.events.conversions 5 | DIRECT_EXCHANGE_TYPE= 6 | DIRECT_RELAY_EXCHANGE_NAME= 7 | ECOMM_EXCHANGE_NAME=ecomm.api 8 | ECOMM_QUEUE_NAME=ecomm.api.west 9 | ECOMM_ROUTING_KEY=ecomm.api.west 10 | EXCHANGE_NAME= 11 | JSON_TO_CELERY_APP_NAME=jtoc 12 | JSON_TO_CELERY_RELAY_APP_NAME=jtoc_relay 13 | KOMBU_PUBLISHER_NAME=kombu-publisher 14 | KOMBU_SUBSCRIBER_NAME=kombu-subscriber 15 | LOAD_TEST_EXCHANGE=reporting 16 | LOAD_TEST_QUEUE=reporting.accounts 17 | LOAD_TEST_ROUTING_KEY=reporting.accounts 18 | LOG_LEVEL=DEBUG 19 | LOG_CFG=/opt/celery_connectors/celery_connectors/log/logging.json 20 | MIXIN_APP_NAME=mixin_relay 21 | PREFETCH_COUNT=1 22 | PUB_APP_NAME=robopub 23 | PUB_SUB_APP_NAME=robopubsub 24 | RABBITMQ_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 25 | RABBITMQ_PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 26 | RABBITMQ_RELAY_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 27 | RABBITMQ_SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 28 | REDIS_BROKER_URL=redis://localhost:6379/0 29 | REDIS_RELAY_BROKER_URL=redis://localhost:6379/10 30 | REDIS_SUB_BROKER_URL=redis://localhost:6379/0 31 | RELAY_APP_NAME=relay 32 | RELAY_EXCHANGE_TYPE=direct 33 | RELAY_ROUTING_KEY=reporting.payments 34 | RELAY_TASK_NAME=ecomm_app.ecommerce.tasks.handle_user_conversion_events 35 | RELAY_WORKER_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 36 | REPORTING_ACCOUNTS_EXCHANGE=reporting.accounts 37 | REPORTING_ACCOUNTS_QUEUE_NAME=reporting.accounts 38 | REPORTING_ACCOUNTS_ROUTING_KEY=reporting.accounts 39 | REPORTING_EXCHANGE=reporting 40 | REPORTING_SUBS_QUEUE_NAME=reporting.subscriptions 41 | REPORTING_SUBS_ROUTING_KEY=reporting.subscriptions 42 | SSL_CA_CERT= 43 | SSL_CERTFILE= 44 | SSL_KEYFILE= 45 | SSL_REQUIRED= 46 | TEST_EXCHANGE=test.events 47 | TEST_QUEUE=test.events.conversions 48 | TEST_RABBITMQ_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 49 | TEST_REDIS_BROKER_URL=redis://localhost:6379/0 50 | TEST_ROUTING_KEY=test.events.conversions 51 | TEST_STOP_DONE=0 52 | TOPIC_EXCHANGE_TYPE=topic 53 | -------------------------------------------------------------------------------- /compose/env/oauth.env: -------------------------------------------------------------------------------- 1 | GITHUB_CLIENT_ID=NOTREAL 2 | GITHUB_CLIENT_SECRET=NOTREALSECRET 3 | OAUTH_CALLBACK_URL=https://jupyter.localdev.com/hub/oauth_callback 4 | -------------------------------------------------------------------------------- /compose/env/postgres-dev.env: -------------------------------------------------------------------------------- 1 | POSTGRES_USER=postgres 2 | POSTGRES_PASSWORD=postgres 3 | POSTGRES_DB=jupyter 4 | PGDATA=/var/lib/postgresql/data/pgdata 5 | -------------------------------------------------------------------------------- /compose/files/jupyter_userlist: -------------------------------------------------------------------------------- 1 | admin admin 2 | -------------------------------------------------------------------------------- /compose/files/start-jupyter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Adding local dev 
username=admin password=admin" 4 | 5 | username="admin" 6 | password="admin" 7 | useradd -m -p $(openssl passwd -1 ${password}) -s /bin/bash -G sudo ${username} 8 | 9 | /opt/conda/bin/conda install -yq psycopg2=2.7 10 | /opt/conda/bin/conda clean -tipsy 11 | /opt/conda/bin/pip install --no-cache-dir oauthenticator==0.7.* dockerspawner==0.9.* notebook 12 | 13 | jupyterhub -f /data/jupyterhub_config.py 14 | 15 | echo "exiting" 16 | sleep 300 17 | -------------------------------------------------------------------------------- /compose/ssl/.gitignore: -------------------------------------------------------------------------------- 1 | *.key 2 | *.cer 3 | *.cert 4 | *.cnf 5 | *.csr 6 | *.crt 7 | *.p12 8 | *.pem 9 | -------------------------------------------------------------------------------- /compose/starter.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | worker: 7 | hostname: worker 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-rabbitmq 12 | - SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 13 | - SUBSCRIBER_NAME=rmq-proc 14 | - CONSUME_EXCHANGE= 15 | - CONSUME_ROUTING_KEY= 16 | - CONSUME_QUEUE=user.events.conversions 17 | - PUBLISH_NAME=publisher 18 | - PUB_BROKER_URL=redis://localhost:6379/0 19 | - PUBLISH_EXCHANGE=reporting.accounts 20 | - PUBLISH_ROUTING_KEY=reporting.accounts 21 | - PUBLISH_QUEUE= 22 | labels: 23 | NAME: "worker" 24 | container_name: "worker" 25 | network_mode: "host" 26 | # volumes: 27 | # want to shared files outside the container? 28 | # - /tmp:/opt/shared 29 | logging: 30 | # limit logs retained on host to 25MB 31 | driver: "json-file" 32 | options: 33 | max-size: "500k" 34 | max-file: "50" 35 | # debug containers by sleeping on entrypoint 36 | # entrypoint: "sleep 600" 37 | entrypoint: "/opt/celery_connectors/venv/bin/python /opt/celery_connectors/kombu_rabbitmq_subscriber.py" 38 | -------------------------------------------------------------------------------- /dev-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | maintainer=jayjohnson 4 | imagename=celery-connectors 5 | tag=$(cat setup.py | grep "version=" | sed -e 's/"/ /g' | awk '{print $2}') 6 | 7 | log=/dev/null 8 | 9 | source ./properties.sh 10 | 11 | rm -f celery_connectors-*.tgz 12 | include_these="celery_connectors ecomm_app tests kombu_*.py publish-*.py run_*.py start-*.sh start-*.py stop-*.sh tox.ini README.rst setup.cfg setup.py" 13 | echo "Creating src build tar for tag=${tag} including=${include_these}" 14 | tar zcvf celery_connectors-${tag}.tgz ${include_these} 15 | cp celery_connectors-${tag}.tgz celery_connectors-latest.tgz 16 | 17 | echo "" 18 | echo "--------------------------------------------------------" 19 | echo "Building new Docker image(${maintainer}/${imagename})" 20 | docker build --rm -t $maintainer/$imagename . 21 | last_status=$? 22 | if [[ "${last_status}" == "0" ]]; then 23 | echo "" 24 | if [[ "${tag}" != "" ]]; then 25 | image_csum=$(docker images | grep "${maintainer}/${imagename} " | grep latest | awk '{print $3}') 26 | if [[ "${image_csum}" != "" ]]; then 27 | docker tag $image_csum $maintainer/$imagename:$tag 28 | last_status=$? 
29 | if [[ "${last_status}" != "0" ]]; then 30 | echo "Failed to tag image(${imagename}) with Tag(${tag})" 31 | echo "" 32 | exit 1 33 | else 34 | echo "Build Successful Tagged Image(${imagename}) with Tag(${tag})" 35 | fi 36 | 37 | echo "" 38 | exit 0 39 | else 40 | echo "" 41 | echo "Build failed to find latest image(${imagename}) with Tag(${tag})" 42 | echo "" 43 | exit 1 44 | fi 45 | else 46 | echo "Build Successful" 47 | echo "" 48 | exit 0 49 | fi 50 | echo "" 51 | else 52 | echo "" 53 | echo "Build failed with exit code: ${last_status}" 54 | echo "" 55 | exit 1 56 | fi 57 | 58 | exit 0 59 | -------------------------------------------------------------------------------- /docker/bashrc: -------------------------------------------------------------------------------- 1 | # .bashrc 2 | 3 | # Source global definitions 4 | if [ -f /etc/bashrc ]; then 5 | . /etc/bashrc 6 | fi 7 | 8 | # Uncomment the following line if you don't like systemctl's auto-paging feature: 9 | # export SYSTEMD_PAGER= 10 | 11 | # User specific aliases and functions 12 | # .bashrc 13 | HISTCONTROL=ignoredups:ignorespace 14 | 15 | # append to the history file, don't overwrite it 16 | shopt -s histappend 17 | 18 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1) 19 | HISTSIZE=1000 20 | HISTFILESIZE=2000 21 | 22 | # check the window size after each command and, if necessary, 23 | # update the values of LINES and COLUMNS. 24 | shopt -s checkwinsize 25 | 26 | # make less more friendly for non-text input files, see lesspipe(1) 27 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 28 | 29 | # set variable identifying the chroot you work in (used in the prompt below) 30 | if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then 31 | debian_chroot=$(cat /etc/debian_chroot) 32 | fi 33 | 34 | # set a fancy prompt (non-color, unless we know we "want" color) 35 | case "$TERM" in 36 | xterm-color) color_prompt=yes;; 37 | esac 38 | 39 | # uncomment for a colored prompt, if the terminal has the capability; turned 40 | # off by default to not distract the user: the focus in a terminal window 41 | # should be on the output of commands, not on the prompt 42 | #force_color_prompt=yes 43 | 44 | if [ -n "$force_color_prompt" ]; then 45 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 46 | # We have color support; assume it's compliant with Ecma-48 47 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 48 | # a case would tend to support setf rather than setaf.) 
49 | color_prompt=yes 50 | else 51 | color_prompt= 52 | fi 53 | fi 54 | 55 | if [ "$color_prompt" = yes ]; then 56 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 57 | else 58 | PS1='${debian_chroot:+($debian_chroot)}\u:\w\$ ' 59 | fi 60 | unset color_prompt force_color_prompt 61 | 62 | # If this is an xterm set the title to user@host:dir 63 | case "$TERM" in 64 | xterm*|rxvt*) 65 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u: \w\a\]$PS1" 66 | ;; 67 | *) 68 | ;; 69 | esac 70 | 71 | alias gs='git status' 72 | alias gl='git log' 73 | alias gd='git diff' 74 | alias gco='git checkout' 75 | alias vi='vim' 76 | 77 | # enable color support of ls and also add handy aliases 78 | if [ -x /usr/bin/dircolors ]; then 79 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 80 | alias ls='ls --color=auto' 81 | #alias dir='dir --color=auto' 82 | #alias vdir='vdir --color=auto' 83 | alias grep='grep --color=auto' 84 | alias fgrep='fgrep --color=auto' 85 | alias egrep='egrep --color=auto' 86 | fi 87 | 88 | # some more ls aliases 89 | alias ll='ls -alF' 90 | alias la='ls -A' 91 | alias l='ls -CF' 92 | alias rg='grep -rin ' 93 | 94 | # Alias definitions. 95 | # You may want to put all your additions into a separate file like 96 | # ~/.bash_aliases, instead of adding them here directly. 97 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 98 | 99 | if [ -f ~/.bash_aliases ]; then 100 | . ~/.bash_aliases 101 | fi 102 | 103 | # enable programmable completion features (you don't need to enable 104 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 105 | # sources /etc/bash.bashrc). 106 | if [ -f /etc/bash_completion ] && ! shopt -oq posix; then 107 | . /etc/bash_completion 108 | fi 109 | 110 | unset command_not_found_handle 111 | 112 | txtund="" 113 | txtbld="" 114 | blddkg="" 115 | bldred="" 116 | bldblu="" 117 | bldylw="" 118 | bldgrn="" 119 | bldgry="" 120 | bldpnk="" 121 | bldwht="" 122 | txtrst="" 123 | 124 | # check if stdout is a terminal... 125 | if test -t 1; then 126 | if [[ -e /usr/bin/tput ]]; then 127 | # see if it supports colors... 
128 | ncolors=$(tput colors) 129 | 130 | if test -n "$ncolors" && test $ncolors -ge 8; then 131 | 132 | txtund=$(tput sgr 0 1) # Underline 133 | txtbld=$(tput bold) # Bold 134 | blddkg=${txtbld}$(tput setaf 0) # Dark Gray 135 | bldred=${txtbld}$(tput setaf 1) # Red 136 | bldblu=${txtbld}$(tput setaf 2) # Blue 137 | bldylw=${txtbld}$(tput setaf 3) # Yellow 138 | bldgrn=${txtbld}$(tput setaf 4) # Green 139 | bldgry=${txtbld}$(tput setaf 5) # Gray 140 | bldpnk=${txtbld}$(tput setaf 6) # Pink 141 | bldwht=${txtbld}$(tput setaf 7) # White 142 | txtrst=$(tput sgr0) # Reset 143 | fi 144 | fi 145 | fi 146 | 147 | dbg() { 148 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 149 | echo "${bldwht}$cdate $@ $txtrst" 150 | } 151 | 152 | inf() { 153 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 154 | echo "$cdate $@" 155 | } 156 | 157 | anmt() { 158 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 159 | echo "${bldylw}$cdate $@ $txtrst" 160 | } 161 | 162 | amnt() { 163 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 164 | echo "${bldylw}$cdate $@ $txtrst" 165 | } 166 | 167 | warn() { 168 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 169 | echo "${bldylw}$cdate $@ $txtrst" 170 | } 171 | 172 | ign() { 173 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 174 | echo "${blddkg}$cdate $@ $txtrst" 175 | } 176 | 177 | good() { 178 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 179 | echo "${bldgrn}$cdate $@ $txtrst" 180 | } 181 | 182 | green() { 183 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 184 | echo "${bldgrn}$@ $txtrst" 185 | } 186 | 187 | err() { 188 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 189 | echo "${bldred}$cdate $@ $txtrst" 190 | } 191 | 192 | lg() { 193 | cdate=$(date '+%Y-%m-%d %H:%M:%S') 194 | echo "$cdate $@" 195 | } 196 | 197 | boom() { 198 | echo "" 199 | if [[ "${2}" != "" ]]; then 200 | echo "${bldred}StatusCode: ${1}. $txtrst" 201 | echo "${bldred}Error: ${2}.$txtrst" 202 | exit $1 203 | else 204 | echo "${bldred}Error: ${@}.$txtrst" 205 | exit 1 206 | fi 207 | } 208 | 209 | xerr() { 210 | last_status=$? 
211 | if [[ "${last_status}" != "" ]]; then 212 | if [[ "${last_status}" != "0" ]]; then 213 | echo "" 214 | err "Exiting(${last_status}) Error: ${@}" 215 | exit $last_status 216 | fi 217 | fi 218 | } 219 | 220 | lgenv() { 221 | echo "" 222 | echo "-------------------------------------------" 223 | env | sort 224 | echo "-------------------------------------------" 225 | echo "" 226 | } 227 | 228 | alias dev="cd /opt/celery_connectors" 229 | alias lint="pycodestyle --max-line-length=160 --exclude=venv,build,.tox,celery_connectors/rabbitmq/rabbitmqadmin.py" 230 | # python setup.py sdist bdist_wheel --universal upload -r pypi 231 | 232 | if [[ -e /opt/celery_connectors/venv/bin/activate ]]; then 233 | source /opt/celery_connectors/venv/bin/activate 234 | else 235 | echo "Missing virtual env: /opt/celery_connectors/venv/bin/activate" 236 | fi 237 | -------------------------------------------------------------------------------- /docker/data/rabbitmq/.gitignore: -------------------------------------------------------------------------------- 1 | mnesia 2 | .erlang.cookie 3 | -------------------------------------------------------------------------------- /docker/data/redis/.gitignore: -------------------------------------------------------------------------------- 1 | *.rdb 2 | *.log 3 | -------------------------------------------------------------------------------- /docker/dev/env/common-celery-connectors.env: -------------------------------------------------------------------------------- 1 | CELERY_SUBSCRIBER_NAME=celery-subscriber 2 | CONVERSIONS_EXCHANGE=user.events 3 | CONVERSIONS_QUEUE=user.events.conversions 4 | CONVERSIONS_ROUTING_KEY=user.events.conversions 5 | DIRECT_EXCHANGE_TYPE= 6 | DIRECT_RELAY_EXCHANGE_NAME= 7 | ECOMM_EXCHANGE_NAME=ecomm.api 8 | ECOMM_QUEUE_NAME=ecomm.api.west 9 | ECOMM_ROUTING_KEY=ecomm.api.west 10 | EXCHANGE_NAME= 11 | JSON_TO_CELERY_APP_NAME=jtoc 12 | JSON_TO_CELERY_RELAY_APP_NAME=jtoc_relay 13 | KOMBU_PUBLISHER_NAME=kombu-publisher 14 | KOMBU_SUBSCRIBER_NAME=kombu-subscriber 15 | LOAD_TEST_EXCHANGE=reporting 16 | LOAD_TEST_QUEUE=reporting.accounts 17 | LOAD_TEST_ROUTING_KEY=reporting.accounts 18 | LOG_LEVEL=DEBUG 19 | LOG_CFG=/opt/celery_connectors/celery_connectors/log/logging.json 20 | MIXIN_APP_NAME=mixin_relay 21 | PREFETCH_COUNT=1 22 | PUB_APP_NAME=robopub 23 | PUB_SUB_APP_NAME=robopubsub 24 | RABBITMQ_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 25 | RABBITMQ_PUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 26 | RABBITMQ_RELAY_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 27 | RABBITMQ_SUB_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 28 | REDIS_BROKER_URL=redis://localhost:6379/0 29 | REDIS_RELAY_BROKER_URL=redis://localhost:6379/10 30 | REDIS_SUB_BROKER_URL=redis://localhost:6379/0 31 | RELAY_APP_NAME=relay 32 | RELAY_EXCHANGE_TYPE=direct 33 | RELAY_ROUTING_KEY=reporting.payments 34 | RELAY_TASK_NAME=ecomm_app.ecommerce.tasks.handle_user_conversion_events 35 | RELAY_WORKER_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 36 | REPORTING_ACCOUNTS_EXCHANGE=reporting.accounts 37 | REPORTING_ACCOUNTS_QUEUE_NAME=reporting.accounts 38 | REPORTING_ACCOUNTS_ROUTING_KEY=reporting.accounts 39 | REPORTING_EXCHANGE=reporting 40 | REPORTING_SUBS_QUEUE_NAME=reporting.subscriptions 41 | REPORTING_SUBS_ROUTING_KEY=reporting.subscriptions 42 | SSL_CA_CERT= 43 | SSL_CERTFILE= 44 | SSL_KEYFILE= 45 | SSL_REQUIRED= 46 | TEST_EXCHANGE=test.events 47 | TEST_QUEUE=test.events.conversions 48 | 
TEST_RABBITMQ_BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 49 | TEST_REDIS_BROKER_URL=redis://localhost:6379/0 50 | TEST_ROUTING_KEY=test.events.conversions 51 | TEST_STOP_DONE=0 52 | TOPIC_EXCHANGE_TYPE=topic 53 | -------------------------------------------------------------------------------- /docker/dev/rabbitmq-celery-only-consume.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | worker: 7 | hostname: worker 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | environment: 11 | - APP_NAME=celery-rabbitmq 12 | - BROKER_URL=pyamqp://rabbitmq:rabbitmq@localhost:5672// 13 | - SUBSCRIBER_NAME=rmq-sub 14 | labels: 15 | NAME: "worker" 16 | container_name: "worker" 17 | network_mode: "host" 18 | # volumes: 19 | # want to shared files outside the container? 20 | # - /tmp:/opt/shared 21 | logging: 22 | # limit logs retained on host to 25MB 23 | driver: "json-file" 24 | options: 25 | max-size: "500k" 26 | max-file: "50" 27 | # debug containers by sleeping on entrypoint 28 | # entrypoint: "sleep 600" 29 | entrypoint: "bash /opt/celery_connectors/celery_connectors/scripts/start-container.sh" 30 | -------------------------------------------------------------------------------- /docker/dev/sleep-for-validation.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Celery Connectors 6 | worker: 7 | hostname: worker 8 | image: jayjohnson/celery-connectors:latest 9 | env_file: ./env/common-celery-connectors.env 10 | labels: 11 | NAME: "worker" 12 | container_name: "worker" 13 | network_mode: "host" 14 | # volumes: 15 | # want to shared files outside the container? 
16 | # - /tmp:/opt/shared 17 | logging: 18 | # limit logs retained on host to 25MB 19 | driver: "json-file" 20 | options: 21 | max-size: "500k" 22 | max-file: "50" 23 | entrypoint: "sleep 300" 24 | -------------------------------------------------------------------------------- /docker/env/flowerrabbit-dev.env: -------------------------------------------------------------------------------- 1 | FLOWER_BROKER=amqp://rabbitmq:rabbitmq@localhost:5672 2 | -------------------------------------------------------------------------------- /docker/env/flowerredis-dev.env: -------------------------------------------------------------------------------- 1 | FLOWER_BROKER=redis://localhost:6379 2 | -------------------------------------------------------------------------------- /docker/env/rabbit1-dev.env: -------------------------------------------------------------------------------- 1 | RABBITMQ_ERLANG_COOKIE=SWQOKODSQALRPCLNMEQG 2 | RABBITMQ_DEFAULT_USER=rabbitmq 3 | RABBITMQ_DEFAULT_PASS=rabbitmq 4 | RABBITMQ_DEFAULT_VHOST=/ 5 | -------------------------------------------------------------------------------- /docker/env/redis1-dev.env: -------------------------------------------------------------------------------- 1 | ENV_CLUSTER_NAME=singlenode 2 | ENV_NODE_TYPE=master 3 | ENV_IP_BIND=0.0.0.0 4 | ENV_MASTER_REDIS_HOST=redisnode1 5 | ENV_MASTER_REDIS_PORT=6379 6 | ENV_REDIS_PORT=6379 7 | ENV_SENTINEL_PORT=16000 8 | ENV_SENTINEL_INSTANCES_FOR_QUORUM=0 9 | ENV_SENTINEL_DOWN_IN_MILLISECONDS=5000 10 | ENV_SENTINEL_FAILOVER_TIMEOUT=10000 11 | ENV_NODE_REPLICAS= 12 | ENV_CLUSTERED=0 13 | ENV_USE_THIS_REDIS_CONFIG=/opt/redis/node/redis_perf.conf 14 | ENV_START_SERVICE=/opt/redis/node/start_redis_node.sh 15 | -------------------------------------------------------------------------------- /docker/logs/rabbitmq/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | -------------------------------------------------------------------------------- /docker/logs/redis/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | -------------------------------------------------------------------------------- /docker/persistence_redis_and_rabbitmq.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Redis 6 | celredis1: 7 | hostname: redis1 8 | image: redis:4.0.5-alpine 9 | labels: 10 | NAME: "redis1" 11 | ports: 12 | - "6379:6379" 13 | - "16379:16379" 14 | container_name: "celredis1" 15 | volumes: 16 | # want to store the rdb file outside the container?
17 | # - you will still need to configure rdb saves as needed 18 | - ./data/redis:/data 19 | logging: 20 | # limit logs retained on host to 25MB 21 | driver: "json-file" 22 | options: 23 | max-size: "500k" 24 | max-file: "50" 25 | 26 | # RabbitMQ 27 | celrabbit1: 28 | image: "rabbitmq:3.6.6-management" 29 | hostname: "rabbit1" 30 | env_file: 31 | - ./env/rabbit1-dev.env 32 | ports: 33 | - "5672:5672" 34 | - "15672:15672" 35 | - "25672:25672" 36 | labels: 37 | NAME: "rabbit1" 38 | volumes: 39 | - ./rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins 40 | - ./data/rabbitmq:/var/lib/rabbitmq 41 | - ./logs/rabbitmq:/var/log/rabbitmq 42 | container_name: "celrabbit1" 43 | logging: 44 | # limit logs retained on host to 25MB 45 | driver: "json-file" 46 | options: 47 | max-size: "500k" 48 | max-file: "50" 49 | 50 | # Flower for RabbitMQ 51 | celflowerrabbit: 52 | image: "mher/flower:latest" 53 | hostname: "celflowerrabbit" 54 | env_file: 55 | - ./env/flowerrabbit-dev.env 56 | labels: 57 | NAME: "flowerrabbit" 58 | container_name: "celflowerrabbit" 59 | logging: 60 | # limit logs retained on host to 25MB 61 | driver: "json-file" 62 | options: 63 | max-size: "500k" 64 | max-file: "50" 65 | network_mode: "host" 66 | entrypoint: "/usr/local/bin/python /usr/local/bin/flower --broker=amqp://rabbitmq:rabbitmq@localhost:5672 --port=5555 --basic_auth=admin:admin" 67 | 68 | # Flower for Redis 69 | celflowerredis: 70 | image: "mher/flower:latest" 71 | hostname: "celflowerredis" 72 | env_file: 73 | - ./env/flowerredis-dev.env 74 | labels: 75 | NAME: "flowerredis" 76 | container_name: "celflowerredis" 77 | logging: 78 | # limit logs retained on host to 25MB 79 | driver: "json-file" 80 | options: 81 | max-size: "500k" 82 | max-file: "50" 83 | network_mode: "host" 84 | entrypoint: "/usr/local/bin/python /usr/local/bin/flower --broker=redis://localhost:6379 --port=5556 --basic_auth=admin:admin" 85 | 86 | -------------------------------------------------------------------------------- /docker/rabbitmq/autocluster-0.4.1.ez: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/docker/rabbitmq/autocluster-0.4.1.ez -------------------------------------------------------------------------------- /docker/rabbitmq/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_management, rabbitmq_management_visualiser]. 2 | -------------------------------------------------------------------------------- /docker/rabbitmq/erlang.cookie: -------------------------------------------------------------------------------- 1 | SWQOKODSQALRPCLNMEQG 2 | -------------------------------------------------------------------------------- /docker/rabbitmq/rabbitmq.config: -------------------------------------------------------------------------------- 1 | [ 2 | {rabbit, 3 | [ 4 | {default_vhost, <<"/">>}, 5 | {default_user, <<"rabbitmq">>}, 6 | {default_pass, <<"rabbitmq">>}, 7 | {loopback_users, []} 8 | ] 9 | }, 10 | ]. 
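The rabbitmq.config above pins the default vhost "/" and the rabbitmq/rabbitmq credentials that the pyamqp:// broker URLs used throughout this repo expect, and sets loopback_users to an empty list so logins are not restricted to localhost. A quick connectivity check with kombu - a minimal sketch, assuming the broker from these compose files is up on localhost:5672 - looks like this:

    from kombu import Connection

    # matches the defaults in rabbitmq.config / rabbit1-dev.env
    broker_url = "pyamqp://rabbitmq:rabbitmq@localhost:5672//"

    with Connection(broker_url, connect_timeout=5) as conn:
        # raises if the broker is unreachable or the credentials are rejected
        conn.ensure_connection(max_retries=3)
        print("connected to {}".format(conn.as_uri()))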
11 | -------------------------------------------------------------------------------- /docker/redis_and_rabbitmq.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | 5 | # Redis 6 | celredis1: 7 | hostname: redis1 8 | image: redis:4.0.5-alpine 9 | labels: 10 | NAME: "redis1" 11 | ports: 12 | - "6379:6379" 13 | - "16379:16379" 14 | container_name: "celredis1" 15 | # volumes: 16 | # want to store the rdb file outside the container? 17 | # - /backedupdironhost:/opt/redis/run 18 | logging: 19 | # limit logs retained on host to 25MB 20 | driver: "json-file" 21 | options: 22 | max-size: "500k" 23 | max-file: "50" 24 | 25 | # RabbitMQ 26 | celrabbit1: 27 | image: "rabbitmq:3.6.6-management" 28 | hostname: "rabbit1" 29 | env_file: 30 | - ./env/rabbit1-dev.env 31 | ports: 32 | - "5672:5672" 33 | - "15672:15672" 34 | - "25672:25672" 35 | labels: 36 | NAME: "rabbit1" 37 | volumes: 38 | - ./rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins 39 | container_name: "celrabbit1" 40 | logging: 41 | # limit logs retained on host to 25MB 42 | driver: "json-file" 43 | options: 44 | max-size: "500k" 45 | max-file: "50" 46 | 47 | # Flower for RabbitMQ 48 | celflowerrabbit: 49 | image: "mher/flower:latest" 50 | hostname: "celflowerrabbit" 51 | env_file: 52 | - ./env/flowerrabbit-dev.env 53 | labels: 54 | NAME: "flowerrabbit" 55 | container_name: "celflowerrabbit" 56 | logging: 57 | # limit logs retained on host to 25MB 58 | driver: "json-file" 59 | options: 60 | max-size: "500k" 61 | max-file: "50" 62 | network_mode: "host" 63 | entrypoint: "/usr/local/bin/python /usr/local/bin/flower --broker=amqp://rabbitmq:rabbitmq@localhost:5672 --port=5555 --basic_auth=admin:admin" 64 | 65 | # Flower for Redis 66 | celflowerredis: 67 | image: "mher/flower:latest" 68 | hostname: "celflowerredis" 69 | env_file: 70 | - ./env/flowerredis-dev.env 71 | labels: 72 | NAME: "flowerredis" 73 | container_name: "celflowerredis" 74 | logging: 75 | # limit logs retained on host to 25MB 76 | driver: "json-file" 77 | options: 78 | max-size: "500k" 79 | max-file: "50" 80 | network_mode: "host" 81 | entrypoint: "/usr/local/bin/python /usr/local/bin/flower --broker=redis://localhost:6379 --port=5556 --basic_auth=admin:admin" 82 | 83 | -------------------------------------------------------------------------------- /ecomm_app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/ecomm_app/__init__.py -------------------------------------------------------------------------------- /ecomm_app/ecommerce/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/ecomm_app/ecommerce/__init__.py -------------------------------------------------------------------------------- /ecomm_app/ecommerce/celeryconfig_pub_sub.py: -------------------------------------------------------------------------------- 1 | broker_url = "pyamqp://rabbitmq:rabbitmq@localhost:5672//" 2 | result_backend = "redis://localhost:6379/10" 3 | 4 | # http://docs.celeryproject.org/en/latest/userguide/optimizing.html 5 | 6 | # these are targeted at optimizing processing 7 | # on long-running tasks 8 | # while increasing reliability 9 | 10 | 11 | # 
http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-worker_prefetch_multiplier 12 | worker_prefetch_multiplier = 1 13 | 14 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_heartbeat 15 | broker_heartbeat = 240 # seconds 16 | 17 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_connection_max_retries 18 | broker_connection_max_retries = None 19 | 20 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_acks_late 21 | task_acks_late = True 22 | 23 | # http://docs.celeryproject.org/en/latest/userguide/calling.html#calling-retry 24 | task_publish_retry_policy = {"interval_max": 1, 25 | "max_retries": 120, # None - forever 26 | "interval_start": 0.1, 27 | "interval_step": 0.2} 28 | 29 | task_serializer = "json" 30 | result_serializer = "json" 31 | accept_content = ["json"] 32 | timezone = "America/Los_Angeles" 33 | 34 | task_routes = {"run.check_values": "low-priority", 35 | "run.calculate_results": "high-priority"} 36 | -------------------------------------------------------------------------------- /ecomm_app/ecommerce/tasks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import logging 4 | from celery import Celery 5 | from celery.task import task 6 | 7 | log = logging.getLogger("worker") 8 | 9 | 10 | def ev(k, v): 11 | return os.getenv(k, v).strip().lstrip() 12 | # end of ev 13 | 14 | 15 | def get_celery_app(name=ev( 16 | "CELERY_NAME", 17 | "relay"), 18 | auth_url=ev( 19 | "ECOMM_BROKER_URL", 20 | "amqp://rabbitmq:rabbitmq@localhost:5672//"), 21 | backend_url=ev( 22 | "ECOMM_BACKEND_URL", 23 | "redis://localhost:6379/10"), 24 | ssl_options={}, 25 | transport_options={}, 26 | path_to_config_module="ecomm_app.ecommerce.celeryconfig_pub_sub", 27 | worker_log_format="relay - %(asctime)s: %(levelname)s %(message)s", 28 | **kwargs): 29 | 30 | # get the Celery application 31 | app = Celery(name, 32 | broker=auth_url, 33 | backend=backend_url) 34 | 35 | app.config_from_object(path_to_config_module, 36 | namespace="CELERY") 37 | 38 | app.conf.update(kwargs) 39 | 40 | if len(transport_options) > 0: 41 | log.info(("loading transport_options={}") 42 | .format(transport_options)) 43 | app.conf.update(**transport_options) 44 | # custom tranport options 45 | 46 | if len(ssl_options) > 0: 47 | log.info(("loading ssl_options={}") 48 | .format(ssl_options)) 49 | app.conf.update(**ssl_options) 50 | # custom ssl options 51 | 52 | return app 53 | # end of get_celery_app 54 | 55 | 56 | @task(queue="handle_user_conversion_events") 57 | def handle_user_conversion_events(body={}, 58 | source_info={}): 59 | 60 | label = "user_conversion_events" 61 | 62 | log.info(("task - {} - start " 63 | "body={}") 64 | .format(label, 65 | body)) 66 | 67 | if "simulate_processing_lag" in body: 68 | log.info(("task - {} - simulating processing" 69 | "lag={} sleeping") 70 | .format(label, 71 | body["simulate_processing_lag"])) 72 | time.sleep(float(body["simulate_processing_lag"])) 73 | # end of handling adding artifical lag for testing Celery 74 | 75 | log.info(("task - {} - done") 76 | .format(label)) 77 | 78 | return True 79 | # end of handle_user_conversion_events 80 | -------------------------------------------------------------------------------- /ecomm_app/job_worker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import 
logging 5 | # import the app's tasks 6 | import ecomm_app.ecommerce.tasks 7 | 8 | name = "ecommerce-worker" 9 | 10 | log = logging.getLogger(name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | default_broker_url = "pyamqp://rabbitmq:rabbitmq@localhost:5672//" 15 | default_backend_url = "redis://localhost:6379/10" 16 | default_config_module = "ecomm_app.ecommerce.celeryconfig_pub_sub" 17 | 18 | worker_broker_url = os.getenv("WORKER_BROKER_URL", 19 | default_broker_url).strip().lstrip() 20 | 21 | ssl_options = {} 22 | transport_options = {} 23 | 24 | # Get the Celery app from the ecommerce project's get_celery_app 25 | app = ecomm_app.ecommerce.tasks.get_celery_app( 26 | name=name, 27 | auth_url=worker_broker_url, 28 | backend_url=default_backend_url) 29 | 30 | # if you want to discover tasks in other directories: 31 | # app.autodiscover_tasks(["some_dir_name_with_tasks"]) 32 | 33 | log.info("End - {}".format(name)) 34 | -------------------------------------------------------------------------------- /ecomm_app/publish_task.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import uuid 4 | from datetime import datetime 5 | from spylunking.log.setup_logging import build_colorized_logger 6 | from celery_connectors.utils import ev 7 | import ecommerce.tasks 8 | 9 | 10 | name = "celery-task-publisher" 11 | log = build_colorized_logger( 12 | name=name) 13 | 14 | pub_auth_url = ev("PUB_BROKER_URL", 15 | "amqp://rabbitmq:rabbitmq@localhost:5672//") 16 | path_to_config_module = "ecommerce.celeryconfig_pub_sub" 17 | 18 | app = ecommerce.tasks.get_celery_app( 19 | name="demo", 20 | auth_url=pub_auth_url, 21 | path_to_config_module=path_to_config_module) 22 | 23 | task_name = "ecomm_app.ecommerce.tasks.handle_user_conversion_events" 24 | now = datetime.now().isoformat() 25 | body = {"account_id": 999, 26 | "subscription_id": 321, 27 | "stripe_id": 876, 28 | "created": now, 29 | "product_id": "JJJ", 30 | "version": 1, 31 | "msg_id": str(uuid.uuid4())} 32 | 33 | msg = {"internals": True} 34 | 35 | log.info(("Sending broker={} " 36 | "body={}") 37 | .format(app.conf.broker_url, 38 | body)) 39 | 40 | result = app.send_task(task_name, (body, msg)) 41 | 42 | log.info(("Done with msg_id={} result={}") 43 | .format(body["msg_id"], 44 | result.get())) 45 | -------------------------------------------------------------------------------- /kombu_mixin_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery_connectors.utils import ev 5 | from celery_connectors.kombu_subscriber import KombuSubscriber 6 | 7 | 8 | name = "kombu-mixin-subscriber" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | 15 | recv_msgs = [] 16 | 17 | 18 | def handle_message(body, message): 19 | log.info(("callback received msg " 20 | "body={}") 21 | .format(body)) 22 | recv_msgs.append(body) 23 | message.ack() 24 | # end of handle_message 25 | 26 | 27 | # Initialize KombuSubscriber 28 | ssl_options = {} 29 | sub = KombuSubscriber("kombu-mixin-subscriber", 30 | ev("SUB_BROKER_URL", 31 | "amqp://rabbitmq:rabbitmq@localhost:5672//"), 32 | ssl_options) 33 | 34 | 35 | # Now consume: 36 | seconds_to_consume = 10.0 37 | heartbeat = 60 38 | serializer = "application/json" 39 | exchange = ev("CONSUME_EXCHANGE", "reporting.payments") 40 | routing_key = 
ev("CONSUME_ROUTING_KEY", "reporting.payments") 41 | queue = ev("CONSUME_QUEUE", "reporting.payments") 42 | sub.consume(callback=handle_message, 43 | queue=queue, 44 | exchange=exchange, 45 | routing_key=routing_key, 46 | serializer=serializer, 47 | heartbeat=heartbeat, 48 | time_to_wait=seconds_to_consume) 49 | 50 | log.info("End - {}".format(name)) 51 | -------------------------------------------------------------------------------- /kombu_rabbitmq_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery_connectors.utils import ev 5 | from celery_connectors.kombu_subscriber import KombuSubscriber 6 | 7 | 8 | name = "kombu-rabbitmq-subscriber" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | 15 | recv_msgs = [] 16 | 17 | 18 | def handle_message(body, message): 19 | log.info(("callback received msg " 20 | "body={}") 21 | .format(body)) 22 | recv_msgs.append(body) 23 | message.ack() 24 | # end of handle_message 25 | 26 | 27 | # Initialize KombuSubscriber 28 | ssl_options = {} 29 | sub = KombuSubscriber("kombu-rabbitmq-subscriber", 30 | ev("SUB_BROKER_URL", 31 | "pyamqp://rabbitmq:rabbitmq@localhost:5672//"), 32 | ssl_options) 33 | 34 | 35 | # Now consume: 36 | seconds_to_consume = 10.0 37 | heartbeat = 60 38 | serializer = "application/json" 39 | exchange = ev("CONSUME_EXCHANGE", "reporting") 40 | routing_key = ev("CONSUME_ROUTING_KEY", "reporting.accounts") 41 | queue = ev("CONSUME_QUEUE", "reporting.accounts") 42 | sub.consume(callback=handle_message, 43 | queue=queue, 44 | exchange=exchange, 45 | routing_key=routing_key, 46 | serializer=serializer, 47 | heartbeat=heartbeat, 48 | time_to_wait=seconds_to_consume) 49 | 50 | log.info("End - {}".format(name)) 51 | -------------------------------------------------------------------------------- /kombu_redis_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery_connectors.utils import ev 5 | from celery_connectors.kombu_subscriber import KombuSubscriber 6 | 7 | 8 | name = "kombu-redis-subscriber" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | 15 | recv_msgs = [] 16 | 17 | 18 | def handle_message(body, message): 19 | log.info(("callback received msg " 20 | "body={}") 21 | .format(body)) 22 | recv_msgs.append(body) 23 | message.ack() 24 | # end of handle_message 25 | 26 | 27 | # Initialize KombuSubscriber 28 | ssl_options = {} 29 | sub = KombuSubscriber("kombu-redis-subscriber", 30 | ev("SUB_BROKER_URL", 31 | "redis://localhost:6379/0"), 32 | ssl_options) 33 | 34 | 35 | # Now consume: 36 | seconds_to_consume = 10.0 37 | heartbeat = 60 38 | serializer = "application/json" 39 | queue = ev("CONSUME_QUEUE", "reporting.accounts") 40 | sub.consume(callback=handle_message, 41 | queue=queue, 42 | exchange=None, 43 | routing_key=None, 44 | serializer=serializer, 45 | heartbeat=heartbeat, 46 | time_to_wait=seconds_to_consume) 47 | 48 | log.info("End - {}".format(name)) 49 | -------------------------------------------------------------------------------- /kombu_sqs_publisher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import datetime 4 | from 
spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.publisher import Publisher 7 | 8 | 9 | name = "kombu-sqs-publisher" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | 16 | # Initialize Publisher 17 | # http://docs.celeryproject.org/en/latest/getting-started/brokers/sqs.html 18 | # https://github.com/celery/kombu/blob/master/kombu/transport/SQS.py 19 | aws_key = ev( 20 | "SQS_AWS_ACCESS_KEY", 21 | "not_a_key") 22 | aws_secret = ev( 23 | "SQS_AWS_SECRET_KEY", 24 | "not_a_secret") 25 | 26 | sqs_auth_url = ev("SUB_BROKER_URL", 27 | "sqs://{}:{}@".format( 28 | aws_key, 29 | aws_secret)) 30 | 31 | ssl_options = {} 32 | pub = Publisher("kombu-sqs-publisher", 33 | sqs_auth_url, 34 | ssl_options) 35 | # sample: "sqs://ABCDEFGHIJKLMNOPQRST:ZYXK7NiynGlTogH8Nj+P9nlE73sq3@" 36 | # ^ from the doc: 'you must remember to include the "@" at the end.' 37 | 38 | 39 | # Now consume: 40 | seconds_to_consume = 10.0 41 | serializer = "json" 42 | exchange = ev("CONSUME_EXCHANGE", "test1") 43 | routing_key = ev("CONSUME_ROUTING_KEY", "test1") 44 | queue = ev("CONSUME_QUEUE", "test1") 45 | max_timeout = 43200 46 | transport_options = {} 47 | 48 | if not pub: 49 | log.error(("Failed to connect to " 50 | "broker={}") 51 | .format(sqs_auth_url)) 52 | else: 53 | 54 | # Create the message: 55 | now = datetime.datetime.now().isoformat() 56 | body = {"account_id": 111, 57 | "subscription_id": 222, 58 | "stripe_id": 333, 59 | "created": now, 60 | "product_id": "DEF"} 61 | 62 | log.info(("Sending user conversion event " 63 | "msg={} ex={} rk={}") 64 | .format(body, 65 | exchange, 66 | routing_key)) 67 | 68 | # Publish the message: 69 | msg_sent = pub.publish( 70 | body=body, 71 | exchange=exchange, 72 | routing_key=routing_key, 73 | queue=queue, 74 | serializer=serializer, 75 | retry=True, 76 | transport_options=transport_options) 77 | 78 | log.info(("End - {} sent={}") 79 | .format(name, 80 | msg_sent)) 81 | # end of valid publisher or not 82 | -------------------------------------------------------------------------------- /kombu_sqs_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery_connectors.utils import ev 5 | from celery_connectors.kombu_subscriber import KombuSubscriber 6 | 7 | 8 | name = "kombu-sqs-subscriber" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | 15 | recv_msgs = [] 16 | 17 | 18 | def handle_message(body, message): 19 | log.info(("callback received msg " 20 | "body={}") 21 | .format(body)) 22 | recv_msgs.append(body) 23 | message.ack() 24 | # end of handle_message 25 | 26 | 27 | # Initialize KombuSubscriber 28 | # http://docs.celeryproject.org/en/latest/getting-started/brokers/sqs.html 29 | # https://github.com/celery/kombu/blob/master/kombu/transport/SQS.py 30 | aws_key = ev( 31 | "SQS_AWS_ACCESS_KEY", 32 | "not_a_key") 33 | aws_secret = ev( 34 | "SQS_AWS_SECRET_KEY", 35 | "not_a_secret") 36 | 37 | sqs_auth_url = ev("BROKER_URL", 38 | "sqs://{}:{}@".format( 39 | aws_key, 40 | aws_secret)) 41 | 42 | transport_options = {} 43 | ssl_options = {} 44 | sub = KombuSubscriber("kombu-sqs-subscriber", 45 | sqs_auth_url, 46 | ssl_options) 47 | # sample: "sqs://ABCDEFGHIJKLMNOPQRST:ZYXK7NiynGlTogH8Nj+P9nlE73sq3@" 48 | # ^ from the doc: 'you must remember to include 
the "@" at the end.' 49 | 50 | 51 | # Now consume: 52 | seconds_to_consume = 10.0 53 | serializer = "application/json" 54 | queue = "test1" 55 | exchange = "test1" 56 | routing_key = "test1" 57 | transport_options = {"polling_interval": 0.3, 58 | "visibility_timeout": 600} 59 | sub.consume(callback=handle_message, 60 | queue=queue, 61 | exchange=exchange, 62 | routing_key=routing_key, 63 | serializer=serializer, 64 | time_to_wait=seconds_to_consume, 65 | transport_options=transport_options) 66 | 67 | log.info("End - {}".format(name)) 68 | -------------------------------------------------------------------------------- /publish-user-conversion-events-rabbitmq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import datetime 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.publisher import Publisher 7 | 8 | 9 | name = "publish-user-conversion-events" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | exchange_name = ev("PUBLISH_EXCHANGE", "user.events") 16 | routing_key = ev("PUBLISH_ROUTING_KEY", "user.events.conversions") 17 | queue_name = ev("PUBLISH_QUEUE", "user.events.conversions") 18 | auth_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 19 | serializer = "json" 20 | 21 | # import ssl 22 | # Connection("amqp://", login_method='EXTERNAL', ssl={ 23 | # "ca_certs": '/etc/pki/tls/certs/something.crt', 24 | # "keyfile": '/etc/something/system.key', 25 | # "certfile": '/etc/something/system.cert', 26 | # "cert_reqs": ssl.CERT_REQUIRED, 27 | # }) 28 | # 29 | ssl_options = {} 30 | app = Publisher("publish-uce-rabbitmq", 31 | auth_url, 32 | ssl_options) 33 | 34 | if not app: 35 | log.error("Failed to connect to broker={}".format(auth_url)) 36 | else: 37 | 38 | # Create the message: 39 | now = datetime.datetime.now().isoformat() 40 | body = {"account_id": 777, 41 | "subscription_id": 888, 42 | "stripe_id": 999, 43 | "product_id": "XYZ", 44 | "created": now} 45 | 46 | log.info(("Sending user conversion event " 47 | "msg={} ex={} rk={}") 48 | .format(body, exchange_name, routing_key)) 49 | 50 | # Publish the message: 51 | msg_sent = app.publish(body=body, 52 | exchange=exchange_name, 53 | routing_key=routing_key, 54 | queue=queue_name, 55 | serializer=serializer, 56 | retry=True) 57 | 58 | log.info(("End - {} sent={}") 59 | .format(name, 60 | msg_sent)) 61 | # end of valid or not 62 | -------------------------------------------------------------------------------- /publish-user-conversion-events-redis.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import datetime 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.publisher import Publisher 7 | 8 | 9 | name = "publish-user-conversion-events" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | exchange_name = ev("PUBLISH_EXCHANGE", "user.events") 16 | routing_key = ev("PUBLISH_ROUTING_KEY", "user.events.conversions") 17 | queue_name = ev("PUBLISH_QUEUE", "user.events.conversions") 18 | auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/0") 19 | serializer = "json" 20 | 21 | # import ssl 22 | # Connection("amqp://", login_method='EXTERNAL', ssl={ 23 | # "ca_certs": 
'/etc/pki/tls/certs/something.crt', 24 | # "keyfile": '/etc/something/system.key', 25 | # "certfile": '/etc/something/system.cert', 26 | # "cert_reqs": ssl.CERT_REQUIRED, 27 | # }) 28 | # 29 | ssl_options = {} 30 | app = Publisher("publish-uce-redis", 31 | auth_url, 32 | ssl_options) 33 | 34 | if not app: 35 | log.error("Failed to connect to broker={}".format(auth_url)) 36 | else: 37 | 38 | # Create the message: 39 | now = datetime.datetime.now().isoformat() 40 | body = {"account_id": 123, 41 | "subscription_id": 456, 42 | "stripe_id": 789, 43 | "product_id": "ABC", 44 | "created": now} 45 | 46 | log.info(("Sending user conversion event " 47 | "msg={} ex={} rk={}") 48 | .format(body, exchange_name, routing_key)) 49 | 50 | # Publish the message: 51 | msg_sent = app.publish(body=body, 52 | exchange=exchange_name, 53 | routing_key=routing_key, 54 | queue=queue_name, 55 | serializer=serializer, 56 | retry=True) 57 | 58 | log.info(("End - {} sent={}") 59 | .format(name, 60 | msg_sent)) 61 | # end of valid or not 62 | -------------------------------------------------------------------------------- /run_rabbitmq_publisher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import datetime 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.publisher import Publisher 7 | 8 | 9 | name = "run-rabbitmq-publisher" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | # Celery Transports: 16 | # http://docs.celeryproject.org/projects/kombu/en/latest/userguide/connections.html#transport-comparison 17 | 18 | exchange_name = ev("PUBLISH_EXCHANGE", "reporting") 19 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.accounts") 20 | queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") 21 | auth_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 22 | serializer = "json" 23 | 24 | # import ssl 25 | # Connection("amqp://", login_method='EXTERNAL', ssl={ 26 | # "ca_certs": '/etc/pki/tls/certs/something.crt', 27 | # "keyfile": '/etc/something/system.key', 28 | # "certfile": '/etc/something/system.cert', 29 | # "cert_reqs": ssl.CERT_REQUIRED, 30 | # }) 31 | # 32 | ssl_options = {} 33 | app = Publisher("rabbitmq-publisher", 34 | auth_url, 35 | ssl_options) 36 | 37 | if not app: 38 | log.error("Failed to connect to broker={}".format(auth_url)) 39 | else: 40 | 41 | # Create the message: 42 | now = datetime.datetime.now().isoformat() 43 | body = {"account_id": 456, 44 | "created": now} 45 | 46 | log.info(("Sending msg={} " 47 | "ex={} rk={}") 48 | .format(body, 49 | exchange_name, 50 | routing_key)) 51 | 52 | # Publish the message: 53 | msg_sent = app.publish(body=body, 54 | exchange=exchange_name, 55 | routing_key=routing_key, 56 | queue=queue_name, 57 | serializer=serializer, 58 | retry=True) 59 | 60 | log.info(("End - {} sent={}") 61 | .format(name, 62 | msg_sent)) 63 | # end of valid publisher or not 64 | -------------------------------------------------------------------------------- /run_rabbitmq_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery import Celery 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import get_source_info_from_msg 7 | from celery_connectors.subscriber import Subscriber 8 | 9 | 
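# The ssl_options dict defined further down is left empty for a plain (non-TLS)
# broker. When the SSL_* values from common-celery-connectors.env are set, a dict
# in the shape kombu expects could be assembled roughly like this - an illustrative
# sketch only; the repo's celery_connectors.build_ssl_options helper presumably
# covers the same idea:
#
#     import ssl
#     if ev("SSL_REQUIRED", "") not in ("", "0"):
#         ssl_options = {
#             "ca_certs": ev("SSL_CA_CERT", ""),   # CA bundle used to verify the broker
#             "keyfile": ev("SSL_KEYFILE", ""),    # client private key
#             "certfile": ev("SSL_CERTFILE", ""),  # client certificate
#             "cert_reqs": ssl.CERT_REQUIRED,
#         }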
10 | name = "run-rabbitmq-subscriber" 11 | log = build_colorized_logger( 12 | name=name) 13 | 14 | log.info("Start - {}".format(name)) 15 | 16 | 17 | recv_msgs = [] 18 | 19 | 20 | def handle_message(body, message): 21 | source_info = get_source_info_from_msg(message) 22 | log.info(("callback received msg " 23 | "body={} from_ex={} from_rk={}") 24 | .format(body, 25 | source_info["src_exchange"], 26 | source_info["src_routing_key"])) 27 | recv_msgs.append(body) 28 | message.ack() 29 | # end of handle_message 30 | 31 | 32 | # Initialize Celery application 33 | ssl_options = {} 34 | 35 | # http://docs.celeryproject.org/en/latest/userguide/calling.html#calling-retry 36 | # allow publishes to retry for a time 37 | task_publish_retry_policy = { 38 | "interval_max": 1, 39 | "max_retries": 120, # None - forever 40 | "interval_start": 0.1, 41 | "interval_step": 0.2} 42 | 43 | # Confirm publishes with Celery 44 | # https://github.com/celery/kombu/issues/572 45 | transport_options = { 46 | "confirm_publish": True} 47 | 48 | conn_attrs = { 49 | "task_default_queue": "celery.rabbit.sub", 50 | "task_default_exchange": "celery.rabbit.sub", 51 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-worker_prefetch_multiplier 52 | "worker_prefetch_multiplier": 1, # consume 1 message at a time 53 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-worker_prefetch_multiplier 54 | "prefetch_count": 3, # consume 1 message at a time per worker (3 workers) 55 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_heartbeat 56 | "broker_heartbeat": 240, # in seconds 57 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_connection_max_retries 58 | "broker_connection_max_retries": None, # None is forever 59 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_acks_late 60 | "task_acks_late": True, # on consume do not send an immediate ack back 61 | "task_publish_retry_policy": task_publish_retry_policy} 62 | 63 | app = Celery() 64 | sub = Subscriber(name="rabbitmq-subscriber", 65 | auth_url=ev("SUB_BROKER_URL", 66 | "pyamqp://rabbitmq:rabbitmq@localhost:5672//"), 67 | app=app, 68 | transport_options=transport_options, 69 | ssl_options=ssl_options, 70 | **conn_attrs) 71 | 72 | 73 | # Now consume: 74 | queues = [ 75 | ev("CONSUME_QUEUE", "reporting.accounts"), 76 | ev("CONSUME_QUEUE2", "reporting.subscriptions") 77 | ] 78 | sub.consume(callback=handle_message, 79 | queues=queues, 80 | prefetch_count=conn_attrs["prefetch_count"]) 81 | 82 | log.info("End - {}".format(name)) 83 | -------------------------------------------------------------------------------- /run_redis_publisher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import datetime 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.publisher import Publisher 7 | 8 | 9 | name = "run-redis-publisher" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | # Celery Transports: 16 | # http://docs.celeryproject.org/projects/kombu/en/latest/userguide/connections.html#transport-comparison 17 | 18 | exchange_name = ev("PUBLISH_EXCHANGE", "reporting.accounts") 19 | routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts") 20 | queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") 21 | 
auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/0") 22 | serializer = "json" 23 | 24 | # https://redis.io/topics/security 25 | # 26 | # Redis does not support encryption, but I would like to try out ssl-termination 27 | # using an haproxy/nginx container running as an ssl-proxy to see if this works. 28 | 29 | # import ssl 30 | # Connection("amqp://", login_method='EXTERNAL', ssl={ 31 | # "ca_certs": '/etc/pki/tls/certs/something.crt', 32 | # "keyfile": '/etc/something/system.key', 33 | # "certfile": '/etc/something/system.cert', 34 | # "cert_reqs": ssl.CERT_REQUIRED, 35 | # }) 36 | # 37 | ssl_options = {} 38 | app = Publisher("redis-publisher", 39 | auth_url, 40 | ssl_options) 41 | 42 | if not app: 43 | log.error(("Failed to connect to broker={}") 44 | .format(auth_url)) 45 | else: 46 | 47 | # Now send: 48 | now = datetime.datetime.now().isoformat() 49 | body = {"account_id": 123, 50 | "created": now} 51 | 52 | log.info(("Sending msg={} " 53 | "ex={} rk={}") 54 | .format(body, 55 | exchange_name, 56 | routing_key)) 57 | 58 | # Publish the message: 59 | msg_sent = app.publish(body=body, 60 | exchange=exchange_name, 61 | routing_key=routing_key, 62 | queue=queue_name, 63 | serializer=serializer, 64 | retry=True) 65 | 66 | log.info(("End - {} sent={}") 67 | .format(name, 68 | msg_sent)) 69 | # end of valid publisher or not 70 | -------------------------------------------------------------------------------- /run_redis_subscriber.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery import Celery 5 | from celery_connectors.utils import ev 6 | from celery_connectors.subscriber import Subscriber 7 | 8 | 9 | name = "run-redis-subscriber" 10 | log = build_colorized_logger( 11 | name=name) 12 | 13 | log.info("Start - {}".format(name)) 14 | 15 | 16 | recv_msgs = [] 17 | 18 | 19 | def handle_message(body, message): 20 | log.info(("callback received msg " 21 | "body={}") 22 | .format(body)) 23 | recv_msgs.append(body) 24 | message.ack() 25 | # end of handle_message 26 | 27 | 28 | # Initialize Celery application 29 | ssl_options = {} 30 | 31 | # http://docs.celeryproject.org/en/latest/userguide/calling.html#calling-retry 32 | # allow publishes to retry for a time 33 | task_publish_retry_policy = { 34 | "interval_max": 1, 35 | "max_retries": 120, # None - forever 36 | "interval_start": 0.1, 37 | "interval_step": 0.2} 38 | 39 | # Confirm publishes with Celery 40 | # https://github.com/celery/kombu/issues/572 41 | transport_options = { 42 | "confirm_publish": True} 43 | 44 | conn_attrs = { 45 | "task_default_queue": "celery.redis.sub", 46 | "task_default_exchange": "celery.redis.sub", 47 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-worker_prefetch_multiplier 48 | "worker_prefetch_multiplier": 1, # consume 1 message at a time 49 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-worker_prefetch_multiplier 50 | "prefetch_count": 3, # consume 1 message at a time per worker (3 workers) 51 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_heartbeat 52 | "broker_heartbeat": 240, # in seconds 53 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_connection_max_retries 54 | "broker_connection_max_retries": None, # None is forever 55 | # 
http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_acks_late 56 | "task_acks_late": True, # on consume do not send an immediate ack back 57 | "task_publish_retry_policy": task_publish_retry_policy} 58 | 59 | app = Celery() 60 | sub = Subscriber("redis-subscriber", 61 | ev("SUB_BROKER_URL", "redis://localhost:6379/0"), 62 | app, 63 | ssl_options, 64 | **conn_attrs) 65 | 66 | 67 | # Now consume: 68 | queue = ev("CONSUME_QUEUE", "reporting.accounts") 69 | sub.consume(callback=handle_message, 70 | queue=queue, 71 | exchange=None, 72 | routing_key=None, 73 | prefetch_count=conn_attrs["prefetch_count"]) 74 | 75 | log.info("End - {}".format(name)) 76 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.rst 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import warnings 4 | import unittest 5 | 6 | try: 7 | from setuptools import setup 8 | except ImportError: 9 | from distutils.core import setup 10 | 11 | try: 12 | from distutils.command.build_py import build_py_2to3 as build_py 13 | except ImportError: 14 | from distutils.command.build_py import build_py 15 | 16 | """ 17 | https://packaging.python.org/guides/making-a-pypi-friendly-readme/ 18 | check the README.rst works on pypi as the 19 | long_description with: 20 | twine check dist/* 21 | """ 22 | long_description = open('README.rst').read() 23 | 24 | cur_path, cur_script = os.path.split(sys.argv[0]) 25 | os.chdir(os.path.abspath(cur_path)) 26 | 27 | install_requires = [ 28 | 'ansible>=1.9', 29 | 'pep8>=1.7.1', 30 | 'flake8>=3.4.1', 31 | 'boto3', 32 | 'pycurl', 33 | 'redis', 34 | 'celery>=4.1.0', 35 | 'kombu>=4.1.0', 36 | 'logstash-formatter', 37 | 'python-logstash', 38 | 'coverage', 39 | 'future', 40 | 'pylint', 41 | 'spylunking', 42 | 'unittest2', 43 | 'mock' 44 | ] 45 | 46 | 47 | if sys.version_info < (2, 7): 48 | warnings.warn( 49 | 'Python 2.6 is not supported.', 50 | DeprecationWarning) 51 | 52 | 53 | def celery_connectors_test_suite(): 54 | test_loader = unittest.TestLoader() 55 | test_suite = test_loader.discover('tests', pattern='test_*.py') 56 | return test_suite 57 | 58 | 59 | # Don't import celery_connectors module here, since deps may not be installed 60 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'celery_connectors')) 61 | 62 | setup( 63 | name='celery-connectors', 64 | cmdclass={'build_py': build_py}, 65 | version='1.0.30', 66 | description='Celery Connectors', 67 | long_description=long_description, 68 | author='Jay Johnson', 69 | author_email='jay.p.h.johnson@gmail.com', 70 | url='https://github.com/jay-johnson/celery-connectors', 71 | packages=[ 72 | 'celery_connectors', 73 | 'celery_connectors.redis', 74 | 'celery_connectors.log' 75 | ], 76 | package_data={}, 77 | install_requires=install_requires, 78 | test_suite='setup.celery_connectors_test_suite', 79 | tests_require=[ 80 | ], 81 | scripts=[ 82 | './celery_connectors/redis/redis-subscribe-and-read-messages.py', 83 | './celery_connectors/redis/redis-publish-messages.py', 84 | './celery_connectors/rabbitmq/rabbitmqadmin.py', 85 | './celery_connectors/rabbitmq/list-bindings.sh', 86 | './celery_connectors/rabbitmq/list-channels.sh', 87 | './celery_connectors/rabbitmq/list-connections.sh', 88 | 
'./celery_connectors/rabbitmq/list-consumers.sh', 89 | './celery_connectors/rabbitmq/list-exchanges.sh', 90 | './celery_connectors/rabbitmq/list-queues.sh', 91 | './celery_connectors/rabbitmq/rmq-close-all-connections.sh', 92 | './celery_connectors/rabbitmq/rmq-trace-on.sh', 93 | './celery_connectors/rabbitmq/rmq-trace-off.sh', 94 | './celery_connectors/rabbitmq/rmq-status.sh', 95 | './celery_connectors/rabbitmq/watch-queues.sh', 96 | './celery_connectors/scripts/subscribe-to-rabbitmq.sh', 97 | './celery_connectors/scripts/subscribe-to-redis.sh', 98 | './publish-user-conversion-events-redis.py', 99 | './publish-user-conversion-events-rabbitmq.py', 100 | './start-kombu-message-processor-redis.py', 101 | './start-kombu-message-processor-rabbitmq.py', 102 | './start-mixin-json-relay.py', 103 | './start-mixin-celery-relay.py', 104 | './start-mixin-publisher.py', 105 | './start-mixin-load-test.py', 106 | './start-load-test-rabbitmq.py', 107 | './start-subscriptions-rabbitmq-test.py', 108 | './start-load-test-redis.py', 109 | './run_rabbitmq_publisher.py', 110 | './run_redis_publisher.py', 111 | './kombu_rabbitmq_subscriber.py', 112 | './kombu_redis_subscriber.py', 113 | './kombu_sqs_publisher.py', 114 | './kombu_sqs_subscriber.py', 115 | './kombu_mixin_subscriber.py', 116 | './start-redis-and-rabbitmq.sh', 117 | './stop-redis-and-rabbitmq.sh', 118 | './start-persistence-containers.sh', 119 | './start-ecomm-relay.py' 120 | ], 121 | use_2to3=True, 122 | classifiers=[ 123 | 'Development Status :: 5 - Production/Stable', 124 | 'Intended Audience :: Developers', 125 | 'License :: OSI Approved :: Apache Software License', 126 | 'Operating System :: OS Independent', 127 | 'Programming Language :: Python :: 2', 128 | 'Programming Language :: Python :: 2.7', 129 | 'Programming Language :: Python :: 3.5', 130 | 'Programming Language :: Python :: 3.6', 131 | 'Programming Language :: Python :: Implementation :: PyPy', 132 | 'Topic :: Software Development :: Libraries :: Python Modules', 133 | ]) 134 | -------------------------------------------------------------------------------- /ssh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker exec -it worker bash 4 | -------------------------------------------------------------------------------- /start-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this assumes docker is running and docker-compose is installed 4 | 5 | cd docker/dev 6 | 7 | compose_file="rabbitmq-celery-only-consume.yml" 8 | 9 | echo "Starting celery-connector with compose_file=${compose_file}" 10 | docker-compose -f $compose_file up -d 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /start-ecomm-relay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import uuid 4 | import time 5 | from datetime import datetime 6 | from spylunking.log.setup_logging import build_colorized_logger 7 | from celery_connectors.utils import ev 8 | from celery_connectors.message_processor import MessageProcessor 9 | 10 | # import the ecommerce tasks out of the demo dir 11 | import ecomm_app.ecommerce.tasks 12 | 13 | 14 | name = "ecomm-relay" 15 | log = build_colorized_logger( 16 | name=name) 17 | 18 | log.info("Start - {}".format(name)) 19 | 20 | 21 | def relay_callback(body, message): 22 | 23 | pub_auth_url = ev("RELAY_WORKER_BROKER_URL", 24 | 
"pyamqp://rabbitmq:rabbitmq@localhost:5672//") 25 | pub_backend_url = ev("RELAY_BACKEND_URL", 26 | "redis://localhost:6379/12") 27 | path_to_config_module = ev("RELAY_CONFIG_MODULE", 28 | "ecomm_app.ecommerce.celeryconfig_pub_sub") 29 | 30 | app = ecomm_app.ecommerce.tasks.get_celery_app( 31 | name=ev("RELAY_NAME", "ecomm-relay"), 32 | auth_url=pub_auth_url, 33 | backend_url=pub_backend_url, 34 | path_to_config_module=path_to_config_module) 35 | 36 | task_name = ev("RELAY_TASK_NAME", 37 | "ecomm_app.ecommerce.tasks.handle_user_conversion_events") 38 | now = datetime.now().isoformat() 39 | body = {"account_id": 999, 40 | "subscription_id": 321, 41 | "stripe_id": 876, 42 | "created": now, 43 | "product_id": "JJJ", 44 | "version": 1, 45 | "org_msg": body, 46 | "msg_id": str(uuid.uuid4())} 47 | 48 | source_info = {"msg_proc": ev("RELAY_NAME", 49 | "ecomm_relay")} 50 | 51 | log.info(("Sending broker={} " 52 | "body={}") 53 | .format(app.conf.broker_url, 54 | body)) 55 | 56 | result = app.send_task(task_name, (body, source_info)) 57 | 58 | if "simulate_processing_lag" in body: 59 | log.info(("task - {} - simulating processing" 60 | "lag={} sleeping") 61 | .format(task_name, 62 | body["simulate_processing_lag"])) 63 | time.sleep(float(body["simulate_processing_lag"])) 64 | # end of handling adding artifical lag for testing Celery 65 | 66 | log.info(("Done with msg_id={} result={}") 67 | .format(body["msg_id"], 68 | result.get())) 69 | 70 | # now that the message has been 71 | # sent to the celery ecomm worker 72 | # we can ack the message which 73 | # deletes it from the source queue 74 | # the message processor uses 75 | message.ack() 76 | 77 | # end of relay_callback 78 | 79 | 80 | # want to change where you're subscribing vs publishing? 81 | sub_ssl_options = {} 82 | sub_auth_url = ev("SUB_BROKER_URL", 83 | "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 84 | pub_ssl_options = {} 85 | pub_auth_url = ev("PUB_BROKER_URL", 86 | "redis://localhost:6379/0") 87 | 88 | # start the message processor 89 | msg_proc = MessageProcessor(name=name, 90 | sub_auth_url=sub_auth_url, 91 | sub_ssl_options=sub_ssl_options, 92 | pub_auth_url=pub_auth_url, 93 | pub_ssl_options=pub_ssl_options) 94 | 95 | # configure where this is consuming: 96 | queue = ev("CONSUME_QUEUE", "user.events.conversions") 97 | 98 | # Relay Publish Hook - sending to Redis 99 | # where is it sending handled messages using a publish-hook or auto-caching: 100 | exchange = ev("PUBLISH_EXCHANGE", "reporting.accounts") 101 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.accounts") 102 | 103 | # set up the controls and long-term connection attributes 104 | seconds_to_consume = 10.0 105 | heartbeat = 60 106 | serializer = "application/json" 107 | pub_serializer = "json" 108 | expiration = None 109 | consume_forever = True 110 | 111 | # start consuming 112 | msg_proc.consume_queue(queue=queue, 113 | heartbeat=heartbeat, 114 | expiration=expiration, 115 | sub_serializer=serializer, 116 | pub_serializer=pub_serializer, 117 | seconds_to_consume=seconds_to_consume, 118 | forever=consume_forever, 119 | # Optional: if you're chaining a publish hook to another system 120 | exchange=exchange, 121 | # Optional: if you're chaining a publish hook to another system 122 | routing_key=routing_key, 123 | # Pass in a custom callback 124 | # for processing messages found in the queue 125 | callback=relay_callback) 126 | 127 | log.info("End - {}".format(name)) 128 | -------------------------------------------------------------------------------- 
/start-ecomm-worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | path_to_worker_module="ecomm_app.job_worker" 4 | worker_name="ecommerce_subscriber" 5 | num_workers=3 6 | log_level="INFO" 7 | path_to_celery="celery" 8 | 9 | if [[ "${PATH_TO_WORKER_MODULE}" != "" ]]; then 10 | path_to_worker_module="${PATH_TO_WORKER_MODULE}" 11 | fi 12 | if [[ "${APP_NAME}" != "" ]]; then 13 | worker_name="${APP_NAME}" 14 | fi 15 | if [[ "${NUM_WORKERS}" != "" ]]; then 16 | num_workers=${NUM_WORKERS} 17 | fi 18 | if [[ "${LOG_LEVEL}" != "" ]]; then 19 | log_level="${LOG_LEVEL}" 20 | fi 21 | if [[ "${PATH_TO_CELERY}" != "" ]]; then 22 | path_to_celery="${PATH_TO_CELERY}" 23 | fi 24 | 25 | # http://docs.celeryproject.org/en/latest/userguide/optimizing.html 26 | echo "${path_to_celery} worker -A ${path_to_worker_module} --loglevel=${log_level} -n ${worker_name} -c ${num_workers} -Ofair" 27 | ${path_to_celery} worker -A ${path_to_worker_module} --loglevel=${log_level} -n ${worker_name} -c ${num_workers} -Ofair 28 | -------------------------------------------------------------------------------- /start-kombu-message-processor-rabbitmq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from celery_connectors.utils import ev 5 | from celery_connectors.message_processor import MessageProcessor 6 | 7 | 8 | name = "msg-proc" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | # want to change where you're subscribing vs publishing? 15 | sub_ssl_options = {} 16 | sub_auth_url = ev("SUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 17 | pub_ssl_options = {} 18 | pub_auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/0") 19 | 20 | # start the message processor 21 | msg_proc = MessageProcessor(name=name, 22 | sub_auth_url=sub_auth_url, 23 | sub_ssl_options=sub_ssl_options, 24 | pub_auth_url=pub_auth_url, 25 | pub_ssl_options=pub_ssl_options) 26 | 27 | # configure where this is consuming: 28 | queue = ev("CONSUME_QUEUE", "user.events.conversions") 29 | 30 | # Relay Publish Hook - sending to Redis 31 | # where is it sending handled messages using a publish-hook or auto-caching: 32 | exchange = ev("PUBLISH_EXCHANGE", "reporting.accounts") 33 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.accounts") 34 | 35 | # set up the controls and long-term connection attributes 36 | seconds_to_consume = 10.0 37 | heartbeat = 60 38 | serializer = "application/json" 39 | pub_serializer = "json" 40 | expiration = None 41 | consume_forever = True 42 | 43 | # start consuming 44 | msg_proc.consume_queue(queue=queue, 45 | heartbeat=heartbeat, 46 | expiration=expiration, 47 | sub_serializer=serializer, 48 | pub_serializer=pub_serializer, 49 | seconds_to_consume=seconds_to_consume, 50 | forever=consume_forever, 51 | # Optional: if you're chaining a publish hook to another system 52 | exchange=exchange, 53 | # Optional: if you're chaining a publish hook to another system 54 | routing_key=routing_key) 55 | 56 | log.info("End - {}".format(name)) 57 | -------------------------------------------------------------------------------- /start-kombu-message-processor-redis.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from spylunking.log.setup_logging import build_colorized_logger 4 | from 
celery_connectors.utils import ev 5 | from celery_connectors.message_processor import MessageProcessor 6 | 7 | 8 | name = "msg-proc" 9 | log = build_colorized_logger( 10 | name=name) 11 | 12 | log.info("Start - {}".format(name)) 13 | 14 | # want to change where you're subscribing vs publishing? 15 | sub_ssl_options = {} 16 | sub_auth_url = ev("SUB_BROKER_URL", "redis://localhost:6379/0") 17 | pub_ssl_options = {} 18 | pub_auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/0") 19 | 20 | # start the message processor 21 | msg_proc = MessageProcessor(name=name, 22 | sub_auth_url=sub_auth_url, 23 | sub_ssl_options=sub_ssl_options, 24 | pub_auth_url=pub_auth_url, 25 | pub_ssl_options=pub_ssl_options) 26 | 27 | # configure where this is consuming: 28 | queue = ev("CONSUME_QUEUE", "user.events.conversions") 29 | 30 | # Relay Publish Hook - is disabled for this example 31 | # where is it sending handled messages using a publish-hook or auto-caching: 32 | exchange = None 33 | routing_key = None 34 | 35 | # set up the controls and long-term connection attributes 36 | seconds_to_consume = 10.0 37 | heartbeat = 60 38 | serializer = "application/json" 39 | expiration = None 40 | consume_forever = True 41 | 42 | # start consuming 43 | msg_proc.consume_queue(queue=queue, 44 | heartbeat=heartbeat, 45 | expiration=expiration, 46 | sub_serializer=serializer, 47 | seconds_to_consume=seconds_to_consume, 48 | forever=consume_forever, 49 | # Optional: if you're chaining a publish hook to another system 50 | exchange=exchange, 51 | # Optional: if you're chaining a publish hook to another system 52 | routing_key=routing_key) 53 | 54 | log.info("End - {}".format(name)) 55 | -------------------------------------------------------------------------------- /start-load-test-rabbitmq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import build_sample_msgs 7 | from celery_connectors.build_ssl_options import build_ssl_options 8 | from celery_connectors.run_publisher import run_publisher 9 | 10 | 11 | # Credits and inspirations from these great sources: 12 | # 13 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 14 | # https://gist.github.com/oubiwann/3843016 15 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 16 | # https://github.com/Skablam/kombu-examples 17 | # https://gist.github.com/mlavin/6671079 18 | 19 | name = ev("APP_NAME", "robopub") 20 | log = build_colorized_logger( 21 | name=name) 22 | 23 | 24 | broker_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 25 | exchange_name = ev("PUBLISH_EXCHANGE", "") 26 | exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "") 27 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.accounts") 28 | queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") 29 | priority_routing = {"high": queue_name} 30 | use_exchange = Exchange(exchange_name, type=exchange_type) 31 | use_routing_key = routing_key 32 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 33 | task_queues = [ 34 | use_queue 35 | ] 36 | ssl_options = build_ssl_options() 37 | transport_options = {} 38 | 39 | num_msgs_to_send = int(float(ev("NUM_MSG_TO_PUBLISH", "200000"))) 40 | log.info(("Generating messages={}") 41 | .format(num_msgs_to_send)) 42 | 
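# build_sample_msgs (imported above from celery_connectors.utils) is assumed here
# to return a list of JSON-serializable dicts, with any keys passed in "data"
# merged into every message; a hypothetical stand-in with that shape, for
# illustration only:
#
#     import uuid
#     from datetime import datetime
#
#     def build_sample_msgs_stub(num=100, data=None):
#         base = data or {}
#         return [dict(base,
#                      msg_id=str(uuid.uuid4()),
#                      account_id=idx,
#                      created=datetime.now().isoformat())
#                 for idx in range(num)]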
43 | # use these if you want to simulate processing lag 44 | # relay_task_lag = 0.0 45 | # worker_task_lag = 0.0 46 | # processing_lag_data = {"relay_simulate_processing_lag": worker_task_lag, 47 | # "simulate_processing_lag": worker_task_lag} 48 | # msgs = build_sample_msgs(num=num_msgs_to_send, 49 | # data=processing_lag_data) 50 | 51 | msgs = build_sample_msgs(num=num_msgs_to_send, 52 | data={}) 53 | 54 | log.info(("Publishing messages={}") 55 | .format(len(msgs))) 56 | 57 | run_publisher(broker_url=broker_url, 58 | exchange=use_exchange, # kombu.Exchange object 59 | routing_key=use_routing_key, # string 60 | msgs=msgs, 61 | ssl_options=ssl_options, 62 | transport_options=transport_options, 63 | priority="high", 64 | priority_routing=priority_routing, 65 | silent=True, 66 | publish_silent=True) 67 | 68 | log.info("Done Publishing") 69 | -------------------------------------------------------------------------------- /start-load-test-redis.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import build_sample_msgs 7 | from celery_connectors.build_ssl_options import build_ssl_options 8 | from celery_connectors.run_publisher import run_publisher 9 | 10 | 11 | # Credits and inspirations from these great sources: 12 | # 13 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 14 | # https://gist.github.com/oubiwann/3843016 15 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 16 | # https://github.com/Skablam/kombu-examples 17 | # https://gist.github.com/mlavin/6671079 18 | 19 | name = ev("APP_NAME", "robopub") 20 | log = build_colorized_logger( 21 | name=name) 22 | 23 | 24 | broker_url = ev("PUB_BROKER_URL", "redis://localhost:6379/0") 25 | exchange_name = ev("PUBLISH_EXCHANGE", "") 26 | exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "") 27 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.accounts") 28 | queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") 29 | priority_routing = {"high": queue_name} 30 | use_exchange = Exchange(exchange_name, type=exchange_type) 31 | use_routing_key = routing_key 32 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 33 | task_queues = [ 34 | use_queue 35 | ] 36 | ssl_options = build_ssl_options() 37 | transport_options = {} 38 | 39 | num_msgs_to_send = int(float(ev("NUM_MSG_TO_PUBLISH", "200000"))) 40 | log.info(("Generating messages={}") 41 | .format(num_msgs_to_send)) 42 | 43 | # relay_task_lag = 0.0 44 | # worker_task_lag = 0.0 45 | # processing_lag_data = {"relay_simulate_processing_lag": worker_task_lag, 46 | # "simulate_processing_lag": worker_task_lag} 47 | # msgs = build_sample_msgs(num=num_msgs_to_send, 48 | # data=processing_lag_data) 49 | 50 | msgs = build_sample_msgs(num=num_msgs_to_send, 51 | data={}) 52 | 53 | log.info(("Publishing messages={}") 54 | .format(len(msgs))) 55 | 56 | run_publisher(broker_url=broker_url, 57 | exchange=use_exchange, # kombu.Exchange object 58 | routing_key=use_routing_key, # string 59 | msgs=msgs, 60 | ssl_options=ssl_options, 61 | transport_options=transport_options, 62 | priority="high", 63 | priority_routing=priority_routing, 64 | silent=True, 65 | publish_silent=True) 66 | 67 | log.info("Done Publishing") 68 | 
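Both load-test scripts above only fill the reporting.accounts queue; draining it is left to a separate consumer. Below is a minimal sketch of such a consumer that simply reuses the MessageProcessor keyword arguments from the message-processor script earlier in this listing; the broker URLs mirror the defaults above, the script name is illustrative, and the relay publish hook stays disabled (exchange and routing_key left as None).

#!/usr/bin/env python

from spylunking.log.setup_logging import build_colorized_logger
from celery_connectors.utils import ev
from celery_connectors.message_processor import MessageProcessor

name = "load-test-drain"
log = build_colorized_logger(name=name)

# consume from the same broker the load tests publish to
sub_auth_url = ev("SUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//")
pub_auth_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//")

msg_proc = MessageProcessor(name=name,
                            sub_auth_url=sub_auth_url,
                            sub_ssl_options={},
                            pub_auth_url=pub_auth_url,
                            pub_ssl_options={})

# drain the queue the load tests fill; no relay publish hook
msg_proc.consume_queue(queue=ev("CONSUME_QUEUE", "reporting.accounts"),
                       heartbeat=60,
                       expiration=None,
                       sub_serializer="application/json",
                       seconds_to_consume=10.0,
                       forever=True,
                       exchange=None,
                       routing_key=None)

log.info("Done - {}".format(name))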
-------------------------------------------------------------------------------- /start-mixin-celery-relay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.build_ssl_options import build_ssl_options 7 | from celery_connectors.run_jtoc_relay import run_jtoc_relay 8 | 9 | 10 | # Credits and inspirations from these great sources: 11 | # 12 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 13 | # https://gist.github.com/oubiwann/3843016 14 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 15 | # https://github.com/Skablam/kombu-examples 16 | # https://gist.github.com/mlavin/6671079 17 | 18 | name = ev("APP_NAME", "jtoc_relay") 19 | log = build_colorized_logger( 20 | name=name) 21 | 22 | 23 | broker_url = ev("SUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 24 | exchange_name = ev("CONSUME_EXCHANGE", "ecomm.api") 25 | exchange_type = ev("CONSUME_EXCHANGE_TYPE", "topic") 26 | routing_key = ev("CONSUME_ROUTING_KEY", "ecomm.api.west") 27 | queue_name = ev("CONSUME_QUEUE", "ecomm.api.west") 28 | prefetch_count = int(float(ev("PREFETCH_COUNT", "1"))) 29 | priority_routing = {"high": queue_name, 30 | "low": queue_name} 31 | use_exchange = Exchange(exchange_name, type=exchange_type) 32 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 33 | task_queues = [ 34 | use_queue 35 | ] 36 | ssl_options = build_ssl_options() 37 | 38 | relay_broker_url = ev("RELAY_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 39 | relay_backend_url = ev("RELAY_BACKEND_URL", "redis://localhost:6379/10") 40 | relay_exchange_name = ev("RELAY_EXCHANGE", "") 41 | relay_exchange_type = ev("RELAY_EXCHANGE_TYPE", "direct") 42 | relay_routing_key = ev("RELAY_ROUTING_KEY", "reporting.payments") 43 | relay_exchange = Exchange(relay_exchange_name, type=relay_exchange_type) 44 | 45 | transport_options = {} 46 | 47 | log.info(("Consuming queues={}") 48 | .format(len(task_queues))) 49 | 50 | run_jtoc_relay(broker_url=broker_url, 51 | ssl_options=ssl_options, 52 | transport_options=transport_options, 53 | task_queues=task_queues, 54 | prefetch_count=prefetch_count, 55 | relay_broker_url=relay_broker_url, 56 | relay_backend_url=relay_backend_url, 57 | relay_exchange=relay_exchange, 58 | relay_exchange_type=relay_exchange_type, 59 | relay_routing_key=relay_routing_key) 60 | 61 | log.info("Done") 62 | -------------------------------------------------------------------------------- /start-mixin-json-relay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.build_ssl_options import build_ssl_options 7 | from celery_connectors.run_consumer_relay import run_consumer_relay 8 | 9 | 10 | # Credits and inspirations from these great sources: 11 | # 12 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 13 | # https://gist.github.com/oubiwann/3843016 14 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 15 | # https://github.com/Skablam/kombu-examples 16 | # 
https://gist.github.com/mlavin/6671079 17 | 18 | name = ev("APP_NAME", "mixin_relay") 19 | log = build_colorized_logger( 20 | name=name) 21 | 22 | 23 | broker_url = ev("SUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 24 | exchange_name = ev("CONSUME_EXCHANGE", "ecomm.api") 25 | exchange_type = ev("CONSUME_EXCHANGE_TYPE", "topic") 26 | queue_name = ev("CONSUME_QUEUE", "ecomm.api.west") 27 | routing_key = ev("CONSUME_ROUTING_KEY", "ecomm.api.west") 28 | prefetch_count = int(float(ev("PREFETCH_COUNT", "1"))) 29 | priority_routing = {"high": queue_name, 30 | "low": queue_name} 31 | use_exchange = Exchange(exchange_name, type=exchange_type) 32 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 33 | task_queues = [ 34 | use_queue 35 | ] 36 | ssl_options = build_ssl_options() 37 | 38 | relay_broker_url = ev("RELAY_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 39 | relay_exchange_name = ev("RELAY_EXCHANGE", "") 40 | relay_exchange_type = ev("RELAY_EXCHANGE_TYPE", "direct") 41 | relay_routing_key = ev("RELAY_ROUTING_KEY", "reporting.payments") 42 | relay_exchange = Exchange(relay_exchange_name, type=relay_exchange_type) 43 | 44 | transport_options = {} 45 | 46 | log.info(("Consuming queues={}") 47 | .format(len(task_queues))) 48 | 49 | run_consumer_relay(broker_url=broker_url, 50 | ssl_options=ssl_options, 51 | transport_options=transport_options, 52 | task_queues=task_queues, 53 | prefetch_count=prefetch_count, 54 | relay_broker_url=relay_broker_url, 55 | relay_exchange=relay_exchange, 56 | relay_routing_key=relay_routing_key) 57 | 58 | log.info("Done") 59 | -------------------------------------------------------------------------------- /start-mixin-load-test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import build_sample_msgs 7 | from celery_connectors.build_ssl_options import build_ssl_options 8 | from celery_connectors.run_publisher import run_publisher 9 | 10 | 11 | # Credits and inspirations from these great sources: 12 | # 13 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 14 | # https://gist.github.com/oubiwann/3843016 15 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 16 | # https://github.com/Skablam/kombu-examples 17 | # https://gist.github.com/mlavin/6671079 18 | 19 | name = ev("APP_NAME", "robopub") 20 | log = build_colorized_logger( 21 | name=name) 22 | 23 | 24 | broker_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 25 | exchange_name = ev("PUBLISH_EXCHANGE", "ecomm.api") 26 | exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "topic") 27 | routing_key = ev("PUBLISH_ROUTING_KEY", "ecomm.api.west") 28 | queue_name = ev("PUBLISH_QUEUE", "ecomm.api.west") 29 | priority_routing = {"high": queue_name, 30 | "low": queue_name} 31 | use_exchange = Exchange(exchange_name, type=exchange_type) 32 | use_routing_key = routing_key 33 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 34 | task_queues = [ 35 | use_queue 36 | ] 37 | ssl_options = build_ssl_options() 38 | transport_options = {} 39 | 40 | num_msgs_to_send = int(float(ev("NUM_MSG_TO_PUBLISH", "20000"))) 41 | log.info(("Generating messages={}") 42 | .format(num_msgs_to_send)) 43 | 44 | # relay_task_lag 
= 0.0 45 | # worker_task_lag = 0.0 46 | # processing_lag_data = {"relay_simulate_processing_lag": worker_task_lag, 47 | # "simulate_processing_lag": worker_task_lag} 48 | # msgs = build_sample_msgs(num=num_msgs_to_send, 49 | # data=processing_lag_data) 50 | 51 | msgs = build_sample_msgs(num=num_msgs_to_send, 52 | data={}) 53 | 54 | log.info(("Publishing messages={}") 55 | .format(len(msgs))) 56 | 57 | run_publisher(broker_url=broker_url, 58 | exchange=use_exchange, # kombu.Exchange object 59 | routing_key=use_routing_key, # string 60 | msgs=msgs, 61 | ssl_options=ssl_options, 62 | transport_options=transport_options, 63 | priority="high", 64 | priority_routing=priority_routing, 65 | silent=True, 66 | publish_silent=True) 67 | 68 | log.info("Done Publishing") 69 | -------------------------------------------------------------------------------- /start-mixin-publisher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import build_sample_msgs 7 | from celery_connectors.build_ssl_options import build_ssl_options 8 | from celery_connectors.run_publisher import run_publisher 9 | 10 | 11 | # Credits and inspirations from these great sources: 12 | # 13 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 14 | # https://gist.github.com/oubiwann/3843016 15 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 16 | # https://github.com/Skablam/kombu-examples 17 | # https://gist.github.com/mlavin/6671079 18 | 19 | name = ev("APP_NAME", "robopub") 20 | log = build_colorized_logger( 21 | name=name) 22 | 23 | 24 | broker_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 25 | exchange_name = ev("PUBLISH_EXCHANGE", "ecomm.api") 26 | exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "topic") 27 | routing_key = ev("PUBLISH_ROUTING_KEY", "ecomm.api.west") 28 | queue_name = ev("PUBLISH_QUEUE", "ecomm.api.west") 29 | priority_routing = {"high": queue_name, 30 | "low": queue_name} 31 | use_exchange = Exchange(exchange_name, type=exchange_type) 32 | use_routing_key = routing_key 33 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 34 | task_queues = [ 35 | use_queue 36 | ] 37 | ssl_options = build_ssl_options() 38 | transport_options = {} 39 | 40 | num_msgs_to_send = 10 41 | log.info(("Generating messages={}") 42 | .format(num_msgs_to_send)) 43 | 44 | relay_task_lag = 2.0 45 | worker_task_lag = 8.0 46 | processing_lag_data = {"relay_simulate_processing_lag": worker_task_lag, 47 | "simulate_processing_lag": worker_task_lag} 48 | 49 | msgs = build_sample_msgs(num=num_msgs_to_send, 50 | data=processing_lag_data) 51 | 52 | log.info(("Publishing messages={}") 53 | .format(len(msgs))) 54 | 55 | run_publisher(broker_url=broker_url, 56 | exchange=use_exchange, # kombu.Exchange object 57 | routing_key=use_routing_key, # string 58 | msgs=msgs, 59 | ssl_options=ssl_options, 60 | transport_options=transport_options, 61 | priority="high", 62 | priority_routing=priority_routing) 63 | 64 | log.info("Done Publishing") 65 | -------------------------------------------------------------------------------- /start-persistence-containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this assumes docker is running 
and docker-compose is installed 4 | 5 | cd docker 6 | 7 | echo "Starting persistence redis and rabbitmq" 8 | docker-compose -f persistence_redis_and_rabbitmq.yml up -d 9 | 10 | exit 0 11 | -------------------------------------------------------------------------------- /start-redis-and-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this assumes docker is running and docker-compose is installed 4 | 5 | cd docker 6 | 7 | echo "Starting redis and rabbitmq" 8 | docker-compose -f redis_and_rabbitmq.yml up -d 9 | 10 | exit 0 11 | -------------------------------------------------------------------------------- /start-subscriptions-rabbitmq-test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from kombu import Exchange, Queue 4 | from spylunking.log.setup_logging import build_colorized_logger 5 | from celery_connectors.utils import ev 6 | from celery_connectors.utils import build_sample_msgs 7 | from celery_connectors.build_ssl_options import build_ssl_options 8 | from celery_connectors.run_publisher import run_publisher 9 | 10 | 11 | # Credits and inspirations from these great sources: 12 | # 13 | # https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py 14 | # https://gist.github.com/oubiwann/3843016 15 | # https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py 16 | # https://github.com/Skablam/kombu-examples 17 | # https://gist.github.com/mlavin/6671079 18 | 19 | name = ev("APP_NAME", "robopub") 20 | log = build_colorized_logger( 21 | name=name) 22 | 23 | 24 | broker_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 25 | exchange_name = ev("PUBLISH_EXCHANGE", "") 26 | exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "") 27 | routing_key = ev("PUBLISH_ROUTING_KEY", "reporting.subscriptions") 28 | queue_name = ev("PUBLISH_QUEUE", "reporting.subscriptions") 29 | priority_routing = {"high": queue_name} 30 | use_exchange = Exchange(exchange_name, type=exchange_type) 31 | use_routing_key = routing_key 32 | use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key) 33 | task_queues = [ 34 | use_queue 35 | ] 36 | ssl_options = build_ssl_options() 37 | transport_options = {} 38 | 39 | num_msgs_to_send = int(float(ev("NUM_MSG_TO_PUBLISH", "200000"))) 40 | log.info(("Generating messages={}") 41 | .format(num_msgs_to_send)) 42 | 43 | # relay_task_lag = 0.0 44 | # worker_task_lag = 0.0 45 | # processing_lag_data = {"relay_simulate_processing_lag": worker_task_lag, 46 | # "simulate_processing_lag": worker_task_lag} 47 | # msgs = build_sample_msgs(num=num_msgs_to_send, 48 | # data=processing_lag_data) 49 | 50 | msgs = build_sample_msgs(num=num_msgs_to_send, 51 | data={}) 52 | 53 | log.info(("Publishing messages={}") 54 | .format(len(msgs))) 55 | 56 | run_publisher(broker_url=broker_url, 57 | exchange=use_exchange, # kombu.Exchange object 58 | routing_key=use_routing_key, # string 59 | msgs=msgs, 60 | ssl_options=ssl_options, 61 | transport_options=transport_options, 62 | priority="high", 63 | priority_routing=priority_routing, 64 | silent=True, 65 | publish_silent=True) 66 | 67 | log.info("Done Publishing") 68 | -------------------------------------------------------------------------------- /stop-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this assumes docker is running and docker-compose is installed 
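# note: the relative "cd docker/dev" below assumes the script is launched from the repository root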
4 | 5 | cd docker/dev 6 | 7 | compose_file="rabbitmq-celery-only-consume.yml" 8 | echo "Stopping celery-connectors with compose_file=${compose_file}" 9 | docker-compose -f ${compose_file} stop 10 | 11 | if [[ "$?" == "0" ]]; then 12 | docker rm worker >> /dev/null 2>&1 13 | fi 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /stop-redis-and-rabbitmq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # this assumes docker is running and docker-compose is installed 4 | 5 | cd docker 6 | 7 | echo "Stopping redis and rabbitmq" 8 | docker-compose -f redis_and_rabbitmq.yml stop 9 | 10 | if [[ "$?" == "0" ]]; then 11 | docker rm celredis1 celrabbit1 celflowerrabbit celflowerredis >> /dev/null 2>&1 12 | fi 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jay-johnson/celery-connectors/f6d48bf3e48513228a86a1ff6014b6e6cf465742/tests/__init__.py -------------------------------------------------------------------------------- /tests/load_test_message_processor_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | import uuid 4 | from spylunking.log.setup_logging import test_logger 5 | from celery_connectors.utils import get_percent_done 6 | from celery_connectors.utils import ev 7 | from tests.base_test import BaseTestCase 8 | 9 | 10 | log = test_logger( 11 | name="load-test-rabbit-subscriber") 12 | 13 | 14 | class LoadTestSubscriberRabbitMQConsuming(BaseTestCase): 15 | 16 | def build_user_conversion_event_msg(self, 17 | test_values, 18 | now=datetime.datetime.now().isoformat()): 19 | body = {"account_id": 777, 20 | "subscription_id": 888, 21 | "stripe_id": 999, 22 | "product_id": "XYZ", 23 | "msg_id": str(uuid.uuid4()), 24 | "created": now} 25 | 26 | return body 27 | # end of build_user_conversion_event_msg 28 | 29 | def test_rabbitmq_consuming(self): 30 | 31 | # Integration Test the Relay Processor 32 | # This test just fills the queue for processing 33 | num_to_consume = 50000 34 | num_sent = 0 35 | num_to_send = num_to_consume 36 | msgs_to_send = [] 37 | 38 | msgs_by_id = {} 39 | 40 | self.exchange_name = ev("LOAD_TEST_EXCHANGE", 41 | "reporting") 42 | self.routing_key = ev("LOAD_TEST_ROUTING_KEY", 43 | "reporting.accounts") 44 | self.queue_name = ev("LOAD_TEST_QUEUE", 45 | "user.events.conversions") 46 | 47 | log.info(("Publishing {}/{} " 48 | "ex={} rk={} broker={}") 49 | .format(num_sent, 50 | num_to_send, 51 | self.exchange_name, 52 | self.routing_key, 53 | self.pub_auth_url)) 54 | 55 | pub_retry = True 56 | not_done_publishing = True 57 | 58 | test_values = {"test_name": "large messages"} 59 | 60 | if len(msgs_to_send) == 0: 61 | while len(msgs_to_send) != num_to_send: 62 | 63 | test_msg = self.build_user_conversion_event_msg(test_values) 64 | msgs_to_send.append(test_msg) 65 | msgs_by_id[test_msg["msg_id"]] = False 66 | # end of building messages before slower publishing calls 67 | 68 | while not_done_publishing: 69 | 70 | if (num_sent % 1000 == 0) and num_sent > 0: 71 | log.info(("Published {} for " 72 | "{}/{} messages") 73 | .format(get_percent_done(num_sent, 74 | num_to_send), 75 | num_sent, 76 | num_to_send)) 77 | # end of if print for tracing 78 | 79 | msg_body = None 80 | if num_sent < len(msgs_to_send): 81 | 
msg_body = msgs_to_send[num_sent] 82 | 83 | self.publish(body=msg_body, 84 | exchange=self.exchange_name, 85 | routing_key=self.routing_key, 86 | queue=self.queue_name, 87 | priority=0, 88 | ttl=None, 89 | serializer=self.pub_serializer, 90 | retry=pub_retry) 91 | 92 | num_sent += 1 93 | 94 | if num_sent >= num_to_send: 95 | log.info(("Published {} ALL " 96 | "{}/{} messages") 97 | .format(get_percent_done(num_sent, 98 | num_to_send), 99 | num_sent, 100 | num_to_send)) 101 | 102 | not_done_publishing = False 103 | elif num_sent >= len(msgs_to_send): 104 | log.info(("Published {} all " 105 | "{}/{} messages") 106 | .format(get_percent_done(num_sent, 107 | len(msgs_to_send)), 108 | num_sent, 109 | num_to_send)) 110 | 111 | not_done_publishing = False 112 | # if should stop 113 | 114 | # end of not_done_publishing 115 | 116 | assert(num_sent == num_to_consume) 117 | 118 | os.system("list-queues.sh") 119 | 120 | log.info("") 121 | log.info(("display messages in the queues " 122 | "with routing_key={} again with:") 123 | .format(self.routing_key)) 124 | log.info("list-queues.sh") 125 | log.info("") 126 | 127 | # end of test_rabbitmq_consuming 128 | 129 | # end of LoadTestSubscriberRabbitMQConsuming 130 | -------------------------------------------------------------------------------- /tests/load_test_relay_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | import uuid 4 | import random 5 | from spylunking.log.setup_logging import test_logger 6 | from celery_connectors.utils import get_percent_done 7 | from celery_connectors.utils import ev 8 | from tests.base_test import BaseTestCase 9 | 10 | 11 | log = test_logger( 12 | name="load-test-rabbit-relay") 13 | 14 | 15 | class LoadTestRelayRabbitMQConsuming(BaseTestCase): 16 | 17 | def build_user_conversion_event_msg(self, 18 | test_values, 19 | now=datetime.datetime.now().isoformat()): 20 | body = {"account_id": 777, 21 | "subscription_id": 888, 22 | "stripe_id": 999, 23 | "product_id": "XYZ", 24 | "simulate_processing_lag": random.uniform(1.0, 5.0), 25 | "msg_id": str(uuid.uuid4()), 26 | "created": now} 27 | 28 | return body 29 | # end of build_user_conversion_event_msg 30 | 31 | def test_rabbitmq_consuming(self): 32 | 33 | # Integration Test the Relay Processor 34 | # This test just fills the queue for processing 35 | num_to_consume = 50000 36 | num_sent = 0 37 | num_to_send = num_to_consume 38 | msgs_to_send = [] 39 | 40 | msgs_by_id = {} 41 | 42 | self.exchange_name = ev("LOAD_TEST_EXCHANGE", 43 | "user.events") 44 | self.routing_key = ev("LOAD_TEST_ROUTING_KEY", 45 | "user.events.conversions") 46 | self.queue_name = ev("LOAD_TEST_QUEUE", 47 | "user.events.conversions") 48 | 49 | log.info(("Publishing {}/{} " 50 | "ex={} rk={} broker={}") 51 | .format(num_sent, 52 | num_to_send, 53 | self.exchange_name, 54 | self.routing_key, 55 | self.pub_auth_url)) 56 | 57 | pub_retry = True 58 | not_done_publishing = True 59 | 60 | test_values = {"test_name": "large messages"} 61 | 62 | if len(msgs_to_send) == 0: 63 | while len(msgs_to_send) != num_to_send: 64 | 65 | test_msg = self.build_user_conversion_event_msg(test_values) 66 | msgs_to_send.append(test_msg) 67 | msgs_by_id[test_msg["msg_id"]] = False 68 | # end of building messages before slower publishing calls 69 | 70 | while not_done_publishing: 71 | 72 | if (num_sent % 1000 == 0) and num_sent > 0: 73 | log.info(("Published {} for " 74 | "{}/{} messages") 75 | .format(get_percent_done(num_sent, 76 | num_to_send), 77 | 
num_sent, 78 | num_to_send)) 79 | # end of if print for tracing 80 | 81 | msg_body = None 82 | if num_sent < len(msgs_to_send): 83 | msg_body = msgs_to_send[num_sent] 84 | 85 | self.publish(body=msg_body, 86 | exchange=self.exchange_name, 87 | routing_key=self.routing_key, 88 | queue=self.queue_name, 89 | priority=0, 90 | ttl=None, 91 | serializer=self.pub_serializer, 92 | retry=pub_retry) 93 | 94 | num_sent += 1 95 | 96 | if num_sent >= num_to_send: 97 | log.info(("Published {} ALL " 98 | "{}/{} messages") 99 | .format(get_percent_done(num_sent, 100 | num_to_send), 101 | num_sent, 102 | num_to_send)) 103 | 104 | not_done_publishing = False 105 | elif num_sent >= len(msgs_to_send): 106 | log.info(("Published {} all " 107 | "{}/{} messages") 108 | .format(get_percent_done(num_sent, 109 | len(msgs_to_send)), 110 | num_sent, 111 | num_to_send)) 112 | 113 | not_done_publishing = False 114 | # if should stop 115 | 116 | # end of not_done_publishing 117 | 118 | assert(num_sent == num_to_consume) 119 | 120 | os.system("list-queues.sh") 121 | 122 | log.info("") 123 | log.info(("display messages in the queues " 124 | "with routing_key={} again with:") 125 | .format(self.routing_key)) 126 | log.info("list-queues.sh") 127 | log.info("") 128 | 129 | # end of test_rabbitmq_consuming 130 | 131 | # end of LoadTestRelayRabbitMQConsuming 132 | -------------------------------------------------------------------------------- /tests/load_test_subscriber_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | import uuid 4 | import random 5 | from spylunking.log.setup_logging import test_logger 6 | from celery_connectors.utils import get_percent_done 7 | from celery_connectors.utils import ev 8 | from tests.base_test import BaseTestCase 9 | 10 | 11 | log = test_logger( 12 | name="load-test-rabbit-subscriber") 13 | 14 | 15 | class LoadTestSubscriberRabbitMQConsuming(BaseTestCase): 16 | 17 | def build_user_conversion_event_msg(self, 18 | test_values, 19 | now=datetime.datetime.now().isoformat()): 20 | body = {"account_id": 777, 21 | "subscription_id": 888, 22 | "stripe_id": 999, 23 | "product_id": "XYZ", 24 | "simulate_processing_lag": random.uniform(1.0, 5.0), 25 | "msg_id": str(uuid.uuid4()), 26 | "created": now} 27 | 28 | return body 29 | # end of build_user_conversion_event_msg 30 | 31 | def test_rabbitmq_consuming(self): 32 | 33 | # Integration Test the Subscriber Processor 34 | # This test just fills the queue for processing 35 | num_to_consume = 50000 36 | num_sent = 0 37 | num_to_send = num_to_consume 38 | msgs_to_send = [] 39 | 40 | msgs_by_id = {} 41 | 42 | self.exchange_name = ev("LOAD_TEST_EXCHANGE", 43 | "reporting") 44 | self.routing_key = ev("LOAD_TEST_ROUTING_KEY", 45 | "reporting.accounts") 46 | self.queue_name = ev("LOAD_TEST_QUEUE", 47 | "reporting.accounts") 48 | 49 | log.info(("Publishing {}/{} " 50 | "ex={} rk={} broker={}") 51 | .format(num_sent, 52 | num_to_send, 53 | self.exchange_name, 54 | self.routing_key, 55 | self.pub_auth_url)) 56 | 57 | pub_retry = True 58 | not_done_publishing = True 59 | 60 | test_values = {"test_name": "large messages"} 61 | 62 | if len(msgs_to_send) == 0: 63 | while len(msgs_to_send) != num_to_send: 64 | 65 | test_msg = self.build_user_conversion_event_msg(test_values) 66 | msgs_to_send.append(test_msg) 67 | msgs_by_id[test_msg["msg_id"]] = False 68 | # end of building messages before slower publishing calls 69 | 70 | while not_done_publishing: 71 | 72 | if (num_sent % 1000 == 0) and 
num_sent > 0: 73 | log.info(("Published {} for " 74 | "{}/{} messages") 75 | .format(get_percent_done(num_sent, 76 | num_to_send), 77 | num_sent, 78 | num_to_send)) 79 | # end of if print for tracing 80 | 81 | msg_body = None 82 | if num_sent < len(msgs_to_send): 83 | msg_body = msgs_to_send[num_sent] 84 | 85 | self.publish(body=msg_body, 86 | exchange=self.exchange_name, 87 | routing_key=self.routing_key, 88 | queue=self.queue_name, 89 | priority=0, 90 | ttl=None, 91 | serializer=self.pub_serializer, 92 | retry=pub_retry) 93 | 94 | num_sent += 1 95 | 96 | if num_sent >= num_to_send: 97 | log.info(("Published {} ALL " 98 | "{}/{} messages") 99 | .format(get_percent_done(num_sent, 100 | num_to_send), 101 | num_sent, 102 | num_to_send)) 103 | 104 | not_done_publishing = False 105 | elif num_sent >= len(msgs_to_send): 106 | log.info(("Published {} all " 107 | "{}/{} messages") 108 | .format(get_percent_done(num_sent, 109 | len(msgs_to_send)), 110 | num_sent, 111 | num_to_send)) 112 | 113 | not_done_publishing = False 114 | # if should stop 115 | 116 | # end of not_done_publishing 117 | 118 | assert(num_sent == num_to_consume) 119 | 120 | os.system("list-queues.sh") 121 | 122 | log.info("") 123 | log.info(("display messages in the queues " 124 | "with routing_key={} again with:") 125 | .format(self.routing_key)) 126 | log.info("list-queues.sh") 127 | log.info("") 128 | 129 | # end of test_rabbitmq_consuming 130 | 131 | # end of LoadTestSubscriberRabbitMQConsuming 132 | -------------------------------------------------------------------------------- /tests/load_test_worker_rabbitmq.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | import uuid 4 | import random 5 | from spylunking.log.setup_logging import test_logger 6 | from celery_connectors.utils import get_percent_done 7 | from celery_connectors.utils import ev 8 | from tests.base_test import BaseTestCase 9 | import ecomm_app.ecommerce.tasks 10 | 11 | 12 | log = test_logger( 13 | name='load-test-rabbit-worker') 14 | 15 | 16 | class LoadTestWorkerRabbitMQConsuming(BaseTestCase): 17 | 18 | def build_user_conversion_event_msg(self, 19 | test_values, 20 | now=datetime.datetime.now().isoformat()): 21 | body = {"account_id": 777, 22 | "subscription_id": 888, 23 | "stripe_id": 999, 24 | "product_id": "XYZ", 25 | "simulate_processing_lag": random.uniform(1.0, 5.0), 26 | "msg_id": str(uuid.uuid4()), 27 | "created": now} 28 | 29 | return body 30 | # end of build_user_conversion_event_msg 31 | 32 | def test_rabbitmq_consuming(self): 33 | 34 | # Integration Test the Consuming Worker with 50,0000 messages 35 | # This test just uses send_task for publishing 36 | num_to_consume = 50000 37 | num_sent = 0 38 | num_to_send = num_to_consume 39 | msgs_to_send = [] 40 | 41 | msgs_by_id = {} 42 | 43 | not_done_publishing = True 44 | 45 | test_values = {"test_name": "large messages"} 46 | 47 | if len(msgs_to_send) == 0: 48 | while len(msgs_to_send) != num_to_send: 49 | test_msg = self.build_user_conversion_event_msg(test_values) 50 | msgs_to_send.append(test_msg) 51 | msgs_by_id[test_msg["msg_id"]] = False 52 | # end of building messages before slower publishing calls 53 | 54 | pub_auth_url = ev("RELAY_WORKER_BROKER_URL", 55 | "pyamqp://rabbitmq:rabbitmq@localhost:5672//") 56 | path_to_config_module = "ecomm_app.ecommerce.celeryconfig_pub_sub" 57 | 58 | app = ecomm_app.ecommerce.tasks.get_celery_app( 59 | name="demo", 60 | auth_url=pub_auth_url, 61 | 
path_to_config_module=path_to_config_module) 62 | 63 | task_name = "ecomm_app.ecommerce.tasks.handle_user_conversion_events" 64 | 65 | source_id = {"msg_proc": ev("TEST_RELAY_NAME", 66 | "test_ecomm_relay")} 67 | result = None 68 | 69 | log.info(("Sending broker={}") 70 | .format(app.conf.broker_url)) 71 | 72 | while not_done_publishing: 73 | 74 | if (num_sent % 1000 == 0) and num_sent > 0: 75 | log.info(("Published {} for " 76 | "{}/{} messages") 77 | .format(get_percent_done(num_sent, 78 | num_to_send), 79 | num_sent, 80 | num_to_send)) 81 | # end of if print for tracing 82 | 83 | msg_body = None 84 | if num_sent < len(msgs_to_send): 85 | msg_body = msgs_to_send[num_sent] 86 | 87 | result = app.send_task(task_name, (msg_body, source_id)) 88 | 89 | num_sent += 1 90 | 91 | if num_sent >= num_to_send: 92 | log.info(("Published {} ALL " 93 | "{}/{} messages") 94 | .format(get_percent_done(num_sent, 95 | num_to_send), 96 | num_sent, 97 | num_to_send)) 98 | 99 | not_done_publishing = False 100 | elif num_sent >= len(msgs_to_send): 101 | log.info(("Published {} all " 102 | "{}/{} messages result={}") 103 | .format(get_percent_done(num_sent, 104 | len(msgs_to_send)), 105 | num_sent, 106 | num_to_send, 107 | result)) 108 | 109 | not_done_publishing = False 110 | # if should stop 111 | 112 | # end of not_done_publishing 113 | 114 | assert(num_sent == num_to_consume) 115 | 116 | log.info("") 117 | os.system("list-queues.sh") 118 | log.info("") 119 | 120 | # end of test_rabbitmq_consuming 121 | 122 | # end of LoadTestWorkerRabbitMQConsuming 123 | -------------------------------------------------------------------------------- /tests/test_functional.py: -------------------------------------------------------------------------------- 1 | from tests.base_test import BaseTestCase 2 | 3 | 4 | class FunctionalTest(BaseTestCase): 5 | 6 | def test_unittest_works(self): 7 | self.assertEqual(1, 1) 8 | # end of test_unittest_works 9 | 10 | # end of FunctionalTest 11 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | {2.7,3.5,3.6}-unit 4 | {2.7,3.5,3.6}-integration-{rabbitmq,redis,sqs} 5 | 6 | flake8 7 | flakeplus 8 | configcheck 9 | pydocstyle 10 | 11 | basepython = 12 | 2.7: python2.7 13 | 3.4: python3.4 14 | 3.5: python3.5 15 | 3.6: python3.6 16 | flake8,flakeplus,configcheck,pydocstyle: python2.7 17 | 18 | [testenv:pydocstyle] 19 | commands = 20 | pydocstyle {toxinidir}/celery_connectors 21 | 22 | [flake8] 23 | max-line-length = 140 24 | ignore = E126,E127,E131,E226,E261,E265,E266,E302,E305,E401,E402,F403,F405,E731 25 | exclude = ./build*,cross.py,.tox/*,./celery_connectors/rabbitmq/rabbitmqadmin.py 26 | 27 | [pycodestyle] 28 | max-line-length = 140 29 | exclude = ./build*,cross.py,.tox/*,./celery_connectors/rabbitmq/rabbitmqadmin.py 30 | 31 | [testenv:lint] 32 | deps = flake8 33 | commands = flake8 34 | --------------------------------------------------------------------------------
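The tox environments above assume the matching broker containers are already running. For a quick sanity check without tox, the lightweight functional test can be run directly with the standard unittest runner; this is a minimal sketch, assuming it is invoked from the repository root so the tests package is importable (the load_test_* modules are left out because they expect a live broker and publish tens of thousands of messages).

import unittest

# load only the broker-free functional test; the load_test_* modules
# need RabbitMQ/Redis running before they are worth invoking
suite = unittest.defaultTestLoader.loadTestsFromName("tests.test_functional")
unittest.TextTestRunner(verbosity=2).run(suite)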