├── .bumpversion.cfg ├── .dockerignore ├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── README.md ├── band ├── __init__.py ├── __main__.py ├── config │ ├── __init__.py │ ├── configfile.py │ ├── env.py │ └── reader.py ├── constants.py ├── exceptions.py ├── lib │ ├── __init__.py │ ├── helpers.py │ ├── http.py │ ├── json.py │ ├── redis.py │ ├── response.py │ ├── scheduler.py │ └── structs.py ├── log.py ├── promote.py ├── registry.py ├── rpc │ ├── __init__.py │ ├── rpc_pubsub_redis.py │ └── server.py ├── server.py └── sync_runner.py ├── makefile ├── setup.py └── tests ├── test_response.py └── test_wrap_sync.py /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.20.6 3 | commit = True 4 | tag = False 5 | 6 | [bumpversion:file:setup.py] 7 | 8 | [bumpversion:file:band/__init__.py] 9 | 10 | [bumpversion:file:Dockerfile] 11 | 12 | [bumpversion:file:.travis.yml] 13 | 14 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.DS_Store 3 | .vscode 4 | .git/* 5 | .env 6 | .env.dev 7 | data/* 8 | *.egg-info/ 9 | /.mypy_cache 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.env* 2 | /venv 3 | /.vscode 4 | /data 5 | __pycache__ 6 | *.egg-info/ 7 | /.mypy_cache 8 | .pytest_cache 9 | 10 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: minimal 4 | branches: 5 | only: 6 | - master 7 | - dev 8 | env: 9 | global: 10 | - PROJECT_NAME=band-base-py 11 | - ORG=rockstat 12 | - IMAGE_NAME=$ORG/$PROJECT_NAME 13 | - VERSION=0.20.6 14 | - TAG="$( [ $TRAVIS_BRANCH == 'master' ] && echo latest || echo $TRAVIS_BRANCH )" 15 | - secure: bB5Y+bokrlZZ33JcVHEBWER19qRT4rDIDdBaInax41Gt+XtHo4lvj+abj5sv92W36XjZs8O0sF4wa0oSKEniBeEjBAAjtPD6dSet+GHzHlledUH/VWA3+nN/mKv0ImeiWTHmC2tPYhxkeRzeREGeCwZMsxk3NSSkLAJh11Z+ikgaJgFq7oB8bUM66RgaYvYlCu22Sx7KR9zdjlrNJWkEsHVYy45KC+pRzi2hzAOKyhlT7mL1hEw8gxkBUayPw1SG10y0ByxdA3T9368oi3oJhluXALI/tRCYPNZRh4uGKtbGfu0W+qx+bbtLHKE5v9itYeRDuoP60w8heMs7MNqyMWMrP1lZy3fojTHyudKI2HKC8NC1/3cv+cYgHNGUITBRRcCnxh826nGdvVkU5IJd8U4PemOdqm/XsyYRsZAXcV3sxD84hJgRPcLJqqCYOyie0BQh8U8vb3MqQoQJifMadDHoFqMBNGihpZECH6FZcEynSsXNo/vJU3DLh+ChAwWlm9xhm0uhD/yzFSnH9+cAUNbX6wM3xLUZeXFXcbJ7NffP80tSIW9wWNJwNLedtzgg0vOpdgCm9lIeu3ZN91MYZxnu/6BdJA4F7zgF7YUkaJXB/8H7RaPrqdhTXfLgIlPhF+CzL3M5sxR/y58DD5VgO8Yuj+Me9JpqRHBzjpFfa5I= 16 | - secure: hFXYHNtvQjRZnZYw+g+ce9l+5mxzU8Dp/8p7BmKBJrRGsSB+kyFfFX9cnkgbds9QrTRM22xTlvXqnBKLQQBeKFPxghCXqJGsSIW3jj5mp8gGr0d8L4UB/ZT7R/dvsC7KAyqeJmuhV7qbqU58D7wGrOuobguYMsPlboG9tWRcpqnXfy1ZAOD6wwxn4g/zEJb97WRsp/a2ED/XHKOUUCFnCuYGRbh0OvTnzMUFi+NzpVsYKJOIGRyNXJ8umz5aF47aijT8kUHX+kPtxzwTxWU508qfncSz2OT5fEUH0zLbUd+gMlDhE6RKHzFou/iNYecaZKYT1s/YGtJFM9yJOB7iP1txJy9SwLrw9uo8ZCulDzAeOgiNeqcu8f+tZ/wQz+tnlVKtEs1otk3kyunASKhpqKHDcY8/EPPPX0Z0Vinbfu07QtylxpngijGxrctw5UFTWC/TrVokKdBvCrjgwbUhg8X2HkAFqJScHqtFaO2JY/ySarC1Pl3zA3wj08ePkXzu7RUhfvBqTcBnpLqcqnnYMulyPUOoEE414lezr2u2y5xgtj1VKFllajepmjKRz8xmlN1568IWOUU6YSorzkHLxpu6Os3H0QLLU3dMdUn5wAKyofIFTJPO8w8nSK0LX0TZXsoXrwLkhXCZV7BSjHSwRckTEpKYqX9Zj5UQtQOPBvM= 17 | services: 18 | - docker 19 | script: 20 | - cat Dockerfile | sed 
"s/\:latest/\:$TAG/g" > Dockerfile.tag 21 | - echo "$PROJECT_NAME $IMAGE_NAME:$TAG" 22 | - docker build -t $PROJECT_NAME -f Dockerfile.tag . 23 | after_script: 24 | - docker images 25 | before_deploy: 26 | - docker tag "$PROJECT_NAME" "$IMAGE_NAME:$TAG" 27 | - docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" 28 | after_deploy: 29 | - make repo=rockstat/director br=$TRAVIS_BRANCH travis-trigger 30 | - make repo=rockstat/anaconda br=$TRAVIS_BRANCH travis-trigger 31 | deploy: 32 | provider: script 33 | skip_cleanup: true 34 | script: docker push "$IMAGE_NAME:$TAG" 35 | on: 36 | all_branches: true 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.6-alpine3.9 2 | LABEL maintainer="Dmitry Rodin " 3 | LABEL band.base-py.version="0.20.6" 4 | 5 | RUN apk add --no-cache \ 6 | wget curl \ 7 | unzip gzip \ 8 | nano git \ 9 | make gcc g++ coreutils \ 10 | libffi libffi-dev \ 11 | openssl openssl-dev 12 | 13 | 14 | ENV HOST=0.0.0.0 15 | ENV PORT=8080 16 | EXPOSE ${PORT} 17 | WORKDIR /usr/src/band 18 | ADD . . 19 | RUN python setup.py develop 20 | RUN echo -e "Installed python packages:\n$(pip freeze)" 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018 Dmitry Rodin
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Required images collection for Rockstat platform
2 |
3 | The Rockstat platform has its own microservice management tool called `director` (one of the images in this repo). Interaction with other services uses the dedicated [Band](/rockstat/band) framework for Python and [Rock-me-ts](/rockstat/rock-me-ts) for TypeScript/JavaScript services running in Node.js.
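For orientation, a minimal service built on the Band framework can be sketched roughly like this (an illustrative example, not a file from this repo; the `hello` handler, its `name` parameter and the greeting payload are invented, and the service config is assumed to define `listen` and `name`):

```
# illustrative sketch of a minimal Band service (hypothetical, not shipped in this repo)
from band import expose, worker, logger, response, settings, start_server


@expose.handler()  # register the coroutine as an HTTP/RPC request handler
async def hello(name='world', **params):
    # return a typed "data" response understood by the front service
    return response.data({'greeting': f'hello, {name}'})


@worker()  # executed once on application startup
async def announce():
    logger.info('service started', name=settings.name)


if __name__ == '__main__':
    # settings come from config.yaml / .env; start_server() expects `listen` and `name`
    start_server(**settings)
```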
4 |
5 | ## Rockstat architecture
6 |
7 | ![Rockstat scheme](https://rockstat.ru/media/rockstat_v3_arch.png?3)
8 |
9 | [Read more](https://rockstat.ru/about)
10 |
11 | ## Components (Band collection)
12 |
13 | ### Director
14 |
15 | Director is the Chief Service Officer: it manages all the other services built on the provided frameworks. It is started by the setup software.
16 |
17 | ### SxGeo: Sypex Geo IP-to-location service
18 |
19 | Enriches requests with geo data.
20 | On first execution the service downloads a fresh database from the library's servers.
21 | To run it, make an HTTP call:
22 |
23 | ```
24 | ❯❯❯ http GET http://127.0.0.1:10000/run/sxgeo --timeout 300 -v
25 | HTTP/1.1 200 OK
26 | {
27 |     "name": "sxgeo",
28 |     "short_id": "c9e202d9a535",
29 |     "state": "running"
30 | }
31 | ```
32 |
33 | The last example uses the [httpie](/jakubroztocil/httpie) console HTTP client.
34 |
35 | ### MmGeo: MaxMind IP-to-location service
36 |
37 | Enriches requests with geo data.
38 | On first execution the service downloads a fresh database from the library's servers.
39 | To run it, make an HTTP call:
40 |
41 | ```
42 | ❯❯❯ http GET http://127.0.0.1:10000/run/mmgeo --timeout 300 -v
43 | HTTP/1.1 200 OK
44 | {
45 |     "name": "mmgeo",
46 |     "short_id": "363ef60fe68c",
47 |     "state": "running"
48 | }
49 | ```
50 |
51 | ### UaParser: extract useful data from the User-Agent string
52 |
53 | Enriches requests with the client device type and software versions.
54 | To run it, make an HTTP call:
55 |
56 | ```
57 | ❯❯❯ http GET http://127.0.0.1:10000/run/uaparser --timeout 300 -v
58 |
59 | HTTP/1.1 200 OK
60 | {
61 |     "name": "uaparser",
62 |     "short_id": "363ef60fe68c",
63 |     "state": "running"
64 | }
65 | ```
66 |
67 | ### Send Mixpanel: service for batch uploading data to the Mixpanel warehouse
68 |
69 | By default it uploads data every second.
70 | To run it, make an HTTP call:
71 |
72 | ```
73 | ❯❯❯ http --timeout 300 -v POST http://127.0.0.1:10000/run/send_mixpanel env:='{"MIXPANEL_TOKEN":"31fecdd83ab66bbd2de1fd098a704e00","MIXPANEL_API_SECRET":"06e0d599bced1abe8c56cc162842a44f"}'
74 | HTTP/1.1 200 OK
75 | {
76 |     "name": "send_mixpanel",
77 |     "short_id": "d1c2c3153e8d",
78 |     "state": "running"
79 | }
80 | ```
81 |
82 |
83 |
84 |
85 | _________________________________________________
86 |
87 | Closed area / Keep out, still very rough
88 | ===========
89 |
90 | Automatic port allocation on the host machine
91 |
92 | ## Running (DEV host)
93 |
94 | add a dev `.env` file containing
95 |
96 | ...
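The exact variables are left out above. Purely as an illustration, a dev `.env` might set values like these (`ENV` and `NAME` are read by `band/config/env.py`; `REDIS_HOST` mirrors the Docker run example below; every value here is a placeholder to adjust for your setup):

```
# hypothetical dev .env; all values are placeholders
ENV=development
NAME=my_service
REDIS_HOST=host.docker.internal
```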
97 | 98 | 99 | host.docker.internal is internal host machine alias at docker for mac 100 | 101 | running band (by default starting on 10000 port) 102 | 103 | run service 104 | 105 | http get http://localhost:10000/run/tg_hellobot 106 | 107 | check 108 | 109 | http get http://localhost:10000/list 110 | 111 | call 112 | 113 | http get http://localhost:10000/call/tg_hellobot/ 114 | 115 | `http` is executable of httpie library 116 | 117 | ## Run in docker 118 | 119 | IMG_PATH=/Users/user/projects/rockstat/band_images 120 | docker run -d \ 121 | --name=band --hostname=band \ 122 | --restart=unless-stopped \ 123 | --network custom \ 124 | -p 127.0.0.1:10000:8080 \ 125 | -v $IMG_PATH/band_collection:/images/band_collection:ro \ 126 | -v $IMG_PATH/band:/images/band_base:ro \ 127 | -v /var/run/docker.sock:/var/run/docker.sock \ 128 | -e REDIS_HOST=redis \ 129 | -e BAND_URL=http://band:8080 \ 130 | -e BAND_IMAGES=/images \ 131 | rst/band 132 | 133 | ## Deps 134 | 135 | in pypi old version of `jsonrpcclient` and should be installed from git `pip install -U git+https://github.com/bcb/jsonrpcclient.git@master#egg=jsonrpcclient` 136 | 137 | ## Maintain 138 | 139 | Prune unused docker containers 140 | 141 | docker container prune 142 | 143 | and images 144 | 145 | docker image prune --all 146 | -------------------------------------------------------------------------------- /band/__init__.py: -------------------------------------------------------------------------------- 1 | __VERSION__ = '0.20.6' 2 | 3 | import importlib 4 | import os 5 | import uvloop 6 | import asyncio 7 | 8 | 9 | def loop_exc(loop, context): 10 | message = context.get('message', None) 11 | exception = context.get('exception', None) 12 | exc_info = (type(exception), exception, 13 | exception.__traceback__) if exception else None 14 | logger.exception('loop ex', message=message, 15 | exception=exception, exc_info=exc_info) 16 | 17 | 18 | loop = uvloop.new_event_loop() 19 | loop.set_exception_handler(loop_exc) 20 | asyncio.set_event_loop(loop) 21 | 22 | from .constants import * 23 | from .config.configfile import settings 24 | from .log import * 25 | from .lib import response 26 | 27 | # response = BandResponse() 28 | 29 | logger.info('final configuration', settings=settings) 30 | 31 | from .lib.structs import * 32 | from .lib.http import json_response 33 | from .lib.redis import RedisFactory 34 | redis_factory = RedisFactory(**settings, loop=loop) 35 | 36 | from .registry import Dome, Expose, worker, cleanup, blocking 37 | dome: Dome = Dome.instance() 38 | expose: Expose = dome.exposeour 39 | 40 | from .server import app, start_server, add_routes 41 | from .lib.scheduler import Scheduler 42 | 43 | scheduler = dome['scheduler'] = Scheduler(**settings, loop=loop) 44 | 45 | from .rpc.rpc_pubsub_redis import RedisPubSubRPC 46 | 47 | app.on_startup.append(dome.on_startup) 48 | app.on_shutdown.append(dome.on_shutdown) 49 | 50 | rpc = dome['rpc'] = RedisPubSubRPC(**settings) 51 | 52 | # if settings.name != DIRECTOR_SERVICE: 53 | importlib.import_module('band.promote', 'band') 54 | 55 | # 3rd party lib re-export 56 | from aiocron import crontab 57 | -------------------------------------------------------------------------------- /band/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from . 
import settings, start_server 3 | 4 | def main(): 5 | start_server(**settings) 6 | 7 | if __name__ == '__main__': 8 | main() 9 | 10 | 11 | -------------------------------------------------------------------------------- /band/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .env import environ 2 | from .configfile import settings 3 | -------------------------------------------------------------------------------- /band/config/configfile.py: -------------------------------------------------------------------------------- 1 | from jinja2 import Environment, FileSystemLoader, Template 2 | import collections 3 | import os 4 | from pprint import pprint 5 | import yaml 6 | from prodict import Prodict as pdict 7 | from ..log import logger 8 | from .env import env, name_env, environ 9 | from .reader import reader 10 | 11 | DEFAULT_FILES = ['config.yaml', 'custom.yml'] 12 | 13 | root = os.getcwd() 14 | 15 | config = { 16 | 'env': env, 17 | '_pid': os.getpid(), 18 | '_cwd': os.getcwd() 19 | } 20 | 21 | def update(d, u): 22 | for k, v in u.items(): 23 | if isinstance(v, collections.Mapping): 24 | d[k] = update(d.get(k, {}), v) 25 | else: 26 | d[k] = v 27 | return d 28 | 29 | for fn in DEFAULT_FILES: 30 | data = reader(f'{root}/{fn}') 31 | if data: 32 | update(config, data) 33 | 34 | config.update({ 35 | 'name': config.get('name', name_env) 36 | }) 37 | settings = pdict.from_dict(config) 38 | 39 | __all__ = ['settings'] 40 | -------------------------------------------------------------------------------- /band/config/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os import environ 3 | import socket 4 | from dotenv import load_dotenv 5 | from pathlib import Path 6 | 7 | ENV_DEV = 'development' 8 | ENV_PROD = 'production' 9 | 10 | env_fn = '.env' 11 | env_local_fn = '.env.local' 12 | 13 | root = Path(os.getcwd()) 14 | load_dotenv(dotenv_path=root / env_local_fn) 15 | load_dotenv(dotenv_path=root / env_fn) 16 | 17 | env = environ['ENV'] = environ.get('ENV', ENV_DEV) 18 | name_env = environ['NAME'] = environ.get('NAME', socket.gethostname()) 19 | -------------------------------------------------------------------------------- /band/config/reader.py: -------------------------------------------------------------------------------- 1 | from jinja2 import Environment, FileSystemLoader, Template, TemplateNotFound 2 | import collections 3 | import os 4 | import yaml 5 | from os.path import dirname, basename 6 | from .env import environ 7 | from ..log import logger 8 | 9 | def reader(fn): 10 | logger.debug('loading', f=fn) 11 | try: 12 | tmplenv = Environment(loader=FileSystemLoader(dirname(fn))) 13 | tmpl = tmplenv.get_template(str(basename(fn))) 14 | part = tmpl.render(**environ) 15 | data = yaml.load(part) 16 | return data 17 | except TemplateNotFound: 18 | logger.warn('Template not found', file=fn) 19 | except Exception: 20 | logger.exception('config') 21 | 22 | -------------------------------------------------------------------------------- /band/constants.py: -------------------------------------------------------------------------------- 1 | # roles 2 | 3 | LISTENER = 'listener' 4 | HANDLER = 'handler' 5 | ENRICHER = 'enricher' 6 | ROLES = set([LISTENER, ENRICHER, HANDLER]) 7 | 8 | # Services 9 | DIRECTOR_SERVICE = 'director' 10 | FRONTIER_SERVICE = 'front' 11 | 12 | # Text statuses 13 | RESULT_OPERATING = 'operating' 14 | RESULT_PONG = 'pong' 15 | OK = 'o-o-ok' 16 | RESULT_OK = 
200 17 | RESULT_NOT_FOUND = 404 18 | RESULT_BAD_ARGS = 404 19 | RESULT_INTERNAL_ERROR = 500 20 | BROADCAST = 'broadcast' 21 | ENRICH = 'enrich' 22 | 23 | NOTIFY_ALIVE = '__iamalive' 24 | REQUEST_STATUS = '__status' 25 | 26 | 27 | HUMA_LOGF = '%(asctime)s %(levelname)s %(message)s' 28 | JSON_LOGF = '{ "loggerName":"%(name)s", "asciTime":"%(asctime)s", "levelNo":"%(levelno)s", "levelName":"%(levelname)s", "message":"%(message)s"}' 29 | 30 | -------------------------------------------------------------------------------- /band/exceptions.py: -------------------------------------------------------------------------------- 1 | 2 | class BandNotYetReadyException(Exception): 3 | """Code not ready.""" 4 | pass 5 | 6 | -------------------------------------------------------------------------------- /band/lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rockstat/band-framework/e6c3bbd5cec9b692185cbd620ed107eba8d312af/band/lib/__init__.py -------------------------------------------------------------------------------- /band/lib/helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common helpers collection 3 | """ 4 | from typing import Dict 5 | 6 | from ..config import environ 7 | 8 | 9 | def env_is_true(name, default='', environ=environ): 10 | return environ.get(name, default).lower() in ("yes", "true", "t", "1") 11 | 12 | 13 | def without_none(dick: dict) -> dict: 14 | return {k: v for k, v in dick.items() if v is not None} 15 | -------------------------------------------------------------------------------- /band/lib/http.py: -------------------------------------------------------------------------------- 1 | import time 2 | import asyncio 3 | from aiohttp.web import (json_response as _json_response, middleware, 4 | HTTPException, Response, RouteTableDef, RouteDef, StreamResponse) 5 | from jsonrpcclient.exceptions import ReceivedErrorResponse 6 | from band import logger, response 7 | import types 8 | 9 | from ..lib.json import json_def, json_dumps, json_loads 10 | 11 | 12 | def json_response(result, status=200, request=None): 13 | return _json_response( 14 | body=json_dumps(result), 15 | status=status, 16 | headers=cors_headers(request=request)) 17 | 18 | 19 | async def request_handler(request, handler): 20 | # get query 21 | query = dict(**request.query) 22 | # url params 23 | if request.method == 'POST': 24 | if request.content_type == 'application/json': 25 | raw = await request.text() 26 | if raw: 27 | query.update(json_loads(raw)) 28 | else: 29 | post = await request.post() 30 | query.update(post) 31 | # url params 32 | query.update(request.match_info) 33 | try: 34 | result = await handler(**query) 35 | # Handling stream responses 36 | if isinstance(result, types.AsyncGeneratorType): 37 | try: 38 | stream = StreamResponse() 39 | await stream.prepare(request) 40 | async for block in result: 41 | await stream.write(block) 42 | await stream.write_eof() 43 | return stream 44 | # Halt handling 45 | except asyncio.CancelledError: 46 | logger.warn('halted response') 47 | return stream 48 | # regilar response 49 | else: 50 | return json_response(result, request=request) 51 | except ReceivedErrorResponse as e: 52 | return json_response(response.error(e.message), request=request) 53 | except Exception as e: 54 | logger.exception("exc") 55 | return json_response(response.error(str(e)), request=request) 56 | 57 | 58 | def add_http_handler(handler, path, **kwargs): 59 | 
logger.info('Adding route', path=path) 60 | 61 | async def wrapper(request): 62 | return await request_handler(request, handler) 63 | 64 | return [ 65 | RouteDef('GET', path, wrapper, kwargs), 66 | RouteDef('POST', path, wrapper, kwargs) 67 | ] 68 | 69 | 70 | def say_cors_yes(request=None): 71 | return Response(headers=cors_headers(request=request)) 72 | 73 | 74 | def cors_headers(request): 75 | origin = request.headers.get('ORIGIN', '*') 76 | return { 77 | 'Access-Control-Allow-Origin': origin, 78 | 'Access-Control-Allow-Credentials': 'true', 79 | 'Access-Control-Allow-Headers': 'X-Requested-With,Content-Type' 80 | } 81 | 82 | 83 | @middleware 84 | async def naive_cors_middleware(request, handler): 85 | """ 86 | Simple CORS middleware to access api from dashboard 87 | """ 88 | if request.method == 'OPTIONS': 89 | return say_cors_yes(request) 90 | else: 91 | return await handler(request) 92 | 93 | 94 | @middleware 95 | async def error_middleware(request, handler): 96 | 97 | try: 98 | response = await handler(request) 99 | return response 100 | 101 | except HTTPException as ex: 102 | logger.exception('error middleware http ex') 103 | return json_response({ 104 | 'error': ex.reason 105 | }, 106 | status=ex.status, 107 | request=request) 108 | 109 | except Exception as ex: 110 | logger.exception('error middleware ex') 111 | return json_response({ 112 | 'error': 'Internal server error' 113 | }, 114 | status=500, 115 | request=request) 116 | 117 | return error_middleware 118 | 119 | 120 | __all__ = [ 121 | 'naive_cors_middleware', 122 | 'error_middleware' 123 | ] 124 | -------------------------------------------------------------------------------- /band/lib/json.py: -------------------------------------------------------------------------------- 1 | # import orjson 2 | import json 3 | import ujson 4 | 5 | 6 | def json_def(obj): 7 | if isinstance(obj, dict): 8 | return dict(obj) 9 | 10 | 11 | def json_dumps(data, **kwargs): 12 | # kwargs.pop('default', None) 13 | # kwargs['ensure_ascii'] = False 14 | return ujson.dumps(data, ensure_ascii=False) 15 | 16 | 17 | def json_loads(data): 18 | return ujson.loads(data) 19 | 20 | -------------------------------------------------------------------------------- /band/lib/redis.py: -------------------------------------------------------------------------------- 1 | from band import settings 2 | import asyncio 3 | import aioredis 4 | from aioredis.pool import ConnectionsPool 5 | from .. 
import logger 6 | # from weakref import WeakKeyDictionary 7 | 8 | class RedisFactory: 9 | def __init__(self, redis_dsn='redis://host:6379/0', loop=None, **kwargs): 10 | self.redis_dsn = redis_dsn 11 | self.loop = loop 12 | # self.wmap = WeakKeyDictionary() 13 | 14 | async def create_client(self): 15 | logger.debug('creating redis client using to', redis_dns=self.redis_dsn) 16 | return await aioredis.create_redis(self.redis_dsn, loop=self.loop) 17 | 18 | # async def create_subsribed_client(self, chan): 19 | # client = await self.create_client() 20 | # channel, = await client.subscribe(chan) 21 | # self.wmap[client] = [chan] 22 | # return client, channel 23 | 24 | async def create_pool(self): 25 | logger.debug('creating redis pool using to', redis_dns=self.redis_dsn) 26 | return await aioredis.create_pool(self.redis_dsn, loop=self.loop) 27 | 28 | async def create_redis_pool(self, **kwargs): 29 | return await aioredis.create_redis_pool(self.redis_dsn, loop=self.loop, **kwargs) 30 | 31 | async def close_client(self, client): 32 | # if client in self.wmap: 33 | # for chan in self.wmap[client]: 34 | # await self.wmap[client].unsubscribe(chan) 35 | if not client.closed: 36 | client.close() 37 | await client.wait_closed() 38 | # await asyncio.sleep(0) 39 | 40 | async def close_pool(self, pool): 41 | pool.close() 42 | await pool.wait_closed() 43 | await asyncio.sleep(0.01) 44 | -------------------------------------------------------------------------------- /band/lib/response.py: -------------------------------------------------------------------------------- 1 | from typing import NamedTuple, Dict 2 | 3 | from ..lib.json import json_def, json_dumps, json_loads 4 | 5 | RESP_PIXEL = 'pixel' 6 | RESP_REDIRECT = 'redirect' 7 | RESP_ERROR = 'error' 8 | RESP_DATA = 'data' 9 | 10 | 11 | class BaseBandResponse(dict): 12 | type__: str = None 13 | 14 | def to_json(self): 15 | return json_dumps(self._asdict()) 16 | 17 | def is_redirect(self): 18 | return self.type__ == RESP_REDIRECT 19 | 20 | def is_data(self): 21 | return self.type__ == RESP_DATA 22 | 23 | def is_pixel(self): 24 | return self.type__ == RESP_PIXEL 25 | 26 | def is_error(self): 27 | return self.type__ == RESP_ERROR 28 | 29 | @property 30 | def type(self): 31 | return self.type__ 32 | 33 | def __getattr__(self, key): 34 | return self.get(key) 35 | 36 | 37 | class BandResponceError(BaseBandResponse): 38 | errorMessage: str 39 | statusCode: int 40 | data: Dict 41 | type__: str = RESP_ERROR 42 | 43 | def __init__(self, errorMessage='Unknown error', statusCode=500, data={}): 44 | super().__init__(data) 45 | self.type__ = self.type__ 46 | self.errorMessage = errorMessage 47 | self.statusCode = statusCode 48 | 49 | @property 50 | def error_message(self): 51 | return self.errorMessage 52 | 53 | @property 54 | def status_code(self): 55 | return self.statusCode 56 | 57 | def __str__(self): 58 | return f'Error: {self.errorMessage} ({self.statusCode})' 59 | 60 | def _asdict(self): 61 | return { 62 | 'type__': self.type__, 63 | 'statusCode': self.statusCode, 64 | 'errorMessage': self.errorMessage, 65 | 'data': self 66 | } 67 | 68 | class BandResponceData(BaseBandResponse): 69 | data: Dict 70 | statusCode: int 71 | type__: str = RESP_DATA 72 | 73 | def __init__(self, data, statusCode=200): 74 | super().__init__(data) 75 | self.type__ = self.type__ 76 | self.statusCode = statusCode 77 | 78 | def __str__(self): 79 | return f'Data: {self}' 80 | 81 | def status_code(self): 82 | return self['statusCode'] 83 | 84 | def _asdict(self): 85 | return { 86 | 'type__': 
self.type__, 87 | 'statusCode': self.statusCode, 88 | 'data': self 89 | } 90 | 91 | 92 | class BandResponceRedirect(BaseBandResponse): 93 | location: str 94 | statusCode: int 95 | data: Dict 96 | type__: str = RESP_REDIRECT 97 | 98 | def __init__(self, location, data={}, statusCode=302): 99 | super().__init__(data) 100 | self.location = location 101 | self.statusCode = statusCode 102 | 103 | def status_code(self): 104 | return self.statusCode 105 | 106 | def __str__(self): 107 | return f'Redirect: {self.location}' 108 | 109 | def _asdict(self): 110 | return { 111 | 'type__': self.type__, 112 | 'location': self.location, 113 | 'statusCode': self.statusCode, 114 | 'data': self 115 | } 116 | 117 | class BandResponcePixel(BaseBandResponse): 118 | 119 | data: Dict = {} 120 | type__: str = RESP_PIXEL 121 | 122 | def __init__(self, data={}): 123 | super().__init__(data) 124 | 125 | def __str__(self): 126 | return 'Pixel' 127 | 128 | def _asdict(self): 129 | return { 130 | 'type__': self.type__, 131 | 'data': self 132 | } 133 | 134 | 135 | MAP = { 136 | RESP_DATA: BandResponceData, 137 | RESP_PIXEL: BandResponcePixel, 138 | RESP_REDIRECT: BandResponceRedirect, 139 | RESP_ERROR: BandResponceError 140 | } 141 | 142 | 143 | def create_response(data): 144 | if isinstance(data, dict): 145 | type__ = data.pop('type__', None) 146 | if type__ and (type__ in MAP): 147 | # for data return only data 148 | if type__ == RESP_DATA: 149 | return data.get('data') 150 | # other typed responses 151 | return MAP[type__](**data) 152 | return data 153 | 154 | 155 | def error(errorMessage="", statusCode=500, data={}): 156 | return BandResponceError(errorMessage, statusCode=statusCode, data=data) 157 | 158 | 159 | def data(data, statusCode=200): 160 | return BandResponceData(data, statusCode=statusCode) 161 | 162 | 163 | def redirect(location, statusCode=302, data={}): 164 | return BandResponceRedirect(location, statusCode=statusCode, data=data) 165 | 166 | 167 | def pixel(data={}): 168 | return BandResponcePixel(data=data) 169 | 170 | 171 | __all__ = [ 172 | 'error', 'data', 'redirect', 'pixel', 'create_response', 173 | 'BandResponceData', 'BandResponceError', 'BandResponcePixel', 174 | 'BandResponceRedirect', 'BaseBandResponse' 175 | ] 176 | -------------------------------------------------------------------------------- /band/lib/scheduler.py: -------------------------------------------------------------------------------- 1 | import aiojobs 2 | import inspect 3 | from .. 
import logger 4 | 5 | 6 | class Scheduler: 7 | def __init__(self, **kwargs): 8 | self.scheduler = None 9 | self.jobs_limit = kwargs.get('jobs_limit', 200) 10 | 11 | async def startup(self): 12 | self.scheduler = await aiojobs.create_scheduler(exception_handler=None, limit=self.jobs_limit) 13 | 14 | async def shutdown(self): 15 | await self.scheduler.close() 16 | 17 | async def spawn(self, coro): 18 | return await self.scheduler.spawn(coro) 19 | 20 | async def spawn_tasks(self, tasks): 21 | for task in tasks: 22 | try: 23 | logger.debug(f'Executing worker {task.__name__}') 24 | if inspect.iscoroutinefunction(task) == True: 25 | task = task() 26 | await self.scheduler.spawn(task) 27 | except Exception: 28 | logger.exception('exc') 29 | -------------------------------------------------------------------------------- /band/lib/structs.py: -------------------------------------------------------------------------------- 1 | from collections import deque, defaultdict, UserDict, namedtuple 2 | from typing import Dict, NamedTuple 3 | 4 | 5 | class DotDict(dict): 6 | def __getattr__(self, attr): 7 | attr = self[attr] 8 | return DotDict(attr) if type(attr) == dict else attr 9 | 10 | def __setattr__(self, attr, value): 11 | self[attr] = value 12 | 13 | class MethodRegistration(namedtuple('MethodRegistration', 'method role options')): 14 | __slots__ = () 15 | method: str 16 | role: str 17 | options: Dict 18 | -------------------------------------------------------------------------------- /band/log.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | import structlog 4 | from .lib.helpers import env_is_true 5 | from .lib.json import json_def, json_dumps 6 | from pythonjsonlogger import jsonlogger 7 | 8 | 9 | logging.basicConfig(level=logging.DEBUG, format="%(message)s") 10 | logHandler = logging.StreamHandler(sys.stdout) 11 | logHandler.setFormatter(jsonlogger.JsonFormatter()) 12 | 13 | 14 | processors = [ 15 | structlog.stdlib.add_logger_name, 16 | structlog.stdlib.add_log_level, 17 | # structlog.stdlib.PositionalArgumentsFormatter(), 18 | structlog.processors.StackInfoRenderer(), 19 | structlog.processors.format_exc_info, 20 | # structlog.processors.UnicodeDecoder(), 21 | # structlog.stdlib.render_to_log_kwargs, 22 | structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M.%S", utc=False), 23 | ] 24 | 25 | 26 | if env_is_true('JSON_LOGS'): 27 | processors.append(structlog.processors.JSONRenderer(serializer=json_dumps)) 28 | pass 29 | 30 | else: 31 | processors.append(structlog.dev.ConsoleRenderer()) 32 | 33 | 34 | structlog.configure( 35 | processors=processors, 36 | context_class=dict, 37 | logger_factory=structlog.stdlib.LoggerFactory(), 38 | wrapper_class=structlog.stdlib.BoundLogger, 39 | # cache_logger_on_first_use=True, 40 | ) 41 | 42 | logger = structlog.get_logger() 43 | 44 | 45 | x__all__ = ['logger'] 46 | -------------------------------------------------------------------------------- /band/promote.py: -------------------------------------------------------------------------------- 1 | from asyncio import sleep 2 | from time import time 3 | from datetime import timedelta 4 | from . 
import (settings, dome, expose, worker, logger, 5 | rpc, DIRECTOR_SERVICE, NOTIFY_ALIVE, response) 6 | 7 | START_AT = round(time() * 1000) 8 | 9 | 10 | @worker() 11 | async def promote(): 12 | logger.info('starting announcing service') 13 | while True: 14 | # Initial delay 15 | await sleep(1) 16 | try: 17 | await rpc.notify( 18 | DIRECTOR_SERVICE, NOTIFY_ALIVE, name=settings.name) 19 | except Exception: 20 | logger.exception('announce error') 21 | # Notify every 22 | await sleep(5) 23 | 24 | 25 | @expose() 26 | async def __status(**params): 27 | """ 28 | Service status 29 | """ 30 | ms_diff = round(time() * 1000 - START_AT) 31 | return response.data({ 32 | 'name': settings.name, 33 | 'app_started': START_AT, 34 | 'app_uptime': ms_diff, 35 | 'app_state': 'running', 36 | 'register': [*dome.methods.dicts] 37 | }) 38 | -------------------------------------------------------------------------------- /band/registry.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | from prodict import Prodict as pdict 3 | from collections import MutableMapping 4 | 5 | from .lib.http import add_http_handler 6 | from .log import logger 7 | from .lib.helpers import without_none 8 | from .constants import ENRICHER, HANDLER, LISTENER 9 | from .rpc.server import AsyncRPCMethods 10 | from .sync_runner import blocking 11 | 12 | class Expose: 13 | def __init__(self, dome): 14 | self._dome = dome 15 | 16 | def __call__(self, *args, **kwargs): 17 | def wrapper(handler): 18 | 19 | self._dome.expose_method(handler, role=None, *args, **kwargs) 20 | return handler 21 | return wrapper 22 | 23 | def handler(self, name=None, path=None, alias=None, timeout=None): 24 | """ 25 | Expose function and promote function as request handler to front service. 26 | name: 27 | path: "/hello/:name" For services exposed by HTTP you can configure path with with params 28 | alias: other_service If needed possible to promote service by different name. 
29 | Affected only service name in front service 30 | timeout: custom response wait timeout (ms) 31 | """ 32 | 33 | def wrapper(handler): 34 | self._dome.expose_method( 35 | handler, name=name, path=path, role=HANDLER, alias=alias, timeout=timeout) 36 | return handler 37 | return wrapper 38 | 39 | def enricher(self, props: dict, keys: list, alias=None): 40 | """ 41 | Expose function and promote function as request enricher to front service 42 | props: list contains requests props in dot notation like ["sess.type"] 43 | keys: list of requested dispatching keys 44 | timeout: custom response wait timeout 45 | """ 46 | def wrapper(handler): 47 | self._dome.expose_method( 48 | handler, props={**props}, keys=[*keys], alias=alias, role=ENRICHER) 49 | return handler 50 | return wrapper 51 | 52 | def listener(self): 53 | """ 54 | Expose function and promote functions as request listener to front service 55 | Will receive all requests at final stage 56 | """ 57 | def wrapper(handler): 58 | self._dome.expose_method(handler, role=LISTENER) 59 | return handler 60 | return wrapper 61 | 62 | 63 | class Dome(MutableMapping): 64 | 65 | __instance = None 66 | 67 | @staticmethod 68 | def instance(): 69 | if Dome.__instance == None: 70 | Dome.__instance = Dome() 71 | return Dome.__instance 72 | 73 | def __init__(self): 74 | self._state = dict() 75 | self._executor = None 76 | self._startup = list() 77 | self._shutdown = list() 78 | self._routes = [] 79 | self._methods = AsyncRPCMethods() 80 | self._expose: Expose = Expose(self) 81 | 82 | def __getitem__(self, key): 83 | return self._state[key] 84 | 85 | def __setitem__(self, key, value): 86 | self._state[key] = value 87 | 88 | def __delitem__(self, key): 89 | del self._state[key] 90 | 91 | def __len__(self): 92 | return len(self._state) 93 | 94 | def __iter__(self): 95 | return iter(self._state) 96 | 97 | def expose_method(self, 98 | handler, 99 | role, 100 | name=None, 101 | path=None, 102 | keys: List=None, 103 | props: Dict=None, 104 | timeout=None, 105 | alias=None, 106 | **kwargs): 107 | name = name or handler.__name__ 108 | path = path or f'/{name}' 109 | 110 | if role == ENRICHER and not keys: 111 | raise ValueError('Keys property must be present') 112 | 113 | options = without_none(dict( 114 | keys=keys, 115 | props=props, 116 | alias=alias, 117 | timeout=timeout 118 | )) 119 | 120 | self._methods.add_method( 121 | handler, name=name, role=role, options=options) 122 | 123 | self._routes += add_http_handler(handler, path) 124 | 125 | @property 126 | def exposeour(self) -> Expose: 127 | return self._expose 128 | 129 | def expose(self, *args, **kwargs): 130 | """ 131 | Deprecated method "expose". Will be removed soon! 132 | """ 133 | logger.warn('Deprecated method "expose". 
Will be removed soon!') 134 | return self._expose(*args, **kwargs) 135 | 136 | def add_startup(self, task): 137 | self._startup.append(task) 138 | 139 | def add_shutdown(self, task): 140 | self._shutdown.append(task) 141 | 142 | async def on_startup(self, app): 143 | logger.info('Starting scheduler') 144 | await self['scheduler'].startup() 145 | logger.info('Starting redis RPC') 146 | await self['scheduler'].spawn(self['rpc'].writer()) 147 | await self['scheduler'].spawn(self['rpc'].reader()) 148 | logger.info('Executing startup handlers') 149 | await self['scheduler'].spawn_tasks(self._startup) 150 | 151 | async def on_shutdown(self, app): 152 | logger.info('Executing shutdown handlers') 153 | await self['scheduler'].spawn_tasks(self._shutdown) 154 | logger.info('Stopping scheduler') 155 | await self['scheduler'].shutdown() 156 | 157 | @property 158 | def startup(self): 159 | """ 160 | Deprecated method "startup". Will be removed soon! 161 | """ 162 | logger.warn('Deprecated method "startup". Will be removed soon!') 163 | return self._startup 164 | 165 | @property 166 | def shutdown(self): 167 | """ 168 | Deprecated method "shutdown". Will be removed soon! 169 | """ 170 | logger.warn('Deprecated method "shutdown". Will be removed soon!') 171 | return self._shutdown 172 | 173 | @property 174 | def methods(self): 175 | return self._methods 176 | 177 | @property 178 | def routes(self): 179 | return self._routes 180 | 181 | 182 | 183 | def worker(): 184 | """ 185 | Register function as worker. 186 | Will be executed on application startup 187 | """ 188 | def wrapper(handler): 189 | logger.info(f"Registered worker {handler.__name__}") 190 | Dome.instance().add_startup(handler) 191 | return handler 192 | return wrapper 193 | 194 | 195 | def cleanup(): 196 | """ 197 | Register function as unload handler. 198 | Will be executed on application shutdown 199 | """ 200 | def wrapper(handler): 201 | logger.info(f"Registered cleaner {handler.__name__}") 202 | Dome.instance().add_shutdown(handler) 203 | return handler 204 | return wrapper 205 | 206 | 207 | __all__ = ['Dome', 'Expose', 'worker', 'cleanup', 'blocking'] 208 | -------------------------------------------------------------------------------- /band/rpc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rockstat/band-framework/e6c3bbd5cec9b692185cbd620ed107eba8d312af/band/rpc/__init__.py -------------------------------------------------------------------------------- /band/rpc/rpc_pubsub_redis.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module containt components for JSON2-RPC-like protocol implementation 3 | used to interract with side parts of platform. 4 | """ 5 | 6 | from jsonrpcclient.async_client import AsyncClient 7 | from jsonrpcclient.request import Request, Notification 8 | from jsonrpcserver.response import Response 9 | from async_timeout import timeout 10 | from collections import namedtuple 11 | from typing import Callable, Any 12 | import asyncio 13 | import json 14 | import ujson 15 | import itertools 16 | from ..lib.response import create_response, BaseBandResponse 17 | from ..lib.json import json_def, json_dumps, json_loads 18 | 19 | from .. 
import logger, redis_factory, dome, scheduler, BROADCAST, ENRICH, REQUEST_STATUS 20 | 21 | 22 | 23 | 24 | class MethodCall(namedtuple('MethodCall', ['dest', 'method', 'source'])): 25 | __slots__ = () 26 | 27 | def tos(self): 28 | return self.dest + ':' + self.method + ':' + self.source 29 | 30 | def __repr__(self): 31 | return self.dest + ':' + self.method + ':' + self.source 32 | 33 | @classmethod 34 | def make(cls, method): 35 | return cls._make(method.split(':')) 36 | 37 | 38 | class RedisPubSubRPC(AsyncClient): 39 | """ 40 | Band RPC interface. 41 | Used to interract with other microservices 42 | 43 | Class constants: 44 | RPC_TIMEOUT Default timeout for RPC request (seconds) 45 | """ 46 | 47 | RPC_TIMEOUT = 2 48 | 49 | def __init__(self, name, rpc_params=None, redis_params=None, 50 | **kwargs): 51 | super(RedisPubSubRPC, self).__init__('noop') 52 | self.name = name 53 | self.pending = {} 54 | # TODO: remove redis_params 55 | if redis_params: 56 | logger.warn( 57 | 'Variable redis_params deprecated and will be removed. Use rpc_params instead.' 58 | ) 59 | self.rpc_params = rpc_params or redis_params or {} 60 | self.channels = set([self.name]) 61 | if self.rpc_params.get('listen_all', None) == True: 62 | self.channels.add(BROADCAST) 63 | if self.rpc_params.get('listen_enrich', None) == True: 64 | self.channels.add(ENRICH) 65 | self.queue = asyncio.Queue() 66 | self.id_gen = itertools.count(1) 67 | 68 | 69 | def log_request(self, request, extra=None, fmt=None, trim=False): 70 | pass 71 | 72 | def log_response(self, response, extra=None, fmt=None, trim=False): 73 | pass 74 | 75 | async def dispatch(self, msg): 76 | """ 77 | add handling 78 | {"jsonrpc": "2.0", "error": {"code": -32602, "message": "Invalid params"}, "id": "1"} 79 | """ 80 | # common extension 81 | if 'method' in msg: 82 | mparts = msg['method'].split(':') 83 | if len(mparts) == 3: 84 | msg['to'] = mparts[0] 85 | msg['method'] = mparts[1] 86 | msg['from'] = mparts[2] 87 | # Answer from remotely called method 88 | has_result_key = 'result' in msg 89 | has_error_key = 'error' in msg 90 | if has_result_key or has_error_key : 91 | # logger.debug('received with result', msg=msg) 92 | if 'id' in msg and msg.get('id') in self.pending: 93 | # wrapping into BandResponse object 94 | if has_result_key: 95 | msg['result'] = create_response(msg.get('result')) 96 | self.pending[msg['id']].set_result(msg) 97 | if has_error_key: 98 | logger.error('RPC-ERR', err=msg.get('error')) 99 | # Incoming call to exposed method 100 | elif 'params' in msg: 101 | is_status_request = msg.get('method') == REQUEST_STATUS 102 | # check address structure 103 | if msg.get('to') in self.channels: 104 | response = await dome.methods.dispatch(msg) 105 | # check response is needed 106 | if not response.is_notification: 107 | # Converting to dict 108 | response = {**response, 'from': self.name, 'to': msg.get('from')} 109 | # extracting full band response struct 110 | response_result = response.get('result', None) 111 | if isinstance(response_result, BaseBandResponse): 112 | response['result'] = response_result._asdict() 113 | # if not is_status_request: 114 | # print(response) 115 | await self.put(msg.get('from'), json_dumps(response)) 116 | 117 | async def reader(self): 118 | for chan in self.channels: 119 | await scheduler.spawn(self.chan_reader(chan)) 120 | 121 | async def chan_reader(self, chan): 122 | logger.info('starting reader for channel', chan=chan) 123 | while True: 124 | try: 125 | client = await redis_factory.create_client() 126 | channel, = await 
client.subscribe(chan) 127 | while True: 128 | msg = await channel.get(encoding='utf-8') 129 | if msg is None: 130 | break 131 | msg = json_loads(msg) 132 | # if msg 133 | await scheduler.spawn(self.dispatch(msg)) 134 | 135 | except asyncio.CancelledError: 136 | logger.info('redis_rpc_reader: loop cancelled / call break') 137 | break 138 | except ConnectionRefusedError: 139 | logger.error('Redis connection refused') 140 | except Exception: 141 | logger.exception('reader exception') 142 | finally: 143 | if client and not client.closed: 144 | await client.unsubscribe(chan) 145 | await redis_factory.close_client(client) 146 | await asyncio.sleep(1) 147 | 148 | async def writer(self): 149 | while True: 150 | logger.info('redis_rpc_writer: root loop. creating pool') 151 | try: 152 | pool = await redis_factory.create_pool() 153 | logger.info('redis_rpc_writer: entering loop') 154 | while True: 155 | name, msg = await self.queue.get() 156 | self.queue.task_done() 157 | async with pool.get() as conn: 158 | await conn.execute('publish', name, msg) 159 | except asyncio.CancelledError: 160 | logger.info('redis_rpc_writer: cancelled / break') 161 | break 162 | except ConnectionRefusedError: 163 | logger.error('Redis connection refused') 164 | except Exception: 165 | logger.exception('redis_rpc_writer: unknown') 166 | finally: 167 | logger.info('redis_rpc_writer: finally / closing pool') 168 | if pool and not pool.closed: 169 | await redis_factory.close_pool(pool) 170 | await asyncio.sleep(1) 171 | 172 | async def get(self): 173 | item = await self.queue.get() 174 | self.queue.task_done() 175 | return item 176 | 177 | async def request(self, to, method, timeout__=RPC_TIMEOUT, **params): 178 | """ 179 | Arguments 180 | timeout__ (int,str) Custom timeout 181 | """ 182 | mc = MethodCall(dest=to, method=method, source=self.name) 183 | req_id = str(next(self.id_gen)) 184 | req = Request(mc.tos(), params, request_id=req_id) 185 | return await self.send( 186 | req, request_id=req['id'], timeout__=int(timeout__), to=to) 187 | 188 | async def notify(self, to, method, **params): 189 | mc = MethodCall(dest=to, method=method, source=self.name) 190 | req = Notification(mc.tos(), **params) 191 | return await self.send(req, to=to) 192 | 193 | async def put(self, dest, data): 194 | await self.queue.put(( 195 | dest, 196 | data, 197 | )) 198 | 199 | async def send_message(self, request, timeout__=RPC_TIMEOUT, **kwargs): 200 | to = kwargs['to'] 201 | # Outgoing msg queue 202 | await self.put(to, request.encode()) 203 | # skip waiting for notification 204 | if 'request_id' not in kwargs: 205 | return 206 | 207 | req_id = kwargs['request_id'] 208 | # Waiting for response 209 | try: 210 | req = self.pending[req_id] = asyncio.Future() 211 | # await asyncio.wait_for(self.pending[req_id], timeout=self.timeout) 212 | async with timeout(int(timeout__)) as cm: 213 | await req 214 | except asyncio.TimeoutError: 215 | logger.error( 216 | 'rpc timeout', timeout=timeout__, to=to, req_id=req_id) 217 | except asyncio.CancelledError: 218 | logger.warn('Cancelled') 219 | finally: 220 | del self.pending[req_id] 221 | 222 | # Retunrning result 223 | return None if cm.expired else self.process_response(req.result()) 224 | 225 | 226 | def __getattr__(self, name: str) -> Callable: 227 | """ 228 | This gives us an alternate way to make a request. 229 | >>> rpc.cube(3) 230 | --> {"jsonrpc": "2.0", "method": "cube", "params": [3], "id": 1} 231 | That's the same as saying `client.request("cube", 3)`. 
232 | """ 233 | 234 | def attr_handler(*args: Any, **kwargs: Any) -> Response: 235 | return self.request(name, *args, **kwargs) 236 | 237 | return attr_handler 238 | 239 | 240 | __all__ = ['RedisPubSubRPC'] 241 | -------------------------------------------------------------------------------- /band/rpc/server.py: -------------------------------------------------------------------------------- 1 | from jsonrpcserver.aio import AsyncMethods 2 | from ..lib.structs import MethodRegistration 3 | from ..constants import ROLES 4 | from ..log import logger 5 | import jsonrpcserver 6 | 7 | 8 | jsonrpcserver.config.log_requests = False 9 | jsonrpcserver.config.log_responses = False 10 | jsonrpcserver.config.trim_log_values = True 11 | 12 | 13 | class AsyncRPCMethods(AsyncMethods): 14 | def __init__(self, *args, **kwargs): 15 | super().__init__(*args, **kwargs) 16 | self._roles = dict() 17 | 18 | def add_method(self, handler, options={}, *args, **kwargs): 19 | method_name = kwargs.pop('name', handler.__name__) 20 | role = kwargs.pop('role', None) 21 | 22 | self._roles[method_name] = MethodRegistration( 23 | method=method_name, role=role, options=options) 24 | self[method_name] = handler 25 | 26 | @property 27 | def dicts(self): 28 | """ 29 | Generator for registrations as dicts 30 | """ 31 | for mc in self._roles.values(): 32 | if mc.role in ROLES and not mc.method.startswith('__'): 33 | yield dict(mc._asdict()) 34 | 35 | def __contains__(self, key): 36 | return key in self._items -------------------------------------------------------------------------------- /band/server.py: -------------------------------------------------------------------------------- 1 | from aiohttp import web 2 | import uvloop 3 | import asyncio 4 | 5 | from .log import logger 6 | from .lib.http import naive_cors_middleware, error_middleware 7 | from . import dome, loop 8 | 9 | __all__ = ['add_routes', 'start_server', 'app'] 10 | 11 | 12 | # loop = uvloop.new_event_loop() 13 | # loop.set_exception_handler(loop_exc) 14 | # asyncio.set_event_loop(loop) 15 | 16 | app = web.Application( 17 | logger=logger, debug=False, middlewares=[naive_cors_middleware, error_middleware]) 18 | 19 | 20 | def add_routes(routes): 21 | logger.debug('Attaching routes') 22 | app.router.add_routes(routes) 23 | 24 | 25 | def start_server(listen, name, **kwargs): 26 | host, port = listen.split(':') 27 | add_routes(dome.routes) 28 | 29 | web.run_app(app, host=host, port=port, handle_signals=True, print=None, access_log=None) 30 | -------------------------------------------------------------------------------- /band/sync_runner.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | import asyncio 3 | import concurrent.futures 4 | from . 
import loop, logger 5 | 6 | 7 | def blocking(params=None): 8 | """ 9 | Mark a function for execution in a thread pool 10 | """ 11 | 12 | def wrapper(handler): 13 | logger.info(f"Wrap block {handler.__name__}") 14 | async def caller(*args, **kwargs): 15 | wrapped = partial(handler, *args, **kwargs) 16 | logger.info('Executing blocking task in a thread pool') 17 | with concurrent.futures.ThreadPoolExecutor() as pool: 18 | result = await loop.run_in_executor(pool, wrapped) 19 | return result 20 | 21 | return caller 22 | 23 | return wrapper 24 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | PERCENT := % 2 | DEL := / 3 | 4 | bump-patch: 5 | bumpversion patch 6 | 7 | bump-minor: 8 | bumpversion minor 9 | 10 | build_image: 11 | docker build -t band-base-py . 12 | docker tag band-base-py rockstat/band-base-py:dev 13 | docker tag band-base-py rockstat/band-base-py:latest 14 | 15 | push_image_dev: 16 | docker push rockstat/band-base-py:dev 17 | 18 | to_master: 19 | sh -c 'git checkout master && git merge dev && git push origin master && git checkout dev' 20 | 21 | travis-trigger: 22 | curl -vv -s -X POST \ 23 | -H "Content-Type: application/json" \ 24 | -H "Accept: application/json" \ 25 | -H "Travis-API-Version: 3" \ 26 | -H "Authorization: token $$TRAVIS_TOKEN" \ 27 | -d '{ "request": { "branch":"$(br)" }}' \ 28 | https://api.travis-ci.com/repo/$(subst $(DEL),$(PERCENT)2F,$(repo))/requests 29 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | __VERSION__ = '0.20.6' 2 | 3 | from setuptools import setup, find_packages 4 | 5 | setup( 6 | name='band', 7 | version='0.20.6', 8 | author='Dmitry Rodin', 9 | author_email='madiedinro@gmail.com', 10 | license='MIT', 11 | description='Python microservices for the Rockstat analytics platform', 12 | long_description=""" 13 | About 14 | --- 15 | The orchestration module starts services in docker containers, examines them and sends their configuration to the front service. 16 | Includes a microservice framework for easily developing simple services and exposing them over https through the front service. 
17 | More at project documentation 18 | """, 19 | packages=find_packages(exclude=['contrib', 'docs', 'tests']), 20 | url='https://github.com/rockstat/band-framework', 21 | include_package_data=True, 22 | install_requires=[ 23 | 'pyyaml>=4.2b1', 'inflection', 'jinja2', 'python-dotenv', 24 | 'structlog', 'colorama', 'python-json-logger', 'coloredlogs', 25 | 'cryptography', 'base58', 'xxhash', 26 | 'asyncio', 'uvloop', 'async_lru', 'aioconsole', 27 | 'aiohttp<4', 'aioredis', 'aiojobs', 'aiocache', 28 | 'aiofiles', 'aiocron>=1.3,<2', 'yarl', 29 | 'simplech>=0.16', 30 | 'jsonrpcserver==3.5.6', 'jsonrpcclient==2.6.0', 31 | 'requests', # for jsonrpc client 32 | 'prodict', 'pydantic', 'ujson', 'arrow' 33 | ], 34 | zip_safe=False, 35 | classifiers=[ 36 | 'Development Status :: 3 - Alpha', 37 | 'License :: OSI Approved :: MIT License', 38 | 'Programming Language :: Python :: 3.5', 39 | 'Programming Language :: Python :: 3.6', 40 | ], 41 | project_urls={ # Optional 42 | 'Homepage': 'https://rock.st', 43 | 'Docs': 'https://rock.st/docs' 44 | }) 45 | -------------------------------------------------------------------------------- /tests/test_response.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from band.lib.json import json_def, json_loads, json_dumps 4 | 5 | from band.lib import response 6 | from band.lib.response import BandResponceData, BandResponceError, BandResponcePixel, BandResponceRedirect, create_response 7 | 8 | 9 | def test_response(): 10 | with pytest.raises(TypeError): 11 | BandResponceData() 12 | 13 | 14 | def test_response_hash(): 15 | 16 | pix_res = BandResponcePixel() 17 | assert str(pix_res) == "Pixel" 18 | 19 | def test_response_json(): 20 | 21 | pix_res = BandResponcePixel() 22 | assert pix_res.to_json() == b'{"type__":"pixel","data":{}}' 23 | 24 | err = BandResponceError('Wrong way') 25 | assert err.to_json() == b'{"type__":"error","statusCode":500,"errorMessage":"Wrong way","data":{}}' 26 | 27 | 28 | def test_create_response(): 29 | 30 | pix = '{"type__":"pixel","data":{"mydata":"123"}}' 31 | resp = create_response(json_loads(pix)) 32 | 33 | assert not isinstance(resp, BandResponceData) 34 | assert resp.mydata == '123' 35 | 36 | err = BandResponceError('Wrong way') 37 | assert err.to_json() == b'{"type__":"error","statusCode":500,"errorMessage":"Wrong way","data":{}}' 38 | 39 | err_restored = create_response(json_loads(err.to_json())) 40 | assert isinstance(err_restored, BandResponceError) 41 | # array 42 | 43 | resp = [1,2,4] 44 | resp = create_response(resp) 45 | 46 | assert resp == [1,2,4] 47 | 48 | # raw 49 | resp = '' 50 | resp = create_response(resp) 51 | 52 | assert resp == '' 53 | 54 | # none 55 | resp = None 56 | resp = create_response(resp) 57 | 58 | assert resp == None 59 | 60 | 61 | 62 | def test_response_attrs(): 63 | response = BandResponceData({'mydata': '123'}) 64 | 65 | assert response.mydata == '123' 66 | assert response.type__ == 'data' 67 | assert response['mydata'] == '123' 68 | with pytest.raises(KeyError): 69 | type__ = response['type__'] 70 | 71 | 72 | def test_response_restore(): 73 | err = BandResponceError('Wrong way') 74 | 75 | assert err.error_message == 'Wrong way' 76 | assert isinstance(err, BandResponceError) 77 | 78 | js = err.to_json() 79 | err = create_response(json_loads(js)) 80 | 81 | assert err.error_message == 'Wrong way' 82 | assert isinstance(err, BandResponceError) 83 | 84 | -------------------------------------------------------------------------------- 
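To illustrate how the response helpers exercised in tests/test_response.py are meant to be used from service code, here is a minimal sketch. It is illustrative only and relies solely on the calls shown in the tests above (BandResponceError, BandResponcePixel, create_response, json_loads); the variable names are made up.

from band.lib.json import json_loads
from band.lib.response import BandResponceError, BandResponcePixel, create_response

# Build an error envelope, serialize it for the wire, then restore it
err = BandResponceError('Wrong way')
wire = err.to_json()  # b'{"type__":"error","statusCode":500,...}'
restored = create_response(json_loads(wire))
assert restored.error_message == 'Wrong way'

# A pixel response carries no payload and serializes to a fixed envelope
pix = BandResponcePixel()
assert pix.to_json() == b'{"type__":"pixel","data":{}}'
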
/tests/test_wrap_sync.py: -------------------------------------------------------------------------------- 1 | import json 2 | import ujson 3 | import os 4 | import pytest 5 | from time import sleep 6 | from itertools import count 7 | 8 | from band import blocking, loop, logger 9 | import asyncio 10 | 11 | 12 | @blocking() 13 | def blocking_func(seconds): 14 | logger.info(f'starting sleep {seconds}') 15 | sleep(seconds) 16 | logger.info(f'end of sleep {seconds}') 17 | return seconds 18 | 19 | 20 | async def nonblock_func(seconds): 21 | logger.info(f'starting nonblock sleep {seconds}') 22 | for c in count(): 23 | if c == seconds/0.05: 24 | return c 25 | await asyncio.sleep(0.045) 26 | logger.info('puk ', c=c) 27 | 28 | 29 | def test_wrap_sync(): 30 | f = asyncio.gather(nonblock_func(1), blocking_func(1)) 31 | res = loop.run_until_complete(f) 32 | assert res == [20, 1] 33 | 34 | 35 | if __name__ == '__main__': 36 | test_wrap_sync() 37 | 38 | 39 | --------------------------------------------------------------------------------
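As a follow-up to band/sync_runner.py and the test above, this minimal sketch shows the intended use of the @blocking decorator: the decorated function runs inside a ThreadPoolExecutor while the event loop stays free for other coroutines, and awaiting it yields the original return value. Only the imports already used in tests/test_wrap_sync.py are assumed; the function name slow_io and the sleep duration are made up for illustration.

from time import sleep
from band import blocking, loop, logger

@blocking()
def slow_io(seconds):
    # Runs in a worker thread, so the event loop is not stalled
    sleep(seconds)
    return seconds

async def main():
    # The decorated function is awaitable and returns the wrapped result
    result = await slow_io(1)
    logger.info('slow_io finished', result=result)

loop.run_until_complete(main())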