├── .codeclimate.yml ├── .dockerignore ├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .gitlab-ci.yml ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── config └── polyswarmd.yml ├── docker ├── Dockerfile ├── Dockerfile.release ├── docker-compose.yml └── release │ ├── polyswarmd.yml │ └── site-packages.patch ├── requirements.dev.txt ├── requirements.txt ├── scripts ├── README.md ├── launch_ipfs_container.sh ├── start_testrpc.sh ├── test.sh ├── test_curl.sh ├── test_file.sh └── test_ws.sh ├── setup.cfg ├── setup.py ├── src └── polyswarmd │ ├── __init__.py │ ├── __main__.py │ ├── app.py │ ├── config │ ├── __init__.py │ ├── contract.py │ ├── polyswarmd.py │ ├── service.py │ └── status.py │ ├── exceptions.py │ ├── monkey.py │ ├── services │ ├── __init__.py │ ├── artifact │ │ ├── __init__.py │ │ ├── exceptions.py │ │ ├── ipfs.py │ │ └── service.py │ ├── auth.py │ ├── consul.py │ └── ethereum │ │ ├── __init__.py │ │ ├── rpc.py │ │ └── service.py │ ├── utils │ ├── __init__.py │ ├── bloom.py │ ├── decorators │ │ ├── __init__.py │ │ └── chains.py │ ├── logger.py │ ├── profiler.py │ ├── response.py │ └── utils.py │ ├── views │ ├── __init__.py │ ├── artifacts.py │ ├── balances.py │ ├── bounties.py │ ├── eth.py │ ├── event_message.py │ ├── offers.py │ ├── relay.py │ └── staking.py │ ├── websockets │ ├── __init__.py │ ├── conftest.py │ ├── filter.py │ ├── json_schema.py │ ├── message_types.py │ ├── messages.py │ ├── scripts │ │ └── gen_stubs.py │ └── serve.py │ └── wsgi.py └── tests ├── __init__.py ├── conftest.py ├── fixtures └── config │ ├── chain │ ├── ArbiterStaking.json │ ├── BountyRegistry.json │ ├── ERC20Relay.json │ ├── NectarToken.json │ ├── OfferMultiSig.json │ ├── OfferRegistry.json │ ├── config.json │ ├── homechain.json │ └── sidechain.json │ └── polyswarmd │ └── polyswarmd.yml ├── test_balances.py ├── test_bloom.py ├── test_bounties.py ├── test_eth.py ├── test_event_message.py ├── test_offers.py ├── test_relay.py ├── test_status.py ├── test_suite_internals.py ├── test_utils.py └── utils.py /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | checks: 3 | complex-logic: 4 | enabled: false 5 | method-complexity: 6 | enabled: false 7 | file-lines: 8 | enabled: false 9 | engines: 10 | pep8: 11 | enabled: true 12 | checks: 13 | E266: # Too many leading '#' in comment 14 | enabled: false 15 | E402: # module imports not at top of file 16 | enabled: false 17 | E501: # line too long 18 | enabled: false 19 | E302: # blank line expectations 20 | enabled: false 21 | E261: # at least two spaces before inline comment 22 | enabled: false 23 | E262: # inline comment should start with a # 24 | enabled: false 25 | E251: # unexpected spaces around keyword / parameter equals 26 | enabled: false 27 | bandit: # security issues 28 | enabled: true 29 | sonar-python: # security issues 30 | enabled: true 31 | # radon: # cyclomatic complexity 32 | # enabled: true 33 | # fixme: # highlight FIXME, TODO, HACK, etc omments 34 | # enabled: true 35 | # git-legal: # discount lawyering 36 | # enabled: true 37 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | venv/ 4 | __pycache__/ 5 | .env 6 | .pytest_cache 7 | .tox 8 | build/ 9 | config/ 10 | !config/polyswarmd.yml 11 | !config/polyswarmd.docker.yml 12 | dist/ 13 | deb_dist/ 14 | polyswarmd.spec 15 | 
polyswarmd.egg-info/ 16 | truffle/ 17 | !truffle/build/ 18 | .idea/ -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | **Describe the bug** 8 | A clear and concise description of what the bug is. 9 | 10 | **To Reproduce** 11 | Steps to reproduce the behavior: 12 | 1. Go to '...' 13 | 2. Click on '....' 14 | 3. Scroll down to '....' 15 | 4. See error 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Additional context** 21 | Add any other context about the problem here. 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | **Is your feature request related to a problem? Please describe.** 8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 9 | 10 | **Describe the solution you'd like** 11 | A clear and concise description of what you want to happen. 12 | 13 | **Describe alternatives you've considered** 14 | A clear and concise description of any alternative solutions or features you've considered. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # polyswarmd config 2 | /config/ 3 | !/config/polyswarmd.yml 4 | !/config/polyswarmd.docker.yml 5 | polyswarmd.spec 6 | docker/contracts 7 | docker/polyswarmd.yml 8 | 9 | # other polyswarm 10 | truffle/ 11 | polyswarm-relay/ 12 | node_modules/ 13 | 14 | # stdeb 15 | deb_dist/ 16 | 17 | # Byte-compiled / optimized / DLL files 18 | __pycache__/ 19 | *.py[cod] 20 | *$py.class 21 | 22 | # Distribution / packaging 23 | .Python 24 | env/ 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib/ 32 | lib64/ 33 | parts/ 34 | sdist/ 35 | var/ 36 | wheels/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # pyenv 62 | .python-version 63 | 64 | # dotenv 65 | .env 66 | 67 | # virtualenv 68 | .venv 69 | venv/ 70 | .env/ 71 | env/ 72 | ENV/ 73 | 74 | # mypy 75 | .mypy_cache/ 76 | 77 | # editor detritus 78 | .vscode/ 79 | .idea -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: $REPO_URL/stage 2 | 3 | services: 4 | - docker:dind 5 | 6 | stages: 7 | - build 8 | - test 9 | - e2e 10 | - release 11 | - deployment 12 | 13 | variables: 14 | DOCKER_HOST: tcp://localhost:2375/ 15 | BASE_IMAGE_NAME: polyswarmd 16 | 17 | before_script: 18 | - apk --no-cache add curl jq 19 | 20 | 
############################################################### 21 | # Build Stage (jobs inside a stage run in parallel) 22 | ############################################################### 23 | 24 | build: 25 | stage: build 26 | tags: 27 | - kube 28 | script: 29 | # try to download a cache image 30 | - docker pull $REPO_URL/$BASE_IMAGE_NAME:latest || true 31 | # explicitly pull the latest version of the dependant image 32 | - docker pull pypy:3-7-stretch 33 | - docker build 34 | -f docker/Dockerfile 35 | -t $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA 36 | -t $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_REF_SLUG 37 | --cache-from=$REPO_URL/$BASE_IMAGE_NAME:latest 38 | . 39 | - docker push $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA 40 | - docker push $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_REF_SLUG 41 | 42 | ############################################################### 43 | # Test Stage 44 | ############################################################### 45 | 46 | test: 47 | stage: test 48 | tags: 49 | - kube 50 | before_script: 51 | - docker pull $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA 52 | script: 53 | - docker run $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA make ci-test 54 | 55 | ############################################################### 56 | # End-to-end Stage 57 | ############################################################### 58 | 59 | e2e: 60 | stage: e2e 61 | tags: 62 | - kube 63 | script: 64 | - pip install $END_TO_END_LIB@$CI_COMMIT_REF_NAME || pip install $END_TO_END_LIB 65 | - e2e init 66 | - e2e run 67 | 68 | ############################################################### 69 | # Release Stage 70 | ############################################################### 71 | 72 | release-latest: 73 | stage: release 74 | tags: 75 | - kube 76 | only: 77 | - master 78 | script: 79 | # Gets the current image that was built in the CI for this commit 80 | - docker pull $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA 81 | # Creates new tags for this image, one that should go to AWS and another to DockerHub with the tag "latest" 82 | - docker tag $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA $REPO_URL/$BASE_IMAGE_NAME:latest 83 | - docker tag $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA polyswarm/$BASE_IMAGE_NAME:latest 84 | # Pushes to AWS 85 | - docker push $REPO_URL/$BASE_IMAGE_NAME:latest 86 | # Pushes to DockerHub 87 | - docker logout 88 | - docker login -u $CI_CUSTOM_DOCKER_HUB_USERNAME -p $CI_CUSTOM_DOCKER_HUB_PASSWORD 89 | - docker push polyswarm/$BASE_IMAGE_NAME:latest 90 | 91 | release-tag: 92 | stage: release 93 | tags: 94 | - kube 95 | only: 96 | - tags 97 | script: 98 | # Gets the current image that was built in the CI for this commit 99 | - docker pull $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA 100 | # Creates new tags for this image, one that should go to AWS and another to DockerHub with the tag from git 101 | - docker tag $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA $REPO_URL/$BASE_IMAGE_NAME:$(git describe --tags --abbrev=0) 102 | - docker tag $REPO_URL/$BASE_IMAGE_NAME:$CI_COMMIT_SHA polyswarm/$BASE_IMAGE_NAME:$(git describe --tags --abbrev=0) 103 | # Pushes to AWS 104 | - docker push $REPO_URL/$BASE_IMAGE_NAME:$(git describe --tags --abbrev=0) 105 | # Pushes to DockerHub 106 | - docker logout 107 | - docker login -u $CI_CUSTOM_DOCKER_HUB_USERNAME -p $CI_CUSTOM_DOCKER_HUB_PASSWORD 108 | - docker push polyswarm/$BASE_IMAGE_NAME:$(git describe --tags --abbrev=0) 109 | 110 | 111 | ############################################################### 112 | # Deployment Stage 113 | 
############################################################### 114 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2018 Swarm Technologies, Inc. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst requirements.txt 2 | recursive-include config 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean clean-test clean-pyc clean-build help 2 | .DEFAULT_GOAL := help 3 | SRCROOT := src/polyswarmd 4 | TESTSRCROOT := tests/ 5 | 6 | define PRINT_HELP_PYSCRIPT 7 | import re, sys 8 | for line in sys.stdin: 9 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 10 | if match: 11 | target, help = match.groups() 12 | print("%-20s %s" % (target, help)) 13 | endef 14 | export PRINT_HELP_PYSCRIPT 15 | 16 | help: 17 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 18 | 19 | lint: mypy ## check style 20 | # static checks 21 | -flake8 $(SRCROOT) 22 | # style checks 23 | -yapf -p -r -d $(SRCROOT) 24 | # order import 25 | -isort --recursive --diff $(SRCROOT) 26 | # verify that requirements.txt is ordered 27 | sort -u -c requirements.txt && sort -u -c requirements.dev.txt 28 | 29 | mypy: ## check types 30 | mypy 31 | 32 | format: format-requirements format-tests ## format code in Polyswarm style 33 | yapf -p -r -i $(SRCROOT) 34 | isort --recursive $(SRCROOT) 35 | 36 | format-tests: ## format test code in Polyswarm style 37 | yapf -p -r -i --exclude tests/test_suite_internals.py $(TESTSRCROOT) 38 | isort --recursive $(TESTSRCROOT) 39 | 40 | format-requirements: ## sort requirements.txt 41 | sort -u requirements.txt -o requirements.txt 42 | sort -u requirements.dev.txt -o requirements.dev.txt 43 | 44 | msgstubs: ## generate websocket event definition type stubs 45 | (cd $(SRCROOT) && python -m websockets.scripts.gen_stubs | yapf) 46 | 47 | quicktest: ## run tests 48 | py.test -k "not test_SLOW" 49 | 50 | test: ## run tests, including slow ones 51 | py.test 52 | 53 | clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts 54 | 55 | clean-build: ## remove build artifacts 56 | rm -fr build/ 57 | rm -fr dist/ 58 | rm -fr 
.eggs/ 59 | 60 | find . \( -path ./env -o -path ./venv -o -path ./.env -o -path ./.venv \) -prune -o -name '*.egg-info' -exec rm -fr {} + 61 | find . \( -path ./env -o -path ./venv -o -path ./.env -o -path ./.venv \) -prune -o -name '*.egg' -exec rm -f {} + 62 | 63 | clean-pyc: ## remove Python file artifacts 64 | find . -name '*.pyc' -exec rm -f {} + 65 | find . -name '*.pyo' -exec rm -f {} + 66 | find . -name '*~' -exec rm -f {} + 67 | find . -name '__pycache__' -exec rm -fr {} + 68 | 69 | clean-test: ## remove test and coverage artifacts 70 | rm -f .coverage 71 | rm -fr htmlcov/ 72 | rm -fr .pytest_cache 73 | 74 | coverage: ## check code coverage 75 | coverage run --source $(SRCROOT) -m pytest --doctest-modules 76 | coverage report -m 77 | coverage html 78 | google-chrome htmlcov/index.html 79 | 80 | ci-test: dev-dependencies test ## install dev-dependencies and test 81 | 82 | dev-dependencies: # Install developer requirements 83 | bash -c "pip install -r <(cat requirements.dev.txt | grep -v 'mypy')" 84 | pip install -e . 85 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # polyswarmd - The PolySwarm Daemon 2 | 3 | *API under development and subject to change* 4 | 5 | ## Introduction 6 | 7 | `polyswarmd` is a convenience daemon that provides a simple RPC API for interacting with the PolySwarm marketplace. Specifically, `polyswarmd` handles interaction with Ethereum and IPFS nodes on clients' behalf. 8 | 9 | 10 | ## Usage 11 | 12 | New developers are encouraged to visit the [PolySwarm Documentation](https://docs.polyswarm.io) and build on top of [`polyswarm-client`](https://github.com/polyswarm/polyswarm-client) rather than directly writing code against the comparatively low-level `polyswarmd`. 13 | 14 | ### `make` 15 | 16 | The project `Makefile` provides a selection of useful rules covering routine tasks: 17 | 18 | - `make test` - Run all `pytest` unittests & doctests 19 | + `make quicktest` - Run all "NOT SLOW" tests (`polyswarmd` has some 20 | slow-running tests that verify certain WebSocket behavior; 21 | this rule skips those) 22 | + `make coverage` - Print a test coverage report 23 | - `make lint` - Lint the source directory with 24 | [yapf](https://github.com/google/yapf), 25 | [isort](https://github.com/timothycrosley/isort), 26 | [mypy](https://mypy.readthedocs.io/en/latest/) and 27 | [flake8](http://flake8.pycqa.org/en/latest/), as well as running 28 | doctests for `polyswarmd.websockets` & verifying `requirements*.txt` 29 | files are sorted. 30 | + `make mypy` - Run [mypy](https://mypy.readthedocs.io/en/stable/) type 31 | checking 32 | - `make format` - Format source code in Polyswarm style 33 | + `make format-tests` - Format test code 34 | + `make format-requirements` - Sort `requirements*.txt` 35 | - `make clean` - Clean `build/`, `*.pyc` and more 36 | - `make msgstubs` - Regenerate the dynamically generated type-stubs for the 37 | websocket messages in `polyswarmd.websockets.message`. 
38 | - `make help` - Print available rules 39 | 40 | ## Example Config 41 | 42 | ```yaml 43 | artifact: 44 | max_size: 34603008 45 | fallback_max_size: 10485760 46 | limit: 256 47 | library: 48 | module: polyswarmd.services.artifact.ipfs 49 | class_name: IpfsServiceClient 50 | args: 51 | - http://localhost:5001 52 | community: gamma 53 | eth: 54 | trace_transactions: true 55 | consul: 56 | uri: http://localhost:8500 57 | # directory: /path/to/config 58 | profiler: 59 | enabled: false 60 | # db_uri: http://db:1234 61 | redis: 62 | uri: redis://localhost:6379 63 | websocket: 64 | enabled: true 65 | ``` 66 | 67 | -------------------------------------------------------------------------------- /config/polyswarmd.yml: -------------------------------------------------------------------------------- 1 | artifact: 2 | max_size: 34603008 3 | fallback_max_size: 10485760 4 | limit: 256 5 | library: 6 | module: polyswarmd.services.artifact.ipfs 7 | class_name: IpfsServiceClient 8 | community: gamma 9 | eth: 10 | trace_transactions: true 11 | consul: 12 | uri: http://consul:8500 13 | profiler: 14 | enabled: false 15 | redis: 16 | uri: redis://redis:6379 17 | websocket: 18 | enabled: true 19 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pypy:3-7-stretch 2 | LABEL maintainer="PolySwarm Developers " 3 | 4 | WORKDIR /usr/src/app 5 | 6 | RUN apt-get update && apt-get install -y \ 7 | jq \ 8 | libgmp-dev \ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | COPY requirements.txt ./ 12 | 13 | RUN set -x && pip install --no-cache-dir -r requirements.txt 14 | 15 | COPY . . 16 | COPY ./config/polyswarmd.yml /etc/polyswarmd/polyswarmd.yml 17 | RUN set -x && pip install . 18 | 19 | # You can set log format and level in command line by e.g. `polyswarmd.wsgi:app(log_format='text', log_level='WARNING')` 20 | ENV GUNICORN_CMD_ARGS="--bind 0.0.0.0:31337 -k flask_sockets.worker -w 4" 21 | CMD ["gunicorn", "polyswarmd.wsgi:app()"] 22 | -------------------------------------------------------------------------------- /docker/Dockerfile.release: -------------------------------------------------------------------------------- 1 | FROM ubuntu:xenial 2 | LABEL maintainer="PolySwarm Developers " 3 | 4 | ARG POLYSWARMD_VERSION 5 | ARG CONTRACTS_VERSION 6 | 7 | WORKDIR /usr/src/app 8 | COPY requirements.txt ./ 9 | 10 | # Python and build deps 11 | RUN set -x && \ 12 | apt-get update && apt-get install -y \ 13 | curl \ 14 | git \ 15 | libgmp-dev \ 16 | libssl-dev \ 17 | python3 \ 18 | python3-pip && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | # Install solc 22 | RUN set -x && \ 23 | apt-get update && apt-get install -y software-properties-common && \ 24 | add-apt-repository ppa:ethereum/ethereum && \ 25 | apt-get update && apt-get install -y \ 26 | solc && \ 27 | rm -rf /var/lib/apt/lists/* 28 | 29 | # Install truffle 30 | RUN set -x && \ 31 | curl -sSf https://deb.nodesource.com/setup_9.x | bash - && \ 32 | apt-get install -y \ 33 | nodejs && \ 34 | rm -rf /var/lib/apt/lists/* && \ 35 | npm i -g truffle 36 | 37 | # Install python deps 38 | RUN set -x && \ 39 | pip3 install --no-cache-dir -r requirements.txt && \ 40 | pip3 install pyinstaller 41 | 42 | COPY . . 
43 | 44 | # Build contracts 45 | RUN set -x && \ 46 | git clone -b $CONTRACTS_VERSION https://github.com/polyswarm/contracts.git && \ 47 | cd contracts && \ 48 | npm i && \ 49 | truffle compile 50 | 51 | # Build ELF 52 | RUN patch -d /usr/local/lib/python3.5/dist-packages -p1 < docker/release/site-packages.patch && \ 53 | pyinstaller src/polyswarmd/__main__.py -n polyswarmd -y --clean && \ 54 | mkdir -p dist/polyswarmd/config/contracts && \ 55 | cp contracts/build/contracts/*.json dist/polyswarmd/config/contracts && \ 56 | cp docker/release/polyswarmd.yml dist/polyswarmd/config 57 | 58 | # Build tar 59 | RUN cd dist && \ 60 | mv polyswarmd polyswarmd-$POLYSWARMD_VERSION && \ 61 | tar -czf /tmp/polyswarmd-$POLYSWARMD_VERSION.tar.gz polyswarmd-$POLYSWARMD_VERSION 62 | -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | polyswarmd: 4 | image: "polyswarm/polyswarmd" 5 | depends_on: 6 | - geth 7 | - ipfs 8 | entrypoint: 9 | - polyswarmd 10 | environment: 11 | - ETH_URI=http://geth:8545 12 | - IPFS_URI=http://ipfs:5001 13 | ports: 14 | - "31337:31337" 15 | geth: 16 | image: "ethereum/client-go:alpine" 17 | ports: 18 | - "30303:30303" 19 | volumes: 20 | - "ethereum:/root/.ethereum" 21 | command: ["--fast", "--rpc", "--rpcaddr", "0.0.0.0", "--rpcvhosts", "geth", "--rpcapi", "eth,web3,personal,net"] 22 | ipfs: 23 | image: "ipfs/go-ipfs" 24 | environment: 25 | - IPFS_BIND_IP=0.0.0.0 26 | ports: 27 | - "4001:4001" 28 | volumes: 29 | - "ipfs-export:/export" 30 | - "ipfs-data:/data/ipfs" 31 | volumes: 32 | ethereum: 33 | ipfs-export: 34 | ipfs-data: 35 | -------------------------------------------------------------------------------- /docker/release/polyswarmd.yml: -------------------------------------------------------------------------------- 1 | community: default 2 | ipfs_uri: http://localhost:5001 3 | artifact_limit: 1 4 | homechain: 5 | chain_id: 1 6 | eth_uri: http://localhost:8545 7 | nectar_token_address: "0x9e46a38f5daabe8683e10793b06749eef7d733d1" 8 | bounty_registry_address: "0xb9314ac8f04d61cb155598b50c49470b5196d892" 9 | offer_registry_address: "0xa9b722839defca2652db9e4fd8b8e281ef0b5de4" 10 | erc20_relay_address: "0xff2bad6415467e952484456d7d23e0fd1e9816ce" 11 | sidechain: 12 | free: Yes 13 | chain_id: 31337 14 | eth_uri: http://localhost:7545 15 | nectar_token_address: "0x6bc19695033ea13326ee7a4ca23fe2a4acf07764" 16 | bounty_registry_address: "0x93043aa63981e70d181049a5f0e605a86f2f4446" 17 | offer_registry_address: "0x74a6c8a715bbd0082523294d1a3cbc146ffbeb15" 18 | erc20_relay_address: "0x2e34c500aba2fb2d530fd2017c3b761423090629" 19 | -------------------------------------------------------------------------------- /docker/release/site-packages.patch: -------------------------------------------------------------------------------- 1 | diff --git a/PyInstaller/hooks/hook-Crypto.py b/PyInstaller/hooks/hook-Crypto.py 2 | new file mode 100644 3 | index 0000000..f108e55 4 | --- /dev/null 5 | +++ b/PyInstaller/hooks/hook-Crypto.py 6 | @@ -0,0 +1,41 @@ 7 | +#----------------------------------------------------------------------------- 8 | +# Copyright (c) 2005-2016, PyInstaller Development Team. 9 | +# 10 | +# Distributed under the terms of the GNU General Public License with exception 11 | +# for distributing bootloader. 12 | +# 13 | +# The full license is in the file COPYING.txt, distributed with this software. 
14 | +#----------------------------------------------------------------------------- 15 | + 16 | +""" 17 | +Hook for Crypto module, adapted by hook for Cryptodome 18 | +""" 19 | + 20 | +import os 21 | +import glob 22 | + 23 | +from PyInstaller.compat import EXTENSION_SUFFIXES 24 | +from PyInstaller.utils.hooks import get_module_file_attribute 25 | + 26 | +# Include the modules as binaries in a subfolder named like the package. 27 | +# Crypto's loader expects to find them inside the package directory for 28 | +# the main module. We cannot use hiddenimports because that would add the 29 | +# modules outside the package. 30 | + 31 | +binaries = [] 32 | +binary_module_names = [ 33 | + 'Crypto.Cipher', 34 | + 'Crypto.Util', 35 | + 'Crypto.Hash', 36 | + 'Crypto.Protocol', 37 | +] 38 | + 39 | +try: 40 | + for module_name in binary_module_names: 41 | + m_dir = os.path.dirname(get_module_file_attribute(module_name)) 42 | + for ext in EXTENSION_SUFFIXES: 43 | + module_bin = glob.glob(os.path.join(m_dir, '_*%s' % ext)) 44 | + for f in module_bin: 45 | + binaries.append((f, module_name.replace('.', '/'))) 46 | +except ImportError: 47 | + pass 48 | diff --git a/PyInstaller/hooks/hook-cytoolz.itertoolz.py b/PyInstaller/hooks/hook-cytoolz.itertoolz.py 49 | new file mode 100644 50 | index 0000000..d33bd49 51 | --- /dev/null 52 | +++ b/PyInstaller/hooks/hook-cytoolz.itertoolz.py 53 | @@ -0,0 +1,13 @@ 54 | +#----------------------------------------------------------------------------- 55 | +# Copyright (c) 2018, PyInstaller Development Team. 56 | +# 57 | +# Distributed under the terms of the GNU General Public License with exception 58 | +# for distributing bootloader. 59 | +# 60 | +# The full license is in the file COPYING.txt, distributed with this software. 61 | +#----------------------------------------------------------------------------- 62 | + 63 | +# Hook for the cytoolz package: https://pypi.python.org/pypi/cytoolz 64 | +# Tested with cytoolz 0.9.0 and Python 3.5.2, on Ubuntu Linux x64 65 | + 66 | +hiddenimports = [ 'cytoolz.utils', 'cytoolz._signatures' ] 67 | diff --git a/PyInstaller/hooks/hook-eth_abi.py b/PyInstaller/hooks/hook-eth_abi.py 68 | new file mode 100644 69 | index 0000000..70815c9 70 | --- /dev/null 71 | +++ b/PyInstaller/hooks/hook-eth_abi.py 72 | @@ -0,0 +1,15 @@ 73 | +#----------------------------------------------------------------------------- 74 | +# Copyright (c) 2018, PyInstaller Development Team. 75 | +# 76 | +# Distributed under the terms of the GNU General Public License with exception 77 | +# for distributing bootloader. 78 | +# 79 | +# The full license is in the file COPYING.txt, distributed with this software. 80 | +#----------------------------------------------------------------------------- 81 | + 82 | +# Hook for the eth-abi package: https://pypi.python.org/pypi/eth-abi 83 | +# Tested with eth-utils 0.5.0 and Python 3.5.2, on Ubuntu Linux x64 84 | + 85 | +from PyInstaller.utils.hooks import copy_metadata 86 | + 87 | +datas = copy_metadata("eth_abi") 88 | diff --git a/PyInstaller/hooks/hook-eth_keyfile.py b/PyInstaller/hooks/hook-eth_keyfile.py 89 | new file mode 100644 90 | index 0000000..1e9b61a 91 | --- /dev/null 92 | +++ b/PyInstaller/hooks/hook-eth_keyfile.py 93 | @@ -0,0 +1,15 @@ 94 | +#----------------------------------------------------------------------------- 95 | +# Copyright (c) 2018, PyInstaller Development Team. 96 | +# 97 | +# Distributed under the terms of the GNU General Public License with exception 98 | +# for distributing bootloader. 
99 | +# 100 | +# The full license is in the file COPYING.txt, distributed with this software. 101 | +#----------------------------------------------------------------------------- 102 | + 103 | +# Hook for the eth-utils package: https://pypi.python.org/pypi/eth-utils 104 | +# Tested with eth-utils 0.8.1 and Python 3.5.2, on Ubuntu Linux x64 105 | + 106 | +from PyInstaller.utils.hooks import copy_metadata 107 | + 108 | +datas = copy_metadata("eth_keyfile") 109 | diff --git a/PyInstaller/hooks/hook-eth_utils.py b/PyInstaller/hooks/hook-eth_utils.py 110 | new file mode 100644 111 | index 0000000..efd18ea 112 | --- /dev/null 113 | +++ b/PyInstaller/hooks/hook-eth_utils.py 114 | @@ -0,0 +1,15 @@ 115 | +#----------------------------------------------------------------------------- 116 | +# Copyright (c) 2018, PyInstaller Development Team. 117 | +# 118 | +# Distributed under the terms of the GNU General Public License with exception 119 | +# for distributing bootloader. 120 | +# 121 | +# The full license is in the file COPYING.txt, distributed with this software. 122 | +#----------------------------------------------------------------------------- 123 | + 124 | +# Hook for the eth-utils package: https://pypi.python.org/pypi/eth-utils 125 | +# Tested with eth-utils 0.8.1 and Python 3.5.2, on Ubuntu Linux x64 126 | + 127 | +from PyInstaller.utils.hooks import copy_metadata 128 | + 129 | +datas = copy_metadata("eth_utils") 130 | diff --git a/PyInstaller/hooks/hook-web3.py b/PyInstaller/hooks/hook-web3.py 131 | new file mode 100644 132 | index 0000000..1ea2e15 133 | --- /dev/null 134 | +++ b/PyInstaller/hooks/hook-web3.py 135 | @@ -0,0 +1,15 @@ 136 | +#----------------------------------------------------------------------------- 137 | +# Copyright (c) 2018, PyInstaller Development Team. 138 | +# 139 | +# Distributed under the terms of the GNU General Public License with exception 140 | +# for distributing bootloader. 141 | +# 142 | +# The full license is in the file COPYING.txt, distributed with this software. 
143 | +#----------------------------------------------------------------------------- 144 | + 145 | +# Hook for the web3 package: https://pypi.python.org/pypi/web3 146 | +# Tested with web3 3.16.5 and Python 3.5.2, on Ubuntu Linux x64 147 | + 148 | +from PyInstaller.utils.hooks import copy_metadata 149 | + 150 | +datas = copy_metadata("web3") 151 | diff --git a/eth_hash/utils.py b/eth_hash/utils.py 152 | index d54ab56..45157e8 100644 153 | --- a/eth_hash/utils.py 154 | +++ b/eth_hash/utils.py 155 | @@ -3,10 +3,12 @@ import os 156 | 157 | from eth_hash.backends import ( 158 | SUPPORTED_BACKENDS, 159 | + pycryptodome 160 | ) 161 | 162 | 163 | def auto_choose_backend(): 164 | + return pycryptodome 165 | env_backend = get_backend_in_environment() 166 | 167 | if env_backend: 168 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | coverage>=5.0.3 2 | flake8>=3.7.9 3 | isort>=4.3.21 4 | mypy>=0.761 5 | mypy-extensions>=0.4.3 6 | pytest>=5.3.2 7 | pytest-runner>=5.2 8 | requests-mock>=1.7.0 9 | yapf>=0.28.0 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | base58==1.0.3 2 | click==7.0 3 | dataclasses==0.7; python_version=="3.6" 4 | fastjsonschema==2.14.1 5 | Flask==1.0.2 6 | Flask-Caching==1.7.2 7 | Flask-Sockets==0.2.1 8 | gevent==1.4.0 9 | gevent-websocket==0.10.1 10 | git+https://github.com/polyswarm/flask-profiler.git@profile-stats#egg=flask_profiler 11 | git+https://github.com/polyswarm/polyswarmd-config.git@master#egg=polyswarmd_config 12 | gunicorn==19.9.0 13 | ipfshttpclient==0.4.12 14 | polyswarm-artifact>=1.3.3 15 | psycopg2-binary==2.8.4 16 | pycryptodome==3.8.1 17 | py-evm==0.3.0a5 18 | python-consul==1.1.0 19 | python-json-logger==0.1.9 20 | PyYaml==4.2b4 21 | redis==3.3.8 22 | requests==2.22.0 23 | requests-futures==1.0.0 24 | rlp==1.1.0 25 | ujson==1.35 26 | web3==4.9.2 27 | websocket-client==0.48.0 28 | Werkzeug==0.15.3 29 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | Note that there are private keys in the keystore directory, the passphrase for 2 | all 5 is the string `blah`, *THESE ARE PROVIDED ONLY FOR TESTING PURPOSES WITH 3 | THIS SCRIPT* 4 | -------------------------------------------------------------------------------- /scripts/launch_ipfs_container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /tmp/export /tmp/ipfs-data 4 | docker run -d --name ipfs -v /tmp/export:/export -v /tmp/ipfs-data:/data/ipfs -p 4001:4001 -p 127.0.0.1:5001:5001 -p 127.0.0.1:8080:8080 ipfs/go-ipfs:latest 5 | -------------------------------------------------------------------------------- /scripts/start_testrpc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | start_testrpc() { 4 | # We define 10 accounts with balance 1M ether, needed for high-value tests. 
5 | local accounts=( 6 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501200,1000000000000000000000000" 7 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501201,1000000000000000000000000" 8 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501202,1000000000000000000000000" 9 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501203,1000000000000000000000000" 10 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501204,1000000000000000000000000" 11 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501205,1000000000000000000000000" 12 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501206,1000000000000000000000000" 13 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501207,1000000000000000000000000" 14 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501208,1000000000000000000000000" 15 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501209,1000000000000000000000000" 16 | ) 17 | 18 | ganache-cli --gasLimit 0xfffffffffff ${accounts[@]} 19 | } 20 | 21 | start_testrpc 22 | -------------------------------------------------------------------------------- /scripts/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Exit script as soon as a command fails. 4 | set -o errexit 5 | 6 | # Executes cleanup function at script exit. 7 | trap cleanup EXIT 8 | 9 | cleanup() { 10 | # Kill the testrpc instance that we started (if we started one and if it's still running). 11 | if [ -n "$testrpc_pid" ] && ps -p $testrpc_pid > /dev/null; then 12 | kill -9 $testrpc_pid 13 | fi 14 | } 15 | 16 | testrpc_port=8545 17 | 18 | start_testrpc() { 19 | # We define 10 accounts with balance 1M ether, needed for high-value tests. 20 | local accounts=( 21 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501200,1000000000000000000000000" 22 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501201,1000000000000000000000000" 23 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501202,1000000000000000000000000" 24 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501203,1000000000000000000000000" 25 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501204,1000000000000000000000000" 26 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501205,1000000000000000000000000" 27 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501206,1000000000000000000000000" 28 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501207,1000000000000000000000000" 29 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501208,1000000000000000000000000" 30 | --account="0x2bdd21761a483f71054e14f5b827213567971c676928d9a1808cbfa4b7501209,1000000000000000000000000" 31 | ) 32 | 33 | ganache-cli --networkId 1337 --gasLimit 0xfffffffffff ${accounts[@]} > /dev/null & 34 | 35 | testrpc_pid=$! 
36 | } 37 | 38 | start_testrpc 39 | 40 | (cd truffle && truffle migrate --reset) 41 | nectar_address=$(jq '.["networks"]["1337"]["address"]' < truffle/build/contracts/NectarToken.json) 42 | bounty_registry_address=$(jq '.["networks"]["1337"]["address"]' < truffle/build/contracts/BountyRegistry.json) 43 | 44 | cat > polyswarmd.test.cfg <<- EOF 45 | NECTAR_TOKEN_ADDRESS = $nectar_address 46 | BOUNTY_REGISTRY_ADDRESS = $bounty_registry_address 47 | EOF 48 | 49 | cat polyswarmd.test.cfg 50 | 51 | POLYSWARMD_NETWORK=test python3 -m unittest polyswarmd_test $@ 52 | -------------------------------------------------------------------------------- /scripts/test_curl.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | ACCOUNT="0x34e583cf9c1789c3141538eec77d9f0b8f7e89f2" 4 | 5 | curl -H 'Content-Type: application/json' -d '{"amount": "62500000000000000", "uri": "QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6", "duration": 10}' http://localhost:31337/bounties?account=$ACCOUNT 6 | -------------------------------------------------------------------------------- /scripts/test_file.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | echo foo > foo 4 | echo bar > bar 5 | 6 | curl --trace-ascii - \ 7 | -F file=@foo \ 8 | -F file=@bar \ 9 | http://localhost:31337/artifacts?account=0x0000000000000000000000000000000000000000 10 | 11 | rm foo bar 12 | -------------------------------------------------------------------------------- /scripts/test_ws.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | curl -v -i -N \ 4 | -H "Connection: Upgrade" \ 5 | -H "Upgrade: websocket" \ 6 | -H "Host: localhost" \ 7 | -H "Origin: http://localhost:31337" \ 8 | -H "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==" \ 9 | -H "Sec-WebSocket-Version: 13" \ 10 | http://localhost:31337/events?chain=home 11 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [coverage:run] 2 | source = 3 | polyswarmd 4 | 5 | omit = 6 | venv/* 7 | .venv/* 8 | 9 | [tool:pytest] 10 | addopts = --doctest-modules 11 | norecursedirs = 12 | venv 13 | .venv 14 | build 15 | dist 16 | docker 17 | dist 18 | config 19 | collect_ignore = ['setup.py'] 20 | 21 | [isort] 22 | skip_glob=tests/conftest.py 23 | atomic=True 24 | balanced_wrapping=True 25 | force_sort_within_sections=True 26 | from_first=False 27 | include_trailing_comma=True 28 | known_first_party=polyswarm,polyswarmartifact,polyswarmclient,polyswarmcli,polyswarmapi,polyswarmmodels,polyswarmd 29 | known_standard_library=typing,typing_extensions 30 | multi_line_output=3 31 | use_parentheses=True 32 | 33 | [flake8] 34 | max-line-length= 119 35 | ignore = E402, # module imports not at top of file 36 | E266, # Too many leading '#' in comment 37 | E402, # module imports not at top of file 38 | E302, # blank line expectations 39 | E261, # at least two spaces before inline comment 40 | E262, # inline comment should start with a # 41 | E704, # Multiple statements on one line 42 | E251, # unexpected spaces around keyword / parameter equals 43 | W503, # line break before binary operator 44 | W504, # line break after binary operator 45 | E266, # missing whitespace around arithmetic operators 46 | 47 | [mypy] 48 | cache_dir = .mypy_cache 49 | check_untyped_defs = True 50 | files = src/polyswarmd 51 | # don't warn if types 
can't be found for 3rd party libs 52 | ignore_missing_imports = True 53 | # enable 'incremental mode' 54 | incremental = True 55 | pretty = True 56 | python_version = 3.7 57 | # Prohibit equality checks, identity checks, and container checks between 58 | # non-overlapping types. 59 | strict_equality = True 60 | # treat `None` as compatible with every type (mypy isn't sophisticated enough to 61 | # know when 'Optional' has been checked) 62 | strict_optional = False 63 | warn_redundant_casts = True 64 | warn_unreachable = True 65 | warn_unused_configs = True 66 | warn_unused_ignores = True 67 | 68 | [yapf] 69 | based_on_style = google 70 | column_limit = 101 71 | # don't allow dictionary keys to be spread across multiple lines 72 | allow_multiline_dictionary_keys = false 73 | # align closing bracket with visual indentation 74 | align_closing_bracket_with_visual_indent = true 75 | # let spacing indicate operator precedence 76 | arithmetic_precedence_indication = true 77 | # Do not split consecutive brackets 78 | coalesce_brackets = true 79 | # de-dent closing brackets 80 | dedent_closing_brackets = true 81 | # Indent value if it can't fit on the same line 82 | indent_dictionary_value = true 83 | # if a subexpression with a comma fits in its starting line, then the subexpression is not split 84 | split_all_top_level_comma_separated_values = false 85 | # Split each comprehension clause onto it's own line 86 | split_complex_comprehension = true -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | 4 | def parse_requirements(): 5 | with open('requirements.txt', 'r') as f: 6 | return ['{2} @ {0}'.format(*r.partition('#egg=')) if '#egg=' in r else r for r in f.read().splitlines()] 7 | 8 | 9 | setup(name='polyswarmd', 10 | version='2.2.0', 11 | description='Daemon for interacting with the PolySwarm marketplace', 12 | author = 'PolySwarm Developers', 13 | author_email = 'info@polyswarm.io', 14 | url='https://github.com/polyswarm/polyswarmd', 15 | license='MIT', 16 | install_requires=parse_requirements(), 17 | include_package_data=True, 18 | packages=find_packages('src'), 19 | package_dir={'': 'src/'}, 20 | entry_points={ 21 | 'console_scripts': ['polyswarmd=polyswarmd.__main__:main'], 22 | }, 23 | ) 24 | -------------------------------------------------------------------------------- /src/polyswarmd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/__main__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | import click 5 | from gevent import pywsgi 6 | from geventwebsocket.handler import WebSocketHandler 7 | 8 | from polyswarmd.utils.logger import init_logging 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | @click.command() 14 | @click.option('--log-format', envvar='LOG_FORMAT', default='text', help='Logging format') 15 | @click.option('--log-level', envvar='LOG_LEVEL', default='WARNING', help='Logging level') 16 | @click.option('--host', default='', help='Host to listen on') 17 | @click.option('--port', default=31337, help='Port to listen on') 18 | def main(log_format, log_level, 
host, port): 19 | log_level = getattr(logging, log_level.upper(), None) 20 | if not isinstance(log_level, int): 21 | logging.error('Invalid log level') 22 | sys.exit(-1) 23 | 24 | init_logging(log_format, log_level) 25 | 26 | from polyswarmd.app import app 27 | 28 | server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler) 29 | 30 | logger.critical("polyswarmd is ready!") 31 | server.serve_forever() 32 | 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /src/polyswarmd/app.py: -------------------------------------------------------------------------------- 1 | """ 2 | isort:skip_file 3 | """ 4 | from concurrent.futures import ThreadPoolExecutor 5 | from requests_futures.sessions import FuturesSession 6 | from polyswarmd.monkey import patch_all 7 | 8 | patch_all() 9 | 10 | import datetime 11 | 12 | import functools 13 | import logging 14 | 15 | from flask import Flask, g, request 16 | from flask_caching import Cache 17 | 18 | from polyswarmd.config.polyswarmd import PolySwarmd, DEFAULT_FALLBACK_SIZE 19 | 20 | from polyswarmd.utils.logger import init_logging # noqa 21 | 22 | from polyswarmd.utils.profiler import setup_profiler 23 | from polyswarmd.utils.response import success, failure, install_error_handlers 24 | 25 | logger = logging.getLogger(__name__) 26 | cache: Cache = Cache(config={"CACHE_TYPE": "simple", "CACHE_DEFAULT_TIMEOUT": 30}) 27 | 28 | # Set up our app object 29 | app = Flask(__name__) 30 | app.url_map.strict_slashes = False 31 | _config = PolySwarmd.auto() 32 | app.config['POLYSWARMD'] = _config 33 | # Setting this value works even when Content-Length is omitted, we must have it 34 | app.config['MAX_CONTENT_LENGTH'] = _config.artifact.max_size * _config.artifact.limit 35 | 36 | session = FuturesSession(executor=ThreadPoolExecutor(4), adapter_kwargs={'max_retries': 2}) 37 | 38 | session.request = functools.partial(session.request, timeout=10) 39 | 40 | app.config['REQUESTS_SESSION'] = session 41 | app.config['CHECK_BLOCK_LIMIT'] = True 42 | app.config['THREADPOOL'] = ThreadPoolExecutor() 43 | 44 | install_error_handlers(app) 45 | 46 | from polyswarmd.views.eth import misc 47 | from polyswarmd.views.artifacts import artifacts 48 | from polyswarmd.views.balances import balances 49 | from polyswarmd.views.bounties import bounties 50 | from polyswarmd.views.relay import relay 51 | from polyswarmd.views.offers import offers 52 | from polyswarmd.views.staking import staking 53 | from polyswarmd.views.event_message import init_websockets 54 | 55 | app.register_blueprint(misc, url_prefix='/') 56 | app.register_blueprint(artifacts, url_prefix='/artifacts') 57 | app.register_blueprint(balances, url_prefix='/balances') 58 | app.register_blueprint(bounties, url_prefix='/bounties') 59 | app.register_blueprint(relay, url_prefix='/relay') 60 | app.register_blueprint(offers, url_prefix='/offers') 61 | app.register_blueprint(staking, url_prefix='/staking') 62 | 63 | if app.config['POLYSWARMD'].websocket.enabled: 64 | init_websockets(app) 65 | 66 | setup_profiler(app) 67 | cache.init_app(app) 68 | 69 | AUTH_WHITELIST = {'/status', '/relay/withdrawal', '/transactions'} 70 | 71 | 72 | @cache.memoize(30) 73 | def get_auth(api_key, auth_uri): 74 | future = session.get(auth_uri, headers={'Authorization': api_key}) 75 | return future.result() 76 | 77 | 78 | @cache.memoize(30) 79 | def get_account(api_key, auth_uri): 80 | future = session.get(auth_uri, params={'api_key': api_key}) 81 | return 
future.result() 82 | 83 | 84 | def check_auth_response(api_response): 85 | if api_response is None or api_response.status_code // 100 != 2: 86 | return None 87 | try: 88 | return api_response.json() 89 | except ValueError: 90 | logger.exception( 91 | 'Invalid response from API key management service, received: %s', api_response.encode() 92 | ) 93 | return None 94 | 95 | 96 | class User(object): 97 | 98 | def __init__(self, authorized=False, user_id=None, max_artifact_size=DEFAULT_FALLBACK_SIZE): 99 | self.authorized = authorized 100 | self.max_artifact_size = max_artifact_size 101 | self.user_id = user_id if authorized else None 102 | 103 | @classmethod 104 | def from_api_key(cls, api_key): 105 | config = app.config['POLYSWARMD'] 106 | 107 | auth_uri = f'{config.auth.uri}/communities/{config.community}/auth' 108 | 109 | r = get_auth(api_key, auth_uri) 110 | j = check_auth_response(r) 111 | if j is None: 112 | return cls( 113 | authorized=False, user_id=None, max_artifact_size=config.artifact.fallback_max_size 114 | ) 115 | 116 | anonymous = j.get('anonymous', True) 117 | user_id = j.get('user_id') if not anonymous else None 118 | 119 | # Get account features 120 | account_uri = f'{config.auth.uri}/accounts' 121 | r = get_account(api_key, account_uri) 122 | j = check_auth_response(r) 123 | if j is None: 124 | return cls( 125 | authorized=True, 126 | user_id=user_id, 127 | max_artifact_size=config.artifact.fallback_max_size 128 | ) 129 | 130 | max_artifact_size = next(( 131 | f['base_uses'] 132 | for f in j.get('account', {}).get('features', []) 133 | if f['tag'] == 'max_artifact_size' 134 | ), config.artifact.fallback_max_size) 135 | return cls(authorized=True, user_id=user_id, max_artifact_size=max_artifact_size) 136 | 137 | @property 138 | def anonymous(self): 139 | return self.user_id is None 140 | 141 | def __bool__(self): 142 | config = app.config['POLYSWARMD'] 143 | return config.auth.require_api_key and self.authorized 144 | 145 | 146 | @app.route('/status') 147 | def status(): 148 | config = app.config['POLYSWARMD'] 149 | return success(config.status.get_status()) 150 | 151 | 152 | @app.before_request 153 | def before_request(): 154 | g.user = User() 155 | 156 | config = app.config['POLYSWARMD'] 157 | 158 | if not config.auth.require_api_key: 159 | return 160 | 161 | # Ignore prefix if present 162 | try: 163 | api_key = request.headers.get('Authorization').split()[-1] 164 | except Exception: 165 | # exception == unauthenticated 166 | return whitelist_check(request.path) 167 | 168 | if api_key: 169 | g.user = User.from_api_key(api_key) 170 | if not g.user: 171 | return whitelist_check(request.path) 172 | 173 | size = request.content_length 174 | if size is not None and size > g.user.max_artifact_size * 256: 175 | return failure('Payload too large', 413) 176 | 177 | 178 | def whitelist_check(path): 179 | # Want to be able to whitelist unauthenticated routes, everything requires auth by default 180 | return None if path in AUTH_WHITELIST else failure('Unauthorized', 401) 181 | 182 | 183 | @app.after_request 184 | def after_request(response): 185 | eth_address = getattr(g, 'eth_address', None) 186 | user = getattr(g, 'user', None) 187 | 188 | if response.status_code == 200: 189 | logger.info( 190 | '%s %s %s %s %s %s', datetime.datetime.now(), request.method, response.status_code, 191 | request.path, eth_address, user.user_id 192 | ) 193 | else: 194 | logger.error( 195 | '%s %s %s %s %s %s: %s', datetime.datetime.now(), request.method, response.status_code, 196 | request.path, 
eth_address, user.user_id, response.get_data() 197 | ) 198 | 199 | return response 200 | -------------------------------------------------------------------------------- /src/polyswarmd/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/config/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/config/contract.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import json 3 | import logging 4 | import os 5 | import time 6 | import yaml 7 | 8 | from consul import Timeout 9 | from typing import Any, Dict, List, Set, Tuple 10 | from web3 import HTTPProvider, Web3 11 | from web3.exceptions import MismatchedABI 12 | from web3.middleware import geth_poa_middleware 13 | 14 | from polyswarmdconfig.config import Config 15 | from polyswarmdconfig.exceptions import MissingConfigValueError 16 | from polyswarmd.services.ethereum.rpc import EthereumRpc 17 | from polyswarmd.utils import IN_TESTENV, camel_case_to_snake_case 18 | 19 | logger = logging.getLogger(__name__) 20 | EXPECTED_CONTRACTS = [ 21 | 'NectarToken', 'BountyRegistry', 'ArbiterStaking', 'ERC20Relay', 'OfferRegistry', 'OfferMultiSig' 22 | ] 23 | 24 | # Allow interfacing with contract versions in this range 25 | SUPPORTED_CONTRACT_VERSIONS = { 26 | 'ArbiterStaking': ((1, 2, 0), (1, 3, 0)), 27 | 'BountyRegistry': ((1, 6, 0), (1, 7, 0)), 28 | 'ERC20Relay': ((1, 2, 0), (1, 4, 0)), 29 | 'OfferRegistry': ((1, 2, 0), (1, 3, 0)), 30 | } 31 | 32 | 33 | class Contract(object): 34 | def __init__(self, w3, name, abi, address=None): 35 | self.name = name 36 | self.w3 = w3 37 | self.abi = abi 38 | self.address = address 39 | self._contract = None 40 | 41 | def bind(self, address=None, persistent=False): 42 | from polyswarmd.views.eth import ZERO_ADDRESS 43 | address = address or self.address 44 | if not address: 45 | raise ValueError('No address provided to bind to') 46 | 47 | ret = self.w3.eth.contract(address=self.w3.toChecksumAddress(address), abi=self.abi) 48 | 49 | supported_versions = SUPPORTED_CONTRACT_VERSIONS.get(self.name) 50 | if IN_TESTENV: 51 | logger.info("We are inside a test environment, skipping contract VERSION check") 52 | elif supported_versions is not None and address != ZERO_ADDRESS: 53 | min_version, max_version = supported_versions 54 | try: 55 | version = tuple(int(s) for s in ret.functions.VERSION().call().split('.')) 56 | except MismatchedABI: 57 | logger.error('Expected version but no version reported for contract %s', self.name) 58 | raise ValueError('No contract version reported') 59 | except ValueError: 60 | logger.error( 61 | 'Invalid version specified for contract %s, require major.minor.patch as string', 62 | self.name 63 | ) 64 | raise ValueError('Invalid contract version reported') 65 | 66 | if len(version) != 3 or not min_version <= version < max_version: 67 | logger.error( 68 | "Received %s version %s.%s.%s, but expected version between %s.%s.%s and %s.%s.%s ", 69 | self.name, *version, *min_version, *max_version 70 | ) 71 | raise ValueError('Unsupported contract version') 72 | 73 | if persistent: 74 | self._contract = ret 75 | 76 | return ret 77 | 78 | @property 79 | def contract(self): 80 | if self._contract is None: 81 | return self.bind(self.address, persistent=True) 82 | 83 | return self._contract 84 | 85 | @staticmethod 86 | def 
from_json(w3: Web3, name: str, contract: Dict[str, Any], config: Dict[str, Any]): 87 | if 'abi' not in contract: 88 | return None 89 | 90 | abi = contract.get('abi') 91 | 92 | # XXX: OfferMultiSig doesn't follow this convention, but we don't bind that now anyway 93 | address = config.get(camel_case_to_snake_case(name) + '_address') 94 | 95 | return Contract(w3, name, abi, address) 96 | 97 | 98 | @dataclasses.dataclass 99 | class Chain(Config): 100 | name: str 101 | eth_uri: str 102 | chain_id: int 103 | w3: Web3 104 | nectar_token: Contract 105 | bounty_registry: Contract 106 | erc20_relay: Contract 107 | arbiter_staking: Contract 108 | offer_registry: Contract 109 | offer_multi_sig: Contract 110 | free: bool = False 111 | rpc: EthereumRpc = dataclasses.field(init=False) 112 | 113 | def __post_init__(self): 114 | self.setup_rpc() 115 | 116 | def setup_rpc(self): 117 | self.rpc = EthereumRpc(self) 118 | 119 | @classmethod 120 | def populate(cls, config: Dict[str, Any]) -> Dict[str, Any]: 121 | eth_uri = config.get('eth_uri') 122 | if eth_uri is None: 123 | raise MissingConfigValueError('Missing eth_uri') 124 | 125 | w3 = cls.setup_web3(eth_uri) 126 | contract_abis = config.get('contracts') 127 | del config['contracts'] 128 | contracts = cls.create_contract_dicts(w3, contract_abis, config) 129 | config.update(contracts) 130 | config['w3'] = w3 131 | 132 | return super(Chain, cls).populate(config) 133 | 134 | @classmethod 135 | def setup_web3(cls, eth_uri: str): 136 | w3 = Web3(HTTPProvider(eth_uri)) 137 | w3.middleware_stack.inject(geth_poa_middleware, layer=0) 138 | return w3 139 | 140 | @classmethod 141 | def create_contract_dicts(cls, w3: Web3, contracts: Dict[str, Any], 142 | config: Dict[str, Any]) -> Dict[str, Contract]: 143 | return { 144 | camel_case_to_snake_case(name): cls.create_contract(w3, name, abi, config) 145 | for name, abi in contracts.items() 146 | } 147 | 148 | @classmethod 149 | def create_contract(cls, w3: Web3, name: str, abi: Dict[str, Any], config: Dict[str, Any]) -> Contract: 150 | return Contract.from_json(w3, name, abi, config) 151 | 152 | @staticmethod 153 | def does_include_all_contracts(contracts: Dict[str, Any]) -> bool: 154 | return all([c in contracts for c in EXPECTED_CONTRACTS]) 155 | 156 | 157 | class ConsulChain(Chain): 158 | 159 | @classmethod 160 | def from_consul(cls, consul_client, name: str, community_key: str): 161 | chain = cls.fetch_config(consul_client, name, community_key) 162 | chain['contracts'] = cls.fetch_contracts(consul_client, community_key) 163 | chain['name'] = name 164 | return cls.from_dict(chain) 165 | 166 | @classmethod 167 | def fetch_config(cls, consul_client, name: str, key: str) -> Dict[str, Any]: 168 | config = cls.fetch_from_consul_or_wait(consul_client, f'{key}/{name}chain').get('Value') 169 | if config is None: 170 | raise ValueError(f'Invalid chain config for chain {name}') 171 | 172 | return json.loads(config.decode('utf-8')) 173 | 174 | @classmethod 175 | def fetch_contracts(cls, consul_client, key: str) -> Dict[str, Any]: 176 | contracts: Dict[str, Any] = {} 177 | while True: 178 | contracts.update(cls.find_contracts(consul_client, key)) 179 | if cls.does_include_all_contracts(contracts): 180 | break 181 | 182 | logger.info('Key present but not all contracts deployed, retrying...') 183 | time.sleep(1) 184 | return contracts 185 | 186 | @classmethod 187 | def find_contracts(cls, consul_client, key: str) -> Dict[str, Any]: 188 | return {name: abi for name, abi in cls.fetch_contract_parts(consul_client, key)} 189 | 190 | 
@classmethod 191 | def fetch_contract_parts(cls, consul_client, key: str) -> List[Tuple[str, Dict[str, Any]]]: 192 | return [ 193 | cls.parse_kv_pair(kv_pair) 194 | for kv_pair in cls.fetch_contract_kv_pairs(consul_client, key) 195 | ] 196 | 197 | @classmethod 198 | def parse_kv_pair(cls, kv_pair) -> Tuple[str, Dict[str, Any]]: 199 | return cls.get_name(kv_pair), cls.get_abi(kv_pair) 200 | 201 | @classmethod 202 | def get_abi(cls, kv_pair) -> Dict[str, Any]: 203 | return json.loads(kv_pair.get('Value').decode('utf-8')) 204 | 205 | @classmethod 206 | def get_name(cls, kv_pair) -> str: 207 | return kv_pair.get('Key').rsplit('/', 1)[-1] 208 | 209 | @classmethod 210 | def fetch_contract_kv_pairs(cls, consul_client, key: str) -> List[Any]: 211 | filter_ = cls.contract_filter(key) 212 | return [ 213 | x for x in cls.fetch_from_consul_or_wait(consul_client, key, recurse=True) 214 | if x.get('Key') not in filter_ 215 | ] 216 | 217 | @classmethod 218 | def contract_filter(cls, key) -> Set[str]: 219 | return {f'{key}/{x}' for x in ('homechain', 'sidechain', 'config')} 220 | 221 | @staticmethod 222 | def fetch_from_consul_or_wait(client, key, recurse=False, index=0) -> Any: 223 | while True: 224 | try: 225 | index, data = client.kv.get(key, recurse=recurse, index=index, wait='2m') 226 | if data is not None: 227 | return data 228 | except Timeout: 229 | logger.info('Consul up but key %s not available, retrying...', key) 230 | continue 231 | 232 | @staticmethod 233 | def wait_for_consul_key_deletion(client, key, recurse=False, index=0): 234 | logger.info('Watching key: %s', key) 235 | while True: 236 | try: 237 | index, data = client.kv.get(key, recurse=recurse, index=index, wait='2m') 238 | if data is None: 239 | return 240 | except Timeout: 241 | logger.info('Consul key %s still valid', key) 242 | continue 243 | 244 | 245 | class FileChain(Chain): 246 | 247 | @classmethod 248 | def from_config_file(cls, name, filename): 249 | chain = cls.load_chain_details(filename) 250 | chain['contracts'] = cls.load_contracts(filename) 251 | chain['name'] = name 252 | return cls.from_dict(chain) 253 | 254 | @classmethod 255 | def load_chain_details(cls, filename: str) -> Dict[str, Any]: 256 | with open(filename, 'r') as f: 257 | return yaml.safe_load(f) 258 | 259 | @classmethod 260 | def load_contracts(cls, path) -> Dict[str, Any]: 261 | contracts_dir = os.path.dirname(path) 262 | return cls.load_contracts_from_dir(contracts_dir) 263 | 264 | @classmethod 265 | def load_contracts_from_dir(cls, directory) -> Dict[str, Any]: 266 | return { 267 | name: abi for root, dirs, files in os.walk(directory) 268 | for name, abi in cls.load_contract_files(root, files) 269 | } 270 | 271 | @classmethod 272 | def load_contract_files(cls, root: str, files: List[str]) -> List[Tuple[str, Dict[str, Any]]]: 273 | filter_ = cls.contract_filter() 274 | return [cls.load_contract(os.path.join(root, f)) for f in files if f not in filter_] 275 | 276 | @classmethod 277 | def contract_filter(cls) -> Set[str]: 278 | return {f'{x}.json' for x in ('homechain', 'sidechain', 'config')} 279 | 280 | @classmethod 281 | def load_contract(cls, filename: str) -> Tuple[str, Dict[str, Any]]: 282 | return cls.get_name(os.path.basename(filename)), cls.get_abi(filename) 283 | 284 | @classmethod 285 | def get_name(cls, filename: str): 286 | return os.path.splitext(filename)[0] 287 | 288 | @classmethod 289 | def get_abi(cls, filename: str) -> Dict[str, Any]: 290 | with open(filename, 'r') as f: 291 | return json.load(f) 292 | 
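The version gate in Contract.bind above compares the contract's reported major.minor.patch string against the half-open ranges in SUPPORTED_CONTRACT_VERSIONS, relying on Python tuple ordering. A minimal standalone sketch of that comparison, assuming made-up version strings rather than values read from a deployed contract:

# Standalone sketch of the half-open version check performed in Contract.bind.
# The version strings used below are hypothetical examples, not on-chain data.
SUPPORTED_CONTRACT_VERSIONS = {
    'BountyRegistry': ((1, 6, 0), (1, 7, 0)),
}

def is_supported(name, version_string):
    bounds = SUPPORTED_CONTRACT_VERSIONS.get(name)
    if bounds is None:
        return True  # contracts without an entry are not version-gated
    min_version, max_version = bounds
    version = tuple(int(part) for part in version_string.split('.'))
    # Same rule as Contract.bind: exactly three components, min <= version < max
    return len(version) == 3 and min_version <= version < max_version

assert is_supported('BountyRegistry', '1.6.9')
assert not is_supported('BountyRegistry', '1.7.0')

Because plain tuples compare lexicographically, a new patch release such as 1.6.9 stays inside the supported window while the next minor release falls out of it.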
-------------------------------------------------------------------------------- /src/polyswarmd/config/polyswarmd.py: -------------------------------------------------------------------------------- 1 | import dataclasses 2 | import logging 3 | import os 4 | import warnings 5 | import yaml 6 | 7 | from requests_futures.sessions import FuturesSession 8 | from typing import Dict, Optional 9 | 10 | from polyswarmdconfig import Artifact, Auth, Config, Consul, Redis 11 | from polyswarmd.config.contract import Chain, ConsulChain, FileChain 12 | from polyswarmd.config.status import Status 13 | from polyswarmd.services.artifact import ArtifactServices 14 | from polyswarmd.services.auth import AuthService 15 | from polyswarmd.services.ethereum import EthereumService 16 | from polyswarmd.utils.utils import IN_TESTENV 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | CONFIG_LOCATIONS = ['/etc/polyswarmd', '~/.config/polyswarmd'] 21 | if IN_TESTENV: 22 | # XXX: This is a huge hack to work around the issue that you have to load a function to 23 | # monkeypatch it. Because __init__.py alone has enough to break tests, this is an 24 | # alternative way to signal that we shouldn't perform "ordinary" file loading 25 | CONFIG_LOCATIONS = ['tests/fixtures/config/polyswarmd/'] 26 | 27 | DEFAULT_FALLBACK_SIZE = 10 * 1024 * 1024 28 | 29 | 30 | @dataclasses.dataclass 31 | class Eth(Config): 32 | trace_transactions: bool = True 33 | consul: Optional[Consul] = None 34 | directory: Optional[str] = None 35 | 36 | def __post_init__(self): 37 | if self.consul is not None and self.directory is not None: 38 | raise ValueError('Cannot have both directory and consul values') 39 | elif self.consul is None and self.directory is None: 40 | raise MissingConfigValueError('Must specify either consul or directory') 41 | 42 | def get_chains(self, community: str) -> Dict[str, Chain]: 43 | if self.consul is not None: 44 | return { 45 | network: ConsulChain.from_consul(self.consul.client, network, f'chain/{community}') 46 | for network in ['home', 'side'] 47 | } 48 | else: 49 | return { 50 | chain: FileChain.from_config_file( 51 | chain, os.path.join(self.directory, f'{chain}chain.json') 52 | ) for chain in ['home', 'side'] 53 | } 54 | 55 | 56 | @dataclasses.dataclass 57 | class Profiler(Config): 58 | enabled: bool = False 59 | db_uri: Optional[str] = None 60 | 61 | def __post_init__(self): 62 | if self.enabled and self.db_uri is None: 63 | raise ValueError('Profiler enabled, but no db uri set') 64 | 65 | 66 | @dataclasses.dataclass 67 | class Websocket(Config): 68 | enabled: bool = True 69 | 70 | def __post_init__(self): 71 | if self.enabled and os.environ.get('DISABLE_WEBSOCKETS'): 72 | self.enabled = False 73 | warnings.warn( 74 | '"DISABLE_WEBSOCKETS" environment variable is deprecated, please use POLYSWARMD_WEBSOCKET_ENABLED', 75 | DeprecationWarning) 76 | 77 | 78 | @dataclasses.dataclass 79 | class PolySwarmd(Config): 80 | artifact: Artifact 81 | community: str 82 | auth: Auth = dataclasses.field(default_factory=Auth) 83 | chains: Dict[str, Chain] = dataclasses.field(init=False) 84 | eth: Eth = dataclasses.field(default_factory=Eth) 85 | profiler: Profiler = dataclasses.field(default_factory=Profiler) 86 | redis: Redis = dataclasses.field(default_factory=Redis) 87 | status: Status = dataclasses.field(init=False) 88 | session: FuturesSession = dataclasses.field(init=False, default_factory=FuturesSession) 89 | websocket: Websocket = dataclasses.field(default_factory=Websocket) 90 | 91 | @staticmethod 92 | def auto(): 93 | 
return PolySwarmd.from_config_file_search() 94 | 95 | @staticmethod 96 | def from_config_file_search(): 97 | # Expect config in the environment 98 | for location in CONFIG_LOCATIONS: 99 | location = os.path.abspath(os.path.expanduser(location)) 100 | filename = os.path.join(location, 'polyswarmd.yml') 101 | if os.path.isfile(filename): 102 | return PolySwarmd.create_from_file(filename) 103 | 104 | else: 105 | return PolySwarmd.from_dict_and_environment({}) 106 | 107 | @staticmethod 108 | def create_from_file(path): 109 | with open(path, 'r') as f: 110 | return PolySwarmd.from_dict_and_environment(yaml.safe_load(f)) 111 | 112 | def __post_init__(self): 113 | self.setup_chains() 114 | self.setup_status() 115 | 116 | def setup_chains(self): 117 | self.chains = self.eth.get_chains(self.community) 118 | 119 | def setup_status(self): 120 | self.status = Status(self.community) 121 | self.status.register_services(self.__create_services()) 122 | 123 | def __create_services(self): 124 | services = [*self.create_ethereum_services(), self.create_artifact_service()] 125 | if self.auth.uri: 126 | services.append(self.create_auth_services()) 127 | return services 128 | 129 | def create_artifact_service(self): 130 | return ArtifactServices(self.artifact.client, self.session) 131 | 132 | def create_ethereum_services(self): 133 | return [EthereumService(name, chain, self.session) for name, chain in self.chains.items()] 134 | 135 | def create_auth_services(self): 136 | return AuthService(self.auth.uri, self.session) 137 | -------------------------------------------------------------------------------- /src/polyswarmd/config/service.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Any, Dict 3 | 4 | import gevent 5 | from requests import HTTPError 6 | from requests_futures.sessions import FuturesSession 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class Service: 12 | """Service that polyswarmd connects to """ 13 | session: FuturesSession 14 | name: str 15 | uri: str 16 | 17 | def __init__(self, name, uri, session): 18 | self.name = name 19 | self.uri = uri 20 | self.session = session 21 | 22 | def wait_until_live(self): 23 | while not self.test_reachable(): 24 | gevent.sleep(1) 25 | 26 | def test_reachable(self) -> bool: 27 | try: 28 | self.connect_to_service() 29 | return True 30 | except (HTTPError, ConnectionError): 31 | logger.exception('Error connecting to %s', self.name) 32 | return False 33 | 34 | def connect_to_service(self): 35 | future = self.session.post(self.uri) 36 | response = future.result() 37 | response.raise_for_status() 38 | 39 | def get_service_state(self) -> Dict[str, Any]: 40 | return self.build_output(self.test_reachable()) 41 | 42 | def build_output(self, reachable) -> Dict[str, Any]: 43 | return {'reachable': reachable} 44 | -------------------------------------------------------------------------------- /src/polyswarmd/config/status.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | from polyswarmd.config.service import Service 4 | 5 | 6 | class Status: 7 | community: str 8 | services: List[Service] 9 | 10 | def __init__(self, community): 11 | self.community = community 12 | self.services = [] 13 | 14 | def get_status(self): 15 | status: Dict = {'community': self.community} 16 | status.update(self.test_services()) 17 | return status 18 | 19 | def register_services(self, services: List[Service]): 20 | for service 
in services: 21 | self.services.append(service) 22 | 23 | def register_service(self, service: Service): 24 | self.services.append(service) 25 | 26 | def test_services(self) -> Dict[str, Union[str, bool, Dict]]: 27 | # The return type may NOT be correct. It was produced by referencing the original 28 | # implementation. If it's the case that `test_services` (and it's subclass's impl) 29 | # *only* returns `bool` now, just drop this to `Dict[str, bool]` or whatever. 30 | return {service.name: service.get_service_state() for service in self.services} 31 | -------------------------------------------------------------------------------- /src/polyswarmd/exceptions.py: -------------------------------------------------------------------------------- 1 | class PolyswarmdException(Exception): 2 | 3 | def __init__(self, message=None): 4 | self.message = message 5 | 6 | 7 | class WebsocketConnectionAbortedError(Exception): 8 | """Exception thrown when no clients exist to broadcast to""" 9 | 10 | def __init__(self, message=None): 11 | self.message = message 12 | -------------------------------------------------------------------------------- /src/polyswarmd/monkey.py: -------------------------------------------------------------------------------- 1 | """ 2 | isort:skip_file 3 | """ 4 | from concurrent.futures import ThreadPoolExecutor 5 | 6 | import web3 7 | from gevent import monkey 8 | from requests_futures.sessions import FuturesSession 9 | 10 | session = FuturesSession( 11 | executor=ThreadPoolExecutor(), adapter_kwargs={ 12 | 'max_retries': 5, 13 | 'pool_maxsize': 100 14 | } 15 | ) 16 | 17 | 18 | def patch_all(): 19 | patch_gevent() 20 | patch_web3() 21 | 22 | 23 | def patch_gevent(): 24 | monkey.patch_all() 25 | 26 | 27 | def patch_web3(): 28 | 29 | def make_post_request(endpoint_uri, data, *args, **kwargs): 30 | kwargs.setdefault('timeout', 1) 31 | future = session.post(endpoint_uri, data=data, *args, **kwargs) 32 | response = future.result() 33 | response.raise_for_status() 34 | 35 | return response.content 36 | 37 | web3.providers.rpc.make_post_request = make_post_request 38 | -------------------------------------------------------------------------------- /src/polyswarmd/services/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/services/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/services/artifact/__init__.py: -------------------------------------------------------------------------------- 1 | from polyswarmdconfig.artifactclient import AbstractArtifactServiceClient 2 | from .service import ArtifactServices 3 | 4 | __all__ = ['AbstractArtifactServiceClient', 'ArtifactServices'] 5 | -------------------------------------------------------------------------------- /src/polyswarmd/services/artifact/exceptions.py: -------------------------------------------------------------------------------- 1 | from polyswarmd.exceptions import PolyswarmdException 2 | 3 | 4 | class ArtifactException(PolyswarmdException): 5 | pass 6 | 7 | 8 | class InvalidUriException(ArtifactException): 9 | pass 10 | 11 | 12 | class ArtifactNotFoundException(ArtifactException): 13 | pass 14 | 15 | 16 | class ArtifactEmptyException(ArtifactException): 17 | pass 18 | 19 | 20 | class ArtifactTooLargeException(ArtifactException): 21 | pass 22 | 
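These exception types are raised by the artifact service clients (for example the IPFS client that follows) and translated into HTTP responses in views/artifacts.py. A condensed sketch of that mapping, returning plain (message, status) tuples instead of Flask responses so it stays self-contained; the exact messages and codes vary slightly per endpoint:

# Sketch only: mirrors the exception handling pattern in views/artifacts.py.
from polyswarmd.services.artifact.exceptions import (
    ArtifactEmptyException,
    ArtifactException,
    ArtifactNotFoundException,
    ArtifactTooLargeException,
    InvalidUriException,
)

def classify_artifact_error(exc: ArtifactException):
    # Subclasses are checked before the generic ArtifactException fallback.
    if isinstance(exc, InvalidUriException):
        return 'Invalid artifact URI', 400
    if isinstance(exc, ArtifactNotFoundException):
        return 'Artifact not found', 404
    if isinstance(exc, ArtifactTooLargeException):
        return 'Artifact too large', 413
    if isinstance(exc, ArtifactEmptyException):
        return 'Artifact empty', 400
    return exc.message, 500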
-------------------------------------------------------------------------------- /src/polyswarmd/services/artifact/ipfs.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import re 4 | import uuid 5 | 6 | import base58 7 | import ipfshttpclient 8 | from urllib3.util import parse_url 9 | 10 | from polyswarmdconfig.artifactclient import AbstractArtifactServiceClient 11 | from polyswarmd.services.artifact.exceptions import ( 12 | ArtifactException, 13 | ArtifactNotFoundException, 14 | ArtifactTooLargeException, 15 | InvalidUriException, 16 | ) 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | class IpfsServiceClient(AbstractArtifactServiceClient): 22 | """ 23 | Artifact Service Client for IPFS. 24 | 25 | Uses MFS for adding to directories, since limits on IPFS API requests prevent 256 file requests. 26 | """ 27 | 28 | def __init__(self, base_uri=None): 29 | self.base_uri = base_uri or os.environ.get('IPFS_URI') 30 | reachable_endpoint = f"{self.base_uri}{'/api/v0/bootstrap'}" 31 | super().__init__('IPFS', reachable_endpoint) 32 | self._client = None 33 | 34 | @property 35 | def client(self): 36 | if self._client is None: 37 | url = parse_url(self.base_uri) 38 | self._client = ipfshttpclient.connect( 39 | f'/dns/{url.host}/tcp/{url.port}/{url.scheme}', session=True 40 | ) 41 | 42 | return self._client 43 | 44 | @staticmethod 45 | def check_ls(artifacts, index, max_size=None): 46 | if index < 0 or index > 256 or index >= len(artifacts): 47 | raise ArtifactNotFoundException('Could not locate artifact ID') 48 | 49 | _, artifact, size = artifacts[index] 50 | if max_size and size > max_size: 51 | raise ArtifactTooLargeException() 52 | 53 | return artifacts[index] 54 | 55 | @staticmethod 56 | def check_redis(uri, redis): 57 | if not redis: 58 | return None 59 | 60 | try: 61 | result = redis.get(f'polyswarmd:{uri}') 62 | if result: 63 | return result 64 | except RuntimeError: 65 | # happens if redis is not configured and websocket poll calls this 66 | pass 67 | 68 | def add_artifacts(self, artifacts, session): 69 | directory = self.mkdir() 70 | for artifact in artifacts: 71 | response = self.client.add(artifact[1], pin=False) 72 | filename = artifact[0] 73 | source = f'/ipfs/{response["Hash"]}' 74 | dest = f'{directory}/{filename}' 75 | self.client.files.cp(source, dest) 76 | 77 | stat = self.client.files.stat(directory) 78 | return stat.get('Hash', '') 79 | 80 | def add_artifact(self, artifact, session, redis=None): 81 | # We cannot add a string using client.add, it will take a string or b-string and tries to load a file 82 | ipfs_uri = self.client.add_str(artifact) 83 | # add_str does not accept any way to set pin=False, so we have to remove in a second call 84 | try: 85 | self.client.pin.rm(ipfs_uri, timeout=1) 86 | except ( 87 | ipfshttpclient.exceptions.ErrorResponse, ipfshttpclient.exceptions.TimeoutError 88 | ) as e: 89 | logger.warning('Got error when removing pin: %s', e) 90 | # Only seen when the pin didn't exist, not a big deal 91 | pass 92 | 93 | if redis: 94 | redis.set(f'polyswarmd:{ipfs_uri}', artifact, ex=300) 95 | 96 | return ipfs_uri 97 | 98 | # noinspection PyBroadException 99 | def check_uri(self, uri): 100 | # TODO: Further multihash validation 101 | try: 102 | return len(uri) < 100 and base58.b58decode(uri) 103 | except Exception: 104 | raise InvalidUriException() 105 | 106 | def details(self, uri, index, session): 107 | self.check_uri(uri) 108 | artifacts = self.ls(uri, session) 109 | name, 
artifact, _ = IpfsServiceClient.check_ls(artifacts, index) 110 | 111 | try: 112 | stat = self.client.object.stat(artifact, session, timeout=1) 113 | except ipfshttpclient.exceptions.TimeoutError: 114 | raise ArtifactNotFoundException('Could not locate artifact ID') 115 | 116 | logger.info(f'Got artifact details {stat}') 117 | 118 | # Convert stats to snake_case 119 | stats = {re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', k).lower(): v for k, v in stat.items()} 120 | stats['name'] = name 121 | 122 | return stats 123 | 124 | def get_artifact(self, uri, session, index=None, max_size=None, redis=None): 125 | self.check_uri(uri) 126 | redis_response = IpfsServiceClient.check_redis(uri, redis) 127 | if redis_response: 128 | return redis_response 129 | 130 | if index is not None: 131 | artifacts = self.ls(uri, session) 132 | _, uri, _ = IpfsServiceClient.check_ls(artifacts, index, max_size) 133 | 134 | try: 135 | return self.client.cat(uri, timeout=1) 136 | except ipfshttpclient.exceptions.TimeoutError: 137 | raise ArtifactNotFoundException('Could not locate artifact ID') 138 | 139 | def ls(self, uri, session): 140 | self.check_uri(uri) 141 | try: 142 | stats = self.client.object.stat(uri, timeout=1) 143 | ls = self.client.object.links(uri, timeout=1) 144 | except ipfshttpclient.exceptions.TimeoutError: 145 | raise ArtifactException('Timeout running ls') 146 | 147 | # Return self if not directory 148 | if stats.get('NumLinks', 0) == 0: 149 | return [('', stats.get('Hash', ''), stats.get('DataSize'))] 150 | 151 | if ls: 152 | links = [(l.get('Name', ''), l.get('Hash', ''), l.get('Size', 0)) 153 | for l in ls.get('Links', [])] 154 | 155 | if not links: 156 | links = [('', stats.get('Hash', ''), stats.get('DataSize', 0))] 157 | 158 | return links 159 | 160 | raise ArtifactNotFoundException('Could not locate IPFS resource') 161 | 162 | def status(self, session): 163 | return {'online': self.client.object.sys()['net']['online']} 164 | 165 | def mkdir(self): 166 | while True: 167 | directory_name = f'/{str(uuid.uuid4())}' 168 | # Try again if name is taken (Should never happen) 169 | try: 170 | if self.client.files.ls(directory_name, timeout=1): 171 | logger.critical('Got collision on names. Some assumptions were wrong') 172 | continue 173 | except (ipfshttpclient.exceptions.ErrorResponse, ipfshttpclient.exceptions.TimeoutError): 174 | # Raises error if it doesn't exists, so we want to continue in this case. 
175 | break 176 | 177 | try: 178 | self.client.files.mkdir(directory_name, timeout=1) 179 | return directory_name 180 | except ipfshttpclient.exceptions.TimeoutError: 181 | raise ArtifactException('Timeout running ls') 182 | -------------------------------------------------------------------------------- /src/polyswarmd/services/artifact/service.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | from requests_futures.sessions import FuturesSession 4 | 5 | from polyswarmd.config.service import Service 6 | from polyswarmdconfig.artifactclient import AbstractArtifactServiceClient 7 | 8 | 9 | class ArtifactServices(Service): 10 | """Service for all ArtifactServices""" 11 | artifact_client: AbstractArtifactServiceClient 12 | 13 | def __init__(self, artifact_client: AbstractArtifactServiceClient, session: FuturesSession): 14 | self.artifact_client = artifact_client 15 | super().__init__('artifact_services', artifact_client.reachable_endpoint, session) 16 | 17 | def build_output(self, reachable) -> Dict[str, Any]: 18 | return {self.artifact_client.name.lower(): {'reachable': reachable}} 19 | -------------------------------------------------------------------------------- /src/polyswarmd/services/auth.py: -------------------------------------------------------------------------------- 1 | from polyswarmd.config.service import Service 2 | 3 | 4 | class AuthService(Service): 5 | """Service declaration for Ethereum""" 6 | 7 | def __init__(self, base_uri, session): 8 | super().__init__('auth', AuthService.build_uri(base_uri), session) 9 | 10 | @staticmethod 11 | def build_uri(base_uri) -> str: 12 | return f'{base_uri}/communities/public' 13 | 14 | def connect_to_service(self): 15 | future = self.session.get(self.uri) 16 | response = future.result() 17 | response.raise_for_status() 18 | -------------------------------------------------------------------------------- /src/polyswarmd/services/consul.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from requests_futures.sessions import FuturesSession 4 | 5 | from polyswarmd.config.service import Service 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class ConsulService(Service): 11 | """Service for Consul""" 12 | 13 | def __init__(self, uri: str, session: FuturesSession): 14 | super().__init__('consul', uri, session) 15 | -------------------------------------------------------------------------------- /src/polyswarmd/services/ethereum/__init__.py: -------------------------------------------------------------------------------- 1 | from .service import EthereumService 2 | 3 | __all__ = ['EthereumService'] 4 | -------------------------------------------------------------------------------- /src/polyswarmd/services/ethereum/rpc.py: -------------------------------------------------------------------------------- 1 | from signal import SIGQUIT 2 | from typing import AnyStr, List, Optional, SupportsBytes, Union 3 | 4 | import gevent 5 | from gevent.lock import BoundedSemaphore 6 | 7 | from polyswarmd.exceptions import WebsocketConnectionAbortedError 8 | from polyswarmd.utils import logging 9 | from polyswarmd.views.event_message import WebSocket 10 | from polyswarmd.websockets.filter import FilterManager 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class EthereumRpc: 16 | """ 17 | This class periodically polls several geth filters, and multicasts the results across any open WebSockets 18 | """ 19 | 
filter_manager: FilterManager 20 | websockets: Optional[List[WebSocket]] 21 | websockets_lock: BoundedSemaphore 22 | 23 | def __init__(self, chain): 24 | self.chain = chain 25 | self.filter_manager = FilterManager() 26 | self.websockets = None 27 | self.websockets_lock = BoundedSemaphore(1) 28 | self.chain = chain 29 | 30 | def __repr__(self): 31 | return f"" 32 | 33 | def broadcast(self, message: Union[AnyStr, SupportsBytes]): 34 | """ 35 | Send a message to all connected WebSockets 36 | :param message: dict to be converted to json and sent 37 | """ 38 | # XXX This can be replaced with a broadcast inside the WebsocketHandlerApplication 39 | logger.debug("I have %s websockets on %s", len(self.websockets), repr(self)) 40 | with self.websockets_lock: 41 | if len(self.websockets) == 0: 42 | raise WebsocketConnectionAbortedError 43 | for ws in self.websockets: 44 | try: 45 | ws.send(message) 46 | except Exception: 47 | logger.exception('Error adding message to the queue') 48 | continue 49 | 50 | # noinspection PyBroadException 51 | def poll(self): 52 | """ 53 | Continually poll all Ethereum filters as long as there are WebSockets listening 54 | """ 55 | # Start the pool 56 | try: 57 | for filter_events in self.filter_manager.fetch(): 58 | for msg in filter_events: 59 | self.broadcast(msg) 60 | except WebsocketConnectionAbortedError: 61 | logger.exception("Shutting down poll()") 62 | self.websockets = None 63 | except gevent.GreenletExit: 64 | logger.exception( 65 | 'Exiting poll() Greenlet with %d connected clients websockets', len(self.websockets) 66 | ) 67 | # if the greenlet is killed, we need to destroy the websocket connections (if any exist) 68 | self.websockets = None 69 | except Exception: 70 | logger.exception( 71 | 'Exception in filter checks with %d connected websockets', len(self.websockets) 72 | ) 73 | # Creates a new greenlet with all new filters and let's this one die. 
74 | greenlet = gevent.spawn(self.poll) 75 | gevent.signal(SIGQUIT, greenlet.kill) 76 | 77 | def register(self, ws: WebSocket): 78 | """ 79 | Register a WebSocket with the rpc nodes 80 | Gets all events going forward 81 | :param ws: WebSocket wrapper to register 82 | """ 83 | with self.websockets_lock: 84 | logger.debug('Registering WebSocket %s', id(ws)) 85 | if self.websockets is None: 86 | self.websockets = [ws] 87 | logger.debug('First WebSocket registered, starting greenlet') 88 | self.filter_manager.setup_event_filters(self.chain) 89 | greenlet = gevent.spawn(self.poll) 90 | gevent.signal(SIGQUIT, greenlet.kill) 91 | else: 92 | self.websockets.append(ws) 93 | 94 | def unregister(self, ws: WebSocket): 95 | """ 96 | Remove a Websocket wrapper object 97 | :param ws: WebSocket to remove 98 | """ 99 | logger.debug('Unregistering WebSocket %s', id(ws)) 100 | with self.websockets_lock: 101 | if ws in self.websockets: 102 | logger.debug('Removing WebSocket %s', id(ws)) 103 | self.websockets.remove(ws) 104 | -------------------------------------------------------------------------------- /src/polyswarmd/services/ethereum/service.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | from polyswarmd.config.service import Service 4 | 5 | 6 | class EthereumService(Service): 7 | """Service for Ethereum""" 8 | 9 | def __init__(self, name, chain, session): 10 | self.chain = chain 11 | 12 | super().__init__(name, chain.eth_uri, session) 13 | 14 | def build_output(self, reachable) -> Dict[str, Any]: 15 | if reachable: 16 | self.check_chain_id() 17 | return {'reachable': True, 'syncing': self.is_syncing(), 'block': self.get_block()} 18 | else: 19 | return super().build_output(False) 20 | 21 | def connect_to_service(self): 22 | future = self.session.post(self.uri, headers={'Content-Type': 'application/json'}) 23 | response = future.result() 24 | response.raise_for_status() 25 | 26 | def is_syncing(self) -> bool: 27 | return self.chain.w3.eth.syncing is not False 28 | 29 | def get_block(self) -> int: 30 | return self.chain.w3.eth.blockNumber 31 | 32 | def check_chain_id(self): 33 | if int(self.chain.chain_id) != int(self.chain.w3.version.network): 34 | raise ValueError( 35 | f'Chain id mismatch: {self.chain.chain_id} != {self.chain.w3.version.network}' 36 | ) 37 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import ( 2 | IN_TESTENV, 3 | assertion_to_dict, 4 | bloom_to_dict, 5 | bool_list_to_int, 6 | bounty_to_dict, 7 | cache_contract_view, 8 | camel_case_to_snake_case, 9 | channel_to_dict, 10 | dict_to_state, 11 | g, 12 | int_to_bool_list, 13 | logging, 14 | new_cancel_agreement_event_to_dict, 15 | new_init_channel_event_to_dict, 16 | new_settle_challenged_event, 17 | new_settle_started_event, 18 | safe_int_to_bool_list, 19 | sha3, 20 | state_to_dict, 21 | to_padded_hex, 22 | uint256_list_to_hex_string, 23 | uuid, 24 | validate_ws_url, 25 | vote_to_dict, 26 | ) 27 | 28 | __all__ = [ 29 | 'uuid', 'logging', 'IN_TESTENV', 'assertion_to_dict', 'bloom_to_dict', 'bool_list_to_int', 30 | 'bounty_to_dict', 'cache_contract_view', 'camel_case_to_snake_case', 'channel_to_dict', 'g', 31 | 'dict_to_state', 'int_to_bool_list', 'new_cancel_agreement_event_to_dict', 32 | 'new_init_channel_event_to_dict', 'new_settle_challenged_event', 'new_settle_started_event', 33 | 
'safe_int_to_bool_list', 'state_to_dict', 'to_padded_hex', 'sha3', 'uint256_list_to_hex_string', 34 | 'validate_ws_url', 'vote_to_dict' 35 | ] 36 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/bloom.py: -------------------------------------------------------------------------------- 1 | # Based on eth-bloom (https://github.com/ethereum/eth-bloom, used under MIT 2 | # license) with modifications 3 | import logging 4 | import numbers 5 | import operator 6 | 7 | from polyswarmd.utils import sha3 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | FILTER_BITS = 8 * 256 12 | HASH_FUNCS = 8 13 | 14 | 15 | def get_chunks_for_bloom(value_hash): 16 | assert HASH_FUNCS * 2 <= len(value_hash) 17 | for i in range(0, HASH_FUNCS): 18 | yield value_hash[2 * i:2 * (i+1)] # noqa 19 | 20 | 21 | def chunk_to_bloom_bits(chunk): 22 | assert FILTER_BITS <= (1 << 16) 23 | high, low = bytearray(chunk) 24 | return 1 << ((low + (high << 8)) & (FILTER_BITS - 1)) 25 | 26 | 27 | def get_bloom_bits(value): 28 | # Could decode the ipfs_hash and use it as is, but instead hash the 29 | # multihash representation to side-step different hash formats going 30 | # forward. Should rexamine this decision 31 | value_hash = sha3(value) 32 | for chunk in get_chunks_for_bloom(value_hash): 33 | bloom_bits = chunk_to_bloom_bits(chunk) 34 | yield bloom_bits 35 | 36 | 37 | class BloomFilter(numbers.Number): 38 | value = None 39 | 40 | def __init__(self, value=0): 41 | self.value = value 42 | 43 | def __int__(self): 44 | return self.value 45 | 46 | def add(self, value): 47 | if not isinstance(value, bytes): 48 | raise TypeError("Value must be of type `bytes`") 49 | for bloom_bits in get_bloom_bits(value): 50 | self.value |= bloom_bits 51 | 52 | def extend(self, iterable): 53 | for value in iterable: 54 | self.add(value) 55 | 56 | @classmethod 57 | def from_iterable(cls, iterable): 58 | bloom = cls() 59 | bloom.extend(iterable) 60 | return bloom 61 | 62 | def __contains__(self, value): 63 | if not isinstance(value, bytes): 64 | raise TypeError("Value must be of type `bytes`") 65 | return all(self.value & bloom_bits for bloom_bits in get_bloom_bits(value)) 66 | 67 | def __index__(self): 68 | return operator.index(self.value) 69 | 70 | def _combine(self, other): 71 | if not isinstance(other, (int, BloomFilter)): 72 | raise TypeError("The `or` operator is only supported for other `BloomFilter` instances") 73 | return BloomFilter(int(self) | int(other)) 74 | 75 | def __hash__(self): 76 | return hash(self.value) 77 | 78 | def __or__(self, other): 79 | return self._combine(other) 80 | 81 | def __add__(self, other): 82 | return self._combine(other) 83 | 84 | def _icombine(self, other): 85 | if not isinstance(other, (int, BloomFilter)): 86 | raise TypeError("The `or` operator is only supported for other `BloomFilter` instances") 87 | self.value |= int(other) 88 | return self 89 | 90 | def __ior__(self, other): 91 | return self._icombine(other) 92 | 93 | def __iadd__(self, other): 94 | return self._icombine(other) 95 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/decorators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/utils/decorators/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/utils/decorators/chains.py: 
-------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | 4 | from flask import current_app as app 5 | from flask import g, request 6 | 7 | from polyswarmd.utils.response import failure 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | def chain(_func=None, chain_name=None, account_required=True): 13 | """ 14 | This decorator takes the chain passed as a request arg and modifies a set of globals. 15 | There are a few guarantees made by this function. 16 | 17 | If any of the values for the given chain are missing, the decorator will skip the function 18 | and return an error to the user. (500) 19 | If the chain is not recognized, it will return an error to the user. (400) 20 | If it is the home chain, the offer contract address and bindings will also be validated, or 21 | an error returned. (500) 22 | """ 23 | 24 | @functools.wraps(_func) 25 | def decorator_wrapper(func): 26 | 27 | @functools.wraps(func) 28 | def wrapper(*args, **kwargs): 29 | g.eth_address = request.args.get('account') 30 | if not g.eth_address and account_required: 31 | return failure('Account must be provided', 400) 32 | 33 | c = chain_name 34 | if c is None: 35 | c = request.args.get('chain', 'side') 36 | 37 | chain = app.config['POLYSWARMD'].chains.get(c) 38 | if not chain: 39 | chain_options = ", ".join(app.config['POLYSWARMD'].chains) 40 | return failure(f'Chain must one of {chain_options}', 400) 41 | 42 | g.chain = chain 43 | return func(*args, **kwargs) 44 | 45 | return wrapper 46 | 47 | if _func is None: 48 | return decorator_wrapper 49 | 50 | return decorator_wrapper(_func) 51 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/logger.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import logging 3 | import signal 4 | 5 | from pythonjsonlogger import jsonlogger 6 | 7 | 8 | def init_logging(log_format, log_level): 9 | """ 10 | Logic to support JSON logging. 
11 | """ 12 | logger_config = LoggerConfig(log_format, log_level) 13 | logger_config.configure() 14 | 15 | 16 | class LoggerConfig: 17 | LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] 18 | 19 | def __init__(self, log_format, log_level=logging.WARNING): 20 | self.log_format = log_format 21 | self.log_level = log_level 22 | 23 | def configure(self): 24 | logger = logging.getLogger() 25 | if self.log_format and self.log_format in ['json', 'datadog']: 26 | log_handler = logging.StreamHandler() 27 | formatter = PolyswarmdJsonFormatter('(timestamp) (level) (name) (message)') 28 | log_handler.setFormatter(formatter) 29 | logger.addHandler(log_handler) 30 | logger.setLevel(self.log_level) 31 | logger.info("Logging in JSON format.") 32 | elif not logger.handlers: 33 | # logger.handlers will have a value during pytest 34 | logging.basicConfig(level=self.log_level) 35 | logger.info("Logging in text format.") 36 | else: 37 | logger.setLevel(self.log_level) 38 | logger.info("Logging in text format.") 39 | 40 | signal.signal(signal.SIGUSR1, self.__signal_handler) 41 | 42 | def set_level(self, new_level): 43 | self.log_level = new_level 44 | logger = logging.getLogger() 45 | logger.setLevel(self.log_level) 46 | logger.log(self.log_level, 'Changed log level') 47 | 48 | def __signal_handler(self, _signum, _frame): 49 | try: 50 | cur_index = self.LEVELS.index(self.log_level) 51 | except ValueError: 52 | raise ValueError('Invalid logging level') 53 | 54 | index = 0 if cur_index == len(self.LEVELS) - 1 else cur_index + 1 55 | self.set_level(self.LEVELS[index]) 56 | 57 | 58 | class PolyswarmdJsonFormatter(jsonlogger.JsonFormatter): 59 | """ 60 | Class to add custom JSON fields to our logger. 61 | Presently just adds a timestamp if one isn't present and the log level. 
62 | INFO: https://github.com/madzak/python-json-logger#customizing-fields 63 | """ 64 | 65 | def add_fields(self, log_record, record, message_dict): 66 | super(PolyswarmdJsonFormatter, self).add_fields(log_record, record, message_dict) 67 | if not log_record.get('timestamp'): 68 | # this doesn't use record.created, so it is slightly off 69 | now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') 70 | log_record['timestamp'] = now 71 | if log_record.get('level'): 72 | log_record['level'] = log_record['level'].upper() 73 | else: 74 | log_record['level'] = record.levelname 75 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/profiler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import flask_profiler 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def setup_profiler(app): 9 | profiler = app.config['POLYSWARMD'].profiler 10 | if not profiler.enabled: 11 | return 12 | 13 | if profiler.db_uri is None: 14 | logger.error('Profiler enabled but no db configured') 15 | return 16 | 17 | app.config['flask_profiler'] = { 18 | 'enabled': True, 19 | 'measurement': True, 20 | 'gui': False, 21 | 'storage': { 22 | 'engine': 'sqlalchemy', 23 | 'db_url': profiler.db_uri, 24 | }, 25 | } 26 | 27 | flask_profiler.init_app(app) 28 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/response.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from flask import jsonify 4 | from werkzeug.exceptions import HTTPException, default_exceptions 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def install_error_handlers(app): 10 | 11 | def make_json_error(e): 12 | response = jsonify(message=str(e)) 13 | response.status_code = e.code if isinstance(e, HTTPException) else 500 14 | return response 15 | 16 | for code in default_exceptions: 17 | app.register_error_handler(code, make_json_error) 18 | 19 | 20 | def success(result=None): 21 | if result is not None: 22 | return jsonify({'status': 'OK', 'result': result}), 200 23 | 24 | return jsonify({'status': 'OK'}), 200 25 | 26 | 27 | def failure(message, code=500): 28 | return jsonify({'status': 'FAIL', 'errors': message}), code 29 | -------------------------------------------------------------------------------- /src/polyswarmd/utils/utils.py: -------------------------------------------------------------------------------- 1 | import codecs 2 | import logging 3 | from os import getenv 4 | import re 5 | import string 6 | from typing import Union 7 | import uuid 8 | 9 | from Crypto.Hash import keccak 10 | from flask import g 11 | 12 | from polyswarmartifact import ArtifactType 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | # Indicates if we are inside a testing environment 17 | IN_TESTENV = getenv('POLY_WORK') == 'testing' 18 | 19 | 20 | def bool_list_to_int(bs): 21 | return sum([1 << n if b else 0 for n, b in enumerate(bs)]) 22 | 23 | 24 | def int_to_bool_list(i): 25 | s = format(i, 'b') 26 | return [x == '1' for x in s[::-1]] 27 | 28 | 29 | def safe_int_to_bool_list(num, max): 30 | if int(num) == 0: 31 | return [False] * int(max) 32 | else: 33 | converted = int_to_bool_list(num) 34 | return converted + [False] * (max - len(converted)) 35 | 36 | 37 | def uint256_list_to_hex_string(us): 38 | return hex(sum([x << (256 * n) for n, x in enumerate(us)])) 39 | 40 | 41 | def bounty_to_dict(bounty): 42 | return { 43 | 'guid': 
str(uuid.UUID(int=bounty[0])), 44 | 'artifact_type': ArtifactType.to_string(ArtifactType(bounty[1])), 45 | 'author': bounty[2], 46 | 'amount': str(bounty[3]), 47 | 'uri': bounty[4], 48 | 'num_artifacts': bounty[5], 49 | 'expiration': bounty[6], 50 | 'assigned_arbiter': bounty[7], 51 | 'quorum_reached': bounty[8], 52 | 'quorum_reached_block': bounty[9], 53 | 'quorum_mask': safe_int_to_bool_list(bounty[10], bounty[5]), 54 | 'metadata': bounty[11] 55 | } 56 | 57 | 58 | def bloom_to_dict(bloom): 59 | return { 60 | 'bloom': bloom, 61 | } 62 | 63 | 64 | def assertion_to_dict(assertion, num_artifacts): 65 | return { 66 | 'author': assertion[0], 67 | 'mask': safe_int_to_bool_list(assertion[1], num_artifacts), 68 | 'commitment': str(assertion[2]), 69 | 'nonce': str(assertion[3]), 70 | 'verdicts': safe_int_to_bool_list(assertion[4], num_artifacts), 71 | 'metadata': assertion[5], 72 | } 73 | 74 | 75 | def vote_to_dict(vote, num_artifacts): 76 | return { 77 | 'voter': vote[0], 78 | 'votes': safe_int_to_bool_list(vote[1], num_artifacts), 79 | 'valid_bloom': vote[2], 80 | } 81 | 82 | 83 | def channel_to_dict(channel_data): 84 | return { 85 | 'msig_address': channel_data[0], 86 | 'ambassador': channel_data[1], 87 | 'expert': channel_data[2], 88 | } 89 | 90 | 91 | def state_to_dict(state): 92 | if not g.chain: 93 | raise ValueError('g.chain not found') 94 | 95 | # gets state of non required state 96 | offer_info = g.chain.offer_registry.contract.functions.getOfferState(state).call() 97 | 98 | return { 99 | 'nonce': g.chain.w3.toInt(offer_info[1]), 100 | 'offer_amount': g.chain.w3.toInt(offer_info[2]), 101 | 'msig_address': offer_info[3], 102 | 'ambassador_balance': g.chain.w3.toInt(offer_info[4]), 103 | 'expert_balance': g.chain.w3.toInt(offer_info[5]), 104 | 'ambassador': offer_info[6], 105 | 'expert': offer_info[7], 106 | 'is_closed': offer_info[8], 107 | 'token': offer_info[9], 108 | 'mask': int_to_bool_list(g.chain.w3.toInt(offer_info[10])), 109 | 'verdicts': int_to_bool_list(g.chain.w3.toInt(offer_info[11])), 110 | } 111 | 112 | 113 | def new_init_channel_event_to_dict(new_init_event): 114 | return { 115 | 'guid': str(uuid.UUID(int=new_init_event.guid)), 116 | 'ambassador': new_init_event.ambassador, 117 | 'expert': new_init_event.expert, 118 | 'multi_signature': new_init_event.msig, 119 | } 120 | 121 | 122 | def new_settle_challenged_event(new_event): 123 | return { 124 | 'challenger': new_event.challenger, 125 | 'nonce': new_event.sequence, 126 | 'settle_period_end': new_event.settlementPeriodEnd, 127 | } 128 | 129 | 130 | def new_settle_started_event(new_event): 131 | return { 132 | 'initiator': new_event.initiator, 133 | 'nonce': new_event.sequence, 134 | 'settle_period_end': new_event.settlementPeriodEnd, 135 | } 136 | 137 | 138 | def new_cancel_agreement_event_to_dict(new_event): 139 | return { 140 | 'expert': new_event._expert, 141 | 'ambassador': new_event._ambassador, 142 | } 143 | 144 | 145 | # https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case 146 | def camel_case_to_snake_case(s): 147 | s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', s) 148 | return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower() 149 | 150 | 151 | def to_padded_hex(val: Union[str, bool, int, bytes]) -> str: 152 | """ 153 | Convert an argument to a hexadecimal string and zero-extend to 64 width""" 154 | 155 | def encode_hex(xs: bytes) -> str: 156 | return codecs.encode(xs, "hex").decode("ascii") # type: ignore 157 | 158 | if isinstance(val, str): 159 | if 
val.startswith('0x'): 160 | hex_suffix = val[2:].lower() 161 | # verify we're passing back a hexadecimal value 162 | if not all(c in string.hexdigits for c in hex_suffix): 163 | raise ValueError("Invalid hexadecimal characters detected") 164 | else: 165 | hex_suffix = encode_hex(val.encode('utf-8')) 166 | elif isinstance(val, bool): 167 | hex_suffix = str(int(val)) 168 | # do we need to check other types here? 169 | elif isinstance(val, (bytes, bytearray)): 170 | hex_suffix = encode_hex(val) 171 | elif isinstance(val, int): 172 | hex_suffix = hex(val)[2:] 173 | else: 174 | raise TypeError("Cannot convert to padded hex value") 175 | 176 | # `rjust' pads out it's member string to 64 chars 177 | result = hex_suffix.rjust(64, '0') 178 | if len(result) > 64: 179 | raise ValueError("Invalid string passed in. Too long.") 180 | 181 | return result 182 | 183 | 184 | def dict_to_state(state_dict): 185 | state_str = '0x' 186 | 187 | state_str = state_str + to_padded_hex(state_dict['close_flag']) 188 | state_str = state_str + to_padded_hex(state_dict['nonce']) 189 | state_str = state_str + to_padded_hex(state_dict['ambassador']) 190 | state_str = state_str + to_padded_hex(state_dict['expert']) 191 | state_str = state_str + to_padded_hex(state_dict['msig_address']) 192 | state_str = state_str + to_padded_hex(state_dict['ambassador_balance']) 193 | state_str = state_str + to_padded_hex(state_dict['expert_balance']) 194 | state_str = state_str + to_padded_hex(state_dict['token_address']) 195 | state_str = state_str + to_padded_hex(int(state_dict['guid'])) 196 | state_str = state_str + to_padded_hex(state_dict['offer_amount']) 197 | 198 | if 'artifact_hash' in state_dict: 199 | state_str = state_str + to_padded_hex(state_dict['artifact_hash']) 200 | else: 201 | state_str = state_str + to_padded_hex('') 202 | 203 | if 'ipfs_hash' in state_dict: 204 | state_str = state_str + to_padded_hex(state_dict['ipfs_hash']) 205 | else: 206 | state_str = state_str + to_padded_hex('') 207 | 208 | if 'engagement_deadline' in state_dict: 209 | state_str = state_str + to_padded_hex(state_dict['engagement_deadline']) 210 | else: 211 | state_str = state_str + to_padded_hex('') 212 | 213 | if 'assertion_deadline' in state_dict: 214 | state_str = state_str + to_padded_hex(state_dict['assertion_deadline']) 215 | else: 216 | state_str = state_str + to_padded_hex('') 217 | 218 | if 'mask' in state_dict: 219 | state_str = state_str + to_padded_hex(state_dict['mask']) 220 | else: 221 | state_str = state_str + to_padded_hex('') 222 | 223 | if 'verdicts' in state_dict: 224 | state_str = state_str + to_padded_hex(state_dict['verdicts']) 225 | else: 226 | state_str = state_str + to_padded_hex('') 227 | 228 | if 'meta_data' in state_dict: 229 | state_str = state_str + to_padded_hex(state_dict['meta_data']) 230 | else: 231 | state_str = state_str + to_padded_hex('') 232 | 233 | return state_str 234 | 235 | 236 | def validate_ws_url(uri): 237 | regex = re.compile( 238 | r'^(?:ws)s?://' # http:// or https:// 239 | r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain... 240 | r'localhost|' # localhost... 241 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip 242 | r'(?::\d+)?' 
# optional port 243 | r'(?:/?|[/?]\S+)$', 244 | re.IGNORECASE 245 | ) 246 | 247 | return re.match(regex, uri) is not None 248 | 249 | 250 | def sha3(data): 251 | h = keccak.new(digest_bits=256) 252 | h.update(data) 253 | return h.digest() 254 | 255 | 256 | def cache_contract_view( 257 | contract_view, key, redis, serialize=None, deserialize=None, invalidate=False 258 | ): 259 | """Returns tuple with boolean to indicate chached, and the response from chain, or from redis cache 260 | 261 | >>> from collections import namedtuple 262 | >>> ContractCall = namedtuple('ContractCall', 'call') 263 | >>> cache_contract_view(ContractCall(lambda : '12'), '', None) 264 | (False, '12') 265 | >>> Redis = namedtuple('Redis', ('get', 'set', 'exists')) 266 | >>> cache_contract_view(ContractCall(lambda : '12'), '', redis=Redis(lambda k: '13', None, lambda k: True)) 267 | (True, '13') 268 | >>> cache_contract_view(ContractCall(lambda : '12'), '', 269 | ... redis=Redis(lambda k: '13', lambda k, v, ex: None, lambda k: False)) 270 | (False, '12') 271 | >>> cache_contract_view(ContractCall(lambda : '12'), '', 272 | ... redis=Redis(lambda k: '13', lambda k, v, ex: None, lambda k: True), invalidate=True) 273 | (False, '12') 274 | >>> cache_contract_view(ContractCall(lambda : '12'), '', redis= 275 | ... Redis(lambda k: '13', lambda k, v, ex: None, lambda k: True), invalidate=False) 276 | (True, '13') 277 | """ 278 | if redis is None: 279 | return False, contract_view.call() 280 | 281 | if redis.exists(key) and not invalidate: 282 | response = redis.get(key) 283 | return True, deserialize(response) if deserialize is not None else response 284 | else: 285 | response = contract_view.call() 286 | redis.set(key, serialize(response) if serialize is not None else response, ex=600) 287 | return False, response 288 | -------------------------------------------------------------------------------- /src/polyswarmd/views/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/views/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/views/artifacts.py: -------------------------------------------------------------------------------- 1 | from io import SEEK_END 2 | import logging 3 | 4 | from flask import Blueprint 5 | from flask import current_app as app 6 | from flask import g, request 7 | from requests import HTTPError 8 | 9 | from polyswarmd.services.artifact.exceptions import ( 10 | ArtifactEmptyException, 11 | ArtifactException, 12 | ArtifactNotFoundException, 13 | ArtifactTooLargeException, 14 | InvalidUriException, 15 | ) 16 | from polyswarmd.utils.response import failure, success 17 | 18 | logger = logging.getLogger(__name__) 19 | artifacts: Blueprint = Blueprint('artifacts', __name__) 20 | 21 | 22 | def check_size(f, maxsize): 23 | """Return True if size is between 0 and the user's maximum 24 | 25 | >>> from collections import namedtuple 26 | >>> from io import StringIO 27 | >>> from werkzeug.datastructures import FileStorage 28 | >>> check_size(FileStorage(stream=StringIO('')), 1) #doctest:+ELLIPSIS 29 | Traceback (most recent call last): 30 | ... 
31 | polyswarmd.services.artifact.exceptions.ArtifactEmptyException 32 | >>> check_size(namedtuple('TestFile', 'content_length')(32), 64) 33 | True 34 | >>> check_size(namedtuple('TestFile', 'content_length')(16), 11) #doctest:+ELLIPSIS 35 | Traceback (most recent call last): 36 | ... 37 | polyswarmd.services.artifact.exceptions.ArtifactTooLargeException 38 | """ 39 | size = get_size(f) 40 | if maxsize < size: 41 | raise ArtifactTooLargeException() 42 | elif 0 >= size: 43 | raise ArtifactEmptyException() 44 | 45 | return True 46 | 47 | 48 | def get_size(f): 49 | """Return ``f.content_length`` falling back to the position of ``f``'s stream end. 50 | 51 | >>> from io import StringIO 52 | >>> from werkzeug.datastructures import FileStorage 53 | >>> get_size(FileStorage(stream=StringIO('A' * 16))) 54 | 16 55 | >>> from collections import namedtuple 56 | >>> get_size(namedtuple('TestFile', 'content_length')(32)) 57 | 32 58 | """ 59 | if f.content_length: 60 | logger.debug('Content length %s', f.content_length) 61 | return f.content_length 62 | 63 | original_position = f.tell() 64 | f.seek(0, SEEK_END) 65 | size = f.tell() 66 | logger.debug('Seek length %s', size) 67 | f.seek(original_position) 68 | return size 69 | 70 | 71 | @artifacts.route('/status', methods=['GET']) 72 | def get_artifacts_status(): 73 | config = app.config['POLYSWARMD'] 74 | session = app.config['REQUESTS_SESSION'] 75 | 76 | try: 77 | return success(config.artifact.client.status(session)) 78 | 79 | except HTTPError as e: 80 | return failure(e.response.content, e.response.status_code) 81 | except ArtifactException as e: 82 | return failure(e.message, 500) 83 | 84 | 85 | @artifacts.route('', methods=['POST']) 86 | def post_artifacts(): 87 | config = app.config['POLYSWARMD'] 88 | session = app.config['REQUESTS_SESSION'] 89 | try: 90 | files = [(f'{i:06d}', f) 91 | for (i, f) in enumerate(request.files.getlist(key='file')) 92 | if check_size(f, g.user.max_artifact_size)] 93 | except (AttributeError, IOError): 94 | logger.error('Error checking file size') 95 | return failure('Unable to read file sizes', 400) 96 | except ArtifactTooLargeException: 97 | return failure('Artifact too large', 413) 98 | except ArtifactEmptyException: 99 | return failure('Artifact empty', 400) 100 | 101 | if not files: 102 | return failure('No artifacts', 400) 103 | if len(files) > config.artifact.limit: 104 | return failure('Too many artifacts', 400) 105 | 106 | try: 107 | response = success(config.artifact.client.add_artifacts(files, session)) 108 | except HTTPError as e: 109 | response = failure(e.response.content, e.response.status_code) 110 | except ArtifactException as e: 111 | response = failure(e.message, 500) 112 | 113 | return response 114 | 115 | 116 | @artifacts.route('/', methods=['GET']) 117 | def get_artifacts_identifier(identifier): 118 | config = app.config['POLYSWARMD'] 119 | session = app.config['REQUESTS_SESSION'] 120 | try: 121 | arts = config.artifact.client.ls(identifier, session) 122 | if len(arts) > 256: 123 | return failure(f'Invalid {config.artifact.client.name} resource, too many links', 400) 124 | 125 | response = success([{'name': a[0], 'hash': a[1]} for a in arts]) 126 | 127 | except HTTPError as e: 128 | response = failure(e.response.content, e.response.status_code) 129 | except InvalidUriException: 130 | response = failure('Invalid artifact URI', 400) 131 | except ArtifactNotFoundException: 132 | response = failure(f'Artifact with URI {identifier} not found', 404) 133 | except ArtifactException as e: 134 | response = 
failure(e.message, 500) 135 | 136 | return response 137 | 138 | 139 | @artifacts.route('//', methods=['GET']) 140 | def get_artifacts_identifier_id(identifier, id_): 141 | config = app.config['POLYSWARMD'] 142 | session = app.config['REQUESTS_SESSION'] 143 | try: 144 | response = config.artifact.client.get_artifact( 145 | identifier, session, index=id_, max_size=g.user.max_artifact_size 146 | ) 147 | except HTTPError as e: 148 | response = failure(e.response.content, e.response.status_code) 149 | except InvalidUriException: 150 | response = failure('Invalid artifact URI', 400) 151 | except ArtifactNotFoundException: 152 | response = failure(f'Artifact with URI {identifier}/{id_} not found', 404) 153 | except ArtifactTooLargeException: 154 | response = failure(f'Artifact with URI {identifier}/{id_} too large', 400) 155 | except ArtifactException as e: 156 | response = failure(e.message, 500) 157 | 158 | return response 159 | 160 | 161 | @artifacts.route('///stat', methods=['GET']) 162 | def get_artifacts_identifier_id_stat(identifier, id_): 163 | config = app.config['POLYSWARMD'] 164 | session = app.config['REQUESTS_SESSION'] 165 | try: 166 | response = success(config.artifact.client.details(identifier, id_, session)) 167 | except HTTPError as e: 168 | response = failure(e.response.content, e.response.status_code) 169 | except InvalidUriException: 170 | response = failure('Invalid artifact URI', 400) 171 | except ArtifactNotFoundException: 172 | response = failure(f'Artifact with URI {identifier} not found', 404) 173 | except ArtifactException as e: 174 | response = failure(e.message, 500) 175 | 176 | return response 177 | -------------------------------------------------------------------------------- /src/polyswarmd/views/balances.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from flask import Blueprint, g 4 | 5 | from polyswarmd.utils.decorators.chains import chain 6 | from polyswarmd.utils.response import failure, success 7 | 8 | logger = logging.getLogger(__name__) 9 | balances: Blueprint = Blueprint('balances', __name__) 10 | 11 | 12 | @balances.route('/
<address>/eth', methods=['GET']) 13 | @chain(account_required=False) 14 | def get_balance_address_eth(address): 15 | if not g.chain.w3.isAddress(address): 16 | return failure('Invalid address', 400) 17 | 18 | address = g.chain.w3.toChecksumAddress(address) 19 | try: 20 | balance = g.chain.w3.eth.getBalance(address) 21 | return success(str(balance)) 22 | except Exception: 23 | logger.exception('Unexpected exception retrieving ETH balance') 24 | return failure("Could not retrieve balance") 25 | 26 | 27 | @balances.route('/<address>
/staking/total', methods=['GET']) 28 | @chain(account_required=False) 29 | def get_balance_total_stake(address): 30 | if not g.chain.w3.isAddress(address): 31 | return failure('Invalid address', 400) 32 | address = g.chain.w3.toChecksumAddress(address) 33 | try: 34 | balance = g.chain.arbiter_staking.contract.functions.balanceOf(address).call() 35 | return success(str(balance)) 36 | except Exception: 37 | logger.exception('Unexpected exception retrieving total staking balance') 38 | return failure("Could not retrieve balance") 39 | 40 | 41 | @balances.route('/<address>
<address>/staking/withdrawable', methods=['GET']) 42 | @chain(account_required=False) 43 | def get_balance_withdrawable_stake(address): 44 | if not g.chain.w3.isAddress(address): 45 | return failure('Invalid address', 400) 46 | 47 | address = g.chain.w3.toChecksumAddress(address) 48 | try: 49 | balance = g.chain.arbiter_staking.contract.functions.withdrawableBalanceOf(address).call() 50 | return success(str(balance)) 51 | except Exception: 52 | logger.exception('Unexpected exception retrieving withdrawable staking balance') 53 | return failure("Could not retrieve balance") 54 | 55 | 56 | @balances.route('/
/nct', methods=['GET']) 57 | @chain(account_required=False) 58 | def get_balance_address_nct(address): 59 | if not g.chain.w3.isAddress(address): 60 | return failure('Invalid address', 400) 61 | 62 | address = g.chain.w3.toChecksumAddress(address) 63 | try: 64 | balance = g.chain.nectar_token.contract.functions.balanceOf(address).call() 65 | return success(str(balance)) 66 | except Exception: 67 | logger.exception('Unexpected exception retrieving NCT balance') 68 | return failure("Could not retrieve balance") 69 | -------------------------------------------------------------------------------- /src/polyswarmd/views/eth.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import logging 3 | from typing import Any, Dict, List, Tuple, Type 4 | 5 | from eth.vm.forks.constantinople.transactions import ConstantinopleTransaction 6 | from eth_abi import decode_abi 7 | from eth_abi.exceptions import InsufficientDataBytes 8 | import fastjsonschema 9 | from flask import Blueprint 10 | from flask import current_app as app 11 | from flask import g, request 12 | import gevent 13 | from hexbytes import HexBytes 14 | import rlp 15 | from web3.exceptions import MismatchedABI 16 | from web3.module import Module 17 | from web3.utils.events import get_event_data 18 | 19 | from polyswarmd.app import cache 20 | from polyswarmd.utils.decorators.chains import chain 21 | from polyswarmd.utils.response import failure, success 22 | from polyswarmd.websockets import messages 23 | 24 | logger = logging.getLogger(__name__) 25 | 26 | misc: Blueprint = Blueprint('misc', __name__) 27 | 28 | MAX_GAS_LIMIT = 50000000 29 | GAS_MULTIPLIER = 1.5 30 | ZERO_ADDRESS = '0x0000000000000000000000000000000000000000' 31 | TRANSFER_SIGNATURE_HASH = 'a9059cbb' 32 | HOME_TIMEOUT = 60 33 | SIDE_TIMEOUT = 10 34 | 35 | 36 | class Debug(Module): 37 | ERROR_SELECTOR = '08c379a0' 38 | 39 | def getTransactionError(self, txhash): 40 | if not txhash.startswith('0x'): 41 | txhash = '0x' + txhash 42 | 43 | trace = self.web3.manager.request_blocking( 44 | 'debug_traceTransaction', 45 | [txhash, { 46 | 'disableStorage': True, 47 | 'disableMemory': True, 48 | 'disableStack': True, 49 | }] 50 | ) 51 | 52 | if not trace.get('failed'): 53 | logger.error('Transaction receipt indicates failure but trace succeeded') 54 | return 'Transaction receipt indicates failure but trace succeeded' 55 | 56 | # Parse out the revert error code if it exists 57 | # See https://solidity.readthedocs.io/en/v0.4.24/control-structures.html#error-handling-assert-require-revert-and-exceptions # noqa: E501 58 | # Encode as if a function call to `Error(string)` 59 | rv = HexBytes(trace.get('returnValue')) 60 | 61 | # Trim off function selector for "Error" 62 | if not rv.startswith(HexBytes(Debug.ERROR_SELECTOR)): 63 | logger.error( 64 | 'Expected revert encoding to begin with %s, actual is %s', Debug.ERROR_SELECTOR, 65 | rv[:4].hex() 66 | ) 67 | return 'Invalid revert encoding' 68 | rv = rv[4:] 69 | 70 | error = decode_abi(['string'], rv)[0] 71 | return error.decode('utf-8') 72 | 73 | 74 | @misc.route('/syncing', methods=['GET']) 75 | @chain 76 | def get_syncing(): 77 | if not g.chain.w3.eth.syncing: 78 | return success(False) 79 | 80 | return success(dict(g.chain.w3.eth.syncing)) 81 | 82 | 83 | @misc.route('/nonce', methods=['GET']) 84 | @chain 85 | def get_nonce(): 86 | account = g.chain.w3.toChecksumAddress(g.eth_address) 87 | if 'ignore_pending' in request.args.keys(): 88 | return 
success(g.chain.w3.eth.getTransactionCount(account)) 89 | else: 90 | return success(g.chain.w3.eth.getTransactionCount(account, 'pending')) 91 | 92 | 93 | @misc.route('/pending', methods=['GET']) 94 | @chain 95 | def get_pending_nonces(): 96 | tx_pool = get_txpool() 97 | logger.debug('Got txpool response from Ethereum node: %s', tx_pool) 98 | transactions = dict() 99 | for key in tx_pool.keys(): 100 | tx_pool_category_nonces = tx_pool[key].get(g.eth_address, {}) 101 | transactions.update(dict(tx_pool_category_nonces)) 102 | 103 | nonces = [str(nonce) for nonce in transactions.keys()] 104 | logger.debug('Pending txpool for %s: %s', g.eth_address, nonces) 105 | return success(nonces) 106 | 107 | 108 | _get_transactions_schema_validator = fastjsonschema.compile({ 109 | 'type': 'object', 110 | 'properties': { 111 | 'transactions': { 112 | 'type': 'array', 113 | 'maxItems': 10, 114 | 'items': { 115 | 'type': 'string', 116 | 'minLength': 2, 117 | 'maxLength': 66, 118 | 'pattern': r'^(0x)?[0-9a-fA-F]{64}$', 119 | } 120 | }, 121 | }, 122 | 'required': ['transactions'], 123 | }) 124 | 125 | 126 | @misc.route('/transactions', methods=['GET']) 127 | @chain 128 | def get_transactions(): 129 | body = request.get_json() 130 | try: 131 | _get_transactions_schema_validator(body) 132 | except fastjsonschema.JsonSchemaException as e: 133 | return failure('Invalid JSON: ' + e.message, 400) 134 | 135 | ret: Dict[str, List[Any]] = defaultdict(list) 136 | for transaction in body['transactions']: 137 | event = events_from_transaction(HexBytes(transaction), g.chain.name) 138 | for k, v in event.items(): 139 | ret[k].extend(v) 140 | 141 | if ret['errors']: 142 | logging.error('Got transaction errors: %s', ret['errors']) 143 | return failure(ret, 400) 144 | return success(ret) 145 | 146 | 147 | _post_transactions_schema = fastjsonschema.compile({ 148 | 'type': 'object', 149 | 'properties': { 150 | 'transactions': { 151 | 'type': 'array', 152 | 'maxItems': 10, 153 | 'items': { 154 | 'type': 'string', 155 | 'minLength': 1, 156 | 'pattern': r'^[0-9a-fA-F]+$', 157 | } 158 | }, 159 | }, 160 | 'required': ['transactions'], 161 | }) 162 | 163 | 164 | @misc.route('/transactions', methods=['POST']) 165 | @chain 166 | def post_transactions(): 167 | threadpool_executor = app.config['THREADPOOL'] 168 | account = g.chain.w3.toChecksumAddress(g.eth_address) 169 | 170 | # Does not include offer_multi_sig contracts, need to loosen validation for those 171 | contract_addresses = { 172 | g.chain.w3.toChecksumAddress(c.address) for c in ( 173 | g.chain.nectar_token, g.chain.bounty_registry, g.chain.arbiter_staking, 174 | g.chain.erc20_relay, g.chain.offer_registry 175 | ) if c.address is not None 176 | } 177 | 178 | body = request.get_json() 179 | try: 180 | _post_transactions_schema(body) 181 | except fastjsonschema.JsonSchemaException as e: 182 | return failure('Invalid JSON: ' + e.message, 400) 183 | 184 | withdrawal_only = not g.user and app.config['POLYSWARMD'].auth.require_api_key 185 | # If we don't have a user key, and they are required, start checking the transaction 186 | if withdrawal_only and len(body['transactions']) != 1: 187 | return failure('Posting multiple transactions requires an API key', 403) 188 | 189 | errors = False 190 | results = [] 191 | decoded_txs = [] # type: Any 192 | try: 193 | future = threadpool_executor.submit(decode_all, body['transactions']) 194 | decoded_txs = future.result() 195 | except ValueError as e: 196 | logger.critical('Invalid transaction: %s', e) 197 | errors = True 198 | 
results.append({'is_error': True, 'message': f'Invalid transaction: {e}'}) 199 | except Exception: 200 | logger.exception('Unexpected exception while parsing transaction') 201 | errors = True 202 | results.append({ 203 | 'is_error': True, 204 | 'message': 'Unexpected exception while parsing transaction' 205 | }) 206 | 207 | for raw_tx, tx in zip(body['transactions'], decoded_txs): 208 | if withdrawal_only and not is_withdrawal(tx): 209 | errors = True 210 | results.append({ 211 | 'is_error': 212 | True, 213 | 'message': 214 | f'Invalid transaction for tx {tx.hash.hex()}: only withdrawals allowed without an API key' 215 | }) 216 | continue 217 | 218 | sender = g.chain.w3.toChecksumAddress(tx.sender.hex()) 219 | if sender != account: 220 | errors = True 221 | results.append({ 222 | 'is_error': 223 | True, 224 | 'message': 225 | f'Invalid transaction sender for tx {tx.hash.hex()}: expected {account} got {sender}' 226 | }) 227 | continue 228 | 229 | # Redundant check against zero address, but explicitly guard against contract deploys via this route 230 | to = g.chain.w3.toChecksumAddress(tx.to.hex()) 231 | if to == ZERO_ADDRESS or to not in contract_addresses: 232 | errors = True 233 | results.append({ 234 | 'is_error': True, 235 | 'message': f'Invalid transaction recipient for tx {tx.hash.hex()}: {to}' 236 | }) 237 | continue 238 | 239 | logger.info('Sending tx from %s to %s with nonce %s', sender, to, tx.nonce) 240 | 241 | try: 242 | results.append({ 243 | 'is_error': False, 244 | 'message': g.chain.w3.eth.sendRawTransaction(HexBytes(raw_tx)).hex() 245 | }) 246 | except ValueError as e: 247 | errors = True 248 | results.append({ 249 | 'is_error': True, 250 | 'message': f'Invalid transaction error for tx {tx.hash.hex()}: {e}' 251 | }) 252 | if errors: 253 | return failure(results, 400) 254 | 255 | return success(results) 256 | 257 | 258 | @cache.memoize(1) 259 | def get_txpool(): 260 | return g.chain.w3.txpool.inspect 261 | 262 | 263 | def get_gas_limit(): 264 | gas_limit = MAX_GAS_LIMIT 265 | if app.config['CHECK_BLOCK_LIMIT']: 266 | gas_limit = g.chain.w3.eth.getBlock('latest').gasLimit 267 | 268 | if app.config['CHECK_BLOCK_LIMIT'] and gas_limit >= MAX_GAS_LIMIT: 269 | app.config['CHECK_BLOCK_LIMIT'] = False 270 | 271 | return gas_limit 272 | 273 | 274 | def build_transaction(call, nonce): 275 | # Only a problem for fresh chains 276 | gas_limit = get_gas_limit() 277 | options = { 278 | 'nonce': nonce, 279 | 'chainId': int(g.chain.chain_id), 280 | 'gas': gas_limit, 281 | } 282 | 283 | gas = gas_limit 284 | if g.chain.free: 285 | options["gasPrice"] = 0 286 | else: 287 | try: 288 | gas = int(call.estimateGas({'from': g.eth_address, **options}) * GAS_MULTIPLIER) 289 | except ValueError as e: 290 | logger.debug('Error estimating gas, using default: %s', e) 291 | 292 | options['gas'] = min(gas_limit, gas) 293 | logger.debug('options: %s', options) 294 | 295 | return call.buildTransaction(options) 296 | 297 | 298 | def decode_all(raw_txs): 299 | return [rlp.decode(bytes.fromhex(raw_tx), sedes=ConstantinopleTransaction) for raw_tx in raw_txs] 300 | 301 | 302 | def is_withdrawal(tx): 303 | """ 304 | Take a transaction and return True if that transaction is a withdrawal 305 | """ 306 | data = tx.data[4:] 307 | to = g.chain.w3.toChecksumAddress(tx.to.hex()) 308 | sender = g.chain.w3.toChecksumAddress(tx.sender.hex()) 309 | 310 | try: 311 | target, amount = decode_abi(['address', 'uint256'], data) 312 | except InsufficientDataBytes: 313 | logger.warning('Transaction by %s to %s is not a withdrawal', 
sender, to) 314 | return False 315 | 316 | target = g.chain.w3.toChecksumAddress(target) 317 | if ( 318 | tx.data.startswith(HexBytes(TRANSFER_SIGNATURE_HASH)) and 319 | g.chain.nectar_token.address == to and tx.value == 0 and 320 | tx.network_id == app.config["POLYSWARMD"].chains['side'].chain_id and 321 | target == g.chain.erc20_relay.address and amount > 0 322 | ): 323 | logger.info('Transaction is a withdrawal by %s for %d NCT', sender, amount) 324 | return True 325 | 326 | logger.warning('Transaction by %s to %s is not a withdrawal', sender, to) 327 | return False 328 | 329 | 330 | def events_from_transaction(txhash, chain): 331 | config = app.config['POLYSWARMD'] 332 | trace_transactions = config.eth.trace_transactions 333 | if trace_transactions: 334 | try: 335 | Debug.attach(g.chain.w3, 'debug') 336 | except AttributeError: 337 | # We've already attached, just continue 338 | pass 339 | 340 | # TODO: Check for out of gas, other 341 | timeout = gevent.Timeout(HOME_TIMEOUT if chain == 'home' else SIDE_TIMEOUT) 342 | timeout.start() 343 | 344 | try: 345 | while True: 346 | tx = g.chain.w3.eth.getTransaction(txhash) 347 | if tx is not None and tx.blockNumber: 348 | # fix suggested by https://github.com/ethereum/web3.js/issues/2917#issuecomment-507154487 349 | while g.chain.w3.eth.blockNumber - tx.blockNumber < 1: 350 | gevent.sleep(1) 351 | receipt = g.chain.w3.eth.getTransactionReceipt(txhash) 352 | break 353 | gevent.sleep(1) 354 | 355 | except gevent.Timeout as t: 356 | if t is not timeout: 357 | raise 358 | logging.error('Transaction %s: timeout waiting for receipt', bytes(txhash).hex()) 359 | return {'errors': [f'transaction {bytes(txhash).hex()}: timeout during wait for receipt']} 360 | except Exception: 361 | logger.exception( 362 | 'Transaction %s: error while fetching transaction receipt', 363 | bytes(txhash).hex() 364 | ) 365 | return { 366 | 'errors': [ 367 | f'transaction {bytes(txhash).hex()}: unexpected error while fetching transaction receipt' 368 | ] 369 | } 370 | finally: 371 | timeout.cancel() 372 | 373 | txhash = bytes(txhash).hex() 374 | if not receipt: 375 | return {'errors': [f'transaction {txhash}: receipt not available']} 376 | if receipt.gasUsed == MAX_GAS_LIMIT: 377 | return {'errors': [f'transaction {txhash}: out of gas']} 378 | if receipt.status != 1: 379 | if trace_transactions: 380 | error = g.chain.w3.debug.getTransactionError(txhash) 381 | logger.error('Transaction %s failed with error message: %s', txhash, error) 382 | return { 383 | 'errors': [ 384 | f'transaction {txhash}: transaction failed at block {receipt.blockNumber}, error: {error}' 385 | ] 386 | } 387 | else: 388 | return { 389 | 'errors': [ 390 | f'transaction {txhash}: transaction failed at block {receipt.blockNumber}, check parameters' 391 | ] 392 | } 393 | 394 | # This code builds the return value from the list of (CONTRACT, [HANDLER, ...]) 395 | # a HANDLER is a tuple of (RESULT KEY, EXTRACTION CLASS). RESULT KEY is the key that will be used in the result, 396 | # EXTRACTION CLASS is any class which inherits from `EventLogMessage'. 397 | # NOTE EXTRACTION CLASS's name is used to id the contract event, which is then pass to it's own `extract` fn 398 | # XXX The `extract' method is a conversion function also used to convert events for WebSocket consumption. 
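# For example, the ('transfers', messages.Transfer) handler below means: look up the Transfer
# event ABI on the NectarToken contract and, if this receipt contains a matching log, store
# ret['transfers'] = [Transfer.extract(event_args)].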
399 | contracts: List[Tuple[Any, List[Tuple[str, Type[messages.EventLogMessage]]]]] 400 | contracts = [ 401 | (g.chain.nectar_token.contract.events, [('transfers', messages.Transfer)]), 402 | ( 403 | g.chain.bounty_registry.contract.events, [('bounties', messages.NewBounty), 404 | ('assertions', messages.NewAssertion), 405 | ('votes', messages.NewVote), 406 | ('reveals', messages.RevealedAssertion)] 407 | ), 408 | ( 409 | g.chain.arbiter_staking.contract.events, [('withdrawals', messages.NewWithdrawal), 410 | ('deposits', messages.NewDeposit)] 411 | ) 412 | ] 413 | 414 | if g.chain.offer_registry.contract: 415 | offer_msig = g.chain.offer_multi_sig.bind(ZERO_ADDRESS) 416 | contracts.append(( 417 | g.chain.offer_registry.contract.events, 418 | [('offers_initialized', messages.InitializedChannel)] 419 | )) 420 | contracts.append(( 421 | offer_msig.events, [('offers_opened', messages.OpenedAgreement), 422 | ('offers_canceled', messages.CanceledAgreement), 423 | ('offers_joined', messages.JoinedAgreement), 424 | ('offers_closed', messages.ClosedAgreement), 425 | ('offers_settled', messages.StartedSettle), 426 | ('offers_challenged', messages.SettleStateChallenged)] 427 | )) 428 | ret: Dict[str, List[Dict[str, Any]]] = {} 429 | for contract, processors in contracts: 430 | for key, extractor in processors: 431 | filter_event = extractor.contract_event_name 432 | contract_event = contract[filter_event] 433 | if not contract_event: 434 | logger.warning("No contract event for: %s", filter_event) 435 | continue 436 | # Now pull out the pertinent logs from the transaction receipt 437 | abi = contract_event._get_event_abi() 438 | for log in receipt['logs']: 439 | try: 440 | event_log = get_event_data(abi, log) 441 | if event_log: 442 | ret[key] = [extractor.extract(event_log['args'])] 443 | break 444 | except MismatchedABI: 445 | continue 446 | 447 | return ret 448 | 449 | 450 | @cache.memoize(1) 451 | def bounty_fee(bounty_registry): 452 | return bounty_registry.functions.bountyFee().call() 453 | 454 | 455 | @cache.memoize(1) 456 | def assertion_fee(bounty_registry): 457 | return bounty_registry.functions.assertionFee().call() 458 | 459 | 460 | @cache.memoize(1) 461 | def bounty_amount_min(bounty_registry): 462 | return bounty_registry.functions.BOUNTY_AMOUNT_MINIMUM().call() 463 | 464 | 465 | @cache.memoize(1) 466 | def assertion_bid_min(bounty_registry): 467 | return bounty_registry.functions.ASSERTION_BID_ARTIFACT_MINIMUM().call() 468 | 469 | 470 | @cache.memoize(1) 471 | def assertion_bid_max(bounty_registry): 472 | return bounty_registry.functions.ASSERTION_BID_ARTIFACT_MAXIMUM().call() 473 | 474 | 475 | @cache.memoize(1) 476 | def staking_total_max(arbiter_staking): 477 | return arbiter_staking.functions.MAXIMUM_STAKE().call() 478 | 479 | 480 | @cache.memoize(1) 481 | def staking_total_min(arbiter_staking): 482 | return arbiter_staking.functions.MINIMUM_STAKE().call() 483 | -------------------------------------------------------------------------------- /src/polyswarmd/views/event_message.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | from typing import Any, Dict, List, Type 4 | 5 | import fastjsonschema 6 | from flask_sockets import Sockets 7 | import gevent 8 | from gevent.queue import Empty, Queue 9 | from geventwebsocket import WebSocketApplication, WebSocketError 10 | import ujson 11 | 12 | from polyswarmd.utils import channel_to_dict, g, logging, state_to_dict, uuid 13 | from polyswarmd.utils.decorators.chains 
import chain 14 | from polyswarmd.websockets.filter import FilterManager 15 | from polyswarmd.websockets.messages import ( 16 | ClosedAgreement, 17 | Connected, 18 | SettleStateChallenged, 19 | StartedSettle, 20 | WebsocketFilterMessage, 21 | ) 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | class WebSocket: 27 | """ 28 | Wrapper around a WebSocket that has a queue of messages that can be sent from another greenlet. 29 | """ 30 | 31 | def __init__(self, ws): 32 | """ 33 | Create a wrapper around a WebSocket with a guid to easily identify it, and a queue of 34 | messages to send 35 | :param ws: gevent WebSocket to wrap 36 | """ 37 | self.guid = uuid.uuid4() 38 | self.ws = ws 39 | self.queue = Queue() 40 | 41 | def send(self, message): 42 | """ 43 | Add message to the queue of messages to be sent 44 | :param message: json blob to be sent over the WebSocket 45 | """ 46 | self.queue.put(message) 47 | 48 | def __repr__(self): 49 | return f'' 50 | 51 | def __eq__(self, other): 52 | return isinstance(other, WebSocket) and other.guid == self.guid 53 | 54 | 55 | _messages_schema = fastjsonschema.compile({ 56 | 'type': 'object', 57 | 'properties': { 58 | 'type': { 59 | 'type': 'string', 60 | }, 61 | 'from_socket': { 62 | 'type': 'string', 63 | }, 64 | 'to_socket': { 65 | 'type': 'string', 66 | }, 67 | 'state': { 68 | 'type': 'string', 69 | }, 70 | 'artifact': { 71 | 'type': 'string', 72 | }, 73 | 'r': { 74 | 'type': 'string', 75 | }, 76 | 'v': { 77 | 'type': 'integer', 78 | }, 79 | 's': { 80 | 'type': 'string', 81 | } 82 | }, 83 | 'required': ['type', 'state'], 84 | }) 85 | 86 | 87 | def init_websockets(app): 88 | sockets = Sockets(app) 89 | start_time = time.time() 90 | message_sockets: Dict[uuid.UUID, List[WebSocketApplication]] = dict() 91 | 92 | @sockets.route('/events/') 93 | @sockets.route('/events') 94 | @chain(account_required=False) 95 | def events(ws): 96 | rpc = g.chain.rpc 97 | ws.send(Connected.serialize_message({'start_time': str(start_time)})) 98 | logger.debug("Websocket connection on %s", repr(g.chain)) 99 | 100 | wrapper = WebSocket(ws) 101 | 102 | rpc.register(wrapper) 103 | 104 | while not ws.closed: 105 | try: 106 | # Try to read a message off the queue, and then send over the websocket. 
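# queue.get(block=False) raises gevent.queue.Empty right away when nothing is queued; the
# Empty branch below then uses ws.receive() inside a short timeout as a liveness check.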
107 | msg = wrapper.queue.get(block=False) 108 | ws.send(msg) 109 | logger.debug("Sent message %s to %s", msg, repr(ws)) 110 | except Empty: 111 | # Anytime there are no new messages to send, check that the websocket is still connected 112 | with gevent.Timeout(.5, False): 113 | logger.debug('Checking %s against timeout', wrapper) 114 | # This raises WebSocketError if socket is closed, and does not block if there 115 | # are no messages 116 | ws.receive() 117 | except WebSocketError as e: 118 | logger.error('Websocket %s closed %s', wrapper, e) 119 | rpc.unregister(wrapper) 120 | return 121 | 122 | rpc.unregister(wrapper) 123 | 124 | # for receiving messages about offers that might need to be signed 125 | @sockets.route('/events//') 126 | @sockets.route('/events/') 127 | @chain(chain_name='home', account_required=False) 128 | def channel_events(ws, guid): 129 | offer_channel = channel_to_dict( 130 | g.chain.offer_registry.contract.functions.guidToChannel(guid.int).call() 131 | ) 132 | msig_address = offer_channel['msig_address'] 133 | offer_msig = g.chain.offer_multi_sig.bind(msig_address) 134 | filter_manager = FilterManager() 135 | filter_events: Any[Type[WebsocketFilterMessage]] = [ 136 | ClosedAgreement, 137 | StartedSettle, 138 | SettleStateChallenged, 139 | ] 140 | for evt in filter_events: 141 | filter_manager.register(offer_msig.eventFilter, evt) 142 | 143 | with filter_manager.fetch() as results: 144 | for messages in results: 145 | if ws.closed: 146 | raise RuntimeError("WebSocket is closed") 147 | for msg in messages: 148 | return ws.send(msg) 149 | 150 | @sockets.route('/messages//') 151 | @sockets.route('/messages/') 152 | @chain(chain_name='home', account_required=False) 153 | def messages(ws, guid): 154 | 155 | if guid not in message_sockets: 156 | message_sockets[guid] = [ws] 157 | else: 158 | message_sockets[guid].append(ws) 159 | 160 | while not ws.closed: 161 | try: 162 | msg = ws.receive() 163 | 164 | if not msg: 165 | break 166 | 167 | body = json.loads(msg) 168 | 169 | try: 170 | _messages_schema(body) 171 | except fastjsonschema.JsonSchemaException: 172 | logger.exception('Invalid JSON') 173 | 174 | state_dict = state_to_dict(body['state']) 175 | state_dict['guid'] = guid.int 176 | ret = {'type': body['type'], 'raw_state': body['state'], 'state': state_dict} 177 | 178 | if 'r' in body: 179 | ret['r'] = body['r'] 180 | 181 | if 'v' in body: 182 | ret['v'] = body['v'] 183 | 184 | if 's' in body: 185 | ret['s'] = body['s'] 186 | 187 | if 'artifact' in body: 188 | ret['artifact'] = body['artifact'] 189 | 190 | if body['type'] != 'accept' and body['type'] != 'payout': 191 | # delete zero verdict 192 | if 'mask' in ret['state']: 193 | del ret['state']['mask'] 194 | del ret['state']['verdicts'] 195 | 196 | for message_websocket in message_sockets[guid]: 197 | if not message_websocket.closed: 198 | message_websocket.send(ujson.dumps(ret)) 199 | 200 | gevent.sleep(1) 201 | except WebSocketError: 202 | logger.info('Websocket connection closed, exiting loop') 203 | break 204 | except Exception: 205 | logger.exception('Exception in /events') 206 | continue 207 | # for receiving messages about offers that might need to be signed 208 | -------------------------------------------------------------------------------- /src/polyswarmd/views/relay.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import fastjsonschema 4 | from flask import Blueprint, g, request 5 | 6 | from polyswarmd.utils.decorators.chains import chain 7 | 
from polyswarmd.utils.response import failure, success 8 | from polyswarmd.views.eth import build_transaction 9 | 10 | logger = logging.getLogger(__name__) 11 | relay: Blueprint = Blueprint('relay', __name__) 12 | 13 | 14 | @relay.route('/deposit', methods=['POST']) 15 | @chain(chain_name='home') 16 | def deposit_funds(): 17 | # Move funds from home to side 18 | return send_funds_from() 19 | 20 | 21 | @relay.route('/withdrawal', methods=['POST']) 22 | @chain(chain_name='side') 23 | def withdraw_funds(): 24 | # Move funds from side to home 25 | return send_funds_from() 26 | 27 | 28 | @relay.route('/fees', methods=['GET']) 29 | @chain 30 | def fees(): 31 | return success({'fees': g.chain.erc20_relay.contract.functions.fees().call()}) 32 | 33 | 34 | _send_funds_from_schema = fastjsonschema.compile({ 35 | 'type': 'object', 36 | 'properties': { 37 | 'amount': { 38 | 'type': 'string', 39 | 'minLength': 1, 40 | 'maxLength': 64, 41 | 'pattern': r'^\d+$', 42 | }, 43 | }, 44 | 'required': ['amount'], 45 | }) 46 | 47 | 48 | def send_funds_from(): 49 | # Grab correct versions by chain type 50 | account = g.chain.w3.toChecksumAddress(g.eth_address) 51 | base_nonce = int(request.args.get('base_nonce', g.chain.w3.eth.getTransactionCount(account))) 52 | erc20_relay_address = g.chain.w3.toChecksumAddress(g.chain.erc20_relay.address) 53 | 54 | body = request.get_json() 55 | try: 56 | _send_funds_from_schema(body) 57 | except fastjsonschema.JsonSchemaException as e: 58 | return failure('Invalid JSON: ' + e.message, 400) 59 | 60 | amount = int(body['amount']) 61 | 62 | transactions = [ 63 | build_transaction( 64 | g.chain.nectar_token.contract.functions.transfer(erc20_relay_address, amount), base_nonce 65 | ), 66 | ] 67 | 68 | return success({'transactions': transactions}) 69 | -------------------------------------------------------------------------------- /src/polyswarmd/views/staking.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import fastjsonschema 4 | from flask import Blueprint, g, request 5 | 6 | from polyswarmd.utils.decorators.chains import chain 7 | from polyswarmd.utils.response import failure, success 8 | from polyswarmd.views import eth 9 | from polyswarmd.views.eth import build_transaction 10 | 11 | logger = logging.getLogger(__name__) 12 | staking: Blueprint = Blueprint('staking', __name__) 13 | 14 | 15 | @staking.route('/parameters', methods=['GET']) 16 | @chain 17 | def get_staking_parameters(): 18 | minimum_stake = g.chain.arbiter_staking.contract.functions.MINIMUM_STAKE().call() 19 | maximum_stake = g.chain.arbiter_staking.contract.functions.MAXIMUM_STAKE().call() 20 | vote_ratio_numerator = g.chain.arbiter_staking.contract.functions.VOTE_RATIO_NUMERATOR().call() 21 | vote_ratio_denominator = g.chain.arbiter_staking.contract.functions.VOTE_RATIO_DENOMINATOR( 22 | ).call() 23 | 24 | return success({ 25 | 'minimum_stake': minimum_stake, 26 | 'maximum_stake': maximum_stake, 27 | 'vote_ratio_numerator': vote_ratio_numerator, 28 | 'vote_ratio_denominator': vote_ratio_denominator 29 | }) 30 | 31 | 32 | _post_arbiter_staking_deposit_schema = fastjsonschema.compile({ 33 | 'type': 'object', 34 | 'properties': { 35 | 'amount': { 36 | 'type': 'string', 37 | 'minLength': 1, 38 | 'maxLength': 100, 39 | 'pattern': r'^\d+$', 40 | } 41 | }, 42 | 'required': ['amount'], 43 | }) 44 | 45 | 46 | @staking.route('/deposit', methods=['POST']) 47 | @chain 48 | def post_arbiter_staking_deposit(): 49 | account = 
g.chain.w3.toChecksumAddress(g.eth_address) 50 | base_nonce = int(request.args.get('base_nonce', g.chain.w3.eth.getTransactionCount(account))) 51 | 52 | body = request.get_json() 53 | try: 54 | _post_arbiter_staking_deposit_schema(body) 55 | except fastjsonschema.JsonSchemaException as e: 56 | return failure('Invalid JSON: ' + e.message, 400) 57 | 58 | amount = int(body['amount']) 59 | 60 | total = g.chain.arbiter_staking.contract.functions.balanceOf(account).call() 61 | 62 | if amount + total >= eth.staking_total_max(g.chain.arbiter_staking.contract): 63 | return failure('Total stake above allowable maximum.', 400) 64 | 65 | transactions = [ 66 | build_transaction( 67 | g.chain.nectar_token.contract.functions.approve( 68 | g.chain.arbiter_staking.contract.address, amount 69 | ), base_nonce 70 | ), 71 | build_transaction( 72 | g.chain.arbiter_staking.contract.functions.deposit(amount), base_nonce + 1 73 | ), 74 | ] 75 | 76 | return success({'transactions': transactions}) 77 | 78 | 79 | _post_arbiter_staking_withdrawal_schema = fastjsonschema.compile({ 80 | 'type': 'object', 81 | 'properties': { 82 | 'amount': { 83 | 'type': 'string', 84 | 'minLength': 1, 85 | 'maxLength': 100, 86 | 'pattern': r'^\d+$', 87 | } 88 | }, 89 | 'required': ['amount'], 90 | }) 91 | 92 | 93 | @staking.route('/withdraw', methods=['POST']) 94 | @chain 95 | def post_arbiter_staking_withdrawal(): 96 | account = g.chain.w3.toChecksumAddress(g.eth_address) 97 | base_nonce = int(request.args.get('base_nonce', g.chain.w3.eth.getTransactionCount(account))) 98 | body = request.get_json() 99 | try: 100 | _post_arbiter_staking_withdrawal_schema(body) 101 | except fastjsonschema.JsonSchemaException as e: 102 | return failure('Invalid JSON: ' + e.message, 400) 103 | 104 | amount = int(body['amount']) 105 | 106 | available = g.chain.arbiter_staking.contract.functions.withdrawableBalanceOf(account).call() 107 | 108 | if amount > available: 109 | return failure('Exceeds withdrawal eligible %s' % available, 400) 110 | 111 | transactions = [ 112 | build_transaction(g.chain.arbiter_staking.contract.functions.withdraw(amount), base_nonce), 113 | ] 114 | 115 | return success({'transactions': transactions}) 116 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/src/polyswarmd/websockets/__init__.py -------------------------------------------------------------------------------- /src/polyswarmd/websockets/conftest.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | import json 3 | from pprint import pprint 4 | 5 | import pytest 6 | import ujson 7 | 8 | from polyswarmd.websockets import messages 9 | 10 | 11 | @pytest.fixture(autouse=True) 12 | def mock_md_fetch(monkeypatch): 13 | """Mock out the metadata-fetching implementation 14 | 15 | Override _substitute_metadata so we can test `fetch_metadata' without network IO. 16 | doctest.testmod does accept globals-setting parameters (`globs` & `extraglobs'), but they 17 | haven't been as easy as just overwriting messages here 18 | """ 19 | 20 | def mock_sub(uri, validate): 21 | # These are fake URIs, intended to resemble the output that substitute_metadata might 22 | # actually encounter. 
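# URIs that are not listed in fake_uris fall through the lookup below and are returned unchanged.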
23 | fake_uris = { 24 | 'ZWbountyuri': 25 | ujson.dumps([{ 26 | "malware_family": "EICAR", 27 | "scanner": { 28 | "environment": { 29 | "architecture": "x86_64", 30 | "operating_system": "Linux" 31 | } 32 | } 33 | }]), 34 | 'ZWassertionuri': 35 | ujson.dumps([{ 36 | "md5": "44d88612fea8a8f36de82e1278abb02f", 37 | "sha1": "3395856ce81f2b7382dee72602f798b642f14140", 38 | "size": 68, 39 | "type": "FILE", 40 | "sha256": "275a021bbfb6489e54d471899f7db9d1663fc695ec2fe2a2c4538aabf651fd0f", 41 | "filename": "eicar_true", 42 | "mimetype": "text/plain", 43 | "bounty_id": 69540800813340, 44 | "extended_type": "EICAR virus test files", 45 | }]) 46 | } 47 | if uri in fake_uris: 48 | content = json.loads(fake_uris[uri]) 49 | 50 | if validate: 51 | if validate(content): 52 | return content 53 | else: 54 | return None 55 | return uri 56 | 57 | monkeypatch.setattr(messages.MetadataHandler, "_substitute_metadata", mock_sub) 58 | 59 | 60 | class mkevent: 61 | DEFAULT_BLOCK = 117 62 | ALTERNATE_BLOCK = 220 63 | 64 | def __init__(self, *args, **kwargs): 65 | event_default = { 66 | 'args': {}, 67 | 'event': 'test event', 68 | 'logIndex': 19845, 69 | 'transactionIndex': 1276, 70 | 'transactionHash': (11).to_bytes(16, byteorder='big'), 71 | 'address': '0xFACE0EEE000000000000000000000001', 72 | 'blockHash': (90909090).to_bytes(16, byteorder='big'), 73 | 'blockNumber': self.DEFAULT_BLOCK, 74 | } 75 | for i, (attr, default) in enumerate(event_default.items()): 76 | if len(args) > i: 77 | setattr(self, attr, args[i]) 78 | elif attr in kwargs: 79 | setattr(self, attr, kwargs[attr]) 80 | else: 81 | setattr(self, attr, default) 82 | 83 | def __getitem__(self, k): 84 | if (type(k) == str): 85 | return self.__getattribute__(k) 86 | 87 | 88 | @pytest.fixture(autouse=True) 89 | def add_websockets_doctest_deps(doctest_namespace): 90 | TestChain = namedtuple('TestChain', ['blockNumber']) 91 | FakeFormatter = namedtuple('FakeFormatter', ['contract_event_name']) 92 | doctest_namespace['decoded_msg'] = lambda wsmsg: pprint(json.loads(wsmsg.decode('ascii'))) 93 | doctest_namespace["chain1"] = TestChain(mkevent.DEFAULT_BLOCK) 94 | doctest_namespace["chain2"] = TestChain(mkevent.ALTERNATE_BLOCK) 95 | doctest_namespace["addr1"] = "0x00000000000000000000000000000001" 96 | doctest_namespace["addr2"] = "0x00000000000000000000000000000002" 97 | doctest_namespace["mkevent"] = mkevent 98 | doctest_namespace["pprint"] = pprint 99 | doctest_namespace['fake_formatter'] = FakeFormatter 100 | doctest_namespace['identity'] = lambda *args: args 101 | doctest_namespace['approx'] = pytest.approx 102 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/filter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from random import gauss 3 | from typing import Any, Callable, Iterable, List, NoReturn, Type 4 | 5 | import gevent 6 | from gevent.pool import Group 7 | from gevent.queue import Queue 8 | from requests.exceptions import ConnectionError 9 | 10 | from . import messages 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | class ContractFilter: 16 | callbacks: List[Callable[..., Any]] 17 | stopped: bool 18 | poll_interval: float 19 | filter_id: int 20 | web3: Any 21 | 22 | def get_new_entries(self) -> List[messages.EventData]: 23 | ... 24 | 25 | def get_all_entries(self) -> List[messages.EventData]: 26 | ... 
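# ContractFilter above is only a typing stub. For illustration, a minimal in-memory stand-in
# that satisfies the same interface (hypothetical; real filters are produced by web3's
# eth.filter / contract.eventFilter, not by this class):

class FakeContractFilter:
    """Hypothetical stand-in that drains queued web3-style event dicts on each poll."""

    def __init__(self, entries=()):
        self._entries = list(entries)

    def get_new_entries(self):
        # Return everything queued since the last poll, then clear the backlog
        new, self._entries = self._entries, []
        return new

    def get_all_entries(self):
        return list(self._entries)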
27 | 28 | 29 | FormatClass = Type[messages.WebsocketFilterMessage] 30 | Message = bytes 31 | FilterInstaller = Callable[[str], ContractFilter] 32 | 33 | 34 | class FilterWrapper: 35 | """A utility class which wraps a contract filter with websocket-messaging features""" 36 | filter: ContractFilter 37 | filter_installer = Callable[[], ContractFilter] 38 | formatter: FormatClass 39 | backoff: bool 40 | MIN_WAIT: float = 0.5 41 | MAX_WAIT: float = 4.0 42 | JITTER: float = 0.1 43 | 44 | def __init__(self, filter_installer: FilterInstaller, formatter: FormatClass, backoff: bool): 45 | self.formatter = formatter 46 | self.backoff = backoff 47 | self._filter_installer = filter_installer 48 | self.filter = self.create_filter() 49 | 50 | def create_filter(self) -> ContractFilter: 51 | """Return a new filter 52 | 53 | NOTE: this function is here instead of directly assigned in __init__ to appease mypy""" 54 | installer: FilterInstaller = self._filter_installer 55 | return installer(self.formatter.contract_event_name) 56 | 57 | def compute_wait(self, ctr: int) -> float: 58 | """Compute the amount of wait time from a counter of (sequential) empty replies 59 | 60 | >>> FilterWrapper.JITTER = 0.0 61 | >>> tv = (0, 1, 3, 6, 10, 100) 62 | >>> backoff = FilterWrapper(identity, fake_formatter, backoff=True) 63 | >>> wait_times = list(map(backoff.compute_wait, tv)) 64 | >>> wait_times 65 | [0.5, 0.5, 1.0, 4.0, 4.0, 4.0] 66 | >>> no_backoff = FilterWrapper(identity, fake_formatter, backoff=False) 67 | >>> list(map(no_backoff.compute_wait, tv)) 68 | [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] 69 | >>> FilterWrapper.JITTER = 0.1 70 | >>> all(n != j and approx((n, j)) for n, j in zip(wait_times, map(backoff.compute_wait, tv))) 71 | True 72 | """ 73 | if self.backoff: 74 | # backoff 'exponentially' 75 | exp = (1 << max(0, ctr - 2)) - 1 76 | base_wait = min(self.MAX_WAIT, max(self.MIN_WAIT, exp)) 77 | return abs(gauss(base_wait, self.JITTER)) 78 | else: 79 | return self.MIN_WAIT 80 | 81 | def get_new_entries(self) -> List[Message]: 82 | return [self.formatter.serialize_message(e) for e in self.filter.get_new_entries()] 83 | 84 | def spawn_poll_loop(self, callback: Callable[[Iterable[Message]], NoReturn]): 85 | """Spawn a greenlet which polls the filter's contract events, passing results to `callback'""" 86 | ctr: int = 0 # number of loops since the last non-empty response 87 | wait: float = 0.0 # The amount of time this loop will wait. 88 | logger.debug("Spawning fetch: %s", self.filter) 89 | while True: 90 | ctr += 1 91 | # XXX spawn_later prevents easily killing the pool. Use `wait` here. 
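# wait is 0.0 on the very first pass, so the initial poll happens immediately; afterwards it
# holds either a fixed retry delay (set in the error branches) or the back-off computed below.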
92 | gevent.sleep(wait) 93 | try: 94 | result = self.get_new_entries() 95 | # LookupError generally occurs when our schema doesn't match the message 96 | except LookupError: 97 | logger.exception("LookupError inside spawn_poll_loop") 98 | wait = 1 99 | continue 100 | # ConnectionError generally occurs when we cannot fetch events 101 | except (ConnectionError, TimeoutError): 102 | logger.exception("ConnectionError/timeout in spawn_poll_loop") 103 | wait = self.compute_wait(ctr) 104 | continue 105 | # ValueError generally occurs when Geth removed the filter 106 | except ValueError: 107 | logger.exception("Filter removed by Ethereum client") 108 | self.filter = self.create_filter() 109 | wait = 1 110 | continue 111 | 112 | # Reset the ctr if we received a non-empty response or we shouldn't backoff 113 | if len(result) != 0: 114 | ctr = 0 115 | callback(result) 116 | 117 | wait = self.compute_wait(ctr) 118 | logger.debug("%s wait=%f", self.filter, wait) 119 | 120 | 121 | class FilterManager: 122 | """Manages access to filtered Ethereum events.""" 123 | 124 | def __init__(self): 125 | self.wrappers = [] 126 | self.pool = Group() 127 | 128 | def register( 129 | self, filter_installer: FilterInstaller, fmt_cls: FormatClass, backoff: bool = True 130 | ): 131 | """Add a new filter, with an optional associated WebsocketMessage-serializer class""" 132 | wrapper = FilterWrapper(filter_installer, fmt_cls, backoff) 133 | self.wrappers.append(wrapper) 134 | logger.debug('Registered new filter: %s', wrapper) 135 | 136 | def flush(self): 137 | """End all event polling, uninstall all filters and remove their corresponding wrappers""" 138 | self.pool.kill() 139 | self.wrappers.clear() 140 | 141 | def fetch(self): 142 | """Return a queue of currently managed contract events""" 143 | queue = Queue() 144 | for wrapper in self.wrappers: 145 | self.pool.spawn(wrapper.spawn_poll_loop, queue.put_nowait) 146 | yield from queue 147 | 148 | def setup_event_filters(self, chain: Any): 149 | """Setup the most common event filters""" 150 | if len(self.wrappers) != 0: 151 | logger.exception("Attempting to initialize already initialized filter manager") 152 | self.flush() 153 | 154 | bounty_contract = chain.bounty_registry.contract 155 | 156 | # Setup Latest (although this could pass `w3.eth.filter` directly) 157 | self.register(chain.w3.eth.filter, messages.LatestEvent.make(chain.w3.eth), backoff=False) 158 | # messages.NewBounty shouldn't wait or back-off from new bounties. 
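# (backoff=False pins the poll interval at MIN_WAIT instead of backing off when no events arrive.)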
159 | self.register(bounty_contract.eventFilter, messages.NewBounty, backoff=False) 160 | 161 | filter_events: List[FormatClass] = [ 162 | messages.FeesUpdated, 163 | messages.WindowsUpdated, 164 | messages.NewAssertion, 165 | messages.NewVote, 166 | messages.QuorumReached, 167 | messages.SettledBounty, 168 | messages.RevealedAssertion, 169 | messages.Deprecated, 170 | messages.Undeprecated, 171 | ] 172 | 173 | for cls in filter_events: 174 | self.register(bounty_contract.eventFilter, cls) 175 | 176 | offer_registry = chain.offer_registry 177 | if offer_registry and offer_registry.contract: 178 | self.register(offer_registry.contract.eventFilter, messages.InitializedChannel) 179 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/json_schema.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | import operator 3 | from typing import ( 4 | TYPE_CHECKING, 5 | Any, 6 | Callable, 7 | ClassVar, 8 | Dict, 9 | Iterable, 10 | List, 11 | Mapping, 12 | SupportsInt, 13 | Union, 14 | cast, 15 | ) 16 | import uuid 17 | 18 | # HACK: please upgrade me to python 3.8 19 | if TYPE_CHECKING: 20 | from mypy_extensions import TypedDict 21 | else: 22 | 23 | def TypedDict(*args, **kwargs): 24 | return object 25 | 26 | 27 | SchemaType = str 28 | SchemaFormat = str 29 | SchemaExtraction = Dict[Any, Any] 30 | 31 | SchemaDef = TypedDict( 32 | 'SchemaDef', { 33 | 'type': 'SchemaType', 34 | 'format': 'SchemaFormat', 35 | 'enum': Iterable[Any], 36 | 'items': 'SchemaType', 37 | 'srckey': Union[str, Callable[[str, Any], Any]], 38 | }, 39 | total=False 40 | ) 41 | 42 | JSONSchema = TypedDict('JSONSchema', {'properties': Mapping[str, 'SchemaDef']}, total=False) 43 | 44 | 45 | def compose(f, g): 46 | """"Return a function which which composes/pipes g(x) into f(x)""" 47 | return lambda x: f(g(x)) 48 | 49 | 50 | def to_int_uuid(x: SupportsInt) -> uuid.UUID: 51 | """"Return an uuid from an int-able value""" 52 | return uuid.UUID(int=int(x)) 53 | 54 | 55 | class PSJSONSchema: 56 | """Extract and format fields from a source `instance` object 57 | 58 | This uses ordinary jsonschema manifests, with the addition of polyswarm-specific keys: 59 | 60 | srckey: str - Get the field with the same name from `instance` 61 | srckey: callable - Run this function with `instance` as an argument 62 | 63 | If srckey is not present at all, it attempts to fetch the value from `source` 64 | with the same name/key as the definition. 65 | 66 | >>> make_range = lambda key, src: range(1, src[key]) 67 | >>> schema = PSJSONSchema({ 68 | ... 'properties': { 69 | ... 'a': {'type':'string'}, 70 | ... 'b': {'type':'string', 'srckey': 'b_src'}, 71 | ... 'c': {'type':'string', 'srckey': 'c_src'}, 72 | ... 'x': {'type':'integer'}, 73 | ... 'range': {'type': 'array', 'items': 'string', 'srckey': make_range}, 74 | ... 
'xs': {'type': 'array', 'items': 'integer' }}}) 75 | >>> instance = { 'a': "1", 'b_src': "2", 'c_src': 3, 'x': 4, 'xs': ["5","6"], 'range': 3 } 76 | >>> schema.extract(instance) 77 | {'a': '1', 'b': '2', 'c': '3', 'x': 4, 'range': ['1', '2'], 'xs': [5, 6]} 78 | """ 79 | _TYPES: ClassVar[Dict[str, Callable[[Any], Any]]] = { 80 | 'string': str, 81 | 'integer': int, 82 | 'number': float, 83 | 'boolean': bool, 84 | 'array': list 85 | } 86 | _FORMATTERS: ClassVar[Dict[str, Callable[[Any], Any]]] = {'uuid': to_int_uuid} 87 | 88 | schema: JSONSchema 89 | _extractor: Dict[str, Callable[[Any], Any]] 90 | 91 | def __init__(self, schema: Dict[str, Any]): 92 | self.schema = cast(JSONSchema, schema) 93 | self._extractor = self.build_extractor() 94 | 95 | def visitor(self): 96 | yield from self.schema.get('properties', {}).items() 97 | 98 | def extract(self, instance: Any) -> SchemaExtraction: 99 | return {k: fn(instance) for k, fn in self._extractor.items()} 100 | 101 | def build_extractor(self): 102 | """Return a dictionary of functions which each extract/format a def_name""" 103 | extract_map = {} 104 | # This code works by mapping each formatting-task to a particular function and then applying 105 | # them in series, e.g 106 | # {'type': 'uuid', 'srckey': 'SRC', 'format': 'uuid' } 107 | # would be converted into (using the _TYPES and _FORMATTERS tables above) 108 | # str(uuid.UUID(int=operator.itemgetter('SRC'))) 109 | for def_name, def_schema in self.visitor(): 110 | # You may use a string to indicate where to look in the source map, 111 | # and if none is provided, it will use the key/def_name by default 112 | srckey = def_schema.get('srckey', def_name) 113 | if type(srckey) is str: 114 | extract_fn = operator.itemgetter(srckey) 115 | elif callable(srckey): 116 | extract_fn = partial(srckey, def_name) 117 | 118 | fmt = def_schema.get('format') 119 | if fmt and fmt in self._FORMATTERS: 120 | extract_fn = compose(self._FORMATTERS[fmt], extract_fn) 121 | 122 | itype = def_schema.get('items') 123 | if itype: 124 | extract_fn = compose(partial(map, self._TYPES[itype]), extract_fn) 125 | 126 | dtype = def_schema.get('type') 127 | if dtype: 128 | extract_fn = compose(self._TYPES[dtype], extract_fn) 129 | extract_map[def_name] = extract_fn 130 | 131 | return extract_map 132 | 133 | def build_annotations(self): 134 | """Return a mypy function annotation for this schema 135 | 136 | This is used by gen_stubs.py, not in application logic""" 137 | annotations = {} 138 | for name, schema in self.visitor(): 139 | type_name = schema.get('type') 140 | if type_name: 141 | if type_name == 'array': 142 | elt = List[self._TYPES.get(schema.get('items'), Any)] # type: ignore 143 | else: 144 | elt = self._TYPES.get(type_name, Any) 145 | 146 | try: 147 | annotations[name] = f'zv.{elt.__name__}' 148 | except (NameError, LookupError, AttributeError): 149 | annotations[name] = elt 150 | else: 151 | annotations[name] = Any # type: ignore 152 | return annotations 153 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/message_types.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file has been automatically generated by scripts/gen_stubs.py 3 | """ 4 | 5 | from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar 6 | 7 | try: 8 | from typing import TypedDict # type: ignore 9 | except ImportError: 10 | from mypy_extensions import TypedDict 11 | 12 | D = TypeVar('D') 13 | E = TypeVar('E') 14 | 15 | 16 | 
class EventData(Mapping): 17 | """Event data returned from web3 filter requests""" 18 | args: Dict[str, Any] 19 | event: str 20 | logIndex: int 21 | transactionIndex: int 22 | transactionHash: bytes 23 | address: str 24 | blockHash: bytes 25 | blockNumber: int 26 | 27 | 28 | class WebsocketEventMessage(Generic[D], Mapping): 29 | """An Polyswarm WebSocket message""" 30 | event: str 31 | data: D 32 | block_number: Optional[int] 33 | txhash: Optional[str] 34 | 35 | 36 | TransferMessageData = TypedDict('TransferMessageData', {'to': str, 'from': str, 'value': str}) 37 | 38 | NewDepositMessageData = TypedDict('NewDepositMessageData', {'value': int, 'from': str}) 39 | 40 | NewWithdrawalMessageData = TypedDict('NewWithdrawalMessageData', {'to': str, 'value': int}) 41 | 42 | FeesUpdatedMessageData = TypedDict( 43 | 'FeesUpdatedMessageData', { 44 | 'bounty_fee': int, 45 | 'assertion_fee': int 46 | } 47 | ) 48 | 49 | WindowsUpdatedMessageData = TypedDict( 50 | 'WindowsUpdatedMessageData', { 51 | 'assertion_reveal_window': int, 52 | 'arbiter_vote_window': int 53 | } 54 | ) 55 | 56 | NewBountyMessageData = TypedDict( 57 | 'NewBountyMessageData', { 58 | 'guid': str, 59 | 'artifact_type': str, 60 | 'author': str, 61 | 'amount': str, 62 | 'uri': Any, 63 | 'expiration': str, 64 | 'metadata': str 65 | } 66 | ) 67 | 68 | NewAssertionMessageData = TypedDict( 69 | 'NewAssertionMessageData', { 70 | 'bounty_guid': str, 71 | 'author': str, 72 | 'index': int, 73 | 'bid': List[str], 74 | 'mask': List[bool], 75 | 'commitment': str 76 | } 77 | ) 78 | 79 | RevealedAssertionMessageData = TypedDict( 80 | 'RevealedAssertionMessageData', { 81 | 'bounty_guid': str, 82 | 'author': str, 83 | 'index': int, 84 | 'nonce': str, 85 | 'verdicts': List[bool], 86 | 'metadata': Any 87 | } 88 | ) 89 | 90 | NewVoteMessageData = TypedDict( 91 | 'NewVoteMessageData', { 92 | 'bounty_guid': str, 93 | 'voter': str, 94 | 'votes': List[bool] 95 | } 96 | ) 97 | 98 | QuorumReachedMessageData = TypedDict('QuorumReachedMessageData', {'bounty_guid': str}) 99 | 100 | SettledBountyMessageData = TypedDict( 101 | 'SettledBountyMessageData', { 102 | 'bounty_guid': str, 103 | 'settler': str, 104 | 'payout': int 105 | } 106 | ) 107 | 108 | InitializedChannelMessageData = TypedDict( 109 | 'InitializedChannelMessageData', { 110 | 'ambassador': str, 111 | 'expert': str, 112 | 'guid': str, 113 | 'multi_signature': str 114 | } 115 | ) 116 | 117 | ClosedAgreementMessageData = TypedDict( 118 | 'ClosedAgreementMessageData', { 119 | 'ambassador': str, 120 | 'expert': str 121 | } 122 | ) 123 | 124 | StartedSettleMessageData = TypedDict( 125 | 'StartedSettleMessageData', { 126 | 'initiator': str, 127 | 'nonce': int, 128 | 'settle_period_end': int 129 | } 130 | ) 131 | 132 | SettleStateChallengedMessageData = TypedDict( 133 | 'SettleStateChallengedMessageData', { 134 | 'challenger': str, 135 | 'nonce': int, 136 | 'settle_period_end': int 137 | } 138 | ) 139 | 140 | DeprecatedMessageData = TypedDict('DeprecatedMessageData', {'rollover': bool}) 141 | 142 | UndeprecatedMessageData = TypedDict('UndeprecatedMessageData', {}) 143 | 144 | # Latest event's data type is not synthesized from a schema. 
145 | # If it's type changes, update gen_stubs.py 146 | LatestEventMessageData = TypedDict('LatestEventMessageData', {'number': int}) 147 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/scripts/gen_stubs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """gen_stubs.py 3 | This script generates the additional stubs for polyswarmd messages 4 | 5 | You can run it from src/polyswarmd like so: 6 | 7 | python3 -m websockets.scripts.gen_stubs 8 | """ 9 | import inspect 10 | import re 11 | from typing import Any 12 | 13 | from polyswarmd.websockets import messages 14 | 15 | HEADER = """\"\"\" 16 | This file has been automatically generated by scripts/gen_stubs.py 17 | \"\"\" 18 | 19 | from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar 20 | 21 | try: 22 | from typing import TypedDict # noqa 23 | except ImportError: 24 | from mypy_extensions import TypedDict # noqa 25 | 26 | D = TypeVar('D') 27 | E = TypeVar('E') 28 | 29 | 30 | class EventData(Mapping): 31 | "Event data returned from web3 filter requests" 32 | args: Dict[str, Any] 33 | event: str 34 | logIndex: int 35 | transactionIndex: int 36 | transactionHash: bytes 37 | address: str 38 | blockHash: bytes 39 | blockNumber: int 40 | 41 | 42 | class WebsocketEventMessage(Generic[D], Mapping): 43 | "An Polyswarm WebSocket message" 44 | event: str 45 | data: D 46 | block_number: Optional[int] 47 | txhash: Optional[str] 48 | """ 49 | 50 | 51 | def gen_stub(cls: Any, klass: str = None): 52 | """Return a string of the type returned by that class's schema extraction""" 53 | name = cls.contract_event_name 54 | # Create a new `TypedDict' definition 55 | tname = f'{name}MessageData' 56 | type_str = f"{tname} = TypedDict('{tname}', {cls.schema.build_annotations()})" 57 | return re.sub(r"'?(typing|zv|builtin).(\w*)'?", r'\2', type_str) 58 | 59 | 60 | if __name__ == "__main__": 61 | print(HEADER) 62 | for dts in [messages.EventLogMessage, messages.WebsocketFilterMessage]: 63 | for scls in dts.__subclasses__(): 64 | if 'schema' in dir(scls): 65 | klass = inspect.getsource(scls) 66 | print(gen_stub(scls, klass)) 67 | print("\n") 68 | print("# Latest event's data type is not synthesized from a schema.") 69 | print("# If it's type changes, update gen_stubs.py") 70 | print("""LatestEventMessageData = TypedDict('LatestEventMessageData', {'number': int})""") 71 | -------------------------------------------------------------------------------- /src/polyswarmd/websockets/serve.py: -------------------------------------------------------------------------------- 1 | import gevent 2 | from geventwebsocket import Resource, WebSocketApplication, WebSocketServer 3 | 4 | from .filter import FilterManager 5 | 6 | 7 | class EventServer(WebSocketApplication): 8 | 9 | def __init__(self, *args): 10 | super().__init__(*args) 11 | self.filter_manager = FilterManager() 12 | 13 | def setup(self): 14 | self.filter_manager.setup_event_filters(self.chain) 15 | gevent.spawn(self.filter_poll) 16 | 17 | def filter_poll(self): 18 | with self.filter_manager.fetch() as results: 19 | for messages in results: 20 | if self.websockets is None: 21 | return 22 | for msg in messages: 23 | self.broadcast(msg) 24 | 25 | def broadcast(self, msg): 26 | for client in self.ws.handler.server.clients.values(): 27 | client.ws.send(msg) 28 | 29 | 30 | resource = Resource([('/events', EventServer)]) 31 | 32 | if __name__ == "__main__": 33 | server = 
WebSocketServer(('', 8000), resource, debug=True) 34 | server.serve_forever() 35 | -------------------------------------------------------------------------------- /src/polyswarmd/wsgi.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | from polyswarmd.utils.logger import init_logging 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | def app(*args, **kwargs): 11 | # Can't directly pass command line arguments via gunicorn, but can pass arguments to callable 12 | # https://stackoverflow.com/questions/8495367/using-additional-command-line-arguments-with-gunicorn 13 | log_format = os.environ.get('LOG_FORMAT', kwargs.get('log_format', 'text')) 14 | 15 | log_level = os.environ.get('LOG_LEVEL', kwargs.get('log_level', 'WARNING')) 16 | log_level = getattr(logging, log_level.upper(), None) 17 | 18 | try: 19 | init_logging(log_format, log_level) 20 | except (TypeError, ValueError) as e: 21 | logging.error('Invalid log level') 22 | logging.exception(e) 23 | sys.exit(10) 24 | except Exception as e: 25 | logging.exception(e) 26 | sys.exit(-1) 27 | 28 | from polyswarmd.app import app as application 29 | 30 | logger.critical("polyswarmd is ready!") 31 | return application 32 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/polyswarm/polyswarmd/b732d60f0f829cc355c1f938bbe6de69f9985098/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | isort:skip_file 3 | """ 4 | import os 5 | import pytest 6 | import unittest.mock 7 | 8 | from .utils import read_chain_cfg 9 | import web3.datastructures 10 | 11 | import web3.manager 12 | 13 | import web3.eth 14 | import web3.contract 15 | 16 | # a list of function patches to be applied prior `import polyswarmd` 17 | PRE_INIT_PATCHES = ( 18 | # don't both with patching gevent methods inside pytest 19 | unittest.mock.patch('gevent.monkey.patch_all', return_value=None), 20 | # # fake out the underlying ipfshttpclient connect 21 | # set `POLY_WORK` to be 'testing' (if not already set) 22 | unittest.mock.patch( 23 | 'os.getenv', lambda *args, **kwargs: 'testing' if args[0] == 'POLY_WORK' else os.getenv 24 | ) 25 | ) 26 | 27 | for pa in PRE_INIT_PATCHES: 28 | pa.start() 29 | 30 | # NOTE polyswarmd is structured such that merely importing a package in the `polyswarmd` namespace will 31 | # raise an exception. Fixing this (e.g moving stuff outta src/polyswarmd/__init__.py) has been on the 32 | # todo list for some time, but for now, we just patch up methods which have unsafe side effects to 33 | # run unit tests without side-effects. 
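# The patches above are active only while the import below runs and are stopped immediately
# afterwards, so just the import-time side effects (gevent monkey-patching, the POLY_WORK
# environment lookup) are suppressed.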
34 | import polyswarmd as _polyswarmd # noqa 35 | from polyswarmd.app import app as _app # noqa 36 | 37 | for pa in PRE_INIT_PATCHES: 38 | pa.stop() 39 | 40 | 41 | @pytest.fixture(scope='session') 42 | def app(): 43 | return _app 44 | 45 | 46 | @pytest.fixture(scope='session') 47 | def client(app): 48 | app.config['TESTING'] = True 49 | yield app.test_client() 50 | 51 | 52 | @pytest.fixture(params=['home', 'side'], scope='session') 53 | def chain_config(request): 54 | return read_chain_cfg(request.param) 55 | 56 | 57 | @pytest.fixture(params=['home', 'side'], scope='session') 58 | def chains(request, app): 59 | return app.config['POLYSWARMD'].chains[request.param] 60 | 61 | 62 | @pytest.fixture 63 | def chain_id(): 64 | return 1337 65 | 66 | 67 | @pytest.fixture(scope='session') 68 | def community(): 69 | return 'gamma' 70 | 71 | 72 | @pytest.fixture(scope='session') 73 | def base_nonce(): 74 | return 1248924 75 | 76 | 77 | @pytest.fixture 78 | def balances(token_address): 79 | return {token_address: 12345} 80 | 81 | 82 | @pytest.fixture(scope='session') 83 | def token_address(): 84 | return '0x4B1867c484871926109E3C47668d5C0938CA3527' 85 | 86 | 87 | @pytest.fixture 88 | def TX_SIG_HASH(): 89 | from polyswarmd.views.eth import TRANSFER_SIGNATURE_HASH as TX_SIG_HASH 90 | return TX_SIG_HASH 91 | 92 | 93 | @pytest.fixture 94 | def gas_limit(): 95 | return 94040201 96 | 97 | 98 | @pytest.fixture 99 | def block_number(token_address): 100 | return 5197 101 | 102 | 103 | @pytest.fixture 104 | def fees_schedule(): 105 | return 91027619323716 106 | 107 | 108 | @pytest.fixture 109 | def bounty_parameters(): 110 | return { 111 | 'arbiter_lookback_range': 100, 112 | 'arbiter_vote_window': 100, 113 | 'assertion_bid_maximum': 1000000000000000000, 114 | 'assertion_bid_minimum': 62500000000000000, 115 | 'assertion_fee': 31250000000000000, 116 | 'assertion_reveal_window': 10, 117 | 'bounty_amount_minimum': 100, 118 | 'bounty_fee': 62500000000000000, 119 | 'max_duration': 100 120 | } 121 | 122 | 123 | @pytest.fixture 124 | def contract_fns(token_address, balances, bounty_parameters, fees_schedule): 125 | """mock out values of contract functions 126 | 127 | NOTE: if the function shares a name with a patched function here, that value will be used, e.g 128 | `contract_fns` does *not* distinguish between contracts. 
129 | """ 130 | 131 | fn_table = {} 132 | 133 | def patch_contract(func): 134 | fn_table[func.__name__] = lambda s, *args: func(*args) 135 | return fn_table[func.__name__] 136 | 137 | @patch_contract 138 | def balanceOf(address): 139 | return balances[address] 140 | 141 | @patch_contract 142 | def withdrawableBalanceOf(address): 143 | return balances[address] 144 | 145 | @patch_contract 146 | def bountyFee(): 147 | return bounty_parameters['bounty_fee'] 148 | 149 | @patch_contract 150 | def assertionFee(): 151 | return bounty_parameters['assertion_fee'] 152 | 153 | @patch_contract 154 | def assertionRevealWindow(): 155 | return bounty_parameters['assertion_reveal_window'] 156 | 157 | @patch_contract 158 | def arbiterVoteWindow(): 159 | return bounty_parameters['arbiter_vote_window'] 160 | 161 | @patch_contract 162 | def ASSERTION_BID_ARTIFACT_MAXIMUM(): 163 | return bounty_parameters['assertion_bid_maximum'] 164 | 165 | @patch_contract 166 | def ASSERTION_BID_ARTIFACT_MINIMUM(): 167 | return bounty_parameters['assertion_bid_minimum'] 168 | 169 | @patch_contract 170 | def fees(): 171 | return fees_schedule 172 | 173 | for name, value in bounty_parameters.items(): 174 | fn_table[name.upper()] = lambda s, value=value: value # bind the per-iteration value; a bare `lambda s: value` would late-bind the loop variable 175 | 176 | return fn_table 177 | 178 | 179 | @pytest.fixture 180 | def web3_blocking_values(balances, token_address, block_number, chain_id, gas_limit): 181 | """mock values for `web3.manager.request_blocking`""" 182 | return { 183 | 'eth_blockNumber': 184 | block_number, 185 | 'eth_call': 186 | lambda data, to: '0x' + '0'*64, 187 | 'eth_getBalance': 188 | lambda token_address, block: balances[token_address], 189 | 'eth_getBlockByNumber': 190 | lambda *_: web3.datastructures.AttributeDict({'gasLimit': gas_limit}), 191 | 'eth_getTransactionCount': 192 | lambda token_address, block: balances[token_address], 193 | 'eth_syncing': 194 | False, 195 | 'net_version': 196 | chain_id 197 | } 198 | 199 | 200 | @pytest.fixture(autouse=True) 201 | def mock_polyswarmd(monkeypatch): 202 | """Mock polyswarmd functions which call out to external services""" 203 | monkeypatch.setattr(_polyswarmd.config.service.Service, "test_reachable", lambda *_: True) 204 | monkeypatch.setattr( 205 | _polyswarmd.services.ethereum.service.EthereumService, "check_chain_id", lambda *_: True 206 | ) 207 | 208 | 209 | @pytest.fixture(autouse=True) 210 | def mock_w3(monkeypatch, contract_fns, web3_blocking_values): 211 | """Mock out the underlying web3.py functions so that tests can run without a geth node""" 212 | _ContractFunction_call = web3.contract.ContractFunction.call 213 | 214 | def mock_call(w3_cfn, *args, **kwargs): 215 | name = w3_cfn.fn_name 216 | if name not in contract_fns: 217 | print("WARNING: Using non-mocked contract function: ", name) 218 | fn = contract_fns.get(name, _ContractFunction_call) 219 | return fn(w3_cfn, *w3_cfn.args) 220 | 221 | def mock_request_blocking(self, method, params): 222 | mock = web3_blocking_values[method] 223 | return mock(*params) if callable(mock) else mock 224 | 225 | monkeypatch.setattr(web3.manager.RequestManager, "request_blocking", mock_request_blocking) 226 | monkeypatch.setattr(web3.contract.ContractFunction, "call", mock_call) 227 | -------------------------------------------------------------------------------- /tests/fixtures/config/chain/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "ipfs_uri": "http://localhost:5001", 3 | "artifact_limit": 256, 4 | "profiler_enabled": false 5 | } 6 |
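The `contract_fns` and `mock_w3` fixtures in tests/conftest.py above resolve contract calls purely by function name against a table of fakes, falling back to the real call when a name has not been registered. A minimal, dependency-free sketch of that dispatch pattern follows; every name in it is illustrative only and not part of polyswarmd or web3.py:

# Illustrative sketch of the name-keyed dispatch used by contract_fns/mock_w3 above.
fn_table = {}

def patch_contract(func):
    # Register a fake under its own name; the first argument stands in for the
    # contract-function object that the real call would receive.
    fn_table[func.__name__] = lambda cfn, *args: func(*args)
    return func

@patch_contract
def balanceOf(address):
    return {'0x4B1867c484871926109E3C47668d5C0938CA3527': 12345}.get(address, 0)

def dispatch(fn_name, cfn, *args, fallback=lambda *_: NotImplemented):
    # Use the fake when one is registered for this name, otherwise fall through.
    return fn_table.get(fn_name, fallback)(cfn, *args)

assert dispatch('balanceOf', None, '0x4B1867c484871926109E3C47668d5C0938CA3527') == 12345
assert dispatch('someUnmockedFn', None) is NotImplemented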
-------------------------------------------------------------------------------- /tests/fixtures/config/chain/homechain.json: -------------------------------------------------------------------------------- 1 | { 2 | "nectar_token_address": "0x2A2cEDE7598F91240068714fD83E449b19853Cb9", 3 | "arbiter_staking_address": "0x7EB236462eCe1151Ac8502a0A31dED27bC85aDa1", 4 | "erc20_relay_address": "0xa4a398E8E531B70c935D9085850465d855a59306", 5 | "offer_registry_address": "0x63c652d6b2798221354d57E3dCEb241700c48fc2", 6 | "bounty_registry_address": "0x6aDf0cDB042588Ec4a88393871Ddede57cC168E3", 7 | "eth_uri": "http://localhost:8545", 8 | "chain_id": 1337, 9 | "free": true 10 | } 11 | -------------------------------------------------------------------------------- /tests/fixtures/config/chain/sidechain.json: -------------------------------------------------------------------------------- 1 | { 2 | "nectar_token_address": "0x2A2cEDE7598F91240068714fD83E449b19853Cb9", 3 | "arbiter_staking_address": "0xd143eD6591c0F11c3b9e279bC4A5932958caaC08", 4 | "erc20_relay_address": "0xBc7eFcE4b8be4715630475a060B53c33829101e0", 5 | "offer_registry_address": "0x377A1ce373Ae0C5878C248cA34f72186A8e1Da38", 6 | "bounty_registry_address": "0x21262bF29FF08691C8A72bc6f22F791996E1891F", 7 | "eth_uri": "http://localhost:7545", 8 | "chain_id": 1338, 9 | "free": true 10 | } 11 | -------------------------------------------------------------------------------- /tests/fixtures/config/polyswarmd/polyswarmd.yml: -------------------------------------------------------------------------------- 1 | artifact: 2 | max_size: 34603008 3 | fallback_max_size: 10485760 4 | limit: 256 5 | library: 6 | module: polyswarmd.services.artifact.ipfs 7 | class_name: IpfsServiceClient 8 | community: gamma 9 | eth: 10 | trace_transactions: true 11 | directory: "tests/fixtures/config/chain/" 12 | # consul: 13 | # uri: http://localhost:8500 14 | profiler: 15 | enabled: false 16 | websocket: 17 | enabled: true 18 | -------------------------------------------------------------------------------- /tests/test_balances.py: -------------------------------------------------------------------------------- 1 | from .utils import failed, heck 2 | 3 | 4 | def test_get_balance_total_stake(client, token_address, balances): 5 | assert client.get(f'/balances/{token_address}/staking/total').json == { 6 | 'result': str(balances[token_address]), 7 | 'status': 'OK' 8 | } 9 | 10 | assert failed(client.get(f'/balances/INVALID/staking/total')) 11 | 12 | 13 | def test_get_balance_withdrawable_stake(client, token_address, balances): 14 | assert client.get(f'/balances/{token_address}/staking/withdrawable').json == { 15 | 'result': str(balances[token_address]), 16 | 'status': 'OK' 17 | } 18 | 19 | assert failed(client.get(f'/balances/INVALID/staking/withdrawable')) 20 | 21 | 22 | def test_get_balance_address_eth(client, token_address): 23 | assert client.get(f'/balances/{token_address}/eth').json == heck({ 24 | 'result': heck.NONEMPTYSTR, 25 | 'status': 'OK' 26 | }) 27 | -------------------------------------------------------------------------------- /tests/test_bloom.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import os 3 | import random 4 | 5 | import pytest 6 | 7 | from polyswarmd.utils.bloom import BloomFilter 8 | 9 | 10 | @pytest.fixture 11 | def log_entries(): 12 | 13 | def _mk_address(): 14 | return os.urandom(20) 15 | 16 | def _mk_topic(): 17 | return os.urandom(32) 18 | 19 | return [(_mk_address(), 
[_mk_topic() 20 | for _ in range(1, random.randint(0, 4))]) 21 | for _ in range(1, random.randint(0, 30))] 22 | 23 | 24 | def check_bloom(bloom, log_entries): 25 | for address, topics in log_entries: 26 | assert address in bloom 27 | for topic in topics: 28 | assert topic in bloom 29 | 30 | 31 | def test_bloom_filter_add_method(log_entries): 32 | bloom = BloomFilter() 33 | 34 | for address, topics in log_entries: 35 | bloom.add(address) 36 | for topic in topics: 37 | bloom.add(topic) 38 | 39 | check_bloom(bloom, log_entries) 40 | 41 | 42 | def test_bloom_filter_extend_method(log_entries): 43 | bloom = BloomFilter() 44 | 45 | for address, topics in log_entries: 46 | bloom.extend([address]) 47 | bloom.extend(topics) 48 | 49 | check_bloom(bloom, log_entries) 50 | 51 | 52 | def test_bloom_filter_from_iterable_method(log_entries): 53 | bloomables = itertools.chain.from_iterable( 54 | itertools.chain([address], topics) for address, topics in log_entries 55 | ) 56 | bloom = BloomFilter.from_iterable(bloomables) 57 | check_bloom(bloom, log_entries) 58 | 59 | 60 | def test_casting_to_integer(): 61 | bloom = BloomFilter() 62 | 63 | assert int(bloom) == 0 64 | 65 | bloom.add(b'value 1') 66 | bloom.add(b'value 2') 67 | assert int(bloom) == int( 68 | '63119152483043774890037882090529841075600744123634985501563996' 69 | '49538536948165624479433922134690234594539820621615046612478986' 70 | '72305890903532059401028759565544372404512800814146245947429340' 71 | '89705729059810916441565944632818634262808769353435407547341248' 72 | '57159120012171916234314838712163868338766358254974260070831608' 73 | '96074485863379577454706818623806701090478504217358337630954958' 74 | '46332941618897428599499176135798020580888127915804442383594765' 75 | '16518489513817430952759084240442967521334544396984240160630545' 76 | '50638819052173088777264795248455896326763883458932483359201374' 77 | '72931724136975431250270748464358029482656627802817691648' 78 | ) 79 | 80 | 81 | def test_casting_to_binary(): 82 | bloom = BloomFilter() 83 | 84 | assert bin(bloom) == '0b0' 85 | 86 | bloom.add(b'value 1') 87 | bloom.add(b'value 2') 88 | assert bin(bloom) == ( 89 | '0b1000000000000000000000000000000000000000001000000100000000000000' 90 | '000000000000000000000000000000000000000000000010000000000000000000' 91 | '000000000000000000000000000000000000000000000000000000000000000000' 92 | '000000000000000000000000000000000000000000000000000000000001000000' 93 | '000000000000000000000000000000000000000000000000000000000000000010' 94 | '000000000000000000000000000000000000000100000000000000000000001000' 95 | '000000000000000000000000000000000000000000000000000000000000000000' 96 | '000000000000000000000000000000000000000000000000000000000000000000' 97 | '000000000000000000000000000000000000000000000000000000000000000000' 98 | '000000000000000000000000000000000000000010000000000000000000000000' 99 | '000000000000000000000000000000000000000000000000000000000000000000' 100 | '000000000000000000000000000000000000000000000000000000000000000000' 101 | '000000000000000000000000000000000000000000000000000000000000000000' 102 | '000000000000000000000000000000000000000000000000000000000000000000' 103 | '000000000000000000000000000000000000000000000000000000000000000000' 104 | '000000000000000000000000000000000000000000000000000000000000000000' 105 | '000000000000000000000000000000000000000000000000000000000000000000' 106 | '000000000000000000000000000000000010000000000001000000000000001000' 107 | 
'000000000000000000000000000000000000000000000000000000000000000000' 108 | '000000000000000000000000000000000000000000000000000000000000000000' 109 | '000000000000000000000000000000000000000000000000000000000000000000' 110 | '000000000000000000000000000000000000000000000000000000000000000000' 111 | '000000000000000000000000000000000000000000000000000000000000000000' 112 | '000000000000000000000000000000000000000000000000000000000000000000' 113 | '000000000000000000000000000000000000000000000000000000000000000000' 114 | '000000000000000000000000000000000000000000000000000000000000000000' 115 | '000000001000000000000000000000000000000000000000000000000000100000' 116 | '000000000000000000000000000000000000000000000000000000000000000000' 117 | '000000000000000000000000000000000000000000000000000000000000000000' 118 | '000000000000000000000000000000000000000000000000100000000000000000' 119 | '00000000000000000000000000000000000001000000000000000000000000' 120 | ) 121 | 122 | 123 | def test_combining_filters(): 124 | b1 = BloomFilter() 125 | b2 = BloomFilter() 126 | 127 | b1.add(b'a') 128 | b1.add(b'b') 129 | b1.add(b'c') 130 | 131 | b2.add(b'd') 132 | b2.add(b'e') 133 | b2.add(b'f') 134 | 135 | b1.add(b'common') 136 | b2.add(b'common') 137 | 138 | assert b'a' in b1 139 | assert b'b' in b1 140 | assert b'c' in b1 141 | 142 | assert b'a' not in b2 143 | assert b'b' not in b2 144 | assert b'c' not in b2 145 | 146 | assert b'd' in b2 147 | assert b'e' in b2 148 | assert b'f' in b2 149 | 150 | assert b'd' not in b1 151 | assert b'e' not in b1 152 | assert b'f' not in b1 153 | 154 | assert b'common' in b1 155 | assert b'common' in b2 156 | 157 | b3 = b1 | b2 158 | 159 | assert b'a' in b3 160 | assert b'b' in b3 161 | assert b'c' in b3 162 | assert b'd' in b3 163 | assert b'e' in b3 164 | assert b'f' in b3 165 | assert b'common' in b3 166 | 167 | b4 = b1 + b2 168 | 169 | assert b'a' in b4 170 | assert b'b' in b4 171 | assert b'c' in b4 172 | assert b'd' in b4 173 | assert b'e' in b4 174 | assert b'f' in b4 175 | assert b'common' in b4 176 | 177 | b5 = BloomFilter(int(b1)) 178 | b5 |= b2 179 | 180 | assert b'a' in b5 181 | assert b'b' in b5 182 | assert b'c' in b5 183 | assert b'd' in b5 184 | assert b'e' in b5 185 | assert b'f' in b5 186 | assert b'common' in b5 187 | 188 | b6 = BloomFilter(int(b1)) 189 | b6 += b2 190 | 191 | assert b'a' in b6 192 | assert b'b' in b6 193 | assert b'c' in b6 194 | assert b'd' in b6 195 | assert b'e' in b6 196 | assert b'f' in b6 197 | assert b'common' in b6 198 | -------------------------------------------------------------------------------- /tests/test_bounties.py: -------------------------------------------------------------------------------- 1 | from .utils import heck 2 | 3 | 4 | def test_get_bounties(client, token_address): 5 | response = client.get('/bounties', query_string={'account': token_address}) 6 | assert response.json == heck({'result': heck.ARRAY, 'status': 'OK'}) 7 | 8 | 9 | def test_get_bounties(client, token_address, bounty_parameters): 10 | response = client.get('/bounties/parameters', query_string={'account': token_address}) 11 | assert response.json == heck({'result': bounty_parameters, 'status': 'OK'}) 12 | -------------------------------------------------------------------------------- /tests/test_eth.py: -------------------------------------------------------------------------------- 1 | from .utils import heck 2 | 3 | 4 | def test_get_nonce(client, token_address): 5 | response = client.get('/nonce', query_string={'account': 
token_address}).json 6 | assert response == heck({'result': heck.UINT, 'status': 'OK'}) 7 | -------------------------------------------------------------------------------- /tests/test_event_message.py: -------------------------------------------------------------------------------- 1 | from collections import UserList 2 | from contextlib import contextmanager 3 | from curses.ascii import EOT as END_OF_TRANSMISSION 4 | import statistics 5 | import time 6 | from typing import ClassVar, Generator, Iterator, List, Mapping 7 | from string import ascii_lowercase 8 | import unittest.mock 9 | import uuid 10 | 11 | import gevent 12 | from gevent.queue import Empty 13 | import pytest 14 | import ujson 15 | 16 | from polyswarmd.services.ethereum.rpc import EthereumRpc 17 | from polyswarmd.views.event_message import WebSocket 18 | from polyswarmd.websockets.filter import ( 19 | ContractFilter, 20 | FilterManager, 21 | FilterWrapper, 22 | ) 23 | from polyswarmd.websockets.messages import WebsocketMessage 24 | 25 | BEGIN = time.time() 26 | 27 | 28 | def now(): 29 | return int(1000 * (time.time() - BEGIN)) 30 | 31 | 32 | TX_TS = 'sent' 33 | TXDIFF = 'interval' 34 | CPUTIME = 'pipeline_latency' 35 | START = 'start' 36 | FILTER = 'filter' 37 | STEP = 'step' 38 | NTH = 'nth' 39 | TICK = 'tick' 40 | 41 | # How many 'ticks' should happen in a single step, allowing us to test more than one message being 42 | # returned at one time 43 | STRIDE = 10 44 | 45 | # Generator for printable names used in debugging 46 | FILTER_IDS = (l1 + l2 for l1 in ascii_lowercase for l2 in ascii_lowercase) 47 | 48 | 49 | @pytest.fixture 50 | def mock_sleep(monkeypatch): 51 | """If loaded, `gevent.sleep` simply returns a no-op unittest.Mock""" 52 | mock = unittest.mock.Mock(gevent.sleep) 53 | monkeypatch.setattr(gevent, "sleep", mock) 54 | return mock 55 | 56 | 57 | @pytest.fixture 58 | def rpc(monkeypatch): 59 | 60 | def patch(chain): 61 | RPC = EthereumRpc(chain) 62 | RPC.register = unittest.mock.Mock(wraps=RPC.register) 63 | RPC.unregister = unittest.mock.Mock(wraps=RPC.unregister) 64 | RPC.poll = unittest.mock.Mock(wraps=RPC.poll) 65 | RPC.filter_manager.setup_event_filters = unittest.mock.Mock( 66 | FilterManager.setup_event_filters 67 | ) 68 | return RPC 69 | 70 | return patch 71 | 72 | 73 | class NOPMessage(WebsocketMessage): 74 | """Stub for a polyswarmd.websocket.message object""" 75 | contract_event_name: ClassVar[str] = 'NOP_CONTRACT' 76 | event: ClassVar[str] = 'NOP_EVENT' 77 | 78 | 79 | class MockFilter(ContractFilter): 80 | """Mock implementation of a Web3Py Filter. 81 | 82 | If no ``source`` generator is provided, it creates an event message generator running for for 83 | ``end`` "steps" and yielding ``1/rate`` messages on average. 84 | """ 85 | 86 | def __init__(self, rate=1.0, source=None, end=100, backoff=False): 87 | # The rate at which this filter should generate new messages 88 | self.poll_interval = rate 89 | self.source = source or self.uniform(int(self.poll_interval * STRIDE), end=end) 90 | self.backoff = backoff 91 | 92 | def __call__(self, contract_event_name): 93 | # Verify that _something_ is being passed in, even if we're not using it. 
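# (the filter manager ends up calling this factory with the registered message type's
# contract_event_name, which is what the assertion below checks)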
94 | assert contract_event_name == NOPMessage.contract_event_name 95 | self.current = -1 96 | self.filter_id = next(FILTER_IDS) 97 | self.sent = 0 98 | self.start = now() 99 | return self 100 | 101 | def format_entry(self, step): 102 | return {FILTER: self.filter_id, NTH: self.sent, TX_TS: now(), START: self.start, STEP: step} 103 | 104 | def get_new_entries(self) -> Iterator: 105 | try: 106 | msgs = list(filter(None, [next(self.source) for i in range(STRIDE)])) 107 | yield from msgs 108 | self.sent += len(msgs) 109 | except StopIteration: 110 | raise gevent.GreenletExit 111 | 112 | def uniform(self, rate: int, end: int, offset=0) -> Generator: 113 | """Event message generator, runs for ``end`` steps, yielding ``1/rate`` messages each step""" 114 | for step in range(0, end * STRIDE): 115 | yield self.format_entry(step=step) if step % rate == 0 else None 116 | return step 117 | 118 | 119 | class enrich(UserList): 120 | elapsed = property(lambda msgs: max(msgs[TX_TS]) - min(msgs[TX_TS])) 121 | steps = property(lambda msgs: [v for v in msgs[STEP] if v > 1e-1]) 122 | responses = property(lambda msgs: len(msgs.steps)) 123 | latency_var = property(lambda msgs: statistics.pvariance(msgs.steps)) 124 | latency_avg = property(lambda msgs: statistics.mean(msgs.steps)) 125 | usertime_avg = property(lambda msgs: statistics.mean(msgs[CPUTIME])) 126 | sources = property(lambda msgs: len(set(msgs[FILTER]))) 127 | 128 | def __init__(self, messages, extra={}): 129 | self.data = list(self.enrich_messages(messages, extra)) 130 | 131 | def enrich_messages(self, messages, extra={}): 132 | # ensure we've got some messages 133 | assert len(messages) > 0 134 | prev = {} 135 | for msg in map(ujson.loads, messages): 136 | # ensure we got an 'event' tag which should be included with all WebsocketMessage 137 | assert msg['event'] == NOPMessage.event 138 | emsg = msg['data'] 139 | emsg[TX_TS] = emsg.get(TX_TS, -1) 140 | emsg[TXDIFF] = emsg[TX_TS] - prev.get(TX_TS, emsg[TX_TS]) 141 | msg.update(extra) 142 | yield emsg 143 | prev = emsg 144 | 145 | def by_source(self): 146 | sources = {} 147 | for msg in self: 148 | sources[msg[FILTER]] = sources.get(msg[FILTER], []) + [msg] 149 | return sources 150 | 151 | def __getitem__(self, key): 152 | if isinstance(key, int): 153 | return super().__getitem__(key) 154 | return [msg[key] for msg in self.data] 155 | 156 | def __str__(self): 157 | props = ['elapsed', 'responses', 'usertime_avg'] 158 | return '<%s summary=%s>' % (type(self), {k: getattr(self, k) for k in props}) 159 | 160 | 161 | class TestWebsockets: 162 | 163 | @contextmanager 164 | def start_rpc(self, filters, ws, RPC): 165 | RPC.register(ws) 166 | for ft in filters: 167 | RPC.filter_manager.register(ft, NOPMessage, backoff=ft.backoff) 168 | yield RPC 169 | RPC.unregister(ws) 170 | for mock in [RPC.poll, RPC.register, RPC.unregister, RPC.filter_manager.setup_event_filters]: 171 | mock.assert_called_once() 172 | assert len(RPC.filter_manager.pool) == 0 173 | assert len(RPC.websockets) == 0 174 | 175 | def events(self, filters, RPC, ws=None): 176 | """Mimic the behavior of ``/events`` endpoint 177 | 178 | Keep the implementation of this test as close as possible to 179 | `event_message.py#init_websockets#events(ws)`, e.g please do 180 | do not switch this to `join()` the pool, etc. 
181 | 182 | If you find a way to use `Flask.test_client` with `Sockets`, please let me know - zv 183 | """ 184 | ws = ws or WebSocket(str(uuid.uuid4())) 185 | with self.start_rpc(filters=filters, ws=ws, RPC=RPC): 186 | msgs = [] 187 | while True: 188 | try: 189 | msg = ws.queue.get(block=True, timeout=0) 190 | msgs.append(msg) 191 | # print(msg) 192 | except Empty: 193 | break 194 | enriched = enrich(msgs) 195 | assert enriched.sources == len(filters) 196 | return enriched 197 | 198 | def test_recv(self, chains, rpc, mock_sleep): 199 | """ 200 | - Verify that we can receive messages through ``EthereumRPC``'s ``FilterManager`` 201 | - The number of messages sent should equal the number of messages received 202 | """ 203 | filters = [MockFilter(rate=1 / 2) for i in range(10)] 204 | enriched = self.events(filters=filters, RPC=rpc(chains)) 205 | assert len(enriched) == sum(f.sent for f in filters) 206 | 207 | def test_concurrent_rpc(self, app, rpc, mock_sleep): 208 | """ 209 | - Test multiple concurrent RPC & FilterManager instances: 210 | - Two ``EthereumRPC``, ``FilterManager`` & ``Websocket``s should be able to operate 211 | independently of one another on different chains""" 212 | rate = 1 / 2 213 | gs = gevent.joinall([ 214 | gevent.spawn( 215 | self.events, 216 | filters=[MockFilter(rate=rate, end=100 + (1+i) * 50) for _ in range(2)], 217 | RPC=RPC 218 | ) for i, RPC in enumerate(map(rpc, app.config['POLYSWARMD'].chains.values())) 219 | ]) 220 | # every greenlet should have finished successfully 221 | assert all(g.successful() for g in gs) 222 | ag, bg = map(lambda g: g.value, gs) 223 | # we should have gotten at least end * rate - 5 messages 224 | assert len(ag) >= 600 225 | # the second RPC ran for longer, therefore should have more messages 226 | assert len(bg) / len(ag) == 200 / 150 227 | 228 | def test_backoff(self, chains, rpc, mock_sleep): 229 | """ 230 | - Validate filter-request backoff logic: 231 | - Filters with identical message intervals but differing in wait parameters should differ in 232 | the number of messages ultimately dispatched 233 | - We should automatically introduce random variance to prevent a large number of clients 234 | from reconnecting simultaneously 235 | - Filters should never wait more than 30x their minimum wait time 236 | """ 237 | rate = 11 / 2 238 | filters = [MockFilter(rate=rate * i, backoff=True, end=300) for i in range(1, 6)] 239 | enriched = self.events(filters=filters, RPC=rpc(chains)) 240 | 241 | sleeps = [s for s in map(lambda x: x[0][0], mock_sleep.call_args_list)] 242 | rounded = set([round(s * 2) / 2 for s in sleeps]) 243 | # we should never have a (base) wait time more than 10x larger than the smallest (nonzero) wait time 244 | assert min(filter(lambda x: x > 0, rounded)) * 10 > max(rounded) 245 | # we should be adding a random factor to each `compute_wait` output 246 | assert len(sleeps) > len(rounded) 247 | # We should see each of the wait periods (rounded to the nearest 0.5) at least once 248 | assert len(rounded) - 2 * (int(FilterWrapper.MAX_WAIT) - int(FilterWrapper.MIN_WAIT)) <= 1 249 | 250 | # verify that despite the backoff, sources with a higher rate churn out more events 251 | by_src = enriched.by_source() 252 | for i in range(len(filters) - 1): 253 | f, s = map(lambda idx: filters[idx].filter_id, (i, i + 1)) 254 | assert len(by_src[f]) > len(by_src[s]) 255 | -------------------------------------------------------------------------------- /tests/test_offers.py: -------------------------------------------------------------------------------- 1 | from .utils
import failed, heck 2 | 3 | # def test_get_balance_total_stake(client, mock_w3, token_address, balances): 4 | # assert sane( 5 | # response=client.get(f'/offers/'), 6 | # expected=heck({ 7 | # 'result': str(balances[token_address]), 8 | # 'status': 'OK' 9 | # }) 10 | # ) 11 | 12 | # assert failed(client.get(f'/balances/INVALID/staking/total')) 13 | -------------------------------------------------------------------------------- /tests/test_relay.py: -------------------------------------------------------------------------------- 1 | from urllib.parse import urlencode 2 | 3 | import pytest 4 | 5 | from .utils import heck 6 | 7 | 8 | @pytest.fixture 9 | def tx_success_response(token_address, base_nonce, TX_SIG_HASH): 10 | return heck({ 11 | 'result': { 12 | 'transactions': [{ 13 | 'chainId': heck.SUBSTITUTE, 14 | 'data': lambda s: s[2:].startswith(TX_SIG_HASH) and len(s) > len(TX_SIG_HASH) + 32, 15 | 'gas': heck.POSINT, 16 | 'gasPrice': 0, 17 | 'nonce': base_nonce, 18 | 'to': heck.ETHADDR, 19 | 'value': 0 20 | }] 21 | }, 22 | 'status': 'OK' 23 | }) 24 | 25 | 26 | @pytest.fixture 27 | def tx_query_string(token_address, base_nonce): 28 | return {'account': token_address, 'base_nonce': base_nonce} 29 | 30 | 31 | def test_deposit_funds_success(client, tx_success_response, tx_query_string): 32 | response = client.post('/relay/deposit', query_string=tx_query_string, json={'amount': '1'}) 33 | assert response.json == tx_success_response 34 | 35 | 36 | def test_withdrawal_funds_success(client, tx_success_response, tx_query_string): 37 | response = client.post('/relay/withdrawal', query_string=tx_query_string, json={'amount': '1'}) 38 | assert response.json == tx_success_response 39 | 40 | 41 | def test_fees_endpoint(client, chain_config, token_address, fees_schedule): 42 | resp = client.get( 43 | f'/relay/fees?' + urlencode({ 44 | 'chain': chain_config["chain_name"], 45 | 'account': token_address 46 | }) 47 | ).json 48 | assert resp == {'status': 'OK', 'result': {'fees': fees_schedule}} 49 | -------------------------------------------------------------------------------- /tests/test_status.py: -------------------------------------------------------------------------------- 1 | from .utils import heck 2 | 3 | 4 | def test_get_status(client): 5 | assert client.get('/status').json == heck({ 6 | 'result': { 7 | 'artifact_services': { 8 | 'ipfs': { 9 | 'reachable': True 10 | } 11 | }, 12 | 'community': 'gamma', 13 | 'home': { 14 | 'block': lambda x: x > 0, 15 | 'reachable': True, 16 | 'syncing': False 17 | }, 18 | 'side': { 19 | 'block': lambda x: x > 0, 20 | 'reachable': True, 21 | 'syncing': False 22 | } 23 | }, 24 | 'status': 'OK' 25 | }) 26 | -------------------------------------------------------------------------------- /tests/test_suite_internals.py: -------------------------------------------------------------------------------- 1 | import pytest # noqa 2 | 3 | from .utils import heck 4 | 5 | 6 | def pytest_generate_tests(metafunc): 7 | # This pytest hook that is called once for each test function It takes all of the dicts found in 8 | # `TestHeck`, selecting the key that shares the current test function's name & then 9 | # parameterizes by those as if they were arguments. 
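# (i.e. every dict listed under TestHeck.params[<test function name>] becomes one
# parametrized `obj` argument for that test)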
10 | name = metafunc.function.__name__ 11 | metafunc.parametrize(['obj'], [[funcargs] for funcargs in metafunc.cls.params[name]]) 12 | 13 | 14 | GOOD = 'good' 15 | BAD = 'bad' 16 | F1 = 'first' 17 | F2 = 'second' 18 | F3 = 'third' 19 | N1 = 'Alpha' 20 | N2 = 'Beta' 21 | N3 = 'Delta' 22 | N4 = 'wut' 23 | 24 | 25 | class TestHeck: 26 | params = { 27 | 'test_equal_simple': [ 28 | {GOOD: 1}, 29 | {GOOD: lambda x: x == 1}, 30 | {GOOD: lambda x: x}, 31 | {GOOD: lambda x: True}, 32 | {GOOD: lambda x: x} 33 | ], 34 | 'test_equal_deep': [ 35 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [3, 4]}}}}, 36 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [3, lambda x: x == 4]}}}}, 37 | {F1: {F2: {F3: {N1: 1, N2: lambda x: isinstance(x, str), N3: [3, 4]}}}}, 38 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: lambda x: len(x) > 0}}}}, 39 | {F1: {F2: {F3: {N1: lambda x: x == 1, N2: lambda x: isinstance(x, str), N3: [3, 4]}}}}, 40 | {F1: {F2: {F3: {N1: lambda x: x == 1, N2: lambda x: isinstance(x, str), N3: lambda x: x[0] == 3}}}}, 41 | {F1: {F2: {F3: lambda d: len(d.keys()) == 3}}} 42 | ], 43 | 'test_not_equal_simple': [ 44 | {BAD: lambda x: x > 1}, 45 | {BAD: lambda x: x == float('nan')}, 46 | {BAD: lambda x: x == -10123123}, 47 | {BAD: lambda x: not x}, 48 | {BAD: 2} 49 | ], 50 | 'test_not_equal_deep': [ 51 | {F1: {F2: {F3: {N1: 2, N2: 'two', N3: [3, 4]}}}}, 52 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [4, 3]}}}}, 53 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [4, lambda x: x != 3]}}}}, 54 | {F1: {F2: {F3: {N1: 1, N2: lambda x: isinstance(x, int), N3: [3, 4]}}}}, 55 | {F1: {F2: {F3: {N1: 1, N2: 'two', N3: lambda x: len(x) == 0}}}}, 56 | {F1: {F2: {F3: {N1: lambda x: x == 1, N2: lambda x: isinstance(x, str), N3: [4, 3]}}}}, 57 | {F1: {F2: {F3: {N1: lambda x: x == 2, N2: lambda x: isinstance(x, str), N3: [3, 4]}}}}, 58 | {F1: {F2: {F3: lambda d: len(d.keys()) == 0}}}, 59 | {F1: {F2: {F3: 1}}} 60 | ], 61 | } 62 | 63 | def test_equal_simple(self, obj): 64 | assert {GOOD: 1} == heck(obj) 65 | 66 | def test_equal_deep(self, obj): 67 | assert {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [3, 4]}}}} == heck(obj) 68 | 69 | def test_not_equal_simple(self, obj): 70 | assert {BAD: 1} != heck(obj) 71 | 72 | def test_not_equal_deep(self, obj): 73 | assert {F1: {F2: {F3: {N1: 1, N2: 'two', N3: [3, 4]}}}} != heck(obj) 74 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import ujson 3 | 4 | import polyswarmd.utils 5 | 6 | 7 | def test_to_padded_hex(): 8 | assert polyswarmd.utils.to_padded_hex("0xabcd").endswith("abcd") 9 | assert polyswarmd.utils.to_padded_hex(15).endswith("f") 10 | assert polyswarmd.utils.to_padded_hex("AAAA").endswith("41414141") 11 | assert polyswarmd.utils.to_padded_hex(b"AAAA").endswith("41414141") 12 | 13 | 14 | def test_bool_list_to_int(): 15 | bool_list = polyswarmd.utils.bool_list_to_int([True, True, False, True]) 16 | expected = 11 17 | assert bool_list == expected 18 | 19 | 20 | def test_int_to_bool_list(): 21 | bool_list = polyswarmd.utils.int_to_bool_list(11) 22 | expected = [True, True, False, True] 23 | assert bool_list == expected 24 | 25 | 26 | def test_safe_int_to_bool_list(): 27 | bool_list = polyswarmd.utils.safe_int_to_bool_list(0, 5) 28 | expected = [False, False, False, False, False] 29 | assert bool_list == expected 30 | 31 | 32 | def test_safe_int_to_bool_list_leading_zeros(): 33 | bool_list = polyswarmd.utils.safe_int_to_bool_list(1, 5) 34 | expected = [True, 
False, False, False, False] 35 | assert bool_list == expected 36 | 37 | 38 | @pytest.mark.skip(reason='waiting on dump of input inside getOfferState() run') 39 | def test_state_to_dict(client, token_address, app, ZERO_ADDRESS): 40 | with app.app_context(): 41 | token = app.config['POLYSWARMD'].chains['home'].nectar_token.address 42 | w3 = app.config['POLYSWARMD'].chains['home'].w3 43 | mock_state_dict = { 44 | 'guid': '3432', 45 | 'close_flag': 1, 46 | 'nonce': 10, 47 | 'offer_amount': 100, 48 | 'expert': token_address, 49 | 'expert_balance': 1234, 50 | 'ambassador': token_address, 51 | 'ambassador_balance': 1234, 52 | 'msig_address': ZERO_ADDRESS, 53 | 'artifact_hash': 'null', 54 | 'mask': [True], 55 | 'verdicts': [True], 56 | 'meta_data': 'test' 57 | } 58 | rv = client.post( 59 | f'/offers/state?account={token_address}', 60 | content_type='application/json', 61 | data=ujson.dumps(mock_state_dict) 62 | ) 63 | state = rv.json['result']['state'] 64 | expected = { 65 | 'nonce': 10, 66 | 'offer_amount': 100, 67 | 'msig_address': ZERO_ADDRESS, 68 | 'ambassador_balance': 1234, 69 | 'expert_balance': 1234, 70 | 'ambassador': token_address, 71 | 'expert': token_address, 72 | 'is_closed': 1, 73 | 'token': w3.toChecksumAddress(token), 74 | 'mask': [True], 75 | 'verdicts': [True] 76 | } 77 | 78 | assert polyswarmd.utils.state_to_dict(state) == expected 79 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains utilities *FOR TESTING*; it should *NOT* contain tests of polyswarmd utilities 3 | """ 4 | from collections import UserDict 5 | from collections.abc import Collection, Mapping 6 | from curses.ascii import CAN as CANCEL 7 | from curses.ascii import SUB as SUBSTITUTE 8 | import json 9 | import string 10 | 11 | 12 | class heck(UserDict): 13 | """Mapping proxy which allows functions as values to override the inner equality checks""" 14 | SUBSTITUTE = bytes([SUBSTITUTE]) 15 | FAILED = bytes([CANCEL]) 16 | 17 | def __init__(self, data): 18 | if not isinstance(data, Mapping): 19 | raise ValueError("Invalid type: %s" % type(data)) 20 | super().__init__(data.copy()) 21 | 22 | def fixup(self, actual, expected): 23 | """Recursively resolve `expected` against `actual`: a callable passes when `expected(actual)` is truthy, SUBSTITUTE takes on the actual value, and anything else is left in place for a verbatim comparison""" 24 | if isinstance(expected, Collection) and isinstance(actual, Collection): 25 | if isinstance(expected, Mapping): 26 | return {k: self.fixup(actual[k], expected[k]) for k in expected} 27 | elif isinstance(expected, list): 28 | return [self.fixup(actual[i], expected[i]) for i, _ in enumerate(expected)] 29 | elif callable(expected): 30 | return actual if expected(actual) else self.FAILED 31 | elif expected == self.SUBSTITUTE: 32 | return actual 33 | return expected 34 | 35 | def __eq__(self, actual): 36 | """Check whether ACTUAL matches EXPECTED; any callables in EXPECTED are evaluated against their corresponding value in ACTUAL""" 37 | return actual == self.fixup(actual, expected=self.data) 38 | 39 | # ----------------------------------- 40 | 41 | @staticmethod 42 | def ETHADDR(addr: str) -> bool: 43 | addr = (addr[2:] if addr.startswith('0x') else addr).lower() 44 | return all(ch in string.hexdigits for ch in addr) 45 | 46 | @staticmethod 47 | def POSINT(num: int) -> bool: 48 | try: 49 | return num > 0 50 | except Exception: 51 | return False 52 | 53 | @staticmethod 54 | def UINT(num: int) -> bool: 55 | try: 56 | return num >= 0 57 | except Exception: 58 | return
False 59 | 60 | @staticmethod 61 | def ARRAY(x) -> bool: 62 | return isinstance(x, list) 63 | 64 | @staticmethod 65 | def NONEMPTYSTR(x: str) -> bool: 66 | return isinstance(x, str) and len(x) > 0 67 | 68 | 69 | def failed(response): 70 | return (response.status_code >= 400) or response.json.get('STATUS') == 'FAIL' 71 | 72 | 73 | def read_chain_cfg(chain_name): 74 | cfgpath = f'tests/fixtures/config/chain/{chain_name}chain.json' 75 | return {'chain_name': chain_name, **json.load(open(cfgpath))} 76 | --------------------------------------------------------------------------------
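To close, a short usage sketch for the `heck` helper defined in tests/utils.py above: callables and the class-level predicates stand in for values that only need to satisfy a property, while plain values are compared exactly. The snippet assumes it is run from the repository root, where `tests` is importable as a package:

from tests.utils import heck

response = {'result': '12345', 'status': 'OK'}

# Plain values must match exactly; a predicate passes when it returns something
# truthy for the corresponding value in the actual mapping.
assert response == heck({'result': heck.NONEMPTYSTR, 'status': 'OK'})
assert response == heck({'result': lambda s: s.isdigit(), 'status': 'OK'})

# A failing predicate simply makes the comparison unequal rather than raising.
assert response != heck({'result': heck.ARRAY, 'status': 'OK'})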