├── .dockerignore ├── .env.template ├── .github └── workflows │ ├── ci.yml │ └── package.yml ├── .gitignore ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── config ├── sys.config └── vm.args ├── priv ├── accounts.sql ├── blocks.sql ├── challenges.sql ├── cities.sql ├── dc_burns.sql ├── elections.sql ├── hotspots.sql ├── locations.sql ├── oracles.sql ├── ouis.sql ├── rewards.sql ├── snapshots.sql ├── state_channels.sql ├── stats.sql ├── txns.sql ├── validators.sql └── vars.sql ├── rebar.config ├── rebar.lock ├── rebar3 ├── src ├── bh_burn_type.erl ├── bh_cache.erl ├── bh_db_worker.erl ├── bh_db_worker.hrl ├── bh_gateway_mode.erl ├── bh_middleware_cors.erl ├── bh_middleware_cursor.erl ├── bh_middleware_throttle.erl ├── bh_pending_transaction_nonce_type.erl ├── bh_pending_transaction_status.erl ├── bh_pool_watcher.erl ├── bh_route_accounts.erl ├── bh_route_assert_locations.erl ├── bh_route_blocks.erl ├── bh_route_challenges.erl ├── bh_route_cities.erl ├── bh_route_dc_burns.erl ├── bh_route_elections.erl ├── bh_route_handler.erl ├── bh_route_handler.hrl ├── bh_route_hotspots.erl ├── bh_route_locations.erl ├── bh_route_oracle.erl ├── bh_route_ouis.erl ├── bh_route_pending_txns.erl ├── bh_route_rewards.erl ├── bh_route_snapshots.erl ├── bh_route_state_channels.erl ├── bh_route_stats.erl ├── bh_route_txns.erl ├── bh_route_validators.erl ├── bh_route_vars.erl ├── bh_route_versions.erl ├── bh_routes.erl ├── bh_sup.erl ├── bh_transaction_type.erl ├── bh_validator_status.erl ├── blockchain_http.app.src ├── blockchain_http_app.erl └── epgsql_cmd_eequery.erl └── test ├── bh_route_accounts_SUITE.erl ├── bh_route_assert_locations_SUITE.erl ├── bh_route_blocks_SUITE.erl ├── bh_route_challenges_SUITE.erl ├── bh_route_cities_SUITE.erl ├── bh_route_dc_burns_SUITE.erl ├── bh_route_elections_SUITE.erl ├── bh_route_hotspots_SUITE.erl ├── bh_route_locations_SUITE.erl ├── bh_route_oracle_SUITE.erl ├── bh_route_oui_SUITE.erl ├── bh_route_rewards_SUITE.erl ├── bh_route_snapshots_SUITE.erl ├── bh_route_state_channels_SUITE.erl ├── bh_route_stats_SUITE.erl ├── bh_route_txns_SUITE.erl ├── bh_route_validators_SUITE.erl ├── bh_route_vars_SUITE.erl ├── bh_route_versions_SUITE.erl ├── ct_utils.erl └── ct_utils.hrl /.dockerignore: -------------------------------------------------------------------------------- 1 | _build 2 | README.md 3 | LICENSE 4 | CONTRIBUTING.md 5 | Dockerfile 6 | Makefile 7 | -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | DATABASE_RO_URL=postgresql://user:password@hostname/database 2 | DATABASE_RW_URL=postgresql://user:password@hostname/database 3 | DATABASE_RO_POOL_SIZE=10 -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | jobs: 10 | CI: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | 17 | - name: Setup erlang 18 | uses: erlef/setup-beam@v1 19 | with: 20 | otp-version: 24 21 | 22 | - name: Setup rust 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | toolchain: stable 26 | default: true 27 | 28 | - name: Cancel previous runs 29 | uses: styfle/cancel-workflow-action@0.5.0 30 | with: 31 | access_token: ${{ github.token }} 32 | 33 | - name: Install 
build deps 34 | run: sudo apt install libsodium-dev 35 | 36 | - name: Cache Hex Packages 37 | if: ${{ !env.ACT }} 38 | uses: actions/cache@v1 39 | with: 40 | path: ~/.cache/rebar3/hex/hexpm/packages 41 | key: ${{ runner.os }}-hex-${{ hashFiles(format('{0}{1}', github.workspace, '/rebar.lock')) }} 42 | restore-keys: | 43 | ${{ runner.os }}-hex- 44 | 45 | - name: Cache Dialyzer PLTs 46 | if: ${{ !env.ACT }} 47 | uses: actions/cache@v1 48 | with: 49 | path: ~/.cache/rebar3/rebar3_*_plt 50 | key: ${{ runner.os }}-dialyzer-${{ hashFiles(format('{0}{1}', github.workspace, '/rebar.config')) }} 51 | restore-keys: | 52 | ${{ runner.os }}-dialyzer- 53 | 54 | - name: Build 55 | run: rebar3 compile 56 | 57 | - name: Run tests 58 | env: 59 | DATABASE_RO_URL: ${{ secrets.DATABASE_RO_URL }} 60 | DATABASE_RO_POOL_SIZE: 5 61 | run: rebar3 as test do eunit,ct,cover 62 | 63 | - name: Run Dialyzer 64 | run: rebar3 do dialyzer, xref 65 | 66 | - name: Generate coverage report 67 | run: rebar3 covertool generate 68 | 69 | - name: Upload coverage report 70 | if: ${{ !env.ACT }} 71 | uses: codecov/codecov-action@v1 72 | with: 73 | file: _build/test/covertool/blockchain_http.covertool.xml 74 | fail_ci_if_error: true 75 | -------------------------------------------------------------------------------- /.github/workflows/package.yml: -------------------------------------------------------------------------------- 1 | name: Package 2 | 3 | on: 4 | push: 5 | tags: "*" 6 | 7 | jobs: 8 | package: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v2 14 | with: 15 | # fetch all branch and tag history 16 | fetch-depth: 0 17 | 18 | - name: Set up Docker Buildx 19 | uses: docker/setup-buildx-action@v1 20 | 21 | - name: Name Release 22 | id: docker-tag 23 | uses: yuya-takeyama/docker-tag-from-github-ref-action@v1 24 | 25 | - name: Cache Docker Layers 26 | uses: actions/cache@v2 27 | with: 28 | path: /tmp/.buildx-cache 29 | key: ${{ runner.os }}-buildx-${{ github.sha }} 30 | restore-keys: | 31 | ${{ runner.os }}-buildx- 32 | 33 | - name: Login to Quay 34 | uses: docker/login-action@v1 35 | with: 36 | registry: quay.io 37 | username: ${{ secrets.QUAY_USERNAME }} 38 | password: ${{ secrets.QUAY_SECRET }} 39 | 40 | - name: Build and push 41 | if: ${{ startsWith(github.ref, 'refs/tags') }} 42 | id: docker_build 43 | uses: docker/build-push-action@v2 44 | with: 45 | # do not checkout a fresh copy of repo 46 | context: . 
47 | push: true 48 | target: runner 49 | cache-from: type=local,src=/tmp/.buildx-cache 50 | cache-to: type=local,dest=/tmp/.buildx-cache 51 | tags: | 52 | quay.io/team-helium/blockchain-http:${{ steps.docker-tag.outputs.tag }} 53 | 54 | - name: Image digest 55 | if: ${{ startsWith(github.ref, 'refs/tags') }} 56 | run: echo ${{ steps.docker_build.outputs.digest }} 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .rebar3 2 | _* 3 | .eunit 4 | *.o 5 | *.beam 6 | *.plt 7 | *.swp 8 | *.swo 9 | .erlang.cookie 10 | ebin 11 | log 12 | erl_crash.dump 13 | .rebar 14 | logs 15 | _build 16 | .idea 17 | *.iml 18 | rebar3.crashdump 19 | .DS_Store 20 | data/ 21 | .env* 22 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute to this repository # 2 | 3 | We value contributions from the community and will do everything we 4 | can to get them reviewed in a timely fashion. If you have code to send 5 | our way or a bug to report: 6 | 7 | * **Contributing Code**: If you have new code or a bug fix, fork this 8 | repo, create a logically-named branch, and [submit a PR against this 9 | repo](https://github.com/helium/blockchain-http/pulls). Include a 10 | write-up of the PR with details on what it does. 11 | 12 | * **Reporting Bugs**: Open an issue [against this 13 | repo](https://github.com/helium/blockchain-http/issues) with as much 14 | detail as you can. At the very least, include steps to 15 | reproduce the problem. 16 | 17 | This project is intended to be a safe, welcoming space for 18 | collaboration, and contributors are expected to adhere to the 19 | [Contributor Covenant Code of 20 | Conduct](http://contributor-covenant.org/). 21 | 22 | Above all, thank you for taking the time to be a part of the Helium community. 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM erlang:24-alpine as builder 2 | 3 | WORKDIR /app 4 | ENV REBAR_BASE_DIR /app/_build 5 | 6 | RUN apk add --no-cache --update \ 7 | git tar build-base linux-headers autoconf automake libtool pkgconfig \ 8 | dbus-dev bzip2 bison flex gmp-dev cmake lz4 libsodium-dev openssl-dev \ 9 | sed curl cargo 10 | 11 | # Install Rust toolchain 12 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y 13 | 14 | # build and cache dependencies as their own layer 15 | COPY rebar3 rebar.config rebar.lock ./ 16 | RUN ./rebar3 compile 17 | 18 | COPY . . 19 | RUN ./rebar3 compile 20 | 21 | RUN mkdir -p /opt/rel && \ 22 | ./rebar3 as prod tar && \ 23 | tar -zxvf $REBAR_BASE_DIR/prod/rel/*/*.tar.gz -C /opt/rel 24 | 25 | FROM alpine:3.16 as runner 26 | 27 | RUN apk add --update openssl libsodium ncurses libstdc++ 28 | 29 | ENV COOKIE=blockchain_http \ 30 | RELX_OUT_FILE_PATH=/tmp 31 | 32 | WORKDIR /opt/blockchain_http 33 | EXPOSE 8080 34 | 35 | COPY --from=builder /opt/rel .
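# The runner stage ships only the unpacked release from the builder stage
# plus its shared-library runtime dependencies; database URLs and pool sizes
# are supplied at runtime through environment variables (see .env.template)
# rather than being baked into the image.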
36 | 37 | ENTRYPOINT ["/opt/blockchain_http/bin/blockchain_http"] 38 | CMD ["foreground"] 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2018, Helium Systems Inc. 
179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | 192 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: compile shell clean cover test ci typecheck doc release start stop reset console 2 | 3 | REBAR=./rebar3 4 | SHORTSHA=`git rev-parse --short HEAD` 5 | PKG_NAME_VER=${SHORTSHA} 6 | 7 | OS_NAME=$(shell uname -s) 8 | 9 | ifeq (${OS_NAME},FreeBSD) 10 | MAKE="gmake" 11 | else 12 | MAKE="make" 13 | endif 14 | 15 | compile: 16 | $(REBAR) compile 17 | 18 | shell: 19 | $(REBAR) shell 20 | 21 | clean: 22 | $(REBAR) clean 23 | 24 | cover: 25 | $(REBAR) cover 26 | 27 | test: 28 | $(REBAR) as test do eunit,ct 29 | 30 | ci: 31 | $(REBAR) as test do eunit,ct,cover && $(REBAR) do xref, dialyzer 32 | $(REBAR) covertool generate 33 | codecov --required -f _build/test/covertool/blockchain_http.covertool.xml 34 | 35 | typecheck: 36 | $(REBAR) dialyzer 37 | 38 | doc: 39 | $(REBAR) edoc 40 | 41 | release: 42 | $(REBAR) as prod do release 43 | 44 | 45 | start: 46 | cp -f .env ./_build/prod/rel/blockchain_http/ 47 | ./_build/prod/rel/blockchain_http/bin/blockchain_http foreground 48 | 49 | stop: 50 | -./_build/prod/rel/blockchain_http/bin/blockchain_http stop 51 | 52 | reset: stop 53 | rm -rf ./_build/prod/rel/blockchain_http/log/* 54 | 55 | console: 56 | ./_build/prod/rel/blockchain_http/bin/blockchain_http remote_console 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # blockchain-http 2 | 3 | [![CI](https://github.com/helium/blockchain-http/actions/workflows/ci.yml/badge.svg)](https://github.com/helium/blockchain-http/actions/workflows/ci.yml) 4 | [![codecov](https://codecov.io/gh/helium/blockchain-http/branch/master/graph/badge.svg)](https://codecov.io/gh/helium/blockchain-http) 5 | 6 | This is an Erlang application to serve up the Helium blockchain as 7 | stored by the 8 | [blockchain-etl](https://github.com/helium/blockchain-etl) service and 9 | schema. The two applications rely on the schema being compatible to 10 | work together. 11 | 12 | 13 | ## Developer Usage 14 | 15 | * Clone this repository 16 | * Create a `.env` file by copying `.env.template` and editing it to 17 | reflect your postgres read-only and read-write access URLs 18 | 19 | * Run `make release` in the top-level folder 20 | 21 | * Run `make start` to start the application. Logs will be at 22 | `_build/prod/rel/blockchain_http/log/*`. 23 | 24 | Once started, the application serves up the blockchain 25 | through a number of routes. Documentation for these routes will be 26 | added soon.
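In the meantime, a quick smoke test once the release is running (the paths shown here are illustrative; the authoritative route list lives in `src/bh_routes.erl`):

```bash
# assuming the default port 8080 from config/sys.config
curl http://localhost:8080/v1/blocks/height
curl http://localhost:8080/v1/stats
```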
27 | 28 | ### Installing Required Packages on Ubuntu 29 | 30 | If running on Ubuntu, you will need the following packages installed before 31 | running `make release`: 32 | 33 | ```bash 34 | wget https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb 35 | sudo dpkg -i erlang-solutions_2.0_all.deb 36 | sudo apt-get update 37 | sudo apt install esl-erlang=1:23.2.3-1 cmake libsodium-dev libssl-dev 38 | sudo apt install build-essential 39 | ``` 40 | 41 | 42 | ## WARNING 43 | 44 | This application does NOT serve over TLS, and does NOT rate-limit 45 | or access-control clients. Please run this service behind a 46 | load balancer that terminates SSL and provides rate and access 47 | control. 48 | 49 | ## Using Docker 50 | 51 | ### Building the Docker Image 52 | 53 | `docker build -t helium/api .` 54 | 55 | ### Running the Docker Container 56 | 57 | ``` 58 | docker run -d --init \ 59 | --restart unless-stopped \ 60 | --publish 8080:8080/tcp \ 61 | --name api \ 62 | --mount type=bind,source=$HOME/api_data,target=/var/data \ 63 | -e DATABASE_RO_URL=postgresql://user:pass@127.0.0.1:5432/helium_blockchain \ 64 | -e DATABASE_RW_URL=postgresql://user:pass@127.0.0.1:5432/helium_blockchain \ 65 | -e DATABASE_RO_POOL_SIZE=10 \ 66 | helium/api 67 | ``` 68 | ### Updating Docker 69 | 70 | #### Navigate to your copy of the `blockchain-http` repository. 71 | 72 | `cd /path/to/blockchain-http` 73 | 74 | #### Stop the Docker container. 75 | 76 | `docker stop api` 77 | 78 | #### Remove the existing Docker container. 79 | 80 | `docker rm api` 81 | 82 | #### Update the repository. 83 | 84 | `git pull` 85 | 86 | #### Rebuild the Docker image. 87 | 88 | `docker build -t helium/api .` 89 | 90 | #### Run the updated Docker container. 91 | 92 | ``` 93 | docker run -d --init \ 94 | --restart unless-stopped \ 95 | --publish 8080:8080/tcp \ 96 | --name api \ 97 | --mount type=bind,source=$HOME/api_data,target=/var/data \ 98 | -e DATABASE_RO_URL=postgresql://user:pass@127.0.0.1:5432/helium_blockchain \ 99 | -e DATABASE_RW_URL=postgresql://user:pass@127.0.0.1:5432/helium_blockchain \ 100 | -e DATABASE_RO_POOL_SIZE=10 \ 101 | helium/api 102 | ``` 103 | -------------------------------------------------------------------------------- /config/sys.config: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | [ 3 | {blockchain_http, [ 4 | {port, 8080}, 5 | {throttle, #{ 6 | %% how much request time is allowed 7 | request_time => 20000, 8 | %% how many requests are allowed 9 | request_count => 100, 10 | %% over how long 11 | request_interval => 60000, 12 | %% requests shorter than this do not count towards quota 13 | grace_time => 0, 14 | %% log requests that take longer than this 15 | log_longer_than => 10000, 16 | %% put remaining request time in an HTTP response header 17 | debug => false, 18 | %% request header used to identify the actor to throttle by 19 | actor_header => <<"X-Forwarded-For">> 20 | }}, 21 | {db_ro_pool, [ 22 | {size, 100}, 23 | %% ets, named, or atomics 24 | {watcher_type, atomics}, 25 | %% hash or round_robin 26 | {dispatch_mechanism, round_robin} 27 | ]}, 28 | {db_rw_pool, [ 29 | {size, 2}, 30 | %% ets, named, or atomics 31 | {watcher_type, atomics}, 32 | %% hash or round_robin 33 | {dispatch_mechanism, round_robin} 34 | ]}, 35 | {db_ro_handlers, [ 36 | bh_route_blocks, 37 | bh_route_accounts, 38 | bh_route_hotspots, 39 | bh_route_txns, 40 | bh_route_elections, 41 | bh_route_challenges, 42 | bh_route_pending_txns, 43 | bh_route_stats, 44 | bh_route_oracle,
45 | bh_route_vars, 46 | bh_route_snapshots, 47 | bh_route_cities, 48 | bh_route_rewards, 49 | bh_route_ouis, 50 | bh_route_locations, 51 | bh_route_dc_burns, 52 | bh_route_state_channels, 53 | bh_route_validators, 54 | bh_route_versions 55 | ]}, 56 | {db_rw_handlers, [ 57 | bh_route_pending_txns 58 | ]} 59 | ]}, 60 | {lager, [ 61 | {suppress_supervisor_start_stop, true}, 62 | {killer_hwm, 1000}, 63 | {async_threshold, undefined}, 64 | {killer_reinstall_after, 5000}, 65 | {flush_queue, true}, 66 | {flush_threshold, 500}, 67 | {handlers, [ 68 | {lager_console_backend, [{level, info}]} 69 | ]} 70 | ]} 71 | ]. 72 | -------------------------------------------------------------------------------- /config/vm.args: -------------------------------------------------------------------------------- 1 | -name {{release_name}}@127.0.0.1 2 | -setcookie {{release_name}} 3 | 4 | +c true 5 | +C multi_time_warp 6 | +K true 7 | +A30 8 | -------------------------------------------------------------------------------- /priv/accounts.sql: -------------------------------------------------------------------------------- 1 | -- :account_list_base 2 | 3 | :with 4 | select 5 | :height, 6 | l.address, 7 | l.dc_balance, 8 | l.dc_nonce, 9 | l.security_balance, 10 | l.security_nonce, 11 | l.balance, 12 | coalesce(l.staked_balance, 0), 13 | coalesce(l.mobile_balance, 0), 14 | coalesce(l.iot_balance, 0), 15 | l.nonce 16 | :extend 17 | from :source 18 | :scope 19 | :order 20 | :limit 21 | 22 | -- :account_list_height 23 | (select max(height) from blocks) as height 24 | 25 | -- :account_inventory_source 26 | account_inventory l 27 | 28 | -- :account_scope 29 | where l.address = $1 30 | 31 | -- :account_at_block_with 32 | with account_data as ( 33 | select * from accounts 34 | where address = $1 and block <= $2 35 | ) 36 | 37 | -- :account_at_block_source 38 | account_data l 39 | 40 | -- :account_at_block_extend 41 | , (select first_block from account_inventory where address = $1) as first_block 42 | 43 | -- :account_list_extend 44 | , l.first_block 45 | 46 | -- :account_list_order 47 | order by l.first_block desc, l.address 48 | 49 | -- :account_list_before_scope 50 | where (l.address > $1 and l.first_block = $2) or (l.first_block < $2) 51 | 52 | -- :account_speculative_extend 53 | , l.first_block 54 | , (select count(*) from gateway_inventory where owner = l.address) as hotspot_count 55 | , (select count(*) from validator_inventory where owner = l.address) as validator_count 56 | , (select greatest(l.nonce, coalesce(max(p.nonce), l.nonce)) 57 | from pending_transactions p 58 | where p.address = l.address and nonce_type='balance' and status != 'failed' 59 | ) as speculative_nonce 60 | , (select greatest(l.security_nonce, coalesce(max(p.nonce), l.security_nonce)) 61 | from pending_transactions p 62 | where p.address = l.address and nonce_type='security' and status != 'failed' 63 | ) as speculative_sec_nonce 64 | 65 | 66 | -- :account_balance_series 67 | with ts as ( 68 | select generate_series( 69 | date_trunc($4::text, $2::timestamptz + $5::interval) - $3::interval, 70 | date_trunc($4::text, $2::timestamptz + $5::interval), 71 | $5::interval) as timestamp 72 | order by timestamp desc 73 | ), 74 | accounts_ts as ( 75 | select 76 | accounts.*, 77 | blocks.timestamp 78 | from accounts inner join blocks on blocks.height = accounts.block 79 | where address = $1 80 | ) 81 | select 82 | ts.timestamp, 83 | (select ARRAY[balance, mobile_balance, iot_balance] 84 | from accounts_ts 85 | where timestamp <= ts.timestamp 86 | order by
timestamp desc limit 1) 87 | from ts 88 | -------------------------------------------------------------------------------- /priv/blocks.sql: -------------------------------------------------------------------------------- 1 | -- :block_span 2 | with max as ( 3 | select height from blocks where timestamp <= $1 order by timestamp desc limit 1 4 | ), 5 | min as ( 6 | select height from blocks where timestamp >= $2 order by timestamp limit 1 7 | ) 8 | select (select height from max) as max, (select height from min) as min 9 | 10 | 11 | -- Get block stats 12 | -- :block_times 13 | with month_interval as ( 14 | select to_timestamp(time) as timestamp, 15 | time - (lead(time) over (order by height desc)) as diff_time 16 | from blocks 17 | where to_timestamp(time) > (now() - '1 month'::interval) 18 | ), 19 | week_interval as ( 20 | select * from month_interval where timestamp > (now() - '1 week'::interval) 21 | ), 22 | day_interval as ( 23 | select * from week_interval where timestamp > (now() - '24 hour'::interval) 24 | ), 25 | hour_interval as ( 26 | select * from day_interval where timestamp > (now() - '1 hour'::interval) 27 | ) 28 | select 29 | (select avg(diff_time) from hour_interval)::float as last_hour_avg, 30 | (select avg(diff_time) from day_interval)::float as last_day_avg, 31 | (select avg(diff_time) from week_interval)::float as last_week_avg, 32 | (select avg(diff_time) from month_interval)::float as last_month_avg, 33 | (select stddev(diff_time) from hour_interval)::float as last_hour_stddev, 34 | (select stddev(diff_time) from day_interval)::float as last_day_stddev, 35 | (select stddev(diff_time) from week_interval)::float as last_week_stddev, 36 | (select stddev(diff_time) from month_interval)::float as last_month_stddev 37 | -------------------------------------------------------------------------------- /priv/challenges.sql: -------------------------------------------------------------------------------- 1 | -- Get currently active and last day challenge count 2 | -- :challenges_stats 3 | with block_poc_range as ( 4 | select greatest(0, max(height) - coalesce((select value::bigint from vars_inventory where name = 'poc_challenge_interval'), 30)) as min, 5 | max(height) 6 | from blocks 7 | ), 8 | block_last_day_range as ( 9 | select min(height), max(height) from blocks 10 | where timestamp between now() - '24 hour'::interval and now() 11 | ), 12 | last_day_challenges as ( 13 | select hash from transactions 14 | where block between (select min from block_last_day_range) and (select max from block_last_day_range) 15 | and type in ('poc_receipts_v1', 'poc_receipts_v2') 16 | ) 17 | select * from 18 | (select 0) as active, 19 | (select count(*) as last_day_challenges from last_day_challenges) as last_day 20 | 21 | -------------------------------------------------------------------------------- /priv/cities.sql: -------------------------------------------------------------------------------- 1 | -- :city_list_base 2 | with online_data as ( 3 | select 4 | last(l.short_city) as short_city, l.long_city, 5 | last(l.short_state) as short_state, l.long_state, 6 | last(l.short_country) as short_country, l.long_country, 7 | last(l.city_id) as city_id, 8 | count(*) as hotspot_count, 9 | s.online as online 10 | from 11 | locations l inner join gateway_inventory g on g.location = l.location 12 | left join gateway_status s on s.address = g.address 13 | :inner_scope 14 | group by (l.long_country, l.long_state, l.long_city, s.online) 15 | ), 16 | data as ( 17 | select 18 | d.*, 19 | 
coalesce(o.hotspot_count, 0) as online_count, 20 | (d.hotspot_count - coalesce(o.hotspot_count, 0)) as offline_count, 21 | :rank 22 | from 23 | (select 24 | last(short_city) as short_city, long_city, 25 | last(short_state) as short_state, long_state, 26 | last(short_country) as short_country, long_country, 27 | last(city_id) as city_id, 28 | coalesce(sum(hotspot_count), 0) as hotspot_count 29 | from online_data 30 | group by (long_country, long_state, long_city)) d 31 | left join online_data o on d.city_id = o.city_id and o.online = 'online' 32 | ) 33 | select 34 | d.short_city, d.long_city, 35 | d.short_state, d.long_state, 36 | d.short_country, d.long_country, 37 | d.city_id, 38 | d.hotspot_count::integer, 39 | d.online_count::integer, 40 | d.offline_count::integer, 41 | d.rank 42 | from data d 43 | :scope 44 | :order 45 | :limit 46 | 47 | -- :city_list_count_order 48 | order by rank desc, city_id 49 | 50 | -- :city_list_count_rank 51 | case $1 52 | when 'hotspot_count' then d.hotspot_count::bigint 53 | when 'online_count' then coalesce(o.hotspot_count, 0)::bigint 54 | when 'offline_count' then (d.hotspot_count - coalesce(o.hotspot_count, 0))::bigint 55 | end as rank 56 | 57 | -- :city_list_count_before_scope 58 | where rank <= $2 and city_id > $3 59 | 60 | -- :city_list_name_order 61 | order by rank, city_id 62 | 63 | -- :city_list_name_rank 64 | d.long_city as rank 65 | 66 | -- :city_list_name_before_scope 67 | where rank >= $1 and city_id > $2 68 | 69 | -- :city_search_order 70 | order by rank desc, city_id 71 | 72 | -- :city_search_rank 73 | word_similarity(d.long_city, $1) as rank 74 | 75 | -- :city_search_inner_scope 76 | where l.search_city %> lower($1) 77 | 78 | -- :city_by_id_inner_scope 79 | where l.city_id = $1 80 | -------------------------------------------------------------------------------- /priv/dc_burns.sql: -------------------------------------------------------------------------------- 1 | -- :burn_list_base 2 | select d.block, d.actor, d.type, d.amount, d.oracle_price 3 | from dc_burns d 4 | :scope 5 | order by d.block desc, d.actor 6 | :limit 7 | 8 | -- :burn_list_scope 9 | where d.type = ANY($1) 10 | 11 | -- :burn_list_before_scope 12 | where d.type = ANY($1) 13 | and ((d.actor > $2 and d.block = $3) or (d.block < $3)) 14 | 15 | -- :burn_stats 16 | select $2, t.type, sum(t.amount)::bigint from ( 17 | select sum(d.amount) as amount, max(d.time) as time, d.type, d.block 18 | from dc_burns d 19 | where d.block > $1 20 | group by d.block, d.type 21 | ) t group by t.type; 22 | 23 | 24 | -- :burn_sum 25 | select d.type, sum(d.amount)::bigint as amount 26 | from dc_burns d 27 | where d.time >= extract(epoch from $1::timestamptz) 28 | and d.time <= extract(epoch from $2::timestamptz) 29 | group by d.type; 30 | 31 | -- :burn_bucketed_sum 32 | with time_range as ( 33 | select 34 | extract(epoch from low)::bigint as low, 35 | extract(epoch from high)::bigint as high 36 | from ( 37 | select 38 | timestamp as low, 39 | lag(timestamp) over (order by timestamp desc) as high 40 | from generate_series($1::timestamptz, $2::timestamptz, $3::interval) as timestamp) t 41 | where high is not null 42 | ), 43 | burn_data as ( 44 | select sum(d.amount)::bigint as amount, d.time, d.type 45 | from dc_burns d 46 | where d.time >= (select min(low) from time_range) 47 | and d.time <= (select max(high) from time_range) 48 | group by d.time, d.type 49 | ) 50 | select 51 | t.low, 52 | d.type, 53 | sum(d.amount)::bigint 54 | from time_range t 55 | left join burn_data d 56 | on d.time >= low and
d.time < high 57 | group by t.low, d.type 58 | order by t.low desc; 59 | -------------------------------------------------------------------------------- /priv/elections.sql: -------------------------------------------------------------------------------- 1 | -- Get election times 2 | -- :election_times 3 | with month_interval as ( 4 | select to_timestamp(time) as timestamp, 5 | time - (lead(time) over (order by block desc)) as diff_time 6 | from transactions 7 | where to_timestamp(time) > (now() - '1 month'::interval) 8 | and type = 'consensus_group_v1' 9 | ), 10 | week_interval as ( 11 | select * from month_interval where timestamp > (now() - '1 week'::interval) 12 | ), 13 | day_interval as ( 14 | select * from week_interval where timestamp > (now() - '24 hour'::interval) 15 | ), 16 | hour_interval as ( 17 | select * from day_interval where timestamp > (now() - '1 hour'::interval) 18 | ) 19 | select 20 | (select avg(diff_time) from hour_interval)::float as last_hour_avg, 21 | (select avg(diff_time) from day_interval)::float as last_day_avg, 22 | (select avg(diff_time) from week_interval)::float as last_week_avg, 23 | (select avg(diff_time) from month_interval)::float as last_month_avg, 24 | (select stddev(diff_time) from hour_interval)::float as last_hour_stddev, 25 | (select stddev(diff_time) from day_interval)::float as last_day_stddev, 26 | (select stddev(diff_time) from week_interval)::float as last_week_stddev, 27 | (select stddev(diff_time) from month_interval)::float as last_month_stddev 28 | -------------------------------------------------------------------------------- /priv/hotspots.sql: -------------------------------------------------------------------------------- 1 | -- :hotspot_list_base 2 | select 3 | (select max(height) from blocks) as height, 4 | g.last_block, 5 | g.first_block, 6 | g.first_timestamp, 7 | g.last_poc_challenge, 8 | g.address, 9 | g.mode, 10 | g.owner, 11 | g.payer, 12 | g.location, 13 | g.location_hex, 14 | g.nonce, 15 | g.name, 16 | g.reward_scale, 17 | g.elevation, 18 | g.gain, 19 | s.online as online_status, 20 | s.block as block_status, 21 | s.peer_timestamp as status_timestamp, 22 | s.listen_addrs as listen_addrs, 23 | l.short_street, l.long_street, 24 | l.short_city, l.long_city, 25 | l.short_state, l.long_state, 26 | l.short_country, l.long_country, 27 | l.city_id 28 | :source 29 | left join locations l on g.location = l.location 30 | left join gateway_status s on s.address = g.address 31 | :scope 32 | :order 33 | :limit 34 | 35 | -- :hotspot_list_order 36 | order by g.first_block desc, g.address 37 | 38 | -- :hotspot_list_source 39 | from gateway_inventory g 40 | 41 | -- :hotspot_source 42 | , (select greatest(g.nonce, coalesce(max(p.nonce), g.nonce)) 43 | from pending_transactions p 44 | where p.address = g.address and nonce_type = 'gateway' and status != 'failed' 45 | ) as speculative_nonce 46 | from gateway_inventory g 47 | 48 | -- :hotspot_list_scope 49 | where g.mode = ANY($1) 50 | 51 | -- :hotspot_list_before_scope 52 | where ((g.address > $1 and g.first_block = $2) or (g.first_block < $2)) 53 | and g.mode = ANY($3) 54 | 55 | -- :owner_hotspot_list_source 56 | from (select * from gateway_inventory where owner = $1) as g 57 | 58 | -- :owner_hotspot_list_scope 59 | where g.mode = ANY($2) 60 | 61 | -- :owner_hotspot_list_before_scope 62 | where ((g.address > $2 and g.first_block = $3) or (g.first_block < $3)) 63 | and g.mode = ANY($4) 64 | 65 | -- :city_hotspot_list_before_scope 66 | where l.city_id = $1 67 | and ((g.address > $2 and 
g.first_block = $3) or (g.first_block < $3)) 68 | and g.mode = ANY($4) 69 | 70 | -- :city_hotspot_list_scope 71 | where l.city_id = $1 72 | and g.mode = ANY($2) 73 | 74 | -- :hex_hotspot_list_scope 75 | where g.location_hex = $1 76 | 77 | -- :hex_hotspot_list_before_scope 78 | where g.location_hex = $1 79 | and ((g.address > $2 and g.first_block = $3) or (g.first_block < $3)) 80 | 81 | -- :hotspot_name_search_source 82 | from gateway_inventory g 83 | 84 | -- :hotspot_name_search_scope 85 | where g.name %> lower($1) 86 | 87 | -- :hotspot_name_search_order 88 | order by word_similarity(g.name, $1) desc, name 89 | 90 | -- :hotspot_location_box_search_order 91 | order by g.first_block desc, g.address 92 | 93 | -- :hotspot_location_box_search_scope 94 | where ST_Intersects(ST_MakeEnvelope($1, $2, $3, $4, 4326), l.geometry) 95 | 96 | -- :hotspot_location_box_search_before_scope 97 | where ST_Intersects(ST_MakeEnvelope($1, $2, $3, $4, 4326), l.geometry) 98 | and ((g.address > $5 and g.first_block = $6) or (g.first_block < $6)) 99 | 100 | -- :hotspot_location_distance_search_order 101 | order by ST_Distance(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography), g.address 102 | 103 | -- :hotspot_location_distance_search_source 104 | , ST_Distance(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography) as distance 105 | from gateway_inventory g 106 | 107 | -- :hotspot_location_distance_search_scope 108 | where ST_DWithin(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography, $3) 109 | 110 | -- :hotspot_location_distance_search_before_scope 111 | where ST_DWithin(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography, $3) 112 | and ((g.address > $4 and ST_Distance(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography) = $5) 113 | or (ST_Distance(ST_SetSRID(ST_MakePoint($1, $2), 4326)::geography, l.geometry::geography) > $5)) 114 | 115 | -- :hotspot_witness_list 116 | with last_assert as ( 117 | select t.block as height from transactions t inner join transaction_actors a on t.hash = a.transaction_hash 118 | where (t.type = 'assert_location_v1' or t.type = 'assert_location_v2') 119 | and a.actor = $1 and a.actor_role = 'gateway' 120 | order by t.block desc limit 1 121 | ), 122 | min as ( 123 | select GREATEST((select height from last_assert), $2) as height 124 | ), 125 | recent_transactions as ( 126 | select transaction_hash 127 | from transaction_actors 128 | where actor = $1 129 | and actor_role = 'challengee' 130 | and block >= $2 131 | ), 132 | hotspot_witnesses as ( 133 | select actor as witness 134 | from transaction_actors 135 | where transaction_hash in (select transaction_hash from recent_transactions) 136 | and actor_role = 'witness' 137 | group by actor 138 | ) 139 | :hotspot_select 140 | 141 | -- :hotspot_witnessed_list 142 | with recent_transactions as ( 143 | select transaction_hash 144 | from transaction_actors 145 | where actor = $1 146 | and actor_role = 'witness' 147 | and block >= $2 148 | ), 149 | hotspot_witnessed as ( 150 | select distinct(actor) as witnessed 151 | from transaction_actors 152 | where transaction_hash in (select transaction_hash from recent_transactions) 153 | and actor_role = 'challengee' 154 | ) 155 | :hotspot_select 156 | 157 | -- :hotspot_witness_list_source 158 | from (select * from hotspot_witnesses w inner join gateway_inventory i on (w.witness = i.address)) g 159 | 160 | -- :hotspot_witnessed_list_source 161 | from (select * from hotspot_witnessed w inner join 
gateway_inventory i on (w.witnessed = i.address)) g 162 | 163 | -- :hotspot_elected_list 164 | with field_members as ( 165 | select fields->'members' as members 166 | from transactions 167 | where type = 'consensus_group_v1' :filter 168 | order by block desc 169 | limit 1 170 | ), 171 | members as ( 172 | select * 173 | from jsonb_array_elements_text((select members from field_members)) 174 | ) 175 | :hotspot_select 176 | 177 | -- :hotspot_elected_list_scope 178 | where g.address in (select * from members) 179 | 180 | -- :hotspot_bucketed_witnesses_source 181 | (select 182 | (select count(*) from jsonb_object_keys(jsonb_merge_agg(w.witnesses))), 183 | w.time 184 | from witness_data w 185 | group by w.time, w.address) 186 | 187 | -- :hotspot_bucketed_witnesses_base 188 | with time_range as ( 189 | select 190 | extract(epoch from low)::bigint as low, 191 | extract(epoch from high)::bigint as high 192 | from ( 193 | select 194 | timestamp as low, 195 | lag(timestamp) over (order by timestamp desc) as high 196 | from generate_series($2::timestamptz, $3::timestamptz, $4::interval) as timestamp) t 197 | where high is not null 198 | ), 199 | witness_data as ( 200 | select 201 | g.address, 202 | g.witnesses, 203 | g.time 204 | from gateways g 205 | :scope 206 | and g.time >= (select min(low) from time_range) and g.time <= (select max(high) from time_range) 207 | ) 208 | select 209 | to_timestamp(t.low) as timestamp, 210 | coalesce(min(d.count), 0) as min, 211 | coalesce(max(d.count), 0) as max, 212 | coalesce(percentile_cont(0.5) within group (order by d.count), 0)::float as median, 213 | coalesce(avg(d.count), 0)::float as avg, 214 | coalesce(stddev(d.count), 0)::float as stddev 215 | from time_range t 216 | left join :source d 217 | on d.time >= low and d.time < high 218 | group by t.low 219 | order by t.low desc; 220 | 221 | -- :hotspot_bucketed_challenges_source 222 | (select 223 | count(d.time) as count, 224 | d.time 225 | from challenge_data d 226 | group by d.time, d.address) 227 | 228 | -- :hotspot_bucketed_challenges_base 229 | with time_range as ( 230 | select 231 | extract(epoch from low)::bigint as low, 232 | extract(epoch from high)::bigint as high 233 | from ( 234 | select 235 | timestamp as low, 236 | lag(timestamp) over (order by timestamp desc) as high 237 | from generate_series($2::timestamptz, $3::timestamptz, $4::interval) as timestamp) t 238 | where high is not null 239 | ), 240 | challenge_data as ( 241 | select 242 | a.actor as address, 243 | b.time 244 | from transaction_actors a inner join blocks b on b.height = a.block 245 | :scope 246 | and b.time >= (select min(low) from time_range) and b.time <= (select max(high) from time_range) 247 | ) 248 | select 249 | to_timestamp(t.low) as timestamp, 250 | coalesce(min(d.count), 0)::bigint as min, 251 | coalesce(max(d.count), 0)::bigint as max, 252 | coalesce(sum(d.count), 0)::bigint as sum, 253 | coalesce(percentile_cont(0.5) within group (order by d.count), 0)::float as median, 254 | coalesce(avg(d.count), 0)::float as avg, 255 | coalesce(stddev(d.count), 0)::float as stddev 256 | from time_range t 257 | left join :source d 258 | on d.time >= low and d.time < high 259 | group by t.low 260 | order by t.low desc; 261 | -------------------------------------------------------------------------------- /priv/locations.sql: -------------------------------------------------------------------------------- 1 | -- :location_list_base 2 | select 3 | l.short_street, l.long_street, 4 | l.short_city, l.long_city, 5 | l.short_state, 
l.long_state, 6 | l.short_country, l.long_country, 7 | l.city_id, 8 | l.location 9 | from locations l 10 | :scope 11 | 12 | -------------------------------------------------------------------------------- /priv/oracles.sql: -------------------------------------------------------------------------------- 1 | -- :oracle_price_list_base 2 | select p.block, p.price, b.timestamp 3 | from oracle_prices p inner join blocks b on p.block = b.height 4 | :scope 5 | order by p.block desc limit :limit 6 | 7 | -- :oracle_price_predictions 8 | select time, price 9 | from oracle_price_predictions 10 | order by time DESC 11 | 12 | -- :oracle_price_stats 13 | with price_data as ( 14 | select 15 | p.price 16 | from oracle_prices p inner join blocks b on p.block = b.height 17 | where b.time >= extract(epoch from $1::timestamptz) 18 | and b.time <= extract(epoch from $2::timestamptz) 19 | ) 20 | select 21 | coalesce(min(d.price) / 100000000, 0)::float as min, 22 | coalesce(max(d.price) / 100000000, 0)::float as max, 23 | coalesce(percentile_cont(0.5) within group (order by d.price) / 100000000, 0)::float as median, 24 | coalesce(avg(d.price) / 100000000, 0)::float as avg, 25 | coalesce(stddev(d.price) / 100000000, 0)::float as stddev 26 | from price_data d -------------------------------------------------------------------------------- /priv/ouis.sql: -------------------------------------------------------------------------------- 1 | -- :oui_list_base 2 | select 3 | (select max(height) from blocks) as height, 4 | l.oui, 5 | l.owner, 6 | l.nonce, 7 | l.addresses, 8 | l.subnets, 9 | l.first_block 10 | from oui_inventory l 11 | :scope 12 | :order 13 | :limit 14 | 15 | -- :oui_list_order 16 | order by l.first_block desc, l.oui 17 | 18 | -- :oui_list_before_scope 19 | where (l.oui > $1 and l.first_block = $2) or (l.first_block < $2) 20 | 21 | -- :owner_oui_list_scope 22 | where l.owner = $1 23 | 24 | -- :owner_oui_list_before_scope 25 | where l.owner = $1 26 | and ((l.oui > $2 and l.first_block = $3) or (l.first_block < $3)) 27 | 28 | -- :oui_active 29 | select count(*) from oui_inventory; 30 | -------------------------------------------------------------------------------- /priv/rewards.sql: -------------------------------------------------------------------------------- 1 | -- :reward_fields 2 | r.block, r.transaction_hash, to_timestamp(r.time) as timestamp, r.account, r.gateway, r.amount, r.type 3 | 4 | -- Make sure that marker fields and fields are equivalent except for the marker 5 | -- placeholder! 
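-- As an illustration only (not a named query used by the app): with :fields
-- bound to the :reward_marker_fields fragment below, :marker bound to
-- r.gateway, and :scope bound to an account filter such as
-- "where r.account = $1" (the scope is supplied by the route handler),
-- :reward_list_base expands to roughly:
--
--   select r.block, r.transaction_hash, to_timestamp(r.time) as timestamp,
--       r.account, r.gateway, r.amount, r.type, r.gateway
--   from rewards r
--   where r.account = $1
--   and r.block >= $2 and r.block < $3
--   order by r.block desc, r.gateway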
6 | -- :reward_marker_fields 7 | r.block, r.transaction_hash, to_timestamp(r.time) as timestamp, r.account, r.gateway, r.amount, r.type, :marker 8 | 9 | -- :reward_list_base 10 | select :fields 11 | from rewards r 12 | :scope 13 | and r.block >= $2 and r.block < $3 14 | order by r.block desc, :marker 15 | 16 | -- :reward_list_rem_base 17 | select :fields 18 | from rewards r 19 | :scope 20 | and r.block = $2 and :marker> $3 21 | order by :marker 22 | 23 | -- :reward_block_list_base 24 | select :fields 25 | from rewards r 26 | :scope 27 | and r.block = $2 28 | order by r.gateway, r.type 29 | offset $3 fetch next $4 rows only 30 | 31 | -- :reward_sum_hotspot_source 32 | (select 33 | sum(r.amount) as amount 34 | from reward_data r 35 | group by r.gateway) 36 | 37 | -- :reward_sum_time_source 38 | (select 39 | sum(r.amount) as amount 40 | from reward_data r 41 | group by r.time) 42 | 43 | -- :reward_sum_validator_source 44 | (select 45 | sum(r.amount) as amount 46 | from reward_data r 47 | where r.gateway in (select address from validator_inventory) 48 | group by r.gateway) 49 | 50 | -- :reward_sum_base 51 | with reward_data as ( 52 | select 53 | r.amount, 54 | r.gateway, 55 | r.time 56 | from rewards r 57 | :scope 58 | and r.block >= $2 59 | and r.block <= $3 60 | ) 61 | select 62 | coalesce(min(d.amount) / 100000000, 0)::float as min, 63 | coalesce(max(d.amount) / 100000000, 0)::float as max, 64 | coalesce(sum(d.amount), 0)::bigint as sum, 65 | coalesce(sum(d.amount) / 100000000, 0)::float as total, 66 | coalesce(percentile_cont(0.5) within group (order by d.amount) / 100000000, 0)::float as median, 67 | coalesce(avg(d.amount) / 100000000, 0)::float as avg, 68 | coalesce(stddev(d.amount) / 100000000, 0)::float as stddev 69 | from :source d 70 | 71 | -- Bucket reward_data by timestamp and gateway to calculate statistics over hotspot totals in a bucket 72 | -- rather than individual rewards.
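-- Worked example (illustration only): in :reward_bucketed_base below,
-- generate_series($2, $3, $4) with $2 = '2021-01-01', $3 = '2021-01-08' and
-- $4 = '1 day' yields 8 timestamps; lag(timestamp) over (order by timestamp
-- desc) pairs each timestamp with the next larger one, the null high on the
-- newest timestamp is filtered out, and the remaining 7 pairs become the
-- [low, high) buckets that rewards are grouped into.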
73 | -- :reward_bucketed_hotspot_source 74 | (select 75 | sum(r.amount) as amount, 76 | r.time 77 | from reward_data r 78 | group by r.time, r.gateway) 79 | 80 | -- Bucket global reward_data by timestamp only 81 | -- :reward_bucketed_time_source 82 | (select 83 | sum(r.amount) as amount, 84 | r.time 85 | from reward_data r 86 | group by r.time) 87 | 88 | -- Bucket reward_data by timestamp and gateway to calculate statistics over totals in a bucket 89 | -- :reward_bucketed_validator_source 90 | (select 91 | sum(r.amount) as amount, 92 | r.time 93 | from reward_data r 94 | where r.gateway in (select address from validator_inventory) 95 | group by r.time, r.gateway) 96 | 97 | -- :reward_bucketed_base 98 | with time_range as ( 99 | select 100 | extract(epoch from low)::bigint as low, 101 | extract(epoch from high)::bigint as high 102 | from ( 103 | select 104 | timestamp as low, 105 | lag(timestamp) over (order by timestamp desc) as high 106 | from generate_series($2::timestamptz, $3::timestamptz, $4::interval) as timestamp) t 107 | where high is not null 108 | ), 109 | reward_data as ( 110 | select 111 | r.amount, 112 | r.gateway, 113 | r.time 114 | from rewards r 115 | :scope 116 | and r.time >= (select min(low) from time_range) and r.time <= (select max(high) from time_range) 117 | ) 118 | select 119 | to_timestamp(t.low) as timestamp, 120 | coalesce(min(d.amount::float) / 100000000, 0) as min, 121 | coalesce(max(d.amount::float) / 100000000, 0) as max, 122 | coalesce(sum(d.amount), 0)::bigint as sum, 123 | coalesce(sum(d.amount::float) / 100000000, 0)::float as total, 124 | coalesce(percentile_cont(0.5) within group (order by d.amount) / 100000000, 0)::float as median, 125 | coalesce(avg(d.amount) / 100000000, 0)::float as avg, 126 | coalesce(stddev(d.amount) / 100000000, 0)::float as stddev 127 | from time_range t 128 | left join :source d 129 | on d.time >= low and d.time < high 130 | group by t.low 131 | order by t.low desc; 132 | 133 | -------------------------------------------------------------------------------- /priv/snapshots.sql: -------------------------------------------------------------------------------- 1 | -- :snapshot_list_base 2 | select 3 | b.height, b.snapshot_hash 4 | from blocks b 5 | where b.snapshot_hash is not null and b.snapshot_hash != '' 6 | :scope 7 | order by height desc 8 | :limit 9 | 10 | -- :snapshot_list_before_scope 11 | and b.height < $1 -------------------------------------------------------------------------------- /priv/state_channels.sql: -------------------------------------------------------------------------------- 1 | -- Get last day state_channel count 2 | -- :state_channels_stats 3 | with block_last_day_range as ( 4 | select min(height), max(height) from blocks 5 | where timestamp between now() - '24 hour'::interval and now() 6 | ), 7 | last_day_state_channels as ( 8 | select hash from transactions 9 | where block between (select min from block_last_day_range) and (select max from block_last_day_range) 10 | and type = 'state_channel_close_v1' 11 | ) 12 | select * from 13 | (select count(*) as last_day_state_channels from last_day_state_channels) as last_day 14 | 15 | -------------------------------------------------------------------------------- /priv/stats.sql: -------------------------------------------------------------------------------- 1 | -- Get all global count stats 2 | -- :stats_counts 3 | select name, value from stats_inventory 4 | 5 | -- Get token supply 6 | -- :stats_token_supply 7 | select 8 |
coalesce((select (sum(stake) / 100000000)::float from validator_inventory), 0) 9 | + coalesce((select (sum(balance) / 100000000)::float from account_inventory), 0) 10 | 11 | -------------------------------------------------------------------------------- /priv/txns.sql: -------------------------------------------------------------------------------- 1 | -- :txn_list_base 2 | select 3 | t.block, 4 | t.time, 5 | t.hash, 6 | t.type, 7 | :fields 8 | :source 9 | :scope 10 | :order 11 | :limit 12 | 13 | -- :txn_get_scope 14 | where hash = $1 15 | 16 | -- :txn_list_scope 17 | where t.type = ANY($1) 18 | and block >= $2 and block < $3 19 | 20 | -- :txn_list_fields 21 | t.fields 22 | 23 | -- :txn_actor_list_fields 24 | txn_filter_actor_activity($2, t.type, t.fields) as fields 25 | 26 | -- :txn_list_order 27 | order by t.block desc, t.hash 28 | 29 | -- :txn_list_limit 30 | limit $4 31 | 32 | -- :txn_list_source 33 | from transactions t 34 | 35 | -- :txn_list_rem_source 36 | from ( 37 | select * from transactions tr 38 | where tr.type = ANY($1) 39 | and tr.block = $2 40 | order by tr.hash 41 | ) as t 42 | 43 | -- :txn_list_rem_scope 44 | where t.hash > $3 45 | 46 | -- :txn_actor_list_source 47 | from ( 48 | select a.block, last(tr.time) as time, a.transaction_hash as hash, last(tr.type) as type, a.actor, last(tr.fields) as fields 49 | from transaction_actors a inner join transactions tr on a.transaction_hash = tr.hash 50 | where a.block >= $3 and a.block < $4 51 | :actor_scope 52 | and tr.type = ANY($2) 53 | group by (a.transaction_hash, a.actor, a.block) 54 | order by a.block desc, a.transaction_hash 55 | limit $5 56 | ) as t 57 | 58 | -- :txn_actor_list_rem_source 59 | from ( 60 | select a.block, last(tr.time) as time, a.transaction_hash as hash, last(tr.type) as type, a.actor, last(tr.fields) as fields 61 | from transaction_actors a inner join transactions tr on a.transaction_hash = tr.hash 62 | where a.block = $3 63 | :actor_scope 64 | and tr.type = ANY($2) 65 | and tr.hash > $4 66 | group by (a.transaction_hash, a.actor, a.block) 67 | order by a.transaction_hash 68 | limit $5 69 | ) as t 70 | 71 | -- :txn_actor_role_list_source 72 | from ( 73 | select a.block, tr.time, a.transaction_hash as hash, tr.type, a.actor, a.actor_role as role 74 | from transaction_actors a inner join transactions tr on a.transaction_hash = tr.hash 75 | where a.block >= $3 and a.block < $4 76 | :actor_scope 77 | and tr.type = ANY($2) 78 | order by a.block desc, a.transaction_hash 79 | limit $5 80 | ) as t 81 | 82 | -- :txn_actor_role_list_rem_source 83 | from ( 84 | select a.block, tr.time, a.transaction_hash as hash, tr.type, a.actor, a.actor_role as role 85 | from transaction_actors a inner join transactions tr on a.transaction_hash = tr.hash 86 | where a.block = $3 87 | :actor_scope 88 | and tr.type = ANY($2) 89 | and tr.hash > $4 90 | order by a.transaction_hash 91 | limit $5 92 | ) as t 93 | 94 | 95 | -- :txn_activity_list_fields 96 | txn_filter_actor_activity(t.actor, t.type, t.fields) as fields 97 | 98 | -- :txn_role_list_fields 99 | t.role as fields 100 | 101 | -- :txn_actor_scope 102 | and a.actor = $1 103 | 104 | -- :txn_hotspot_activity_actor_scope 105 | and a.actor = $1 106 | and a.actor_role not in ('payer', 'payee', 'owner') 107 | 108 | -- :txn_owned_hotspot_actor_scope 109 | and a.actor in (select address from gateway_inventory where owner = $1) 110 | 111 | -- :txn_account_activity_actor_scope 112 | and a.actor = $1 113 | and a.actor_role in ('payer', 'payee', 'owner') 114 | 115 | -- 
:txn_validator_activity_actor_scope 116 | and a.actor = $1 117 | and a.actor_role = 'validator' 118 | 119 | -- :txn_actor_count_base 120 | select type, count(*) 121 | from ( 122 | select distinct on (tr.block, tr.hash, a.actor) tr.type 123 | from transaction_actors a inner join transactions tr on a.transaction_hash = tr.hash 124 | where a.actor = $1 125 | :actor_scope 126 | and tr.type = ANY($2)) as t 127 | group by t.type 128 | 129 | -- :txn_location 130 | select 131 | l.short_street, l.long_street, 132 | l.short_city, l.long_city, 133 | l.short_state, l.long_state, 134 | l.short_country, l.long_country, 135 | l.city_id 136 | from locations l 137 | where location = $1 138 | 139 | -- :txn_hotspot_activity_min_block 140 | select first_block from gateway_inventory where address = $1 141 | 142 | -- :txn_account_activity_min_block 143 | select first_block from account_inventory where address = $1 144 | 145 | -- :txn_oracle_activity_min_block 146 | select min(block) from transaction_actors 147 | where actor = $1 and actor_role = 'oracle' 148 | 149 | -- :txn_validator_activity_min_block 150 | select first_block from validator_inventory where address = $1 151 | 152 | -- :txn_genesis_min_block 153 | select 1 -------------------------------------------------------------------------------- /priv/validators.sql: -------------------------------------------------------------------------------- 1 | -- :validator_list_base 2 | select 3 | (select max(height) from blocks) as height, 4 | l.address, 5 | l.name, 6 | l.owner, 7 | l.stake, 8 | l.status, 9 | l.last_heartbeat, 10 | l.version_heartbeat, 11 | l.penalty, 12 | l.penalties, 13 | l.nonce, 14 | l.first_block, 15 | s.online as online_status, 16 | s.block as block_status, 17 | s.peer_timestamp as status_timestamp, 18 | s.listen_addrs as listen_addrs, 19 | s.grpc_addr as grpc_addr 20 | :source 21 | from validator_inventory l 22 | left join validator_status s on s.address = l.address 23 | :scope 24 | :order 25 | :limit 26 | 27 | -- :validator_source 28 | , (select count(*) from transaction_actors where actor = $1 and actor_role = 'consensus_member') as cg_count 29 | 30 | -- :validator_list_order 31 | order by l.first_block desc, l.address 32 | 33 | -- :validator_list_before_scope 34 | where (l.address > $1 and l.first_block = $2) or (l.first_block < $2) 35 | 36 | -- :owner_validator_list_scope 37 | where l.owner = $1 38 | 39 | -- :owner_validator_list_before_scope 40 | where l.owner = $1 41 | and ((l.address > $2 and l.first_block = $3) or (l.first_block < $3)) 42 | 43 | 44 | -- :validator_elected_list 45 | with field_members as ( 46 | select fields->'members' as members 47 | from transactions 48 | where type = 'consensus_group_v1' :filter 49 | order by block desc 50 | limit 1 51 | ), 52 | members as ( 53 | select * 54 | from jsonb_array_elements_text((select members from field_members)) 55 | ) 56 | :validator_select 57 | 58 | -- :validator_elected_list_scope 59 | where l.address in (select * from members) 60 | 61 | -- :validator_name_search_scope 62 | where l.name %> lower($1) 63 | 64 | -- Validator stats 65 | -- :validator_stats 66 | select v.status, count(*), (sum(stake) / 100000000)::float 67 | from validator_inventory v 68 | group by v.status 69 | 70 | 71 | -- :validator_active 72 | select * from stats_inventory where name = 'validators' 73 | -------------------------------------------------------------------------------- /priv/vars.sql: -------------------------------------------------------------------------------- 1 | -- :var_list 2 | SELECT 3 | 
v.name, 4 | v.type, 5 | v.value 6 | from vars_inventory v 7 | where v.name not like 'region_%' 8 | order by name 9 | 10 | -- :var_get 11 | SELECT v.name, v.type, v.value from vars_inventory v where v.name = $1 12 | 13 | -- :var_list_named 14 | SELECT v.name, v.type, v.value from vars_inventory v where v.name = ANY($1) 15 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | {erl_opts, [ 3 | debug_info, 4 | {parse_transform, lager_transform}, 5 | warnings_as_errors 6 | ]}. 7 | 8 | {cover_export_enabled, true}. 9 | 10 | {cover_enabled, true}. 11 | 12 | {covertool, [ 13 | {coverdata_files, [ 14 | "ct.coverdata", 15 | "eunit.coverdata" 16 | ]} 17 | ]}. 18 | 19 | {cover_excl_mods, [ 20 | blockchain_http_app 21 | ]}. 22 | 23 | {project_plugins, [ 24 | covertool 25 | ]}. 26 | 27 | {plugins, [erlfmt]}. 28 | 29 | {deps, [ 30 | lager, 31 | jiffy, 32 | recon, 33 | h3, 34 | iso8601, 35 | libp2p_crypto, 36 | {base64url, "1.0.1"}, 37 | {elli, "3.3.0"}, 38 | {epgsql, "4.3.0"}, 39 | {eql, "0.2.0"}, 40 | {erl_angry_purple_tiger, 41 | {git, "https://github.com/helium/erl_angry_purple_tiger.git", {branch, "master"}}}, 42 | {psql_migration, {git, "https://github.com/helium/psql-migration.git", {branch, "master"}}}, 43 | {helium_proto, {git, "https://github.com/helium/proto.git", {branch, "master"}}}, 44 | {envloader, {git, "https://github.com/nuex/envloader.git", {branch, "master"}}}, 45 | {dispcount, {git, "https://github.com/Vagabond/dispcount", {branch, "adt/transaction"}}}, 46 | {throttle, {git, "https://github.com/helium/throttle", {branch, "adt/custom-increment"}}} 47 | ]}. 48 | 49 | {xref_checks, [ 50 | undefined_function_calls, 51 | undefined_functions 52 | ]}. 53 | 54 | {shell, [{apps, [lager, envloader, epgsql]}]}. 55 | 56 | {ct_opts, [{sys_config, ["config/sys.config"]}]}. 57 | 58 | {relx, [ 59 | {release, {blockchain_http, git}, [ 60 | blockchain_http 61 | ]}, 62 | {vm_args, "./config/vm.args"}, 63 | {sys_config, "./config/sys.config"}, 64 | {extended_start_script, true}, 65 | {include_src, true}, 66 | {overlay, [ 67 | {template, "config/vm.args", "{{output_dir}}/releases/{{release_version}}/vm.args"}, 68 | {copy, "priv/*.sql", "priv/"} 69 | ]} 70 | ]}. 71 | 72 | {profiles, [ 73 | {prod, [ 74 | {relx, [ 75 | {dev_mode, false}, 76 | {include_src, false}, 77 | {include_erts, true} 78 | ]} 79 | ]}, 80 | {test, [ 81 | {deps, [meck]} 82 | ]} 83 | ]}. 
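%% Usage sketch for the configuration above (standard rebar3 invocations,
%% shown for orientation rather than taken from this repo's docs):
%%
%%   rebar3 compile          %% build, with warnings treated as errors
%%   rebar3 release          %% dev-mode relx release
%%   rebar3 as prod release  %% self-contained release including ERTS
%%   rebar3 as test ct       %% run the common test suites (pulls in meck)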
84 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.2.0", 2 | [{<<"base32">>,{pkg,<<"base32">>,<<"0.1.0">>},2}, 3 | {<<"base64url">>,{pkg,<<"base64url">>,<<"1.0.1">>},0}, 4 | {<<"dispcount">>, 5 | {git,"https://github.com/Vagabond/dispcount", 6 | {ref,"c5bde4945ab31089823c30ab09d60a944201d776"}}, 7 | 0}, 8 | {<<"ecc_compact">>,{pkg,<<"ecc_compact">>,<<"1.0.5">>},1}, 9 | {<<"elli">>,{pkg,<<"elli">>,<<"3.3.0">>},0}, 10 | {<<"enacl">>,{pkg,<<"enacl">>,<<"1.1.1">>},1}, 11 | {<<"envloader">>, 12 | {git,"https://github.com/nuex/envloader.git", 13 | {ref,"27a97e04f35c554995467b9236d8ae0188d468c7"}}, 14 | 0}, 15 | {<<"epgsql">>,{pkg,<<"epgsql">>,<<"4.3.0">>},0}, 16 | {<<"eql">>,{pkg,<<"eql">>,<<"0.2.0">>},0}, 17 | {<<"erl_angry_purple_tiger">>, 18 | {git,"https://github.com/helium/erl_angry_purple_tiger.git", 19 | {ref,"c5476b6639314a75a99400c9dfa7603b24a6d18a"}}, 20 | 0}, 21 | {<<"erl_base58">>,{pkg,<<"erl_base58">>,<<"0.0.1">>},1}, 22 | {<<"getopt">>,{pkg,<<"getopt">>,<<"1.0.2">>},1}, 23 | {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},1}, 24 | {<<"h3">>,{pkg,<<"h3">>,<<"3.6.4">>},0}, 25 | {<<"helium_proto">>, 26 | {git,"https://github.com/helium/proto.git", 27 | {ref,"13a093fbd3b2109c1ce91fa17bbd083828e88507"}}, 28 | 0}, 29 | {<<"iso8601">>,{pkg,<<"iso8601">>,<<"1.3.3">>},0}, 30 | {<<"jiffy">>,{pkg,<<"jiffy">>,<<"1.1.1">>},0}, 31 | {<<"lager">>,{pkg,<<"lager">>,<<"3.9.2">>},0}, 32 | {<<"libp2p_crypto">>,{pkg,<<"libp2p_crypto">>,<<"1.4.1">>},0}, 33 | {<<"multiaddr">>,{pkg,<<"multiaddr">>,<<"1.1.3">>},1}, 34 | {<<"multihash">>,{pkg,<<"multihash">>,<<"2.1.0">>},1}, 35 | {<<"psql_migration">>, 36 | {git,"https://github.com/helium/psql-migration.git", 37 | {ref,"4c4bdd660ca1a8d79bd96cc8ec43ab2433f0a37f"}}, 38 | 0}, 39 | {<<"recon">>,{pkg,<<"recon">>,<<"2.5.2">>},0}, 40 | {<<"small_ints">>,{pkg,<<"small_ints">>,<<"0.1.0">>},2}, 41 | {<<"throttle">>, 42 | {git,"https://github.com/helium/throttle", 43 | {ref,"6614e5d22faa8d66cedd5a63386c7fc8db56d98c"}}, 44 | 0}]}. 
45 | [ 46 | {pkg_hash,[ 47 | {<<"base32">>, <<"044F6DC95709727CA2176F3E97A41DDAA76B5BC690D3536908618C0CB32616A2">>}, 48 | {<<"base64url">>, <<"F8C7F2DA04CA9A5D0F5F50258F055E1D699F0E8BF4CFDB30B750865368403CF6">>}, 49 | {<<"ecc_compact">>, <<"C9696FF16A1D721F2DC8CCD760440B8F45586522974C5C7BD88640822E08AACA">>}, 50 | {<<"elli">>, <<"089218762A7FF3D20AE81C8E911BD0F73EE4EE0ED85454226D1FC6B4FFF3B4F6">>}, 51 | {<<"enacl">>, <<"F65DC64D9BFF2D8A534CB77AEF14DA5E7A2FA148987D87856F79A4745C9C2627">>}, 52 | {<<"epgsql">>, <<"26D9CF04D74773D1DC4DA24AD39E926B34E107232591FE1866EFDFBC0A098396">>}, 53 | {<<"eql">>, <<"598ABC19A1CF6AFB8EF89FFEA869F43BAEBB1CEC3260DD5065112FEE7D8CE3E2">>}, 54 | {<<"erl_base58">>, <<"37710854461D71DF338E73C65776302DB41C4BAB4674D2EC134ED7BCFC7B5552">>}, 55 | {<<"getopt">>, <<"33D9B44289FE7AD08627DDFE1D798E30B2DA0033B51DA1B3A2D64E72CD581D02">>}, 56 | {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>}, 57 | {<<"h3">>, <<"E07E353C59058EA69FBF3C7E5E82C2DF9AF7732EE89C075D34E961F768B8F030">>}, 58 | {<<"iso8601">>, <<"994AFF5DFE760F14A8C4F2D8C3CF500371BF1A8CF309C3C0CB510401064223E0">>}, 59 | {<<"jiffy">>, <<"ACA10F47AA91697BF24AB9582C74E00E8E95474C7EF9F76D4F1A338D0F5DE21B">>}, 60 | {<<"lager">>, <<"4CAB289120EB24964E3886BD22323CB5FEFE4510C076992A23AD18CF85413D8C">>}, 61 | {<<"libp2p_crypto">>, <<"CB93FE834A53059671C547957C1235C9426E5B7EE91D156AD5D90A9710BF4AF0">>}, 62 | {<<"multiaddr">>, <<"978E58E28F6FACAF428C87AF933612B1E2F3F2775F1794EDA5E831A4EACD2984">>}, 63 | {<<"multihash">>, <<"F084F7C6BEC062F0C0E82AE18CFDC8DAEC8F4FAA4C8E1ACE0B9C676A9323162F">>}, 64 | {<<"recon">>, <<"CBA53FA8DB83AD968C9A652E09C3ED7DDCC4DA434F27C3EAA9CA47FFB2B1FF03">>}, 65 | {<<"small_ints">>, <<"82A824C8794A2DDC73CB5CD00EAD11331DB296521AD16A619C13D668572B868A">>}]}, 66 | {pkg_hash_ext,[ 67 | {<<"base32">>, <<"10A73951D857D8CB1ECEEA8EB96C6941F6A76E105947AD09C2B73977DEE07638">>}, 68 | {<<"base64url">>, <<"F9B3ADD4731A02A9B0410398B475B33E7566A695365237A6BDEE1BB447719F5C">>}, 69 | {<<"ecc_compact">>, <<"3DB649D21AE7FEDF460B4D650B99813B761DD905EED6286420C2B2C7E169A356">>}, 70 | {<<"elli">>, <<"698B13B33D05661DB9FE7EFCBA41B84825A379CCE86E486CF6AFF9285BE0CCF8">>}, 71 | {<<"enacl">>, <<"60D329AC3976008F774E21ABA254671104976D61A792287615BB26816F09EA0F">>}, 72 | {<<"epgsql">>, <<"37961B8550EED7474423EBA7961355C503E85DCB9FFA55FBB79AD9BA2B20AF03">>}, 73 | {<<"eql">>, <<"513BE6B36EE86E8292B2B7475C257ABB66CED5AAD40CBF7AD21E233D0A3BF51D">>}, 74 | {<<"erl_base58">>, <<"41E8EC356C5C5558A45682F61F80725789AE9A11BD1CC7D5C73CDE1E3B546DD2">>}, 75 | {<<"getopt">>, <<"A0029AEA4322FB82A61F6876A6D9C66DC9878B6CB61FAA13DF3187384FD4EA26">>}, 76 | {<<"goldrush">>, <<"99CB4128CFFCB3227581E5D4D803D5413FA643F4EB96523F77D9E6937D994CEB">>}, 77 | {<<"h3">>, <<"99DB71588F14CF78C4C086BC825B0981D3D849525F9D5BFA477281DB7CC59B76">>}, 78 | {<<"iso8601">>, <<"BCC7767D691E4D8A26E713F48DA51ABD951BEC4E071AE841F371766F96B46834">>}, 79 | {<<"jiffy">>, <<"62E1F0581C3C19C33A725C781DFA88410D8BFF1BBAFC3885A2552286B4785C4C">>}, 80 | {<<"lager">>, <<"7F904D9E87A8CB7E66156ED31768D1C8E26EBA1D54F4BC85B1AA4AC1F6340C28">>}, 81 | {<<"libp2p_crypto">>, <<"A21C278896DA65E31A2C8FE767784F1493289CD956E172E7BA247EAB75207037">>}, 82 | {<<"multiaddr">>, <<"980D3EA5EB0EB2EFC51E3D10953F17447D417C49BD2FCD7FC6A2A42D1F66D5EE">>}, 83 | {<<"multihash">>, <<"E73AD5D0099DBFFB4EE429A78436B605AFE2530ED684AB36BB86733AB65707C8">>}, 84 | {<<"recon">>, <<"2C7523C8DEE91DFF41F6B3D63CBA2BD49EB6D2FE5BF1EEC0DF7F87EB5E230E1C">>}, 85 | {<<"small_ints">>, 
<<"00B3BFF6C446711F8EA4EA942056F375E0F13C7983CC3950C6EA1DE014C7C416">>}]} 86 | ]. 87 | -------------------------------------------------------------------------------- /rebar3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/helium/blockchain-http/0dd476615f0f7f8aa7dfcac80e68e0a14520c21b/rebar3 -------------------------------------------------------------------------------- /src/bh_burn_type.erl: -------------------------------------------------------------------------------- 1 | -module(bh_burn_type). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [burn_type]. 11 | 12 | encode(Atom, burn_type, Choices) when is_atom(Atom) -> 13 | true = lists:member(Atom, Choices), 14 | atom_to_binary(Atom, utf8); 15 | encode(Binary, burn_type, Choices) -> 16 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 17 | Binary. 18 | 19 | decode(Bin, burn_type, Choices) -> 20 | Atom = binary_to_existing_atom(Bin, utf8), 21 | true = lists:member(Atom, Choices), 22 | Atom. 23 | -------------------------------------------------------------------------------- /src/bh_cache.erl: -------------------------------------------------------------------------------- 1 | %% @doc In memory cache for expensive query results 2 | -module(bh_cache). 3 | 4 | -behaviour(gen_server). 5 | 6 | -define(TBL_NAME, '__bh_cache_table'). 7 | -define(ETS_OPTS, [named_table, {keypos, 2}, {read_concurrency, true}]). 8 | % seconds 9 | -define(DEFAULT_TTL, 60). 10 | % milliseconds 11 | -define(TICK_INTERVAL, 15000). 12 | % 1000 microseconds to 1 millisecond 13 | -define(TO_MILLIS, 1000). 14 | 15 | %% public API 16 | -export([ 17 | start_link/0, 18 | get/1, 19 | get/2, 20 | get/3, 21 | put/2, 22 | put/3 23 | ]). 24 | 25 | %% required callbacks 26 | -export([ 27 | init/1, 28 | handle_cast/2, 29 | handle_call/3, 30 | handle_info/2 31 | ]). 32 | 33 | -record(state, { 34 | tid = undefined :: ets:tid(), 35 | tref = undefined :: reference(), 36 | hits = 0 :: non_neg_integer(), 37 | misses = [] :: [non_neg_integer()] 38 | % input is microsecs, 39 | % but list is milliseconds 40 | }). 41 | 42 | -record(entry, { 43 | key = undefined :: term(), 44 | value = undefined :: term(), 45 | expire_ts = 0 :: non_neg_integer() 46 | }). 47 | 48 | -spec start_link() -> {ok, Pid :: pid()} | ignore | {error, Error :: term()}. 49 | start_link() -> 50 | gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). 51 | 52 | -spec get(Key :: term()) -> not_found | {ok, Value :: term()}. 53 | %% @doc Attempt to lookup a value associated with the given key 54 | %% in the cache. If the key is not found, return `not_found'. 55 | get(Key) -> 56 | case ets:lookup(?TBL_NAME, Key) of 57 | [] -> 58 | not_found; 59 | [Entry] -> 60 | maybe_expired(Entry#entry.value, erlang:system_time(seconds), Entry#entry.expire_ts) 61 | end. 62 | 63 | -spec get( 64 | Key :: term(), 65 | LookupFun :: fun(() -> Value :: term()) 66 | ) -> {ok, Value :: term()}. 67 | %% @doc Attempt to lookup a value associated with the given key 68 | %% in the cache. If the key is not found, execute the provided 69 | %% zero arity fun to return a value to cache. 70 | %% 71 | %% This call is equivalent to `get/3' with all options defaulted. 72 | get(Key, LookupFun) -> 73 | get(Key, LookupFun, #{}). 74 | 75 | -spec get( 76 | Key :: term(), 77 | LookupFun :: fun(() -> Value :: term()), 78 | Options :: map() 79 | ) -> {ok, Value :: term()}. 
80 | %% @doc Attempt to lookup a value associated with the given key 81 | %% in the cache. If the key is not found, execute the provided 82 | %% zero arity fun to return a value to cache. 83 | %% 84 | %% The only defined option at this time is `ttl' which 85 | %% defines the number of seconds to keep a value in the cache. 86 | %% The default time is 60 seconds. 87 | get(Key, LookupFun, Opts) -> 88 | case ?MODULE:get(Key) of 89 | not_found -> 90 | {MicroSecs, Value} = timer:tc(fun() -> LookupFun() end), 91 | log_miss(Key, MicroSecs), 92 | ?MODULE:put(Key, Value, Opts); 93 | V -> 94 | log_hit(Key), 95 | V 96 | end. 97 | 98 | -spec put( 99 | Key :: term(), 100 | Value :: term() 101 | ) -> {ok, Value :: term()}. 102 | %% @doc Associate the given key with the given value. 103 | %% 104 | %% Equivalent to `put/3' with all options defaulted. 105 | put(Key, Value) -> 106 | put(Key, Value, #{}). 107 | 108 | -spec put( 109 | Key :: term(), 110 | Value :: term(), 111 | Options :: map() 112 | ) -> {ok, Value :: term()}. 113 | %% @doc Associate the given key with the given value. 114 | %% 115 | %% The only defined option at this time is `ttl' which 116 | %% defines the number of seconds to keep a value in the cache. 117 | %% The default time is 60 seconds. 118 | put(Key, Value, Opts) -> 119 | gen_server:call(?MODULE, {put, Key, Value, Opts}). 120 | 121 | -spec log_hit(Key :: term()) -> ok. 122 | %% @doc (Asynchronously) record a cache hit for the given key 123 | log_hit(Key) -> 124 | gen_server:cast(?MODULE, {hit, Key}). 125 | 126 | -spec log_miss( 127 | Key :: term(), 128 | MicroSecs :: non_neg_integer() 129 | ) -> ok. 130 | %% @doc (Asynchronously) record a cache miss for the given key; 131 | %% record the amount of computation time to generate the cached 132 | %% answer 133 | log_miss(Key, MicroSecs) -> 134 | gen_server:cast(?MODULE, {miss, Key, MicroSecs}). 135 | 136 | %% gen server callbacks 137 | init([]) -> 138 | Tid = ets:new(?TBL_NAME, ?ETS_OPTS), 139 | Tref = schedule_new_tick(), 140 | {ok, #state{tid = Tid, tref = Tref}}. 141 | 142 | handle_call({put, Key, Value, Opts}, _From, State) -> 143 | TTL = maps:get(ttl, Opts, ?DEFAULT_TTL), 144 | ExpireTime = erlang:system_time(seconds) + TTL, 145 | true = ets:insert( 146 | ?TBL_NAME, 147 | #entry{ 148 | key = Key, 149 | value = Value, 150 | expire_ts = ExpireTime 151 | } 152 | ), 153 | {reply, {ok, Value}, State}; 154 | handle_call(Call, From, State) -> 155 | lager:warning("Unexpected call ~p from ~p", [Call, From]), 156 | {reply, diediedie, State}. 157 | 158 | handle_cast({hit, _Key}, #state{hits = H} = State) -> 159 | {noreply, State#state{hits = H + 1}}; 160 | handle_cast({miss, _Key, Micros}, #state{misses = M} = State) -> 161 | {noreply, State#state{misses = [Micros div ?TO_MILLIS | M]}}; 162 | handle_cast(Cast, State) -> 163 | lager:warning("Unexpected cast ~p", [Cast]), 164 | {noreply, State}. 165 | 166 | handle_info(bh_cache_tick, State) -> 167 | ok = expire_cache(), 168 | NewState = compute_cache_stats(State), 169 | Tref = schedule_new_tick(), 170 | {noreply, NewState#state{tref = Tref}}; 171 | handle_info(Info, State) -> 172 | lager:warning("Unexpected info ~p", [Info]), 173 | {noreply, State}. 174 | 175 | %% internal functions 176 | -spec maybe_expired( 177 | Value :: term(), 178 | Current :: non_neg_integer(), 179 | Expire :: non_neg_integer() 180 | ) -> not_found | {ok, Value :: term()}. 181 | maybe_expired(_Value, Current, Expire) when Current >= Expire -> not_found; 182 | maybe_expired(Value, _Current, _Expire) -> {ok, Value}. 
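%% Usage sketch for the read-through pattern get/3 implements (the key and
%% the query fun here are hypothetical): the fun runs only on a cache miss,
%% and its result is stored under the default 60 second TTL unless a `ttl'
%% option is given.
%%
%%   {ok, Stats} = bh_cache:get(
%%       {my_module, expensive_stats},
%%       fun() -> run_expensive_query() end,
%%       #{ttl => 120}
%%   ).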
183 | 184 | -spec schedule_new_tick() -> reference(). 185 | schedule_new_tick() -> 186 | erlang:send_after(?TICK_INTERVAL, self(), bh_cache_tick). 187 | 188 | -spec expire_cache() -> ok. 189 | expire_cache() -> 190 | Current = erlang:system_time(seconds), 191 | Removed = ets:foldl( 192 | fun 193 | (#entry{key = K, expire_ts = E}, Acc) when Current >= E -> 194 | true = ets:delete(?TBL_NAME, K), 195 | Acc + 1; 196 | (_Entry, Acc) -> 197 | Acc 198 | end, 199 | 0, 200 | ?TBL_NAME 201 | ), 202 | case Removed of 203 | 0 -> 204 | ok; 205 | _ -> 206 | lager:info("Removed ~p cache entries this tick.", [Removed]), 207 | ok 208 | end. 209 | 210 | compute_cache_stats(#state{hits = 0, misses = []} = State) -> 211 | State; 212 | compute_cache_stats(#state{hits = H, misses = []} = State) -> 213 | lager:info("Cache hits: ~p", [H]), 214 | State#state{hits = 0}; 215 | compute_cache_stats(#state{hits = H, misses = M} = State) -> 216 | Misses = length(M), 217 | Max = lists:max(M), 218 | Min = lists:min(M), 219 | Avg = lists:sum(M) div Misses, 220 | lager:info( 221 | "Cache hits: ~p, misses count: ~p, max ms: ~p, min ms: ~p, avg ms: ~p", 222 | [H, Misses, Max, Min, Avg] 223 | ), 224 | State#state{hits = 0, misses = []}. 225 | -------------------------------------------------------------------------------- /src/bh_db_worker.erl: -------------------------------------------------------------------------------- 1 | -module(bh_db_worker). 2 | 3 | -include("bh_route_handler.hrl"). 4 | 5 | -include_lib("epgsql/include/epgsql.hrl"). 6 | 7 | -ifdef(TEST). 8 | 9 | -include_lib("eunit/include/eunit.hrl"). 10 | 11 | -endif. 12 | 13 | -callback prepare_conn(epgsql:connection()) -> map(). 14 | 15 | -behaviour(dispcount). 16 | 17 | %% how long the pool worker, if we get one, has to give us the db conn 18 | -define(POOL_CHECKOUT_TIMEOUT, 500). 19 | %% how many times to try to get a worker 20 | -define(POOL_CHECKOUT_RETRIES, 3). 21 | %% how long to wait for a query response 22 | -define(POOL_QUERY_TIMEOUT, 15000). 23 | 24 | -export([ 25 | init/1, 26 | checkout/2, 27 | transaction/3, 28 | checkin/2, 29 | handle_info/2, 30 | dead/1, 31 | terminate/2, 32 | code_change/3 33 | ]). 34 | 35 | -export([load_from_eql/2]). 36 | -export([prepared_query/3, execute_batch/2]). 37 | 38 | -record(state, { 39 | given = false :: boolean(), 40 | db_opts :: map(), 41 | db_conn :: undefined | epgsql:connection(), 42 | handlers :: [atom()], 43 | prepared_statements = #{} :: map() 44 | }). 45 | 46 | -spec prepared_query(Pool :: term(), Name :: string(), Params :: [epgsql:bind_param()]) -> 47 | epgsql_cmd_prepared_query:response(). 48 | prepared_query(shutdown, _, _) -> 49 | throw(?RESPONSE_503_SHUTDOWN); 50 | prepared_query(Pool, Name, Params) -> 51 | Ref = make_ref(), 52 | Fun = fun(From, {Stmts, Conn}) -> 53 | {Query, Types} = maps:get(Name, Stmts), 54 | %% construct the same kind of cast the epgsqla commands do, but redirect 55 | %% the output to the elli process directly 56 | gen_server:cast( 57 | Conn, 58 | {{cast, From, Ref}, epgsql_cmd_eequery, {Query, Params, Types}} 59 | ) 60 | end, 61 | case dispcount:transaction(Pool, Fun) of 62 | ok -> 63 | receive 64 | {_Conn, Ref, Res} -> 65 | Res 66 | after ?POOL_QUERY_TIMEOUT -> throw(?RESPONSE_503) 67 | end; 68 | {error, busy} -> 69 | throw(?RESPONSE_503) 70 | end. 71 | 72 | -spec execute_batch(Pool :: term(), [{Name :: string(), Params :: [epgsql:bind_param()]}]) -> 73 | epgsql_cmd_batch:response(). 
74 | execute_batch(shutdown, _) -> 75 | throw(?RESPONSE_503_SHUTDOWN); 76 | execute_batch(Pool, Queries) -> 77 | Ref = make_ref(), 78 | Fun = fun(From, {Stmts, Conn}) -> 79 | Batch = lists:foldr( 80 | fun({Name, Params}, Acc) -> 81 | {Query, Types} = maps:get(Name, Stmts), 82 | [{Query, Params, Types} | Acc] 83 | end, 84 | [], 85 | Queries 86 | ), 87 | %% construct the same kind of cast the epgsqla commands do, but redirect 88 | %% the output to the elli process directly 89 | gen_server:cast( 90 | Conn, 91 | {{cast, From, Ref}, epgsql_cmd_eequery, {batch, Batch}} 92 | ) 93 | end, 94 | case dispcount:transaction(Pool, Fun) of 95 | ok -> 96 | receive 97 | {_Conn, Ref, Res} -> 98 | Res 99 | after ?POOL_QUERY_TIMEOUT -> throw(?RESPONSE_503) 100 | end; 101 | {error, busy} -> 102 | throw(?RESPONSE_503) 103 | end. 104 | 105 | load_from_eql(Filename, Loads) -> 106 | PrivDir = code:priv_dir(blockchain_http), 107 | {ok, Queries} = eql:compile(filename:join(PrivDir, Filename)), 108 | ResolveParams = fun 109 | R({K, Name}) when is_atom(Name) -> 110 | R({K, {Name, []}}); 111 | R({K, {Name, Params}}) -> 112 | case 113 | case Params of 114 | [] -> eql:get_query(Name, Queries); 115 | _ -> eql:get_query(Name, Queries, lists:map(R, Params)) 116 | end 117 | of 118 | {ok, Q} -> {K, Q}; 119 | undefined -> error({badarg, Name}) 120 | end; 121 | R({K, V}) -> 122 | {K, V} 123 | end, 124 | Load = fun 125 | L({Key, {Name, Params, Types}}) -> 126 | %% Leverage the equivalent pattern in ResolveParams to 127 | %% expand out nested eql fragments and their parameters. 128 | {Key, Query} = ResolveParams({Key, {maybe_fix_name(Name), Params}}), 129 | {Key, {Query, Types}}; 130 | L({Key, {Name, Params}}) -> 131 | L({Key, {Name, Params, []}}); 132 | L(Key) -> 133 | L({Key, {Key, [], []}}) 134 | end, 135 | 136 | Statements = lists:map(Load, Loads), 137 | maps:from_list(Statements). 138 | 139 | maybe_fix_name(N) when is_list(N) -> 140 | list_to_atom(N); 141 | maybe_fix_name(N) -> 142 | N. 143 | 144 | init(Args) -> 145 | GetOpt = fun(K) -> 146 | case lists:keyfind(K, 1, Args) of 147 | false -> error({missing_opt, K}); 148 | {_, V} -> V 149 | end 150 | end, 151 | Codecs = [{epgsql_codec_json, {jiffy, [], [return_maps]}}], 152 | DBOpts = (GetOpt(db_opts))#{codecs => Codecs}, 153 | Handlers = GetOpt(db_handlers), 154 | {ok, 155 | connect(#state{ 156 | db_opts = DBOpts, 157 | given = false, 158 | handlers = Handlers 159 | })}. 160 | 161 | checkout(_From, State = #state{given = true}) -> 162 | lager:warning("unexpected checkout when already checked out"), 163 | {error, busy, State}; 164 | checkout(_From, State = #state{db_conn = Conn}) -> 165 | {ok, Conn, State#state{given = true}}. 166 | 167 | transaction(From, Fun, State = #state{db_conn = Conn, prepared_statements = Stmts}) -> 168 | try Fun(From, {Stmts, Conn}) of 169 | _ -> ok 170 | catch 171 | What:Why:Stack -> 172 | lager:warning("Transaction failed: ~p", [{What, Why, Stack}]) 173 | end, 174 | {ok, State}. 175 | 176 | checkin(Conn, State = #state{db_conn = Conn, given = true}) -> 177 | {ok, State#state{given = false}}; 178 | checkin(Conn, State) -> 179 | lager:warning("unexpected checkin of ~p when we have ~p", [Conn, State#state.db_conn]), 180 | {ignore, State}. 181 | 182 | dead(State) -> 183 | {ok, State#state{given = false}}. 
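%% Call-path sketch (the statement name is taken from bh_route_blocks; the
%% exact result shape depends on the query): a route handler resolves the
%% pool from a persistent term and issues a prepared query, which dispcount
%% routes through transaction/3 above.
%%
%%   Pool = persistent_term:get(ro_pool),
%%   {ok, _Columns, _Rows} =
%%       bh_db_worker:prepared_query(Pool, "block_height", []).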
184 | 185 | handle_info({'EXIT', Conn, Reason}, State = #state{db_conn = Conn}) -> 186 | lager:info("dispcount worker's db connection exited ~p", [Reason]), 187 | {stop, Reason, State}; 188 | handle_info(_Msg, State) -> 189 | lager:info("dispcount worker got unexpected message ~p ~p", [_Msg, State]), 190 | {ok, State}. 191 | 192 | terminate(_Reason, _State) -> 193 | %% let the GC clean the socket. 194 | ok. 195 | 196 | code_change(_OldVsn, State, _Extra) -> 197 | {ok, State}. 198 | 199 | connect(State = #state{db_opts = DBOpts, handlers = Handlers}) -> 200 | {ok, Conn} = epgsql:connect(DBOpts), 201 | PreparedStatements = lists:foldl( 202 | fun(Mod, Acc) -> 203 | maps:merge(Mod:prepare_conn(Conn), Acc) 204 | end, 205 | #{}, 206 | Handlers 207 | ), 208 | %% set the statement timeout to 1 second less than POOL_QUERY_TIMEOUT 209 | {ok, [], []} = epgsql:squery( 210 | Conn, 211 | io_lib:format("SET statement_timeout = '~bs';", [(?POOL_QUERY_TIMEOUT div 1000) - 1]) 212 | ), 213 | State#state{db_conn = Conn, prepared_statements = PreparedStatements}. 214 | 215 | -ifdef(TEST). 216 | 217 | eql_test() -> 218 | %% we need to load the application here so that code:priv/2 will work correctly 219 | ok = application:load(blockchain_http), 220 | Files = [ 221 | {"vars.sql", [ 222 | "var_list", 223 | {"var_get", {"var_get", [], [text]}} 224 | ]} 225 | ], 226 | lists:all( 227 | fun 228 | (#{}) -> true; 229 | (_) -> false 230 | end, 231 | [load_from_eql(F, L) || {F, L} <- Files] 232 | ), 233 | ok. 234 | 235 | -endif. 236 | -------------------------------------------------------------------------------- /src/bh_db_worker.hrl: -------------------------------------------------------------------------------- 1 | -define(DB_RO_POOL, persistent_term:get(ro_pool)). 2 | -define(DB_RW_POOL, persistent_term:get(rw_pool)). 3 | 4 | -define(PREPARED_QUERY(S, A), ?PREPARED_QUERY(?DB_RO_POOL, (S), (A))). 5 | -define(PREPARED_QUERY(P, S, A), bh_db_worker:prepared_query((P), (S), (A))). 6 | 7 | -define(EXECUTE_BATCH(S), ?EXECUTE_BATCH(?DB_RO_POOL, (S))). 8 | -define(EXECUTE_BATCH(P, S), bh_db_worker:execute_batch((P), (S))). 9 | -------------------------------------------------------------------------------- /src/bh_gateway_mode.erl: -------------------------------------------------------------------------------- 1 | -module(bh_gateway_mode). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [gateway_mode]. 11 | 12 | encode(Atom, gateway_mode, Choices) when is_atom(Atom) -> 13 | lager:info("encoding gateway mode ~p ~p", [Atom, Choices]), 14 | true = lists:member(Atom, Choices), 15 | atom_to_binary(Atom, utf8); 16 | encode(Binary, gateway_mode, Choices) -> 17 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 18 | Binary. 19 | 20 | decode(Bin, gateway_mode, Choices) -> 21 | Atom = binary_to_existing_atom(Bin, utf8), 22 | true = lists:member(Atom, Choices), 23 | Atom. 24 | -------------------------------------------------------------------------------- /src/bh_middleware_cors.erl: -------------------------------------------------------------------------------- 1 | -module(bh_middleware_cors). 2 | 3 | -export([postprocess/3]). 4 | 5 | -define(RESPONSE_HEADER_CORS, {<<"Access-Control-Allow-Origin">>, <<"*">>}). 6 | 7 | postprocess(_Req, {Code, Body}, _) -> 8 | {Code, [?RESPONSE_HEADER_CORS], Body}; 9 | postprocess(_Req, {Code, Headers, Body}, _) -> 10 | {Code, [?RESPONSE_HEADER_CORS | Headers], Body}.
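%% Behaviour sketch: both elli response shapes pass through unchanged apart
%% from the injected header, e.g.
%%
%%   postprocess(Req, {200, Body}, Args)
%%   %% => {200, [{<<"Access-Control-Allow-Origin">>, <<"*">>}], Body}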
11 | -------------------------------------------------------------------------------- /src/bh_middleware_cursor.erl: -------------------------------------------------------------------------------- 1 | -module(bh_middleware_cursor). 2 | 3 | -export([handle/2]). 4 | 5 | -spec handle(elli:req(), elli:config()) -> elli_handler:result(). 6 | handle(Req, _Config) -> 7 | Args = elli_request:get_args(Req), 8 | case lists:keyfind(<<"cursor">>, 1, Args) of 9 | false -> 10 | ignore; 11 | {<<"cursor">>, _Cursor} when length(Args) == 1 -> 12 | ignore; 13 | {<<"cursor">>, Cursor} -> 14 | Location = [ 15 | <<"/">>, 16 | lists:join(<<"/">>, elli_request:path(Req)), 17 | <<"?cursor=">>, 18 | Cursor 19 | ], 20 | {301, [{<<"Location">>, Location}], <<>>} 21 | end. 22 | -------------------------------------------------------------------------------- /src/bh_middleware_throttle.erl: -------------------------------------------------------------------------------- 1 | -module(bh_middleware_throttle). 2 | 3 | -export([handle/2, postprocess/3, handle_event/3]). 4 | 5 | -behaviour(elli_handler). 6 | 7 | -include("bh_route_handler.hrl"). 8 | 9 | %% 10 | %% ELLI 11 | %% 12 | 13 | handle(Req, _Args) -> 14 | Host = get_actor(Req), 15 | case throttle:check(request_count, Host) of 16 | {limit_exceeded, 0, TimeToResetInMs} -> 17 | ?RESPONSE_429(TimeToResetInMs); 18 | _ -> 19 | case throttle:peek(request_time, Host) of 20 | {limit_exceeded, 0, TimeToResetInMs} -> 21 | ?RESPONSE_429(TimeToResetInMs); 22 | _ -> 23 | ignore 24 | end 25 | end. 26 | 27 | postprocess(Req, {Response, Headers, Body}, #{debug := true}) -> 28 | {_, Remainder, _} = throttle:peek(request_time, get_actor(Req)), 29 | {Response, [{<<"X-Request-Time-Remaining">>, integer_to_list(Remainder)} | Headers], Body}; 30 | postprocess(_Req, Res, _Args) -> 31 | Res. 32 | 33 | %% 34 | %% ELLI EVENT CALLBACKS 35 | %% 36 | 37 | handle_event(request_complete, [Req, Code, _Hs, _B, {Timings, _Sizes}], Args) -> 38 | RequestStart = proplists:get_value(request_start, Timings), 39 | RequestEnd = proplists:get_value(request_end, Timings), 40 | GraceTime = persistent_term:get(throttle_grace_time), 41 | Duration = max(1, ((RequestEnd - RequestStart) div 1000000) - GraceTime), 42 | Actor = get_actor(Req), 43 | throttle:update(request_time, Actor, Duration), 44 | case Duration > maps:get(log_longer_than, Args, infinity) of 45 | true -> 46 | lager:notice("~s request for ~s from ~s with result ~p took ~p ms", [ 47 | elli_request:method(Req), 48 | elli_request:raw_path(Req), 49 | Actor, 50 | Code, 51 | Duration + GraceTime 52 | ]); 53 | false -> 54 | ok 55 | end, 56 | ok; 57 | handle_event(elli_startup, _Args, Config = #{ 58 | request_time := MS, 59 | request_interval := Interval, 60 | request_count := Count 61 | }) -> 62 | throttle:setup(request_time, MS, Interval), 63 | throttle:setup(request_count, Count, Interval), 64 | %% Stash grace time 65 | GraceTime = maps:get(grace_time, Config, 0), 66 | persistent_term:put(throttle_grace_time, GraceTime), 67 | %% Stash actor request header to use 68 | ActorHeader = maps:get(actor_header, Config, <<"X-Forwarded-For">>), 69 | persistent_term:put(throttle_actor_header, ActorHeader), 70 | lager:info("throttle starting with ~p", [Config]); 71 | handle_event(_Event, _Data, _Args) -> 72 | ok.
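%% Illustrative elli_startup configuration for this handler (the numbers
%% are examples, not defaults taken from this repo): allow 100 requests and
%% 10000 ms of accumulated request time per actor per 60000 ms interval,
%% with 100 ms of grace time deducted from each request's measured cost.
%%
%%   #{request_count => 100,
%%     request_time => 10000,
%%     request_interval => 60000,
%%     grace_time => 100,
%%     actor_header => <<"X-Forwarded-For">>}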
73 | 74 | %% internal functions 75 | 76 | get_actor(Req) -> 77 | ActorHeader = persistent_term:get(throttle_actor_header), 78 | case lists:keyfind(ActorHeader, 1, elli_request:headers(Req)) of 79 | false -> 80 | elli_request:peer(Req); 81 | {ActorHeader, Value} -> 82 | hd(binary:split(Value, <<",">>)) 83 | end. 84 | -------------------------------------------------------------------------------- /src/bh_pending_transaction_nonce_type.erl: -------------------------------------------------------------------------------- 1 | -module(bh_pending_transaction_nonce_type). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [pending_transaction_nonce_type]. 11 | 12 | encode(Atom, pending_transaction_nonce_type, Choices) when is_atom(Atom) -> 13 | true = lists:member(Atom, Choices), 14 | atom_to_binary(Atom, utf8); 15 | encode(Binary, pending_transaction_nonce_type, Choices) -> 16 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 17 | Binary. 18 | 19 | decode(Bin, pending_transaction_nonce_type, Choices) -> 20 | Atom = binary_to_existing_atom(Bin, utf8), 21 | true = lists:member(Atom, Choices), 22 | Atom. 23 | -------------------------------------------------------------------------------- /src/bh_pending_transaction_status.erl: -------------------------------------------------------------------------------- 1 | -module(bh_pending_transaction_status). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [pending_transaction_status]. 11 | 12 | encode(Atom, pending_transaction_status, Choices) when is_atom(Atom) -> 13 | true = lists:member(Atom, Choices), 14 | atom_to_binary(Atom, utf8); 15 | encode(Binary, pending_transaction_status, Choices) -> 16 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 17 | Binary. 18 | 19 | decode(Bin, pending_transaction_status, Choices) -> 20 | Atom = binary_to_existing_atom(Bin, utf8), 21 | true = lists:member(Atom, Choices), 22 | Atom. 23 | -------------------------------------------------------------------------------- /src/bh_pool_watcher.erl: -------------------------------------------------------------------------------- 1 | -module(bh_pool_watcher). 2 | 3 | -behaviour(gen_server). 4 | 5 | -export([start_link/1]). 6 | 7 | -export([init/1, handle_call/3, handle_cast/2, handle_info/2]). 8 | 9 | start_link(Pools) -> 10 | gen_server:start_link(?MODULE, Pools, []). 11 | 12 | init(Pools) -> 13 | Monitors = maps:from_list([ 14 | {erlang:monitor(process, get_name(PoolName)), PoolName} 15 | || PoolName <- Pools 16 | ]), 17 | {ok, Monitors}. 18 | 19 | handle_call(Msg, From, State) -> 20 | lager:warning("Unexpected call ~p from ~p", [Msg, From]), 21 | {reply, error, State}. 22 | 23 | handle_cast(Msg, State) -> 24 | lager:warning("Unexpected cast ~p", [Msg]), 25 | {noreply, State}. 
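%% Recovery-flow sketch: a 'DOWN' from a monitored dispatcher schedules a
%% {monitor, Name} message; the handler below re-monitors the registered
%% name (Name ++ "_serv") and, once dispcount:dispatcher_info/1 succeeds
%% again, refreshes the pool info that the ?DB_RO_POOL / ?DB_RW_POOL macros
%% read, conceptually:
%%
%%   persistent_term:put(Name, PoolInfo)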
26 | 27 | handle_info({monitor, Name}, State) -> 28 | Ref = erlang:monitor(process, get_name(Name)), 29 | try dispcount:dispatcher_info(Name) of 30 | {ok, PoolInfo} -> 31 | lager:info("dispcount is back"), 32 | %% it's back, update the persistent term 33 | persistent_term:put(Name, PoolInfo), 34 | {noreply, maps:put(Ref, Name, State)} 35 | catch 36 | What:Why -> 37 | lager:info("dispcount still not ready ~p:~p", [What, Why]), 38 | %% likely things have not finished restarting, try again shortly 39 | erlang:demonitor(Ref, [flush]), 40 | erlang:send_after(500, self(), {monitor, Name}), 41 | {noreply, State} 42 | end; 43 | handle_info({'DOWN', Ref, process, _Pid, noproc}, State) -> 44 | case maps:find(Ref, State) of 45 | {ok, Name} -> 46 | lager:notice("Pool ~p monitor failed with noproc, retrying", [Name]), 47 | %% noproc means the process wasn't alive when we tried to monitor it 48 | %% we should probably wait a bit and retry 49 | erlang:send_after(5000, self(), {monitor, Name}), 50 | {noreply, maps:remove(Ref, State)}; 51 | error -> 52 | lager:warning("unknown ref ~p exited with reason noproc", [Ref]), 53 | {noreply, State} 54 | end; 55 | handle_info({'DOWN', Ref, process, _Pid, Reason}, State) -> 56 | case maps:find(Ref, State) of 57 | {ok, Name} -> 58 | self() ! {monitor, Name}, 59 | lager:notice("Pool ~p exited with reason ~p", [Name, Reason]), 60 | {noreply, maps:remove(Ref, State)}; 61 | error -> 62 | lager:warning("unknown ref ~p exited with reason ~p", [Ref, Reason]), 63 | {noreply, State} 64 | end; 65 | handle_info(Msg, State) -> 66 | lager:warning("Unexpected info ~p", [Msg]), 67 | {noreply, State}. 68 | 69 | get_name(Name) -> 70 | list_to_atom(atom_to_list(Name) ++ "_serv"). 71 | -------------------------------------------------------------------------------- /src/bh_route_assert_locations.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_assert_locations). 2 | 3 | -export([prepare_conn/1, handle/3]). 4 | 5 | -behavior(bh_route_handler). 6 | -behavior(bh_db_worker). 7 | 8 | -include("bh_route_handler.hrl"). 9 | 10 | prepare_conn(_Conn) -> 11 | #{}. 12 | 13 | handle('GET', [], Req) -> 14 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 15 | Result = bh_route_txns:get_txn_list(Args), 16 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 17 | ?MK_RESPONSE(Result, CacheTime); 18 | handle(_Method, _Path, _Req) -> 19 | ?RESPONSE_404. 20 | 21 | add_filter_types(Args) -> 22 | Args ++ [{filter_types, <<"assert_location_v1,assert_location_v2">>}]. 23 | -------------------------------------------------------------------------------- /src/bh_route_blocks.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_blocks). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([ 11 | get_block_height/0, 12 | get_block_height/1, 13 | get_block_list/1, 14 | get_block_list_cache_time/1, 15 | get_block/1, 16 | get_block_txn_list/2, 17 | get_block_stats/0, 18 | get_block_span/2 19 | ]). 20 | 21 | -define(S_BLOCK_HEIGHT, "block_height"). 22 | -define(S_BLOCK_HEIGHT_BY_TIME, "block_height_by_time"). 23 | 24 | -define(S_BLOCK_LIST_BEFORE, "block_list_before"). 25 | 26 | -define(S_BLOCK_SPAN, "block_span"). 27 | -define(S_BLOCK_BY_HASH, "block_by_hash"). 28 | -define(S_BLOCK_BY_HEIGHT, "block_by_height"). 
29 | -define(S_BLOCK_HEIGHT_TXN_LIST, "block_height_txn_list"). 30 | -define(S_BLOCK_HEIGHT_TXN_LIST_BEFORE, "block_height_txn_list_before"). 31 | -define(S_BLOCK_HASH_TXN_LIST, "block_hash_txn_list_list"). 32 | -define(S_BLOCK_HASH_TXN_LIST_BEFORE, "block_hash_txn_list_before"). 33 | -define(S_BLOCK_TIMES, "block_times"). 34 | 35 | -define(SELECT_BLOCK_BASE, 36 | "select b.height, b.time, b.block_hash, b.prev_hash, b.transaction_count, b.snapshot_hash from blocks b " 37 | ). 38 | 39 | -define(SELECT_BLOCK_HEIGHT_TXN_LIST_BASE, [ 40 | ?SELECT_TXN_BASE, 41 | "from (select * from transactions where block = $1 order by hash) t " 42 | ]). 43 | 44 | -define(SELECT_BLOCK_HASH_TXN_LIST_BASE, [ 45 | ?SELECT_TXN_BASE, 46 | "from (select * from transactions where block = (select height from blocks where block_hash = $1) order by hash) t " 47 | ]). 48 | 49 | prepare_conn(_Conn) -> 50 | S1 = {"select max(height) from blocks", []}, 51 | 52 | S3 = { 53 | [ 54 | ?SELECT_BLOCK_BASE, 55 | "where b.height < $1 order by height DESC limit $2" 56 | ], 57 | [int8, int4] 58 | }, 59 | 60 | S4 = { 61 | [ 62 | ?SELECT_BLOCK_BASE, 63 | "where b.height = $1" 64 | ], 65 | [int8] 66 | }, 67 | 68 | S5 = { 69 | [ 70 | ?SELECT_BLOCK_BASE, 71 | "where b.block_hash = $1" 72 | ], 73 | [text] 74 | }, 75 | 76 | S6 = { 77 | [ 78 | ?SELECT_BLOCK_HEIGHT_TXN_LIST_BASE, 79 | "limit ", 80 | integer_to_list(?BLOCK_TXN_LIST_LIMIT) 81 | ], 82 | [int8] 83 | }, 84 | 85 | S7 = { 86 | [ 87 | ?SELECT_BLOCK_HEIGHT_TXN_LIST_BASE, 88 | "where t.hash > $2", 89 | "limit ", 90 | integer_to_list(?BLOCK_TXN_LIST_LIMIT) 91 | ], 92 | [int8, text] 93 | }, 94 | 95 | S8 = { 96 | [ 97 | ?SELECT_BLOCK_HASH_TXN_LIST_BASE, 98 | "limit ", 99 | integer_to_list(?BLOCK_TXN_LIST_LIMIT) 100 | ], 101 | [text] 102 | }, 103 | 104 | S9 = { 105 | [ 106 | ?SELECT_BLOCK_HASH_TXN_LIST_BASE, 107 | "where t.hash > $2", 108 | "limit ", 109 | integer_to_list(?BLOCK_TXN_LIST_LIMIT) 110 | ], 111 | [text, text] 112 | }, 113 | 114 | S10 = { 115 | [ 116 | "select height from blocks ", 117 | "where time < extract(epoch from $1::timestamptz) ", 118 | "order by height desc ", 119 | "limit 1" 120 | ], 121 | [timestamptz] 122 | }, 123 | 124 | M = bh_db_worker:load_from_eql("blocks.sql", [ 125 | {?S_BLOCK_TIMES, {?S_BLOCK_TIMES, [], []}}, 126 | {?S_BLOCK_SPAN, {?S_BLOCK_SPAN, [], [timestamptz, timestamptz]}} 127 | ]), 128 | 129 | maps:merge( 130 | #{ 131 | ?S_BLOCK_HEIGHT => S1, 132 | ?S_BLOCK_LIST_BEFORE => S3, 133 | ?S_BLOCK_BY_HEIGHT => S4, 134 | ?S_BLOCK_BY_HASH => S5, 135 | ?S_BLOCK_HEIGHT_TXN_LIST => S6, 136 | ?S_BLOCK_HEIGHT_TXN_LIST_BEFORE => S7, 137 | ?S_BLOCK_HASH_TXN_LIST => S8, 138 | ?S_BLOCK_HASH_TXN_LIST_BEFORE => S9, 139 | ?S_BLOCK_HEIGHT_BY_TIME => S10 140 | }, 141 | M 142 | ). 
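%% Note on the statement map built below: inline SQL (S1..S10) and
%% eql-loaded fragments from blocks.sql are merged into one
%% name -> {Query, Types} map, and bh_db_worker prepares all of them at
%% connect time. A height lookup by time then binds a timestamptz parameter
%% directly, e.g. (illustrative value):
%%
%%   ?PREPARED_QUERY(?S_BLOCK_HEIGHT_BY_TIME, [{{2021, 7, 1}, {0, 0, 0}}])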
143 | 144 | handle('GET', [], Req) -> 145 | Args = ?GET_ARGS([cursor], Req), 146 | Result = get_block_list(Args), 147 | CacheTime = get_block_list_cache_time(Result), 148 | ?MK_RESPONSE(Result, CacheTime); 149 | handle('GET', [<<"height">>], Req) -> 150 | Args = ?GET_ARGS([max_time], Req), 151 | ?MK_RESPONSE(get_block_height(Args), block_time); 152 | handle('GET', [<<"hash">>, BlockHash], _Req) -> 153 | ?MK_RESPONSE(get_block({hash, BlockHash}), infinity); 154 | handle('GET', [<<"hash">>, BlockHash, <<"transactions">>], Req) -> 155 | Args = ?GET_ARGS([cursor], Req), 156 | ?MK_RESPONSE(get_block_txn_list({hash, BlockHash}, Args), infinity); 157 | handle('GET', [<<"stats">>], _Req) -> 158 | ?MK_RESPONSE( 159 | {ok, 160 | bh_route_stats:mk_stats_from_time_results( 161 | get_block_stats() 162 | )}, 163 | block_time 164 | ); 165 | handle('GET', [BlockId], _Req) -> 166 | bh_route_handler:try_or_else( 167 | fun() -> binary_to_integer(BlockId) end, 168 | fun(Height) -> 169 | ?MK_RESPONSE(get_block({height, Height}), infinity) 170 | end, 171 | ?RESPONSE_400 172 | ); 173 | handle('GET', [BlockId, <<"transactions">>], Req) -> 174 | Args = ?GET_ARGS([cursor], Req), 175 | bh_route_handler:try_or_else( 176 | fun() -> binary_to_integer(BlockId) end, 177 | fun(Height) -> 178 | ?MK_RESPONSE(get_block_txn_list({height, Height}, Args), infinity) 179 | end, 180 | ?RESPONSE_400 181 | ); 182 | handle(_Method, _Path, _Req) -> 183 | ?RESPONSE_404. 184 | 185 | -spec get_block_span(High :: binary() | undefined, Low :: binary() | undefined) -> 186 | {ok, {bh_route_handler:timespan(), bh_route_handler:blockspan()}} 187 | | {error, term()}. 188 | get_block_span(MaxTime0, MinTime0) -> 189 | case ?PARSE_TIMESPAN(MaxTime0, MinTime0) of 190 | {ok, {MaxTime, MinTime}} -> 191 | {ok, _, [{HighBlock, LowBlock}]} = 192 | ?PREPARED_QUERY(?S_BLOCK_SPAN, [MaxTime, MinTime]), 193 | {ok, {{MaxTime, MinTime}, {HighBlock, LowBlock}}}; 194 | {error, Error} -> 195 | {error, Error} 196 | end. 197 | 198 | get_block_list([{cursor, undefined}]) -> 199 | {ok, #{height := Height}} = get_block_height(), 200 | case Height rem ?BLOCK_LIST_LIMIT of 201 | 0 -> 202 | %% Handle the perfect block aligned height by returning an empty 203 | %% response with a cursor that can be used as the cache key. 204 | {ok, [], mk_block_list_cursor(Height + 1)}; 205 | Limit -> 206 | {ok, _, Results} = ?PREPARED_QUERY(?S_BLOCK_LIST_BEFORE, [Height + 1, Limit]), 207 | {ok, block_list_to_json(Results), mk_block_list_cursor(Height + 1 - length(Results))} 208 | end; 209 | get_block_list([{cursor, Cursor}]) -> 210 | case ?CURSOR_DECODE(Cursor) of 211 | {ok, #{<<"before">> := Before}} -> 212 | {ok, _, Results} = ?PREPARED_QUERY(?S_BLOCK_LIST_BEFORE, [Before, ?BLOCK_LIST_LIMIT]), 213 | {ok, block_list_to_json(Results), mk_block_list_cursor(Before - length(Results))}; 214 | _ -> 215 | {error, badarg} 216 | end. 217 | 218 | get_block_list_cache_time({ok, Results, _}) when length(Results) == ?BLOCK_LIST_LIMIT -> 219 | %% This is a proper page: a cursor and a full list of entries. This relies on 220 | %% the result at a block-list aligned height having 0 results. 221 | infinity; 222 | get_block_list_cache_time({ok, Results, _}) when length(Results) < ?BLOCK_LIST_LIMIT -> 223 | %% This is a partial result. Should only happen on the first page 224 | block_time; 225 | get_block_list_cache_time(_) -> 226 | never. 227 | 228 | mk_block_list_cursor(Before) when Before =< 1 -> 229 | undefined; 230 | mk_block_list_cursor(Before) -> 231 | #{before => Before}.
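%% Worked example of the page-alignment scheme above, assuming
%% ?BLOCK_LIST_LIMIT is 100: at height 1234 the first page serves
%% 1234 rem 100 = 34 blocks (1234..1201) with cursor #{before => 1201};
%% every cursored page after that is a full, immutable span
%% (1200..1101, 1100..1001, ...), which is why cursored pages are cached
%% with `infinity' while the first page only lives for a block time.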
232 | 233 | get_block_height() -> 234 | {ok, Result, _, _} = get_block_height([{max_time, undefined}]), 235 | {ok, Result}. 236 | 237 | get_block_height([{max_time, undefined}]) -> 238 | mk_block_height_result(undefined, ?PREPARED_QUERY(?S_BLOCK_HEIGHT, [])); 239 | get_block_height([{max_time, Time0}]) -> 240 | case ?PARSE_TIMESTAMP(Time0) of 241 | {ok, Timestamp} -> 242 | mk_block_height_result( 243 | Timestamp, 244 | ?PREPARED_QUERY( 245 | ?S_BLOCK_HEIGHT_BY_TIME, 246 | [Timestamp] 247 | ) 248 | ); 249 | {error, _} = Error -> 250 | Error 251 | end. 252 | 253 | mk_block_height_result(Timestamp, {ok, _, [{Height}]}) -> 254 | Meta = 255 | case Timestamp of 256 | undefined -> undefined; 257 | _ -> #{max_time => iso8601:format(Timestamp)} 258 | end, 259 | {ok, #{height => Height}, undefined, Meta}. 260 | 261 | get_block({height, Height}) -> 262 | Result = ?PREPARED_QUERY(?S_BLOCK_BY_HEIGHT, [Height]), 263 | mk_block_from_result(Result); 264 | get_block({hash, Hash}) -> 265 | Result = ?PREPARED_QUERY(?S_BLOCK_BY_HASH, [Hash]), 266 | mk_block_from_result(Result). 267 | 268 | mk_block_from_result({ok, _, [Result]}) -> 269 | {ok, block_to_json(Result)}; 270 | mk_block_from_result(_) -> 271 | {error, not_found}. 272 | 273 | get_block_txn_list({height, Height}, Args) -> 274 | case get_block({height, Height}) of 275 | {ok, _} -> 276 | get_block_txn_list( 277 | Height, 278 | {?S_BLOCK_HEIGHT_TXN_LIST, ?S_BLOCK_HEIGHT_TXN_LIST_BEFORE}, 279 | Args 280 | ); 281 | Error -> 282 | Error 283 | end; 284 | get_block_txn_list({hash, Hash}, Args) -> 285 | case get_block({hash, Hash}) of 286 | {ok, _} -> 287 | get_block_txn_list(Hash, {?S_BLOCK_HASH_TXN_LIST, ?S_BLOCK_HASH_TXN_LIST_BEFORE}, Args); 288 | Error -> 289 | Error 290 | end. 291 | 292 | get_block_txn_list(Block, {StartQuery, _CursorQuery}, [{cursor, undefined}]) -> 293 | Result = ?PREPARED_QUERY(StartQuery, [Block]), 294 | mk_txn_list_from_result(Result); 295 | get_block_txn_list(Block, {_StartQuery, CursorQuery}, [{cursor, Cursor}]) -> 296 | case ?CURSOR_DECODE(Cursor) of 297 | {ok, #{<<"hash">> := Hash}} -> 298 | Result = ?PREPARED_QUERY(CursorQuery, [Block, Hash]), 299 | mk_txn_list_from_result(Result) 300 | end. 301 | 302 | get_block_stats() -> 303 | bh_cache:get({?MODULE, block_stats}, fun() -> 304 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_BLOCK_TIMES, []), 305 | Data 306 | end). 307 | 308 | mk_txn_list_from_result({ok, _, Results}) -> 309 | {ok, ?TXN_LIST_TO_JSON(Results), mk_txn_list_cursor(Results)}. 310 | 311 | mk_txn_list_cursor(Results) -> 312 | case length(Results) < ?BLOCK_TXN_LIST_LIMIT of 313 | true -> 314 | undefined; 315 | false -> 316 | {_Height, _Time, Hash, _Type, _Fields} = lists:last(Results), 317 | #{hash => Hash} 318 | end. 319 | 320 | block_list_to_json(Results) -> 321 | lists:map(fun block_to_json/1, Results). 322 | 323 | block_to_json({Height, Time, Hash, PrevHash, TxnCount, SnapshotHash}) -> 324 | NullToStr = fun 325 | (null) -> <<"">>; 326 | (Bin) -> Bin 327 | end, 328 | #{ 329 | height => Height, 330 | time => Time, 331 | hash => Hash, 332 | prev_hash => PrevHash, 333 | transaction_count => TxnCount, 334 | snapshot_hash => NullToStr(SnapshotHash) 335 | }. 336 | -------------------------------------------------------------------------------- /src/bh_route_challenges.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_challenges). 2 | 3 | -export([prepare_conn/1, handle/3]). 4 | -export([ 5 | get_challenge_list/2, 6 | get_challenge_stats/0 7 | ]). 
8 | 9 | -behavior(bh_route_handler). 10 | -behavior(bh_db_worker). 11 | 12 | -include("bh_route_handler.hrl"). 13 | 14 | -define(S_CHALLENGE_STATS, "challenges_stats"). 15 | 16 | prepare_conn(_Conn) -> 17 | Loads = [?S_CHALLENGE_STATS], 18 | bh_db_worker:load_from_eql("challenges.sql", Loads). 19 | 20 | handle('GET', [], Req) -> 21 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 22 | Result = bh_route_txns:get_txn_list(Args), 23 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 24 | ?MK_RESPONSE(Result, CacheTime); 25 | handle('GET', [<<"stats">>], _Req) -> 26 | ?MK_RESPONSE( 27 | {ok, 28 | bh_route_stats:mk_stats_from_challenge_results( 29 | get_challenge_stats() 30 | )}, 31 | block_time 32 | ); 33 | handle(_Method, _Path, _Req) -> 34 | ?RESPONSE_404. 35 | 36 | add_filter_types(Args) -> 37 | Args ++ [{filter_types, [<<"poc_receipts_v1">>, <<"poc_receipts_v2">>]}]. 38 | 39 | get_challenge_list({hotspot, Address}, Args) -> 40 | bh_route_txns:get_actor_txn_list({hotspot, Address}, add_filter_types(Args)); 41 | get_challenge_list({account, Address}, Args) -> 42 | bh_route_txns:get_actor_txn_list({account, Address}, add_filter_types(Args)). 43 | 44 | get_challenge_stats() -> 45 | bh_cache:get( 46 | {?MODULE, challenge_stats}, 47 | fun() -> 48 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_CHALLENGE_STATS, []), 49 | Data 50 | end 51 | ). 52 | -------------------------------------------------------------------------------- /src/bh_route_cities.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_cities). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_city_list/1]). 11 | 12 | -define(S_CITY_LIST_COUNT, "city_list_count"). 13 | -define(S_CITY_LIST_COUNT_BEFORE, "city_list_count_before"). 14 | -define(S_CITY_LIST_NAME, "city_list_name"). 15 | -define(S_CITY_LIST_NAME_BEFORE, "city_list_name_before"). 16 | -define(S_CITY_BY_ID, "city_by_id"). 17 | -define(S_CITY_SEARCH, "city_search"). 18 | -define(S_CITY_SEARCH_BEFORE, "city_search_before"). 19 | -define(S_CITY_HOTSPOT_LIST, "hotspot_city_list"). 20 | -define(S_CITY_HOTSPOT_LIST_BEFORE, "hotspot_city_list_before"). 21 | 22 | -define(CITY_LIST_LIMIT, 100).
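%% Composition sketch: each Loads entry below names the city_list_base
%% fragment from cities.sql plus the sub-fragments to splice into its
%% :rank, :inner_scope, :scope, :order and :limit placeholders;
%% bh_db_worker:load_from_eql/2 flattens that into one SQL string per
%% statement key, conceptually:
%%
%%   #{"city_list_name" => {"select ... order by ... limit 100", []}}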
23 | 24 | prepare_conn(_Conn) -> 25 | CityListLimit = "limit " ++ integer_to_list(?CITY_LIST_LIMIT), 26 | Loads = [ 27 | {?S_CITY_BY_ID, 28 | {city_list_base, 29 | [ 30 | {inner_scope, city_by_id_inner_scope}, 31 | {rank, city_search_rank}, 32 | {scope, ""}, 33 | {order, city_search_order}, 34 | {limit, "limit 1"} 35 | ], 36 | [text]}}, 37 | {?S_CITY_SEARCH, 38 | {city_list_base, 39 | [ 40 | {inner_scope, city_search_inner_scope}, 41 | {rank, city_search_rank}, 42 | {scope, ""}, 43 | {order, city_search_order}, 44 | {limit, CityListLimit} 45 | ], 46 | [text]}}, 47 | {?S_CITY_LIST_NAME, 48 | {city_list_base, [ 49 | {rank, city_list_name_rank}, 50 | {inner_scope, ""}, 51 | {scope, ""}, 52 | {order, city_list_name_order}, 53 | {limit, CityListLimit} 54 | ]}}, 55 | {?S_CITY_LIST_NAME_BEFORE, 56 | {city_list_base, 57 | [ 58 | {rank, city_list_name_rank}, 59 | {inner_scope, ""}, 60 | {scope, city_list_name_before_scope}, 61 | {order, city_list_name_order}, 62 | {limit, CityListLimit} 63 | ], 64 | [text, text]}}, 65 | {?S_CITY_LIST_COUNT, 66 | {city_list_base, 67 | [ 68 | {rank, city_list_count_rank}, 69 | {inner_scope, ""}, 70 | {scope, ""}, 71 | {order, city_list_count_order}, 72 | {limit, CityListLimit} 73 | ], 74 | [text]}}, 75 | {?S_CITY_LIST_COUNT_BEFORE, 76 | {city_list_base, 77 | [ 78 | {rank, city_list_count_rank}, 79 | {inner_scope, ""}, 80 | {scope, city_list_count_before_scope}, 81 | {order, city_list_count_order}, 82 | {limit, CityListLimit} 83 | ], 84 | [text, int8, text]}} 85 | ], 86 | bh_db_worker:load_from_eql("cities.sql", Loads). 87 | 88 | handle('GET', [], Req) -> 89 | Args = ?GET_ARGS([search, order, cursor], Req), 90 | Result = get_city_list(Args), 91 | ?MK_RESPONSE(Result, block_time); 92 | handle('GET', [City, <<"hotspots">>], Req) -> 93 | Args = ?GET_ARGS([filter_modes, cursor], Req), 94 | CityId = ?B64_TO_BIN(City), 95 | try 96 | Result = bh_route_hotspots:get_hotspot_list([{owner, undefined}, {city, CityId} | Args]), 97 | ?MK_RESPONSE(Result, block_time) 98 | catch 99 | _:_ -> 100 | ?RESPONSE_404 101 | end; 102 | handle('GET', [City], _Req) -> 103 | CityId = ?B64_TO_BIN(City), 104 | try 105 | Result = get_city(CityId), 106 | ?MK_RESPONSE(Result, block_time) 107 | catch 108 | _:_ -> 109 | ?RESPONSE_404 110 | end; 111 | handle(_, _, _Req) -> 112 | ?RESPONSE_404. 113 | 114 | order_to_rank(<<"online_count">>) -> 115 | <<"online_count">>; 116 | order_to_rank(<<"offline_count">>) -> 117 | <<"offline_count">>; 118 | order_to_rank(<<"hotspot_count">>) -> 119 | <<"hotspot_count">>; 120 | order_to_rank(_) -> 121 | throw(?RESPONSE_400). 122 | 123 | get_city(CityId) -> 124 | Result = ?PREPARED_QUERY(?S_CITY_BY_ID, [CityId]), 125 | mk_city_from_result(Result). 
126 | 127 | get_city_list([{search, undefined}, {order, undefined}, {cursor, undefined}]) -> 128 | Result = ?PREPARED_QUERY(?S_CITY_LIST_NAME, []), 129 | mk_city_list_from_result(#{}, Result); 130 | get_city_list([{search, undefined}, {order, Order}, {cursor, undefined}]) -> 131 | Rank = order_to_rank(Order), 132 | Result = ?PREPARED_QUERY(?S_CITY_LIST_COUNT, [Rank]), 133 | mk_city_list_from_result(#{order => Order}, Result); 134 | get_city_list([{search, Search}, {order, _}, {cursor, undefined}]) -> 135 | Result = ?PREPARED_QUERY(?S_CITY_SEARCH, [Search]), 136 | mk_city_list_from_result(#{}, Result); 137 | get_city_list([{search, _}, {order, _}, {cursor, Cursor}]) -> 138 | case ?CURSOR_DECODE(Cursor) of 139 | {ok, 140 | #{ 141 | <<"city_id">> := CityId, 142 | <<"rank">> := CursorRank 143 | } = C} -> 144 | case maps:get(<<"order">>, C, false) of 145 | false -> 146 | Result = ?PREPARED_QUERY(?S_CITY_LIST_NAME_BEFORE, [CursorRank, CityId]), 147 | mk_city_list_from_result(#{}, Result); 148 | Order -> 149 | Rank = order_to_rank(Order), 150 | Result = ?PREPARED_QUERY(?S_CITY_LIST_COUNT_BEFORE, [Rank, CursorRank, CityId]), 151 | mk_city_list_from_result(#{order => Order}, Result) 152 | end; 153 | _ -> 154 | {error, badarg} 155 | end. 156 | 157 | mk_city_from_result({ok, _, [Result]}) -> 158 | {ok, city_to_json(Result)}. 159 | 160 | mk_city_list_from_result(CursorBase, {ok, _, Results}) -> 161 | {ok, city_list_to_json(Results), mk_city_list_cursor(CursorBase, Results)}. 162 | 163 | mk_city_list_cursor(CursorBase, Results) when is_list(Results) -> 164 | case length(Results) < ?CITY_LIST_LIMIT of 165 | true -> 166 | undefined; 167 | false -> 168 | {_ShortCity, _LongCity, _ShortState, _LongState, _ShortCountry, _LongCountry, CityId, 169 | _TotalCount, _OnlineCount, _OfflineCount, Rank} = lists:last(Results), 170 | CursorBase#{ 171 | city_id => CityId, 172 | rank => Rank 173 | } 174 | end. 175 | 176 | %% 177 | %% to_json 178 | %% 179 | 180 | city_list_to_json(Results) -> 181 | lists:map(fun city_to_json/1, Results). 182 | 183 | city_to_json( 184 | {ShortCity, LongCity, ShortState, LongState, ShortCountry, LongCountry, CityId, TotalCount, 185 | OnlineCount, OfflineCount, _Rank} 186 | ) -> 187 | Base = bh_route_hotspots:to_geo_json( 188 | {ShortCity, LongCity, ShortState, LongState, ShortCountry, LongCountry, CityId} 189 | ), 190 | Base#{ 191 | hotspot_count => TotalCount, 192 | online_count => OnlineCount, 193 | offline_count => OfflineCount 194 | }. 195 | -------------------------------------------------------------------------------- /src/bh_route_dc_burns.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_dc_burns). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | -export([get_burn_stats/0, mk_burn_stats_result/1]). 10 | 11 | -define(S_BURN_LIST_BEFORE, "burn_list_before"). 12 | -define(S_BURN_LIST, "burn_list"). 13 | -define(S_BURN_STATS, "burn_stats"). 14 | -define(S_BURN_SUM, "burn_sum"). 15 | -define(S_BURN_BUCKETED_SUM, "burn_bucketed_sum"). 16 | 17 | -define(BURN_LIST_LIMIT, 100). 18 | -define(BURN_LIST_BLOCK_ALIGN, 100). 19 | 20 | -define(FILTER_TYPES, [ 21 | add_gateway, 22 | assert_location, 23 | state_channel, 24 | routing, 25 | fee 26 | ]).
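%% The epgsql:update_type_cache/2 call below registers the postgres
%% burn_type enum (decoded by src/bh_burn_type.erl), which lets filter
%% lists such as ?FILTER_TYPES bind directly as {array, burn_type}
%% parameters, e.g. (illustrative):
%%
%%   ?PREPARED_QUERY(?S_BURN_LIST, [[fee, state_channel]])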
27 | 28 | prepare_conn(Conn) -> 29 | epgsql:update_type_cache(Conn, [ 30 | {bh_burn_type, [fee, state_channel, assert_location, add_gateway, oui, routing]} 31 | ]), 32 | BurnListLimit = "limit " ++ integer_to_list(?BURN_LIST_LIMIT), 33 | Loads = [ 34 | {?S_BURN_LIST_BEFORE, 35 | {burn_list_base, 36 | [ 37 | {scope, burn_list_before_scope}, 38 | {limit, BurnListLimit} 39 | ], 40 | [{array, burn_type}, text, int8]}}, 41 | {?S_BURN_LIST, 42 | {burn_list_base, 43 | [ 44 | {scope, burn_list_scope}, 45 | {limit, BurnListLimit} 46 | ], 47 | [{array, burn_type}]}}, 48 | {?S_BURN_SUM, {?S_BURN_SUM, [], [timestamptz, timestamptz]}}, 49 | {?S_BURN_BUCKETED_SUM, {?S_BURN_BUCKETED_SUM, [], [timestamptz, timestamptz, interval]}}, 50 | {?S_BURN_STATS, {?S_BURN_STATS, [], [int8, text]}} 51 | ], 52 | bh_db_worker:load_from_eql("dc_burns.sql", Loads). 53 | 54 | handle('GET', [], Req) -> 55 | Args = ?GET_ARGS([cursor, filter_types], Req), 56 | Result = get_burn_list(Args), 57 | CacheTime = get_burn_list_cache_time(Args), 58 | ?MK_RESPONSE(Result, CacheTime); 59 | handle('GET', [<<"stats">>], _Req) -> 60 | ?MK_RESPONSE(get_stats(), {block_time, 60}); 61 | handle('GET', [<<"sum">>], Req) -> 62 | Args = ?GET_ARGS([max_time, min_time, bucket], Req), 63 | ?MK_RESPONSE(get_burn_sum(Args), block_time); 64 | handle(_, _, _Req) -> 65 | ?RESPONSE_404. 66 | 67 | get_burn_list([{cursor, undefined}, {filter_types, FilterTypes0}]) -> 68 | FilterTypes = 69 | case FilterTypes0 of 70 | undefined -> ?FILTER_TYPES; 71 | _ -> FilterTypes0 72 | end, 73 | Result = ?PREPARED_QUERY(?S_BURN_LIST, [FilterTypes]), 74 | mk_burn_list_from_result(FilterTypes, Result); 75 | get_burn_list([{cursor, Cursor}, {filter_types, _}]) -> 76 | case ?CURSOR_DECODE(Cursor) of 77 | {ok, #{ 78 | <<"before_address">> := BeforeAddress, 79 | <<"before_block">> := BeforeBlock, 80 | <<"filter_types">> := FilterTypes0 81 | }} -> 82 | FilterTypes = 83 | case FilterTypes0 of 84 | undefined -> 85 | ?FILTER_TYPES; 86 | _ -> 87 | FilterTypes0 88 | end, 89 | Result = ?PREPARED_QUERY(?S_BURN_LIST_BEFORE, [ 90 | FilterTypes, 91 | BeforeAddress, 92 | BeforeBlock 93 | ]), 94 | mk_burn_list_from_result(FilterTypes, Result); 95 | _ -> 96 | {error, badarg} 97 | end. 98 | 99 | %% If the request had a cursor in it we can cache the response for that request 100 | %% for a long time since the cursor makes the response stable. 101 | get_burn_list_cache_time([{cursor, undefined}, {filter_types, _}]) -> 102 | block_time; 103 | get_burn_list_cache_time([{cursor, _}, {filter_types, _}]) -> 104 | infinity. 105 | 106 | mk_burn_list_from_result(FilterTypes, {ok, _, Results}) -> 107 | {ok, burn_list_to_json(Results), mk_cursor(FilterTypes, Results)}. 108 | 109 | mk_cursor(FilterTypes, Results) when is_list(Results) -> 110 | case length(Results) < ?BURN_LIST_LIMIT of 111 | true -> 112 | undefined; 113 | false -> 114 | {Height, Actor, _Type, _Amount, _Price} = lists:last(Results), 115 | #{ 116 | filter_types => FilterTypes, 117 | before_address => Actor, 118 | before_block => Height 119 | } 120 | end. 121 | 122 | get_stats() -> 123 | Results = get_burn_stats(), 124 | Now = calendar:universal_time(), 125 | Meta = #{ 126 | timestamp => iso8601:format(Now) 127 | }, 128 | {ok, mk_burn_stats_result(Results), undefined, Meta}.
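%% mk_burn_stats_result/1 below folds rows of {Interval, Type, NumDCs} into
%% a map keyed by interval, keeping a running total and dropping zero rows.
%% A small worked example with hypothetical values:
%%
%%   Rows = [{<<"last_day">>, fee, 10},
%%           {<<"last_day">>, routing, 0},
%%           {<<"last_day">>, state_channel, 5}],
%%   mk_burn_stats_result({ok, Rows}).
%%   %% => #{<<"last_day">> => #{fee => 10, state_channel => 5, total => 15}}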
129 | 130 | get_burn_stats() -> 131 | bh_cache:get( 132 | {?MODULE, burn_stats}, 133 | fun() -> 134 | {ok, {_, {_, MonthMinBlock}}} = bh_route_blocks:get_block_span( 135 | undefined, <<"-30 day">> 136 | ), 137 | {ok, {_, {_, WeekMinBlock}}} = bh_route_blocks:get_block_span( 138 | undefined, <<"-1 week">> 139 | ), 140 | {ok, {_, {_, DayMinBlock}}} = bh_route_blocks:get_block_span( 141 | undefined, <<"-1 day">> 142 | ), 143 | [{ok, _, MonthStats}, {ok, _, WeekStats}, {ok, _, DayStats}] = 144 | ?EXECUTE_BATCH([ 145 | {?S_BURN_STATS, [MonthMinBlock, <<"last_month">>]}, 146 | {?S_BURN_STATS, [WeekMinBlock, <<"last_week">>]}, 147 | {?S_BURN_STATS, [DayMinBlock, <<"last_day">>]} 148 | ]), 149 | lists:flatten([MonthStats, WeekStats, DayStats]) 150 | end 151 | ). 152 | 153 | get_burn_sum([ 154 | {max_time, MaxTime0}, 155 | {min_time, MinTime0}, 156 | {bucket, undefined} 157 | ]) -> 158 | case ?PARSE_TIMESPAN(MaxTime0, MinTime0) of 159 | {ok, {MaxTime, MinTime}} -> 160 | Result = ?PREPARED_QUERY(?S_BURN_SUM, [MinTime, MaxTime]), 161 | Meta = #{ 162 | max_time => iso8601:format(MaxTime), 163 | min_time => iso8601:format(MinTime) 164 | }, 165 | {ok, mk_burn_sum_result(Result), undefined, Meta}; 166 | {error, _} = Error -> 167 | Error 168 | end; 169 | get_burn_sum([ 170 | {max_time, MaxTime0}, 171 | {min_time, MinTime0}, 172 | {bucket, Bucket} 173 | ]) -> 174 | case ?PARSE_BUCKETED_TIMESPAN(MaxTime0, MinTime0, Bucket) of 175 | {ok, {{MaxTime, MinTime}, {BucketType, BucketStep}}} -> 176 | Result = ?PREPARED_QUERY(?S_BURN_BUCKETED_SUM, [MaxTime, MinTime, BucketStep]), 177 | Meta = #{ 178 | max_time => iso8601:format(MaxTime), 179 | min_time => iso8601:format(MinTime), 180 | bucket => BucketType 181 | }, 182 | BucketResults = mk_burn_stats_result(Result), 183 | {_, Buckets} = lists:unzip( 184 | lists:reverse(lists:keysort(1, maps:to_list(BucketResults))) 185 | ), 186 | {ok, Buckets, undefined, Meta}; 187 | {error, Error} -> 188 | {error, Error} 189 | end. 190 | 191 | mk_burn_sum_result({ok, _, Results}) -> 192 | mk_burn_sum_result({ok, Results}); 193 | mk_burn_sum_result({ok, Results}) -> 194 | maps:from_list(Results). 195 | 196 | mk_burn_stats_result({ok, _, Results}) -> 197 | mk_burn_stats_result({ok, Results}); 198 | mk_burn_stats_result({ok, Results}) -> 199 | lists:foldl( 200 | fun 201 | ({_Interval, _Type, 0}, Acc) -> 202 | Acc; 203 | ({Interval, Type, NumDCs}, Acc) -> 204 | maps:update_with( 205 | Interval, 206 | fun(IntervalMap = #{total := Total}) -> 207 | maps:put(Type, NumDCs, IntervalMap#{total => Total + NumDCs}) 208 | end, 209 | #{Type => NumDCs, total => NumDCs}, 210 | Acc 211 | ) 212 | end, 213 | #{}, 214 | Results 215 | ). 216 | 217 | %% 218 | %% json 219 | %% 220 | 221 | burn_list_to_json(Results) -> 222 | lists:map(fun burn_to_json/1, Results). 223 | 224 | burn_to_json({Block, Actor, Type, Amount, OraclePrice}) -> 225 | #{ 226 | block => Block, 227 | address => Actor, 228 | type => Type, 229 | amount => Amount, 230 | oracle_price => OraclePrice 231 | }. 232 | -------------------------------------------------------------------------------- /src/bh_route_elections.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_elections). 2 | 3 | -export([prepare_conn/1, handle/3]). 4 | -export([ 5 | get_election_list/2, 6 | get_election_time_stats/0 7 | ]). 8 | 9 | -behavior(bh_route_handler). 10 | -behavior(bh_db_worker). 11 | 12 | -include("bh_route_handler.hrl"). 13 | 14 | -define(S_ELECTION_TIMES, "election_times"). 
15 | 16 | prepare_conn(_Conn) -> 17 | Loads = [?S_ELECTION_TIMES], 18 | bh_db_worker:load_from_eql("elections.sql", Loads). 19 | 20 | handle('GET', [], Req) -> 21 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 22 | Result = bh_route_txns:get_txn_list(Args), 23 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 24 | ?MK_RESPONSE(Result, CacheTime); 25 | handle('GET', [<<"stats">>], _Req) -> 26 | ?MK_RESPONSE( 27 | {ok, 28 | bh_route_stats:mk_stats_from_time_results( 29 | get_election_time_stats() 30 | )}, 31 | block_time 32 | ); 33 | handle(_Method, _Path, _Req) -> 34 | ?RESPONSE_404. 35 | 36 | add_filter_types(Args) -> 37 | Args ++ [{filter_types, [<<"consensus_group_v1">>]}]. 38 | 39 | get_election_list({hotspot, Address}, Args) -> 40 | bh_route_txns:get_actor_txn_list({hotspot, Address}, add_filter_types(Args)); 41 | get_election_list({account, Address}, Args) -> 42 | bh_route_txns:get_actor_txn_list({account, Address}, add_filter_types(Args)). 43 | 44 | get_election_time_stats() -> 45 | bh_cache:get( 46 | {?MODULE, election_time_stats}, 47 | fun() -> 48 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_ELECTION_TIMES, []), 49 | Data 50 | end 51 | ). 52 | -------------------------------------------------------------------------------- /src/bh_route_handler.hrl: -------------------------------------------------------------------------------- 1 | -include("bh_db_worker.hrl"). 2 | 3 | -define(JSON_CONTENT, {<<"Content-Type">>, <<"application/json; charset=utf-8">>}). 4 | -define(RESPONSE_404, {404, [?JSON_CONTENT], jiffy:encode(#{error => <<"Not Found">>})}). 5 | -define(RESPONSE_409, {409, [?JSON_CONTENT], jiffy:encode(#{error => <<"Conflict">>})}). 6 | -define(RESPONSE_503, {503, [?JSON_CONTENT], jiffy:encode(#{error => <<"Too Busy">>})}). 7 | -define(RESPONSE_429(Time), 8 | {429, [?JSON_CONTENT, {<<"Retry-After">>, integer_to_list(Time div 1000)}], 9 | jiffy:encode(#{error => <<"Too Busy">>, come_back_in_ms => Time})} 10 | ). 11 | -define(RESPONSE_503_SHUTDOWN, {503, [?JSON_CONTENT], jiffy:encode(#{error => <<"Stopping">>})}). 12 | -define(RESPONSE_400, ?RESPONSE_400("Bad Request")). 13 | -define(RESPONSE_400(S), {400, [?JSON_CONTENT], jiffy:encode(#{error => list_to_binary((S))})}). 14 | -define(MAX_LIMIT, 1000). 15 | -define(DEFAULT_ARG_LIMIT, <<"100">>). 16 | -define(GET_ARGS(A, R), bh_route_handler:get_args((A), (R))). 17 | -define(PARSE_INTERVAL(B), bh_route_handler:parse_interval((B))). 18 | -define(PARSE_TIMESPAN(H, L), bh_route_handler:parse_timespan((H), (L))). 19 | -define(PARSE_BUCKETED_TIMESPAN(H, L, B), 20 | bh_route_handler:parse_bucketed_timespan((H), (L), (B)) 21 | ). 22 | -define(PARSE_TIMESTAMP(T), bh_route_handler:parse_timestamp((T))). 23 | -define(PARSE_FLOAT(F), bh_route_handler:parse_float((F))). 24 | -define(PARSE_INT(I), bh_route_handler:parse_int((I))). 25 | 26 | -define(FILTER_TYPES_TO_LIST(L, B), bh_route_handler:filter_types_to_list((L), (B))). 27 | -define(HOTSPOT_MODES_TO_LIST(L, B), bh_route_handler:hotspot_modes_to_list((L), (B))). 28 | 29 | -define(MK_RESPONSE(R), ?MK_RESPONSE(R, undefined)). 30 | -define(MK_RESPONSE(R, C), bh_route_handler:mk_response((R), (C))). 31 | -define(INSERT_LAT_LON(L, N, F), bh_route_handler:lat_lon((L), (N), (F))). 32 | -define(INSERT_LAT_LON(L, F), bh_route_handler:lat_lon((L), (F))). 33 | -define(INSERT_LOCATION_HEX(L, N, F), bh_route_handler:insert_location_hex((L), (N), (F))). 34 | -define(INSERT_LOCATION_HEX(L, F), bh_route_handler:insert_location_hex((L), (F))). 
35 | -define(CURSOR_ENCODE(M), bh_route_handler:cursor_encode(M)). 36 | -define(CURSOR_DECODE(B), bh_route_handler:cursor_decode(B)). 37 | -define(BIN_TO_B64(B), base64url:encode((B))). 38 | -define(B64_TO_BIN(B), base64url:decode((B))). 39 | -define(BIN_TO_B58(B), list_to_binary(libp2p_crypto:bin_to_b58((B)))). 40 | -define(SELECT_TXN_FIELDS(F), ["select t.block, t.time, t.hash, t.type, ", (F), " "]). 41 | -define(SELECT_TXN_BASE, ?SELECT_TXN_FIELDS("t.fields")). 42 | -define(TXN_LIST_TO_JSON(R), bh_route_txns:txn_list_to_json((R))). 43 | -define(LOCATION_HEX_RES, 8). 44 | -define(BLOCK_LIST_LIMIT, 100). 45 | -define(BLOCK_TXN_LIST_LIMIT, 50). 46 | -define(SNAPSHOT_LIST_LIMIT, 100). 47 | -define(CHALLENGE_TXN_LIST_LIMIT, 50). 48 | -define(STATE_CHANNEL_TXN_LIST_LIMIT, 10). 49 | -define(PENDING_TXN_LIST_LIMIT, 100). 50 | -define(TXN_LIST_LIMIT, 100). 51 | -------------------------------------------------------------------------------- /src/bh_route_locations.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_locations). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_location/1]). 11 | 12 | -define(S_LOCATION, "location_at_index"). 13 | 14 | prepare_conn(_Conn) -> 15 | Loads = [ 16 | {?S_LOCATION, 17 | {location_list_base, 18 | [ 19 | {scope, "where location = $1"} 20 | ], 21 | [text]}} 22 | ], 23 | bh_db_worker:load_from_eql("locations.sql", Loads). 24 | 25 | handle('GET', [Location], _Req) -> 26 | ?MK_RESPONSE(get_location(Location), infinity); 27 | handle(_, _, _Req) -> 28 | ?RESPONSE_404. 29 | 30 | get_location(Location) -> 31 | case ?PREPARED_QUERY(?S_LOCATION, [Location]) of 32 | {ok, _, [Result]} -> 33 | {ok, location_to_json(Result)}; 34 | _ -> 35 | {error, not_found} 36 | end. 37 | 38 | %% 39 | %% json 40 | %% 41 | 42 | location_to_json( 43 | {ShortStreet, LongStreet, ShortCity, LongCity, ShortState, LongState, ShortCountry, LongCountry, 44 | CityId, Location} 45 | ) -> 46 | MaybeB64 = fun 47 | (null) -> null; 48 | (Bin) -> ?BIN_TO_B64(Bin) 49 | end, 50 | #{ 51 | short_street => ShortStreet, 52 | long_street => LongStreet, 53 | short_city => ShortCity, 54 | long_city => LongCity, 55 | short_state => ShortState, 56 | long_state => LongState, 57 | short_country => ShortCountry, 58 | long_country => LongCountry, 59 | city_id => MaybeB64(CityId), 60 | location => Location 61 | }. 62 | -------------------------------------------------------------------------------- /src/bh_route_oracle.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_oracle). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_price_list/1, get_price_at_block/1]). 11 | 12 | -define(S_PRICE_LIST_BEFORE, "oracle_price_list_before"). 13 | -define(S_PRICE_LIST, "oracle_price_list"). 14 | -define(S_PRICE_STATS, "oracle_price_stats"). 15 | -define(S_PRICE_AT_BLOCK, "oracle_price_at_block"). 16 | -define(S_PRICE_PREDICTIONS, "oracle_price_predictions"). 17 | -define(PRICE_LIST_LIMIT, 100). 18 | -define(PRICE_LIST_BLOCK_ALIGN, 100).
19 | 20 | prepare_conn(_Conn) -> 21 | PriceListLimit = integer_to_list(?PRICE_LIST_LIMIT), 22 | Loads = [ 23 | {?S_PRICE_LIST, {oracle_price_list_base, [{scope, ""}, {limit, PriceListLimit}]}}, 24 | {?S_PRICE_LIST_BEFORE, 25 | {oracle_price_list_base, 26 | [ 27 | {scope, "where p.block < $1"}, 28 | {limit, PriceListLimit} 29 | ], 30 | [int8]}}, 31 | {?S_PRICE_AT_BLOCK, 32 | {oracle_price_list_base, 33 | [ 34 | {scope, "where block <= coalesce($1, (select max(height) from blocks))"}, 35 | {limit, "1"} 36 | ], 37 | [int8]}}, 38 | ?S_PRICE_PREDICTIONS, 39 | {?S_PRICE_STATS, {?S_PRICE_STATS, [], [timestamptz, timestamptz]}} 40 | ], 41 | bh_db_worker:load_from_eql("oracles.sql", Loads). 42 | 43 | handle('GET', [<<"prices">>], Req) -> 44 | Args = ?GET_ARGS([max_block, cursor], Req), 45 | Result = get_price_list(Args), 46 | CacheTime = get_price_list_cache_time(Args), 47 | ?MK_RESPONSE(Result, CacheTime); 48 | handle('GET', [<<"prices">>, <<"current">>], _Req) -> 49 | ?MK_RESPONSE(get_price_at_block(undefined), block_time); 50 | handle('GET', [<<"prices">>, <<"stats">>], Req) -> 51 | Args = ?GET_ARGS([max_time, min_time], Req), 52 | ?MK_RESPONSE(get_price_stats(Args), block_time); 53 | handle('GET', [<<"prices">>, Block], _Req) -> 54 | bh_route_handler:try_or_else( 55 | fun() -> binary_to_integer(Block) end, 56 | fun(Height) -> 57 | ?MK_RESPONSE(get_price_at_block(Height), infinity) 58 | end, 59 | ?RESPONSE_400 60 | ); 61 | handle('GET', [<<"predictions">>], _Req) -> 62 | ?MK_RESPONSE(get_price_predictions(), block_time); 63 | handle('GET', [<<"activity">>], Req) -> 64 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 65 | Result = bh_route_txns:get_txn_list(Args), 66 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 67 | ?MK_RESPONSE(Result, CacheTime); 68 | handle('GET', [Address, <<"activity">>], Req) -> 69 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 70 | Result = bh_route_txns:get_actor_txn_list({oracle, Address}, Args), 71 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 72 | ?MK_RESPONSE(Result, CacheTime); 73 | handle(_, _, _Req) -> 74 | ?RESPONSE_404. 75 | 76 | add_filter_types(Args) -> 77 | Args ++ [{filter_types, [<<"price_oracle_v1">>]}]. 78 | 79 | get_price_list([{max_block, BeforeBlock}, {cursor, undefined}]) -> 80 | try 81 | {ok, _, Results} = 82 | case BeforeBlock of 83 | undefined -> 84 | ?PREPARED_QUERY(?S_PRICE_LIST, []); 85 | _ -> 86 | Before = binary_to_integer(BeforeBlock), 87 | ?PREPARED_QUERY(?S_PRICE_LIST_BEFORE, [ 88 | Before - (Before rem ?PRICE_LIST_LIMIT) 89 | ]) 90 | end, 91 | {ok, price_list_to_json(Results), mk_price_list_cursor(undefined, Results)} 92 | catch 93 | error:badarg -> {error, badarg} 94 | end; 95 | get_price_list([{max_block, _}, {cursor, Cursor}]) -> 96 | case ?CURSOR_DECODE(Cursor) of 97 | {ok, #{<<"before">> := Before}} -> 98 | {ok, _, Results} = ?PREPARED_QUERY(?S_PRICE_LIST_BEFORE, [ 99 | Before - (Before rem ?PRICE_LIST_LIMIT) 100 | ]), 101 | {ok, price_list_to_json(Results), mk_price_list_cursor(Cursor, Results)}; 102 | _ -> 103 | {error, badarg} 104 | end. 105 | 106 | %% If the request had a cursor in it we can cache the response for that request 107 | %% for a long time since the cursor makes the response stable. 108 | get_price_list_cache_time([{max_block, _}, {cursor, undefined}]) -> 109 | block_time; 110 | get_price_list_cache_time([{max_block, _}, {cursor, _}]) -> 111 | infinity. 
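%% Note that get_price_list/1 above aligns the `before' block down to a
%% multiple of ?PRICE_LIST_LIMIT, which keeps cursor values stable and is
%% what makes cursored responses safe to cache forever. Worked example:
%% for Before = 12345, 12345 - (12345 rem 100) = 12300, so every cursor in
%% that window resolves to the same aligned query.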
112 | 113 | mk_price_list_cursor(PrevCursor, Results) when is_list(Results) -> 114 | case length(Results) of 115 | 0 -> 116 | undefined; 117 | N when (N < ?PRICE_LIST_LIMIT) and not (PrevCursor == undefined) -> 118 | %% We have a cursor and we didn't get the full length. We 119 | %% must have reached the end of available data. 120 | undefined; 121 | _ -> 122 | {Block, _Price, _Timestamp} = lists:last(Results), 123 | #{before => Block} 124 | end. 125 | 126 | get_price_at_block(Height) -> 127 | case ?PREPARED_QUERY(?S_PRICE_AT_BLOCK, [Height]) of 128 | {ok, _, [Result]} -> 129 | {ok, price_to_json(Result)}; 130 | _ -> 131 | {ok, price_to_json({1, 0, {{1970, 1, 1}, {0, 0, 0}}})} 132 | end. 133 | 134 | get_price_predictions() -> 135 | {ok, _, Results} = ?PREPARED_QUERY(?S_PRICE_PREDICTIONS, []), 136 | {ok, price_predictions_to_json(Results)}. 137 | 138 | get_price_stats([{max_time, MaxTime0}, {min_time, MinTime0}]) -> 139 | case ?PARSE_TIMESPAN(MaxTime0, MinTime0) of 140 | {ok, {MaxTime, MinTime}} -> 141 | Result = ?PREPARED_QUERY(?S_PRICE_STATS, [MinTime, MaxTime]), 142 | mk_price_stats_result(MaxTime, MinTime, Result); 143 | {error, _} = Error -> 144 | Error 145 | end. 146 | 147 | mk_price_stats_result(MaxTime, MinTime, {ok, _, [Result]}) -> 148 | Meta = #{ 149 | max_time => iso8601:format(MaxTime), 150 | min_time => iso8601:format(MinTime) 151 | }, 152 | %% Result is expected to have the same fields as a stat results 153 | {ok, price_stat_to_json(Result), undefined, Meta}. 154 | 155 | %% 156 | %% json 157 | %% 158 | 159 | price_list_to_json(Results) -> 160 | lists:map(fun price_to_json/1, Results). 161 | 162 | price_to_json({Block, Price, Timestamp}) -> 163 | #{ 164 | block => Block, 165 | price => Price, 166 | timestamp => iso8601:format(Timestamp) 167 | }. 168 | 169 | price_predictions_to_json(Results) -> 170 | lists:map(fun price_prediction_to_json/1, Results). 171 | 172 | price_prediction_to_json({Time, Price}) -> 173 | #{ 174 | time => Time, 175 | price => Price 176 | }. 177 | 178 | price_stat_to_json({Min, Max, Median, Avg, StdDev}) -> 179 | #{ 180 | min => Min, 181 | max => Max, 182 | median => Median, 183 | avg => Avg, 184 | stddev => StdDev 185 | }. 186 | -------------------------------------------------------------------------------- /src/bh_route_ouis.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_ouis). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_last_oui/0, get_oui_list/1]). 11 | 12 | -define(S_OUI_LIST_BEFORE, "oui_list_before"). 13 | -define(S_OUI_LIST, "oui_list"). 14 | -define(S_OWNER_OUI_LIST_BEFORE, "owner_oui_list_before"). 15 | -define(S_OWNER_OUI_LIST, "owner_oui_list"). 16 | -define(S_OUI, "oui"). 17 | -define(S_LAST_OUI, "last_oui"). 18 | -define(S_ACTIVE_OUIS, "active_ouis"). 19 | 20 | -define(OUI_LIST_LIMIT, 100). 21 | 22 | -define(S_OUI_CURRENT, "oui_current"). 
23 | 24 | prepare_conn(_Conn) -> 25 | OuiListLimit = "limit " ++ integer_to_list(?OUI_LIST_LIMIT), 26 | Loads = [ 27 | {?S_OUI_LIST_BEFORE, 28 | {oui_list_base, 29 | [ 30 | {scope, oui_list_before_scope}, 31 | {order, oui_list_order}, 32 | {limit, OuiListLimit} 33 | ], 34 | [text, int8]}}, 35 | {?S_OUI_LIST, 36 | {oui_list_base, [ 37 | {scope, ""}, 38 | {order, oui_list_order}, 39 | {limit, OuiListLimit} 40 | ]}}, 41 | {?S_OWNER_OUI_LIST_BEFORE, 42 | {oui_list_base, 43 | [ 44 | {scope, owner_oui_list_before_scope}, 45 | {order, oui_list_order}, 46 | {limit, OuiListLimit} 47 | ], 48 | [text, text, int8]}}, 49 | {?S_OWNER_OUI_LIST, 50 | {oui_list_base, 51 | [ 52 | {scope, owner_oui_list_scope}, 53 | {order, oui_list_order}, 54 | {limit, OuiListLimit} 55 | ], 56 | [text]}}, 57 | {?S_LAST_OUI, 58 | {oui_list_base, [ 59 | {scope, "where oui = (select max(oui) from ouis)"}, 60 | {order, ""}, 61 | {limit, ""} 62 | ]}}, 63 | {?S_OUI, 64 | {oui_list_base, 65 | [ 66 | {scope, "where oui = $1"}, 67 | {order, ""}, 68 | {limit, ""} 69 | ], 70 | [int4]}}, 71 | {?S_ACTIVE_OUIS, {oui_active, []}} 72 | ], 73 | 74 | bh_db_worker:load_from_eql("ouis.sql", Loads). 75 | 76 | handle('GET', [], Req) -> 77 | Args = ?GET_ARGS([cursor], Req), 78 | ?MK_RESPONSE(get_oui_list([{owner, undefined} | Args]), block_time); 79 | handle('GET', [<<"stats">>], _Req) -> 80 | ?MK_RESPONSE(get_stats(), block_time); 81 | handle('GET', [<<"last">>], _Req) -> 82 | ?MK_RESPONSE(get_last_oui(), block_time); 83 | handle('GET', [OuiBin], _Req) -> 84 | bh_route_handler:try_or_else( 85 | fun() -> binary_to_integer(OuiBin) end, 86 | fun(Oui) -> 87 | ?MK_RESPONSE(get_oui(Oui), infinity) 88 | end, 89 | ?RESPONSE_400 90 | ); 91 | handle(_, _, _Req) -> 92 | ?RESPONSE_404. 93 | 94 | get_oui_list([{owner, undefined}, {cursor, undefined}]) -> 95 | Result = ?PREPARED_QUERY(?S_OUI_LIST, []), 96 | mk_oui_list_from_result(Result); 97 | get_oui_list([{owner, Owner}, {cursor, undefined}]) -> 98 | Result = ?PREPARED_QUERY(?S_OWNER_OUI_LIST, [Owner]), 99 | mk_oui_list_from_result(Result); 100 | get_oui_list([{owner, Owner}, {cursor, Cursor}]) -> 101 | case ?CURSOR_DECODE(Cursor) of 102 | {ok, #{ 103 | <<"before_address">> := BeforeAddress, 104 | <<"before_block">> := BeforeBlock, 105 | <<"height">> := _Height 106 | }} -> 107 | case Owner of 108 | undefined -> 109 | Result = 110 | ?PREPARED_QUERY(?S_OUI_LIST_BEFORE, [ 111 | BeforeAddress, 112 | BeforeBlock 113 | ]), 114 | mk_oui_list_from_result(Result); 115 | Owner -> 116 | Result = 117 | ?PREPARED_QUERY(?S_OWNER_OUI_LIST_BEFORE, [ 118 | Owner, 119 | BeforeAddress, 120 | BeforeBlock 121 | ]), 122 | mk_oui_list_from_result(Result) 123 | end; 124 | _ -> 125 | {error, badarg} 126 | end. 127 | 128 | get_oui(Oui) -> 129 | case ?PREPARED_QUERY(?S_OUI, [Oui]) of 130 | {ok, _, [Result]} -> {ok, oui_to_json(Result)}; 131 | _ -> {error, not_found} 132 | end. 133 | 134 | get_last_oui() -> 135 | case ?PREPARED_QUERY(?S_LAST_OUI, []) of 136 | {ok, _, [Result]} -> {ok, oui_to_json(Result)}; 137 | _ -> {error, not_found} 138 | end. 139 | 140 | get_stats() -> 141 | CountStats = ?PREPARED_QUERY(?S_ACTIVE_OUIS, []), 142 | mk_stats_from_results(CountStats). 143 | 144 | mk_oui_list_from_result({ok, _, Results}) -> 145 | {ok, oui_list_to_json(Results), mk_cursor(Results)}. 146 | 147 | mk_stats_from_results({ok, _, [{Count}]}) -> 148 | {ok, #{count => Count}}. 
149 | 150 | mk_cursor(Results) when is_list(Results) -> 151 | case length(Results) < ?OUI_LIST_LIMIT of 152 | true -> 153 | undefined; 154 | false -> 155 | {Height, _Oui, Owner, _Nonce, _Addresses, _Subnets, FirstBlock} = lists:last(Results), 156 | #{ 157 | before_address => Owner, 158 | before_block => FirstBlock, 159 | height => Height 160 | } 161 | end. 162 | 163 | %% 164 | %% json 165 | %% 166 | 167 | oui_list_to_json(Results) -> 168 | lists:map(fun oui_to_json/1, Results). 169 | 170 | oui_to_json( 171 | {Height, Oui, Owner, Nonce, Addresses, Subnets, _FirstBlock} 172 | ) -> 173 | MkSubnet = fun 174 | ([Base, Mask], Acc) -> 175 | [#{base => Base, mask => Mask} | Acc]; 176 | (_, Acc) -> 177 | Acc 178 | end, 179 | 180 | #{ 181 | block => Height, 182 | oui => Oui, 183 | owner => Owner, 184 | nonce => Nonce, 185 | addresses => Addresses, 186 | subnets => lists:foldl(MkSubnet, [], Subnets) 187 | }. 188 | -------------------------------------------------------------------------------- /src/bh_route_snapshots.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_snapshots). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_snapshot_list/1]). 11 | 12 | -define(S_SNAPSHOT_LIST, "snapshot_list"). 13 | -define(S_SNAPSHOT_LIST_BEFORE, "snapshot_list_before"). 14 | -define(S_SNAPSHOT_CURRENT, "snapshot_current"). 15 | 16 | prepare_conn(_Conn) -> 17 | SnapshotListLimit = integer_to_list(?SNAPSHOT_LIST_LIMIT), 18 | Loads = [ 19 | {?S_SNAPSHOT_LIST, 20 | {snapshot_list_base, 21 | [ 22 | {scope, ""}, 23 | {limit, "limit " ++ SnapshotListLimit} 24 | ], 25 | []}}, 26 | {?S_SNAPSHOT_LIST_BEFORE, 27 | {snapshot_list_base, 28 | [ 29 | {scope, snapshot_list_before_scope}, 30 | {limit, "limit " ++ SnapshotListLimit} 31 | ], 32 | [int8]}}, 33 | {?S_SNAPSHOT_CURRENT, 34 | {snapshot_list_base, 35 | [ 36 | {scope, ""}, 37 | {limit, "limit 1"} 38 | ], 39 | []}} 40 | ], 41 | bh_db_worker:load_from_eql("snapshots.sql", Loads). 42 | 43 | handle('GET', [], Req) -> 44 | Args = ?GET_ARGS([cursor], Req), 45 | Result = get_snapshot_list(Args), 46 | CacheTime = get_snapshot_list_cache_time(Result), 47 | ?MK_RESPONSE(Result, CacheTime); 48 | handle('GET', [<<"current">>], _Req) -> 49 | Result = get_snapshot_current(), 50 | ?MK_RESPONSE(Result, block_time); 51 | handle(_, _, _Req) -> 52 | ?RESPONSE_404. 53 | 54 | get_snapshot_list([{cursor, undefined}]) -> 55 | {ok, _, Results} = ?PREPARED_QUERY(?S_SNAPSHOT_LIST, []), 56 | {ok, snapshot_list_to_json(Results), mk_snapshot_list_cursor(Results)}; 57 | get_snapshot_list([{cursor, Cursor}]) -> 58 | case ?CURSOR_DECODE(Cursor) of 59 | {ok, #{<<"before">> := Before}} -> 60 | {ok, _, Results} = ?PREPARED_QUERY(?S_SNAPSHOT_LIST_BEFORE, [Before]), 61 | {ok, snapshot_list_to_json(Results), mk_snapshot_list_cursor(Results)}; 62 | _ -> 63 | {error, badarg} 64 | end. 65 | 66 | get_snapshot_current() -> 67 | case ?PREPARED_QUERY(?S_SNAPSHOT_CURRENT, []) of 68 | {ok, _, [Result]} -> 69 | {ok, snapshot_to_json(Result)}; 70 | _ -> 71 | {error, not_found} 72 | end. 73 | 74 | mk_snapshot_list_cursor(Results) when is_list(Results) -> 75 | case length(Results) of 76 | 0 -> 77 | undefined; 78 | _ -> 79 | case lists:last(Results) of 80 | {Height, _SnapshotHash} when Height == 1 -> undefined; 81 | {Height, _SnapshotHash} -> #{before => Height} 82 | end 83 | end.
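%% Paging sketch for mk_snapshot_list_cursor/1 above (heights are
%% hypothetical): a full page ending in {800000, Hash} yields the cursor
%% map #{before => 800000}, and the next request scopes the query to
%% blocks below that height; once the last row reaches block 1 no cursor
%% is emitted and paging stops.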
84 | 85 | get_snapshot_list_cache_time({ok, _, undefined}) -> 86 | %% End of cursor data 87 | infinity; 88 | get_snapshot_list_cache_time({ok, _, _}) -> 89 | block_time; 90 | get_snapshot_list_cache_time(_) -> 91 | never. 92 | 93 | %% 94 | %% json 95 | %% 96 | 97 | snapshot_list_to_json(Results) -> 98 | lists:map(fun snapshot_to_json/1, Results). 99 | 100 | snapshot_to_json({Height, Hash}) -> 101 | #{ 102 | block => Height, 103 | snapshot_hash => Hash 104 | }. 105 | -------------------------------------------------------------------------------- /src/bh_route_state_channels.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_state_channels). 2 | 3 | -export([prepare_conn/1, handle/3]). 4 | -export([get_state_channel_stats/0]). 5 | 6 | -behavior(bh_route_handler). 7 | -behavior(bh_db_worker). 8 | 9 | -include("bh_route_handler.hrl"). 10 | 11 | -define(S_STATE_CHANNEL_STATS, "state_channels_stats"). 12 | 13 | prepare_conn(_Conn) -> 14 | Loads = [?S_STATE_CHANNEL_STATS], 15 | bh_db_worker:load_from_eql("state_channels.sql", Loads). 16 | 17 | handle('GET', [], Req) -> 18 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 19 | Result = bh_route_txns:get_txn_list(Args, ?STATE_CHANNEL_TXN_LIST_LIMIT), 20 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 21 | ?MK_RESPONSE(Result, CacheTime); 22 | handle('GET', [<<"stats">>], _Req) -> 23 | ?MK_RESPONSE( 24 | {ok, 25 | mk_stats_from_state_channel_results( 26 | get_state_channel_stats() 27 | )}, 28 | block_time 29 | ); 30 | handle(_Method, _Path, _Req) -> 31 | ?RESPONSE_404. 32 | 33 | add_filter_types(Args) -> 34 | Args ++ [{filter_types, [<<"state_channel_close_v1">>]}]. 35 | 36 | get_state_channel_stats() -> 37 | bh_cache:get( 38 | {?MODULE, state_channel_stats}, 39 | fun() -> 40 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_STATE_CHANNEL_STATS, []), 41 | Data 42 | end 43 | ). 44 | 45 | mk_stats_from_state_channel_results({ok, [{LastDayChallenges}]}) -> 46 | #{ 47 | last_day => ?PARSE_INT(LastDayChallenges) 48 | }. 49 | -------------------------------------------------------------------------------- /src/bh_route_stats.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_stats). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([ 11 | get_stats/0, 12 | get_count_stats/0, 13 | get_token_supply_stats/0, 14 | mk_stats_from_time_results/1, 15 | mk_stats_from_challenge_results/1 16 | ]). 17 | 18 | -define(S_STATS_TOKEN_SUPPLY, "stats_token_supply"). 19 | -define(S_STATS_COUNTS, "stats_counts"). 20 | 21 | prepare_conn(_Conn) -> 22 | Loads = [ 23 | ?S_STATS_COUNTS, 24 | ?S_STATS_TOKEN_SUPPLY 25 | ], 26 | bh_db_worker:load_from_eql("stats.sql", Loads). 27 | 28 | handle('GET', [], _Req) -> 29 | ?MK_RESPONSE(get_stats(), {block_time, 5}); 30 | handle('GET', [<<"counts">>], _Req) -> 31 | ?MK_RESPONSE( 32 | {ok, 33 | mk_stats_from_counts_results( 34 | get_count_stats() 35 | )}, 36 | block_time 37 | ); 38 | handle('GET', [<<"token_supply">>], Req) -> 39 | Args = ?GET_ARGS([format], Req), 40 | get_token_supply(Args, block_time); 41 | handle(_, _, _Req) -> 42 | ?RESPONSE_404. 43 | 44 | get_count_stats() -> 45 | bh_cache:get( 46 | {?MODULE, count_stats}, 47 | fun() -> 48 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_STATS_COUNTS, []), 49 | Data 50 | end 51 | ). 
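%% The bh_cache:get/2 pattern used here and in the other route modules
%% memoizes a query result under a per-module key; the fun only runs on a
%% cache miss. A minimal sketch of the pattern (the key name is
%% hypothetical):
%%
%%   bh_cache:get(
%%       {?MODULE, some_cached_stat},
%%       fun() ->
%%           {ok, _Columns, Data} = ?PREPARED_QUERY(?S_STATS_COUNTS, []),
%%           Data
%%       end
%%   ).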
52 | 53 | get_token_supply_stats() -> 54 | bh_cache:get( 55 | {?MODULE, token_supply_stats}, 56 | fun() -> 57 | {ok, _Columns, Data} = ?PREPARED_QUERY(?S_STATS_TOKEN_SUPPLY, []), 58 | Data 59 | end 60 | ). 61 | 62 | get_token_supply([{format, Format}], CacheTime) -> 63 | Result = get_token_supply_stats(), 64 | case Format of 65 | <<"raw">> -> 66 | Headers = [{<<"Content-Type">>, <<"text/plain">>}], 67 | {ok, bh_route_handler:add_cache_header(CacheTime, Headers), 68 | float_to_binary(mk_token_supply_from_result(Result), [{decimals, 8}, compact])}; 69 | _ -> 70 | ?MK_RESPONSE({ok, #{token_supply => mk_token_supply_from_result(Result)}}, CacheTime) 71 | end. 72 | 73 | get_stats() -> 74 | BlockTimeResults = bh_route_blocks:get_block_stats(), 75 | ElectionTimeResults = bh_route_elections:get_election_time_stats(), 76 | SupplyResult = get_token_supply_stats(), 77 | CountsResults = get_count_stats(), 78 | ChallengeResults = bh_route_challenges:get_challenge_stats(), 79 | 80 | {ok, #{ 81 | block_times => mk_stats_from_time_results(BlockTimeResults), 82 | election_times => mk_stats_from_time_results(ElectionTimeResults), 83 | token_supply => mk_token_supply_from_result(SupplyResult), 84 | counts => mk_stats_from_counts_results(CountsResults), 85 | challenge_counts => mk_stats_from_challenge_results(ChallengeResults) 86 | }}. 87 | 88 | mk_stats_from_time_results( 89 | {ok, [ 90 | {LastHrAvg, LastDayAvg, LastWeekAvg, LastMonthAvg, LastHrStddev, LastDayStddev, 91 | LastWeekStddev, LastMonthStddev} 92 | ]} 93 | ) -> 94 | #{ 95 | last_hour => #{avg => ?PARSE_FLOAT(LastHrAvg), stddev => ?PARSE_FLOAT(LastHrStddev)}, 96 | last_day => #{avg => ?PARSE_FLOAT(LastDayAvg), stddev => ?PARSE_FLOAT(LastDayStddev)}, 97 | last_week => #{avg => ?PARSE_FLOAT(LastWeekAvg), stddev => ?PARSE_FLOAT(LastWeekStddev)}, 98 | last_month => #{avg => ?PARSE_FLOAT(LastMonthAvg), stddev => ?PARSE_FLOAT(LastMonthStddev)} 99 | }. 100 | 101 | mk_stats_from_counts_results({ok, CountsResults}) -> 102 | maps:from_list(CountsResults). 103 | 104 | mk_stats_from_challenge_results({ok, [{ActiveChallenges, LastDayChallenges}]}) -> 105 | #{ 106 | active => ?PARSE_INT(ActiveChallenges), 107 | last_day => ?PARSE_INT(LastDayChallenges) 108 | }. 109 | 110 | mk_token_supply_from_result({ok, [{TokenSupply}]}) -> 111 | ?PARSE_FLOAT(TokenSupply). 112 | -------------------------------------------------------------------------------- /src/bh_route_vars.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_vars). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | -export([get_var_list/1, get_var/1]). 11 | 12 | -define(MAX_KEY_NAMES, 50). 13 | 14 | -define(S_VAR_LIST, "var_list"). 15 | -define(S_VAR_LIST_NAMED, "var_list_named"). 16 | -define(S_VAR, "var_get"). 17 | 18 | prepare_conn(_Conn) -> 19 | Loads = [ 20 | ?S_VAR_LIST, 21 | {?S_VAR_LIST_NAMED, {?S_VAR_LIST_NAMED, [], [{array, text}]}}, 22 | {?S_VAR, {?S_VAR, [], [text]}} 23 | ], 24 | bh_db_worker:load_from_eql( 25 | "vars.sql", 26 | Loads 27 | ). 
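%% var_to_json/1 below coerces each {Name, Type, Value} row into its JSON
%% representation. A few illustrative inputs (the variable names and
%% values are hypothetical):
%%
%%   {<<"monthly_reward">>, <<"integer">>, <<"500000">>}         => 500000
%%   {<<"poc_version">>, <<"float">>, <<"4.0">>}                 => 4.0
%%   {<<"var_gw_inactivity_threshold">>, <<"atom">>, <<"true">>} => true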
28 | 29 | handle('GET', [], Req) -> 30 | Args = ?GET_ARGS([keys], Req), 31 | ?MK_RESPONSE(get_var_list(Args), block_time); 32 | handle('GET', [<<"activity">>], Req) -> 33 | Args = add_filter_types(?GET_ARGS([cursor, max_time, min_time, limit], Req)), 34 | Result = bh_route_txns:get_txn_list(Args), 35 | CacheTime = bh_route_txns:get_txn_list_cache_time(Result), 36 | ?MK_RESPONSE(Result, CacheTime); 37 | handle('GET', [Name], _Req) -> 38 | ?MK_RESPONSE(get_var(Name), block_time); 39 | handle(_, _, _Req) -> 40 | ?RESPONSE_404. 41 | 42 | add_filter_types(Args) -> 43 | Args ++ [{filter_types, [<<"vars_v1">>]}]. 44 | 45 | get_var_list([{keys, undefined}]) -> 46 | {ok, _, Results} = ?PREPARED_QUERY(?S_VAR_LIST, []), 47 | {ok, var_list_to_json(Results)}; 48 | get_var_list([{keys, KeyBins}]) -> 49 | case binary:split(KeyBins, <<",">>, [global]) of 50 | [] -> 51 | {error, badarg}; 52 | KeyNames when length(KeyNames) > ?MAX_KEY_NAMES -> 53 | {error, badarg}; 54 | KeyNames -> 55 | {ok, _, Results} = ?PREPARED_QUERY(?S_VAR_LIST_NAMED, [KeyNames]), 56 | {ok, var_list_to_json(Results)} 57 | end. 58 | 59 | get_var(Name) -> 60 | case ?PREPARED_QUERY(?S_VAR, [Name]) of 61 | {ok, _, [Result]} -> 62 | {_, Value} = var_to_json(Result), 63 | {ok, Value}; 64 | _ -> 65 | {error, not_found} 66 | end. 67 | 68 | %% 69 | %% json 70 | %% 71 | 72 | var_list_to_json(Results) -> 73 | maps:from_list(lists:map(fun var_to_json/1, Results)). 74 | 75 | var_to_json({Name, <<"integer">>, Value}) -> 76 | {Name, binary_to_integer(Value)}; 77 | var_to_json({Name, <<"float">>, Value}) -> 78 | {Name, binary_to_float(Value)}; 79 | var_to_json({Name, <<"atom">>, <<"true">>}) -> 80 | {Name, true}; 81 | var_to_json({Name, <<"atom">>, <<"false">>}) -> 82 | {Name, false}; 83 | var_to_json({Name, <<"atom">>, Value}) -> 84 | {Name, Value}; 85 | var_to_json({<<"staking_keys">> = Name, <<"binary">>, Value}) -> 86 | {Name, b64_to_keys(Value)}; 87 | var_to_json({<<"price_oracle_public_keys">> = Name, <<"binary">>, Value}) -> 88 | {Name, b64_to_keys(Value)}; 89 | var_to_json({<<"staking_keys_to_mode_mappings">> = Name, <<"binary">>, Value}) -> 90 | {Name, maps:from_list([{?BIN_TO_B58(Key), Mode} || {Key, Mode} <- b64_to_props(Value, 8)])}; 91 | var_to_json({<<"hip17_res_", _/binary>> = Name, <<"binary">>, Value}) -> 92 | {Name, [binary_to_integer(N) || N <- string:split(?B64_TO_BIN(Value), ",", all)]}; 93 | var_to_json({Name, <<"binary">>, Value}) -> 94 | {Name, Value}. 95 | 96 | b64_to_keys(Value) -> 97 | Bin = ?B64_TO_BIN(Value), 98 | BinKeys = [Key || <<Len:8/unsigned-integer, Key:Len/binary>> <= Bin], 99 | [?BIN_TO_B58(Key) || Key <- BinKeys]. 100 | 101 | b64_to_props(B64Value, Size) -> 102 | Bin = ?B64_TO_BIN(B64Value), 103 | [ 104 | {Key, Value} 105 | || <<KeySize:8/unsigned-integer, Key:KeySize/binary, 106 | Value:Size/binary>> <= Bin 107 | ]. 108 | -------------------------------------------------------------------------------- /src/bh_route_versions.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_versions). 2 | 3 | -behavior(bh_route_handler). 4 | -behavior(bh_db_worker). 5 | 6 | -include("bh_route_handler.hrl"). 7 | 8 | -export([prepare_conn/1, handle/3]). 9 | %% Utilities 10 | 11 | prepare_conn(_Conn) -> 12 | #{}. 13 | 14 | handle('GET', [], _Req) -> 15 | ?MK_RESPONSE(get_versions(), {block_time, 60}); 16 | handle(_, _, _Req) -> 17 | ?RESPONSE_404. 18 | 19 | get_versions() -> 20 | {ok, HttpVersion} = application:get_key(blockchain_http, vsn), 21 | Versions = #{ 22 | blockchain_http => list_to_binary(HttpVersion) 23 | }, 24 | {ok, Versions}.
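%% With the vsn declared in blockchain_http.app.src this returns, for
%% example, {ok, #{blockchain_http => <<"0.1.0">>}}.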
25 | -------------------------------------------------------------------------------- /src/bh_routes.erl: -------------------------------------------------------------------------------- 1 | -module(bh_routes). 2 | 3 | -export([handle/2, handle_event/3]). 4 | 5 | -include_lib("elli/include/elli.hrl"). 6 | 7 | -include("bh_route_handler.hrl"). 8 | 9 | -behaviour(elli_handler). 10 | 11 | handle(Req, _Args) -> 12 | %% Delegate to our handler function 13 | handle(Req#req.method, elli_request:path(Req), Req). 14 | 15 | handle(Method, [<<"v1">>, <<"stats">> | Tail], Req) -> 16 | bh_route_stats:handle(Method, Tail, Req); 17 | handle(Method, [<<"v1">>, <<"blocks">> | Tail], Req) -> 18 | bh_route_blocks:handle(Method, Tail, Req); 19 | handle(Method, [<<"v1">>, <<"accounts">> | Tail], Req) -> 20 | bh_route_accounts:handle(Method, Tail, Req); 21 | handle(Method, [<<"v1">>, <<"hotspots">> | Tail], Req) -> 22 | bh_route_hotspots:handle(Method, Tail, Req); 23 | handle(Method, [<<"v1">>, <<"transactions">> | Tail], Req) -> 24 | bh_route_txns:handle(Method, Tail, Req); 25 | handle(Method, [<<"v1">>, <<"pending_transactions">> | Tail], Req) -> 26 | bh_route_pending_txns:handle(Method, Tail, Req); 27 | handle(Method, [<<"v1">>, <<"elections">> | Tail], Req) -> 28 | bh_route_elections:handle(Method, Tail, Req); 29 | handle(Method, [<<"v1">>, <<"challenges">> | Tail], Req) -> 30 | bh_route_challenges:handle(Method, Tail, Req); 31 | handle(Method, [<<"v1">>, <<"oracle">> | Tail], Req) -> 32 | bh_route_oracle:handle(Method, Tail, Req); 33 | handle(Method, [<<"v1">>, <<"vars">> | Tail], Req) -> 34 | bh_route_vars:handle(Method, Tail, Req); 35 | handle(Method, [<<"v1">>, <<"snapshots">> | Tail], Req) -> 36 | bh_route_snapshots:handle(Method, Tail, Req); 37 | handle(Method, [<<"v1">>, <<"cities">> | Tail], Req) -> 38 | bh_route_cities:handle(Method, Tail, Req); 39 | handle(Method, [<<"v1">>, <<"ouis">> | Tail], Req) -> 40 | bh_route_ouis:handle(Method, Tail, Req); 41 | handle(Method, [<<"v1">>, <<"locations">> | Tail], Req) -> 42 | bh_route_locations:handle(Method, Tail, Req); 43 | handle(Method, [<<"v1">>, <<"rewards">> | Tail], Req) -> 44 | bh_route_rewards:handle(Method, Tail, Req); 45 | handle(Method, [<<"v1">>, <<"dc_burns">> | Tail], Req) -> 46 | bh_route_dc_burns:handle(Method, Tail, Req); 47 | handle(Method, [<<"v1">>, <<"state_channels">> | Tail], Req) -> 48 | bh_route_state_channels:handle(Method, Tail, Req); 49 | handle(Method, [<<"v1">>, <<"assert_locations">> | Tail], Req) -> 50 | bh_route_assert_locations:handle(Method, Tail, Req); 51 | handle(Method, [<<"v1">>, <<"validators">> | Tail], Req) -> 52 | bh_route_validators:handle(Method, Tail, Req); 53 | handle(Method, [<<"v1">>, <<"versions">> | Tail], Req) -> 54 | bh_route_versions:handle(Method, Tail, Req); 55 | handle('GET', [], _Req) -> 56 | {200, [], <<>>}; 57 | handle(_, _, _Req) -> 58 | ?RESPONSE_404. 59 | 60 | handle_event(request_throw, [Req, Exception, Stack], _Config) -> 61 | lager:error( 62 | "exception: ~p~nstack: ~p~nrequest: ~p~n", 63 | [Exception, Stack, elli_request:to_proplist(Req)] 64 | ), 65 | ok; 66 | handle_event(request_exit, [Req, Exit, Stack], _Config) -> 67 | lager:error( 68 | "exit: ~p~nstack: ~p~nrequest: ~p~n", 69 | [Exit, Stack, elli_request:to_proplist(Req)] 70 | ), 71 | ok; 72 | handle_event(request_error, [Req, Error, Stack], _Config) -> 73 | lager:error( 74 | "error: ~p~nstack: ~p~nrequest: ~p~n", 75 | [Error, Stack, elli_request:to_proplist(Req)] 76 | ), 77 | ok; 78 | handle_event(_, _, _) -> 79 | ok. 
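%% Dispatch sketch: a request for GET /v1/oracle/prices/current arrives as
%% handle('GET', [<<"v1">>, <<"oracle">>, <<"prices">>, <<"current">>], Req)
%% and is delegated to
%% bh_route_oracle:handle('GET', [<<"prices">>, <<"current">>], Req).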
80 | -------------------------------------------------------------------------------- /src/bh_sup.erl: -------------------------------------------------------------------------------- 1 | -module(bh_sup). 2 | 3 | -behaviour(supervisor). 4 | 5 | -include("bh_db_worker.hrl"). 6 | 7 | %% API 8 | -export([start_link/0]). 9 | 10 | %% Supervisor callbacks 11 | -export([init/1]). 12 | 13 | -define(SERVER, ?MODULE). 14 | 15 | -define(WORKER(I, Args), #{ 16 | id => I, 17 | start => {I, start_link, Args}, 18 | restart => permanent, 19 | shutdown => 5000, 20 | type => worker, 21 | modules => [I] 22 | }). 23 | 24 | start_link() -> 25 | supervisor:start_link({local, ?SERVER}, ?MODULE, []). 26 | 27 | init([]) -> 28 | SupFlags = #{ 29 | strategy => rest_for_one, 30 | intensity => 10, 31 | period => 10 32 | }, 33 | 34 | PoolNames = 35 | case os:getenv("DATABASE_RW_URL") of 36 | false -> [ro_pool]; 37 | _ -> [ro_pool, rw_pool] 38 | end, 39 | 40 | lists:foreach( 41 | fun(PoolName) -> 42 | {ok, {PoolOpts, DBOpts, DBHandlers}} = pool_opts(PoolName), 43 | init_pool(PoolName, PoolOpts, DBOpts, DBHandlers) 44 | end, 45 | PoolNames 46 | ), 47 | 48 | {ok, ListenPort} = application:get_env(blockchain_http, port), 49 | 50 | lager:info("Starting http listener on ~p", [ListenPort]), 51 | 52 | {ok, 53 | ThrottleConfig0 = #{ 54 | request_time := ThrottleRequestTime, 55 | request_count := ThrottleRequestCount, 56 | request_interval := ThrottleRequestInterval 57 | }} = application:get_env( 58 | blockchain_http, 59 | throttle 60 | ), 61 | ThrottleConfig = maps:merge(ThrottleConfig0, #{ 62 | request_time => list_to_integer( 63 | os:getenv("THROTTLE_REQUEST_TIME", integer_to_list(ThrottleRequestTime)) 64 | ), 65 | request_count => list_to_integer( 66 | os:getenv("THROTTLE_REQUEST_COUNT", integer_to_list(ThrottleRequestCount)) 67 | ), 68 | request_interval => list_to_integer( 69 | os:getenv("THROTTLE_REQUEST_INTERVAL", integer_to_list(ThrottleRequestInterval)) 70 | ), 71 | grace_time => list_to_integer( 72 | os:getenv( 73 | "THROTTLE_GRACE_TIME", 74 | integer_to_list(maps:get(grace_time, ThrottleConfig0, 0)) 75 | ) 76 | ), 77 | actor_header => list_to_binary( 78 | os:getenv( 79 | "THROTTLE_ACTOR_HEADER", 80 | binary_to_list(maps:get(actor_header, ThrottleConfig0, <<"X-Forwarded-For">>)) 81 | ) 82 | ) 83 | }), 84 | 85 | ElliConfig = [ 86 | {mods, [ 87 | {bh_middleware_throttle, ThrottleConfig}, 88 | % {bh_middleware_cursor, []}, 89 | {bh_middleware_cors, []}, 90 | {bh_routes, []} 91 | ]} 92 | ], 93 | ChildSpecs = [ 94 | ?WORKER(bh_cache, []), 95 | ?WORKER(bh_pool_watcher, [PoolNames]), 96 | ?WORKER(elli, [ 97 | [ 98 | {callback, elli_middleware}, 99 | {callback_args, ElliConfig}, 100 | {port, ListenPort} 101 | ] 102 | ]) 103 | ], 104 | 105 | {ok, {SupFlags, ChildSpecs}}. 
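%% Each throttle setting above can be overridden from the OS environment;
%% the maps:merge/2 prefers the environment value when the variable is set.
%% Hypothetical example: with THROTTLE_REQUEST_COUNT=200 exported,
%% request_count becomes 200 regardless of the value in sys.config.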
106 | 107 | pool_opts(ro_pool) -> 108 | {ok, DBOpts} = psql_migration:connection_opts([], {env, "DATABASE_RO_URL"}), 109 | {ok, DBHandlers} = application:get_env(blockchain_http, db_ro_handlers), 110 | {ok, PoolOpts0} = application:get_env(blockchain_http, db_ro_pool), 111 | PoolSize = 112 | case os:getenv("DATABASE_RO_POOL_SIZE") of 113 | false -> proplists:get_value(size, PoolOpts0, 100); 114 | SizeStr -> list_to_integer(SizeStr) 115 | end, 116 | PoolOpts = lists:keystore(size, 1, PoolOpts0, {size, PoolSize}), 117 | {ok, {PoolOpts, DBOpts, DBHandlers}}; 118 | pool_opts(rw_pool) -> 119 | {ok, PoolOpts} = application:get_env(blockchain_http, db_rw_pool), 120 | {ok, DBOpts} = psql_migration:connection_opts([], {env, "DATABASE_RW_URL"}), 121 | {ok, DBHandlers} = application:get_env(blockchain_http, db_rw_handlers), 122 | {ok, {PoolOpts, DBOpts, DBHandlers}}. 123 | 124 | init_pool(Name, PoolOpts, DBOpts, DBHandlers) -> 125 | ok = dispcount:start_dispatch( 126 | Name, 127 | {bh_db_worker, [ 128 | {db_opts, DBOpts}, 129 | {db_handlers, DBHandlers} 130 | ]}, 131 | [ 132 | {restart, permanent}, 133 | {shutdown, 4000}, 134 | {dispatch_mechanism, proplists:get_value(dispatch_mechanism, PoolOpts, hash)}, 135 | {watcher_type, proplists:get_value(watcher_type, PoolOpts, ets)}, 136 | {maxr, 10}, 137 | {maxt, 60}, 138 | {resources, proplists:get_value(size, PoolOpts, 5)} 139 | ] 140 | ), 141 | 142 | {ok, PoolInfo} = dispcount:dispatcher_info(Name), 143 | persistent_term:put(Name, PoolInfo), 144 | ok. 145 | -------------------------------------------------------------------------------- /src/bh_transaction_type.erl: -------------------------------------------------------------------------------- 1 | -module(bh_transaction_type). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [transaction_type]. 11 | 12 | encode(Atom, transaction_type, Choices) when is_atom(Atom) -> 13 | true = lists:member(Atom, Choices), 14 | atom_to_binary(Atom, utf8); 15 | encode(Binary, transaction_type, Choices) -> 16 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 17 | Binary. 18 | 19 | decode(Bin, transaction_type, Choices) -> 20 | Atom = binary_to_existing_atom(Bin, utf8), 21 | true = lists:member(Atom, Choices), 22 | Atom. 23 | -------------------------------------------------------------------------------- /src/bh_validator_status.erl: -------------------------------------------------------------------------------- 1 | -module(bh_validator_status). 2 | 3 | -behaviour(epgsql_codec). 4 | 5 | -export([init/2, names/0, encode/3, decode/3]). 6 | 7 | init(Choices, _) -> Choices. 8 | 9 | names() -> 10 | [validator_status]. 11 | 12 | encode(Atom, validator_status, Choices) when is_atom(Atom) -> 13 | lager:info("encoding validator_status type ~p ~p", [Atom, Choices]), 14 | true = lists:member(Atom, Choices), 15 | atom_to_binary(Atom, utf8); 16 | encode(Binary, validator_status, Choices) -> 17 | true = lists:member(binary_to_existing_atom(Binary, utf8), Choices), 18 | Binary. 19 | 20 | decode(Bin, validator_status, Choices) -> 21 | Atom = binary_to_existing_atom(Bin, utf8), 22 | true = lists:member(Atom, Choices), 23 | Atom.
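%% Round-trip sketch for this codec; the Choices list is installed via
%% epgsql:update_type_cache/2 and the atoms below are hypothetical:
%%
%%   encode(staked, validator_status, [staked, unstaked, cooldown])
%%     %% => <<"staked">>
%%   decode(<<"unstaked">>, validator_status, [staked, unstaked, cooldown])
%%     %% => unstaked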
24 | -------------------------------------------------------------------------------- /src/blockchain_http.app.src: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | {application, blockchain_http, [ 3 | {description, "A Helium blockchain HTTP API"}, 4 | {vsn, "0.1.0"}, 5 | {registered, []}, 6 | {mod, {blockchain_http_app, []}}, 7 | { 8 | applications, 9 | [ 10 | kernel, 11 | stdlib, 12 | syntax_tools, 13 | compiler, 14 | lager, 15 | epgsql, 16 | eql, 17 | dispcount, 18 | jiffy, 19 | base64url, 20 | h3, 21 | iso8601, 22 | envloader, 23 | psql_migration, 24 | helium_proto, 25 | libp2p_crypto, 26 | erl_angry_purple_tiger, 27 | elli, 28 | tools, 29 | recon, 30 | throttle 31 | ] 32 | }, 33 | {env, []}, 34 | {modules, []}, 35 | {licenses, ["Apache 2.0"]}, 36 | {links, []} 37 | ]}. 38 | -------------------------------------------------------------------------------- /src/blockchain_http_app.erl: -------------------------------------------------------------------------------- 1 | -module(blockchain_http_app). 2 | 3 | -behaviour(application). 4 | 5 | -export([start/2, prep_stop/1, stop/1]). 6 | 7 | start(_StartType, _StartArgs) -> 8 | bh_sup:start_link(). 9 | 10 | prep_stop(State) -> 11 | persistent_term:put(ro_pool, shutdown), 12 | persistent_term:put(rw_pool, shutdown), 13 | State. 14 | 15 | stop(_State) -> 16 | ok. 17 | -------------------------------------------------------------------------------- /src/epgsql_cmd_eequery.erl: -------------------------------------------------------------------------------- 1 | %% Single-roundtrip version of epgsql:equery/3 2 | %% 3 | %% It does a parse-bind-execute sequence in one network roundtrip. 4 | %% The cost is that the user must manually provide the datatype information for 5 | %% each bind-parameter. 6 | %% Another potential problem is that the connection will crash if epgsql does not 7 | %% have a codec for any of the result columns. Explicit type casting may save you 8 | %% in this case: `SELECT my_enum::text FROM my_tab'. Or you can implement the 9 | %% codec you need. 10 | %% 11 | %% Examples: 12 | %%
 13 | %% CREATE TABLE public.test_eequery
 14 | %% (
 15 | %%   id bigserial NOT NULL DEFAULT,
 16 | %%   my_blob bytea,
 17 | %%   my_text text,
 18 | %%   my_timestamp timestamp with time zone,
 19 | %%   my_json json,
 20 | %%   CONSTRAINT test_eequery_pk PRIMARY KEY (id)
 21 | %% )
 22 | %% 
23 | %%
 24 | %% > epgsql_cmd_eequery:run(
 25 | %%     C,
 26 | %%     "INSERT INTO test_eequery (my_blob, my_text, my_timestamp, my_json) VALUES ($1, $2, $3, $4)",
 27 | %%     [<<1,2,3>>, <<"hello">>, calendar:universal_time(), <<"{}">>],
 28 | %%     [bytea, text, timestamptz, json]).
 29 | %% {ok,1}
 30 | %% > epgsql_cmd_eequery:run(
 31 | %%     C,
 32 | %%     "SELECT * FROM test_eequery", [], []).
 33 | %% {ok,[#column{name = <<"id">>,type = int8,oid = 20,size = 8,
 34 | %%              modifier = -1,format = 1},
 35 | %%      #column{name = <<"my_blob">>,type = bytea,oid = 17,
 36 | %%              size = -1,modifier = -1,format = 1},
 37 | %%      #column{name = <<"my_text">>,type = text,oid = 25,size = -1,
 38 | %%              modifier = -1,format = 1},
 39 | %%      #column{name = <<"my_timestamp">>,type = timestamptz,
 40 | %%              oid = 1184,size = 8,modifier = -1,format = 1},
 41 | %%      #column{name = <<"my_json">>,type = json,oid = 114,
 42 | %%              size = -1,modifier = -1,format = 1}],
 43 | %%     [{1,
 44 | %%       <<1,2,3>>,
 45 | %%       <<"hello">>,
 46 | %%       {{2020,1,28},{16,25,26.0}},
 47 | %%       <<"{}">>}]}
 48 | %% 
49 | %% 50 | %% In case you provide the wrong datatype, it's nothing serious, but the server 51 | %% will return an #error{} (no type conversion attempt will be made): 52 | %%
 53 | %% > epgsql_cmd_eequery:run(C, "INSERT INTO test_eequery (my_json) VALUES ($1)", [<<"{}">>], [text]).
 54 | %% {error,#error{severity = error,code = <<"42804">>,
 55 | %%               codename = datatype_mismatch,
 56 | %%               message = <<"column \"my_json\" is of type json but expression is of type text">>,
 57 | %%               extra = [{file,<<"parse_target.c">>},
 58 | %%                        {hint,<<"You will need to rewrite or cast the expression.">>},
 59 | %%                        {line,<<"540">>},
 60 | %%                        {position,<<"44">>},
 61 | %%                        {routine,<<"transformAssignedExpr">>},
 62 | %%                        {severity,<<"ERROR">>}]}}
 63 | %% 
64 | %% 65 | %% You can still do explicit type conversion: 66 | %%
 67 | %% > epgsql_cmd_eequery:run(C, "INSERT INTO test_eequery (my_json) VALUES ($1::text::json)", [<<"{}">>], [text]).
 68 | %% {ok,1}
 69 | %% 
70 | %% But, if you are able to do that, it means you already know the type! So, why 71 | %% add extra complexity? 72 | %% 73 | %% https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY 74 | %% > Parse 75 | %% < ParseComplete 76 | %% > Bind 77 | %% < BindComplete 78 | %% > Describe 79 | %% < ParameterDescription 80 | %% < RowDescription | NoData 81 | %% > Execute 82 | %% < {DataRow* 83 | %% < CommandComplete} | EmptyQuery 84 | %% > Close 85 | %% < CloseComplete 86 | %% > Sync 87 | %% < ReadyForQuery 88 | -module(epgsql_cmd_eequery). 89 | 90 | -behaviour(epgsql_command). 91 | 92 | -export([init/1, execute/2, handle_message/4]). 93 | -export([run/4]). 94 | 95 | -export_type([response/0]). 96 | 97 | -type response() :: 98 | {ok, Count :: non_neg_integer(), Cols :: [epgsql:column()], Rows :: [tuple()]} | 99 | {ok, Count :: non_neg_integer()} | 100 | {ok, Cols :: [epgsql:column()], Rows :: [tuple()]} | 101 | {error, epgsql:query_error()}. 102 | 103 | -include_lib("epgsql/include/epgsql.hrl"). 104 | -include_lib("epgsql/include/protocol.hrl"). 105 | 106 | -record(eequery, { 107 | %% Data from client (init/1): 108 | sql :: [iodata()], 109 | param_types :: [[epgsql:epgsql_type()]], 110 | params :: [[any()]], 111 | %% Data from server: 112 | columns = [] :: [epgsql:column()], 113 | decoder :: undefined | epgsql_wire:row_decoder() 114 | }). 115 | 116 | -spec run( 117 | epgsql:connection(), 118 | epgsql:sql_query(), 119 | [epgsql:bind_param()], 120 | [epgsql:epgsql_type()] 121 | ) -> response(). 122 | run(C, SQL, Params, ParamTypes) -> 123 | epgsql_sock:sync_command(C, ?MODULE, {SQL, Params, ParamTypes}). 124 | 125 | init({batch, Queries}) -> 126 | lists:foldl( 127 | fun({SQL, Params, Types}, #eequery{sql = SQLs, param_types = Typess, params = Paramss}) -> 128 | #eequery{ 129 | sql = [SQL | SQLs], 130 | param_types = [Types | Typess], 131 | params = [Params | Paramss] 132 | } 133 | end, 134 | #eequery{sql = [], param_types = [], params = []}, 135 | lists:reverse(Queries) 136 | ); 137 | init({SQL, Params, Types}) -> 138 | #eequery{ 139 | sql = [SQL], 140 | param_types = [Types], 141 | params = [Params] 142 | }. 143 | 144 | execute(Sock, #eequery{sql = Sqls, param_types = ParamTypess, params = Paramss} = St) -> 145 | %% #statement{name = StatementName, columns = Columns} = Stmt, 146 | Codec = epgsql_sock:get_codec(Sock), 147 | Commands = lists:flatmap( 148 | fun({Sql, ParamTypes, Params}) -> 149 | BinParamTypes = epgsql_wire:encode_types(ParamTypes, Codec), 150 | TypedParams = lists:zip(ParamTypes, Params), 151 | BinParameters = epgsql_wire:encode_parameters(TypedParams, Codec), 152 | %% XXX: we ask server to send all columns in binary format. 153 | %% If we don't have a decoder for any of the result columns (eg, enums), 154 | %% connection process will crash 155 | BinAllBinaryResult = <<1:?int16, 1:?int16>>, 156 | [ 157 | {?PARSE, ["", 0, Sql, 0, BinParamTypes]}, 158 | {?BIND, ["", 0, "", 0, BinParameters, BinAllBinaryResult]}, 159 | {?DESCRIBE, [?PREPARED_STATEMENT, "", 0]}, 160 | {?EXECUTE, ["", 0, <<0:?int32>>]}, 161 | {?CLOSE, [?PREPARED_STATEMENT, "", 0]} 162 | ] 163 | end, 164 | lists:zip3(Sqls, ParamTypess, Paramss) 165 | ), 166 | epgsql_sock:send_multi( 167 | Sock, 168 | Commands ++ [{?SYNC, []}] 169 | ), 170 | {ok, Sock, St}. 
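%% For each {SQL, Types, Params} triple, execute/2 above queues the
%% extended-protocol messages Parse, Bind, Describe, Execute and Close,
%% then appends a single Sync for the whole batch; sending them in one
%% epgsql_sock:send_multi/2 call is what keeps the command to a single
%% network roundtrip.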
171 | 172 | handle_message(?PARSE_COMPLETE, <<>>, Sock, _State) -> 173 | {noaction, Sock}; 174 | handle_message(?PARAMETER_DESCRIPTION, _Bin, Sock, _State) -> 175 | %% Since ?BIND is executed before ?DESCRIBE, we will not get 176 | %% ?PARAMETER_DESCRIPTION message at all if user-provided types do not 177 | %% match server expectations (server will send #error{} instead). 178 | %% If they do match, there is no point parsing this message, because we 179 | %% already have all the same info in #eequery.param_types 180 | {noaction, Sock}; 181 | handle_message( 182 | ?ROW_DESCRIPTION, 183 | <<Count:?int16, Bin/binary>>, 184 | Sock, 185 | #eequery{} = St 186 | ) -> 187 | Codec = epgsql_sock:get_codec(Sock), 188 | Columns = epgsql_wire:decode_columns(Count, Bin, Codec), 189 | Columns2 = [Col#column{format = epgsql_wire:format(Col, Codec)} || Col <- Columns], 190 | Decoder = epgsql_wire:build_decoder(Columns2, Codec), 191 | Sock2 = epgsql_sock:notify(Sock, {columns, Columns2}), 192 | {noaction, Sock2, St#eequery{columns = Columns2, decoder = Decoder}}; 193 | handle_message(?NO_DATA, <<>>, Sock, #eequery{}) -> 194 | {noaction, Sock}; 195 | handle_message(?BIND_COMPLETE, <<>>, Sock, #eequery{}) -> 196 | {noaction, Sock}; 197 | handle_message( 198 | ?DATA_ROW, 199 | <<_Count:?int16, Bin/binary>>, 200 | Sock, 201 | #eequery{decoder = Decoder} = St 202 | ) -> 203 | Row = epgsql_wire:decode_data(Bin, Decoder), 204 | {add_row, Row, Sock, St}; 205 | handle_message( 206 | ?COMMAND_COMPLETE, 207 | Bin, 208 | Sock, 209 | #eequery{columns = Cols} = St 210 | ) -> 211 | Complete = epgsql_wire:decode_complete(Bin), 212 | Rows = epgsql_sock:get_rows(Sock), 213 | Result = 214 | case Complete of 215 | {_, Count} when Cols == [] -> 216 | {ok, Count}; 217 | {_, Count} -> 218 | {ok, Count, Cols, Rows}; 219 | _ -> 220 | {ok, Cols, Rows} 221 | end, 222 | {add_result, Result, {complete, Complete}, Sock, St}; 223 | handle_message(?EMPTY_QUERY, <<>>, Sock, St) -> 224 | {add_result, {ok, [], []}, {complete, empty}, Sock, St}; 225 | handle_message(?CLOSE_COMPLETE, _, Sock, _State) -> 226 | {noaction, Sock}; 227 | handle_message(?READY_FOR_QUERY, _Status, Sock, _State) -> 228 | case epgsql_sock:get_results(Sock) of 229 | [Result] -> 230 | {finish, Result, done, Sock}; 231 | [] -> 232 | {finish, done, done, Sock}; 233 | Results -> 234 | {finish, Results, done, Sock} 235 | end; 236 | handle_message(?ERROR, Error, Sock, St) -> 237 | Result = {error, Error}, 238 | {add_result, Result, Result, Sock, St}; 239 | handle_message(_, _, _, _) -> 240 | unknown. 241 | -------------------------------------------------------------------------------- /test/bh_route_accounts_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_accounts_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("bh_route_handler.hrl"). 6 | -include("ct_utils.hrl"). 7 | 8 | all() -> 9 | [ 10 | get_test, 11 | get_at_block_test, 12 | not_found_test, 13 | activity_count_test, 14 | activity_result_test, 15 | activity_low_block_test, 16 | activity_filter_no_result_test, 17 | roles_count_test, 18 | roles_result_test, 19 | roles_low_block_test, 20 | roles_filter_no_result_test, 21 | hotspots_test, 22 | ouis_test, 23 | stats_test, 24 | rewards_test, 25 | rewards_dupe_test, 26 | rewards_sum_test, 27 | rewards_buckets_test, 28 | rewards_block_test, 29 | rich_list_test 30 | ]. 31 | 32 | init_per_suite(Config) -> 33 | ?init_bh(Config). 34 | 35 | end_per_suite(Config) -> 36 | ?end_bh(Config).
37 | 38 | get_test(_Config) -> 39 | FetchAddress = "1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY", 40 | {ok, {_, _, Json}} = ?json_request(["/v1/accounts/", FetchAddress]), 41 | #{ 42 | <<"data">> := #{ 43 | <<"address">> := Address 44 | } 45 | } = Json, 46 | ?assertEqual(FetchAddress, binary_to_list(Address)), 47 | ok. 48 | 49 | get_at_block_test(_Config) -> 50 | MaxBlock = 708022, 51 | FetchAddress = "1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY", 52 | {ok, {_, _, Json}} = ?json_request([ 53 | "/v1/accounts/", 54 | FetchAddress, 55 | "?max_block=", 56 | integer_to_list(MaxBlock) 57 | ]), 58 | #{ 59 | <<"data">> := #{ 60 | <<"address">> := Address, 61 | <<"block">> := Block 62 | } 63 | } = Json, 64 | ?assertEqual(FetchAddress, binary_to_list(Address)), 65 | ?assertEqual(MaxBlock, Block), 66 | ok. 67 | 68 | not_found_test(_Config) -> 69 | ?assertMatch({error, {_, 404, _}}, ?json_request("/v1/accounts/no_account/no_path")), 70 | ok. 71 | 72 | activity_count_test(_Config) -> 73 | Account = "1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY", 74 | {ok, {_, _, Json}} = ?json_request([ 75 | "/v1/accounts/", 76 | Account, 77 | "/activity/count?filter_types=payment_v1" 78 | ]), 79 | #{ 80 | <<"data">> := Data 81 | } = Json, 82 | ?assertEqual(1, maps:size(Data)), 83 | ?assert(maps:get(<<"payment_v1">>, Data) >= 0), 84 | ok. 85 | 86 | activity_result_test(_Config) -> 87 | %% Test activity for an account. This may or may not have data 88 | %% returned. Expect a possibly empty array with a start and end block 89 | %% and a cursor for the next block range 90 | {ok, {_, _, Json}} = ?json_request( 91 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/activity" 92 | ), 93 | #{ 94 | <<"data">> := Data, 95 | <<"cursor">> := Cursor 96 | } = Json, 97 | {ok, #{<<"block">> := _}} = ?CURSOR_DECODE(Cursor), 98 | ?assert(length(Data) =< ?TXN_LIST_LIMIT). 99 | 100 | activity_low_block_test(_Config) -> 101 | GetCursor = #{ 102 | block => 50, 103 | min_block => 1, 104 | max_block => 50 105 | }, 106 | ct:pal("~p", [binary_to_list(?CURSOR_ENCODE(GetCursor))]), 107 | {ok, {_, _, Json}} = ?json_request( 108 | [ 109 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/activity", 110 | "?cursor=", 111 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 112 | ] 113 | ), 114 | #{<<"data">> := Data} = Json, 115 | %% This account has just one coinbase transaction in block 1 116 | ?assertEqual(1, length(Data)), 117 | ?assertEqual(undefined, maps:get(<<"cursor">>, Json, undefined)). 118 | 119 | activity_filter_no_result_test(_Config) -> 120 | %% We know this account has only a coinbase transaction in block 1 over that block range, 121 | %% so filtering for rewards should return no data. 122 | GetCursor = #{ 123 | block => 50, 124 | min_block => 1, 125 | max_block => 50, 126 | types => <<"rewards_v1">> 127 | }, 128 | {ok, {_, _, Json}} = ?json_request( 129 | [ 130 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/activity", 131 | "?cursor=", 132 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 133 | ] 134 | ), 135 | #{<<"data">> := Data} = Json, 136 | ?assertEqual(0, length(Data)), 137 | ok.
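%% Note (added commentary, not in the original suite): the cursors these
%% endpoints exchange appear to be unpadded base64url-encoded JSON objects;
%% for example, the block-transactions cursor used in bh_route_blocks_SUITE,
%% "eyJoYXNoIjoi...", decodes to {"hash": ...}. Assuming that encoding, a
%% low-block cursor like GetCursor above round-trips roughly as:
%%
%%   Cursor = ?CURSOR_ENCODE(#{block => 50, min_block => 1, max_block => 50}),
%%   {ok, #{<<"block">> := 50}} = ?CURSOR_DECODE(Cursor),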
138 | 139 | roles_count_test(_Config) -> 140 | Account = "1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY", 141 | {ok, {_, _, Json}} = ?json_request([ 142 | "/v1/accounts/", 143 | Account, 144 | "/roles/count?filter_types=payment_v1" 145 | ]), 146 | #{ 147 | <<"data">> := Data 148 | } = Json, 149 | ?assertEqual(1, maps:size(Data)), 150 | ?assert(maps:get(<<"payment_v1">>, Data) >= 0), 151 | ok. 152 | 153 | roles_result_test(_Config) -> 154 | %% Test roles for an account. This may or may not have data 155 | %% returned. Expect a possibly empty array with a start and end block 156 | %% and a cursor for the next block range 157 | {ok, {_, _, Json}} = ?json_request( 158 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/roles" 159 | ), 160 | #{ 161 | <<"data">> := Data, 162 | <<"cursor">> := Cursor 163 | } = Json, 164 | {ok, #{<<"block">> := _}} = ?CURSOR_DECODE(Cursor), 165 | ?assert(length(Data) =< ?TXN_LIST_LIMIT). 166 | 167 | roles_low_block_test(_Config) -> 168 | GetCursor = #{ 169 | block => 50, 170 | min_block => 1, 171 | max_block => 50 172 | }, 173 | {ok, {_, _, Json}} = ?json_request( 174 | [ 175 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/roles", 176 | "?cursor=", 177 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 178 | ] 179 | ), 180 | #{<<"data">> := Data} = Json, 181 | %% This account has just one coinbase transaction in block 1 182 | ?assertEqual(1, length(Data)), 183 | ?assertEqual(undefined, maps:get(<<"cursor">>, Json, undefined)). 184 | 185 | roles_filter_no_result_test(_Config) -> 186 | %% We know this account has only a coinbase transaction in block 1 over that block range, 187 | %% so filtering for rewards should return no data. 188 | GetCursor = #{ 189 | block => 50, 190 | min_block => 1, 191 | max_block => 50, 192 | types => <<"rewards_v1">> 193 | }, 194 | {ok, {_, _, Json}} = ?json_request( 195 | [ 196 | "/v1/accounts/1122ZQigQfeeyfSmH2i4KM4XMQHouBqK4LsTp33ppP3W2Knqh8gY/roles", 197 | "?cursor=", 198 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 199 | ] 200 | ), 201 | #{<<"data">> := Data} = Json, 202 | ?assertEqual(0, length(Data)), 203 | ok. 204 | 205 | hotspots_test(_Config) -> 206 | Account = "13YuCz3mZ55HZ6hJJvQHCZXGgE8ooe2CSvbtSHQR3m5vZ1EVCNZ", 207 | {ok, {_, _, Json}} = ?json_request(["/v1/accounts/", Account, "/hotspots"]), 208 | #{<<"data">> := Data} = Json, 209 | ?assert(length(Data) > 0), 210 | 211 | ok. 212 | 213 | ouis_test(_Config) -> 214 | Account = "13tyMLKRFYURNBQqLSqNJg9k41maP1A7Bh8QYxR13oWv7EnFooc", 215 | {ok, {_, _, Json}} = ?json_request(["/v1/accounts/", Account, "/ouis"]), 216 | #{<<"data">> := Data} = Json, 217 | ?assert(length(Data) > 0), 218 | 219 | ok. 220 | 221 | stats_test(_Config) -> 222 | Account = "13YuCz3mZ55HZ6hJJvQHCZXGgE8ooe2CSvbtSHQR3m5vZ1EVCNZ", 223 | {ok, {_, _, Json}} = ?json_request(["/v1/accounts/", Account, "/stats"]), 224 | #{<<"data">> := Data} = Json, 225 | lists:foreach( 226 | fun(Key) -> 227 | Entry = maps:get(Key, Data), 228 | ?assert(length(Entry) > 0) 229 | end, 230 | [ 231 | <<"last_day">>, 232 | <<"last_week">>, 233 | <<"last_month">> 234 | ] 235 | ).
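%% Note (added commentary, not in the original suite): in the bucketed sum
%% tests below, the number of `bucket=day` entries equals the number of days
%% in the min_time..max_time window. rewards_buckets_test expects 31 buckets
%% because 2020-08-27 to 2020-09-27 spans 31 days (4 remaining days of
%% August plus 27 days of September).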
236 | 237 | rewards_test(_Config) -> 238 | Account = "13YuCz3mZ55HZ6hJJvQHCZXGgE8ooe2CSvbtSHQR3m5vZ1EVCNZ", 239 | {ok, {_, _, Json}} = 240 | ?json_request([ 241 | "/v1/accounts/", 242 | Account, 243 | "/rewards?max_time=2020-08-27&min_time=2020-05-27" 244 | ]), 245 | #{<<"data">> := Data} = Json, 246 | ?assert(length(Data) >= 0), 247 | 248 | case maps:get(<<"cursor">>, Json, undefined) of 249 | undefined -> 250 | ok; 251 | Cursor -> 252 | {ok, {_, _, CursorJson}} = 253 | ?json_request([ 254 | "/v1/accounts/", 255 | Account, 256 | "/rewards?cursor=", 257 | Cursor 258 | ]), 259 | #{<<"data">> := CursorData} = CursorJson, 260 | ?assert(length(CursorData) >= 0) 261 | end, 262 | 263 | ok. 264 | 265 | rewards_block_test(_Config) -> 266 | Account = "13ESLoXiie3eXoyitxryNQNamGAnJjKt2WkiB4gNq95knxAiGEp", 267 | {ok, {_, _, Json}} = 268 | ?json_request([ 269 | "/v1/accounts/", 270 | Account, 271 | "/rewards/1167207" 272 | ]), 273 | #{<<"data">> := Data, <<"cursor">> := Cursor} = Json, 274 | ?assert(length(Data) >= 0), 275 | 276 | {ok, {_, _, CursorJson}} = 277 | ?json_request([ 278 | "/v1/accounts/", 279 | Account, 280 | "/rewards/1167207?cursor=", 281 | Cursor 282 | ]), 283 | #{<<"data">> := CursorData} = CursorJson, 284 | ?assert(length(CursorData) >= 0), 285 | 286 | ok. 287 | 288 | rewards_dupe_test(_Config) -> 289 | % This account and time range was reported to have a duplicate between the 290 | % two pages that make it up. This test ensures that the fetched 291 | % transactions don't have a duplicate in them. 292 | % 293 | % NOTE: This test relies on the page size being 100 294 | Account = "14cWRnJk7oZDeRSfo9yS3jpWfQmqZxNEzxQoygkoPBixLVSQaTg", 295 | MaxTime = "2020-06-16T07:00:00", 296 | MinTime = "2020-06-05T00:00:00", 297 | Base = ["/v1/accounts/", Account, "/rewards"], 298 | TxnHashes = ct_utils:fold_json_request( 299 | fun(E, Acc) -> Acc ++ [maps:get(<<"hash">>, E)] end, 300 | Base, 301 | ?json_request([Base, "?min_time=", MinTime, "&max_time=", MaxTime]), 302 | [] 303 | ), 304 | % No duplicates 305 | DedupedSize = sets:size(sets:from_list(TxnHashes)), 306 | ?assertEqual(DedupedSize, length(TxnHashes)), 307 | % No missing 308 | {ok, DirectList, undefined} = bh_route_rewards:get_full_reward_list( 309 | {account, Account}, 310 | [{max_time, MaxTime}, {min_time, MinTime}] 311 | ), 312 | ?assertEqual(DedupedSize, length(DirectList)), 313 | ok. 314 | 315 | rewards_sum_test(_Config) -> 316 | Account = "13YuCz3mZ55HZ6hJJvQHCZXGgE8ooe2CSvbtSHQR3m5vZ1EVCNZ", 317 | {ok, {_, _, Json}} = 318 | ?json_request([ 319 | "/v1/accounts/", 320 | Account, 321 | "/rewards/sum?max_time=2020-08-27&min_time=2020-07-27" 322 | ]), 323 | #{<<"data">> := #{<<"sum">> := Sum}} = Json, 324 | ?assert(Sum >= 0), 325 | 326 | ok. 327 | 328 | rewards_buckets_test(_Config) -> 329 | Account = "13YuCz3mZ55HZ6hJJvQHCZXGgE8ooe2CSvbtSHQR3m5vZ1EVCNZ", 330 | {ok, {_, _, Json}} = 331 | ?json_request([ 332 | "/v1/accounts/", 333 | Account, 334 | "/rewards/sum?max_time=2020-09-27&min_time=2020-08-27&bucket=day" 335 | ]), 336 | #{<<"data">> := Data} = Json, 337 | ?assertEqual(31, length(Data)), 338 | 339 | ok. 340 | 341 | rich_list_test(_Config) -> 342 | {ok, {_, _, Json}} = ?json_request("/v1/accounts/rich"), 343 | #{<<"data">> := List} = Json, 344 | ?assert(length(List) > 0), 345 | 346 | {ok, {_, _, LimitJson}} = ?json_request("/v1/accounts/rich?limit=10"), 347 | #{<<"data">> := LimitList} = LimitJson, 348 | ?assert(length(LimitList) > 0), 349 | ?assert(length(LimitList) =< 10), 350 | 351 | ok.
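%% Illustrative sketch (added commentary, not part of the original suite):
%% the page-walking pattern that rewards_dupe_test above relies on, via
%% ct_utils:fold_json_request/4. The fold re-requests the same base path
%% with each returned "cursor" value until the server stops including one,
%% so the accumulator ends up spanning every page of the result:
%%
%%   Base = ["/v1/accounts/", Account, "/rewards"],
%%   Hashes = ct_utils:fold_json_request(
%%       fun(Txn, Acc) -> [maps:get(<<"hash">>, Txn) | Acc] end,
%%       Base,
%%       ?json_request([Base, "?min_time=", MinTime, "&max_time=", MaxTime]),
%%       []
%%   ),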
352 | -------------------------------------------------------------------------------- /test/bh_route_assert_locations_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_assert_locations_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | -include("../src/bh_route_handler.hrl"). 6 | 7 | all() -> 8 | [ 9 | list_test 10 | ]. 11 | 12 | init_per_suite(Config) -> 13 | ?init_bh(Config). 14 | 15 | end_per_suite(Config) -> 16 | ?end_bh(Config). 17 | 18 | list_test(_Config) -> 19 | {ok, {_, _, FirstJson}} = ?json_request("/v1/assert_locations"), 20 | #{ 21 | <<"data">> := FirstTxns, 22 | <<"cursor">> := Cursor 23 | } = FirstJson, 24 | 25 | ?assert(length(FirstTxns) =< ?TXN_LIST_LIMIT), 26 | 27 | {ok, {_, _, NextJson}} = ?json_request(["/v1/assert_locations?cursor=", Cursor]), 28 | #{ 29 | <<"data">> := NextTxns, 30 | <<"cursor">> := _ 31 | } = NextJson, 32 | ?assert(length(NextTxns) =< ?TXN_LIST_LIMIT), 33 | 34 | ok. 35 | -------------------------------------------------------------------------------- /test/bh_route_blocks_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_blocks_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | 6 | all() -> 7 | [ 8 | height_test, 9 | height_by_time_test, 10 | block_for_height_test, 11 | block_for_invalid_height_test, 12 | block_for_height_txns_test, 13 | block_for_invalid_height_txns_test, 14 | block_for_height_txns_cursor_test, 15 | block_for_hash_test, 16 | block_for_hash_txns_test, 17 | block_for_invalid_hash_txns_test, 18 | block_stats_test 19 | ]. 20 | 21 | init_per_suite(Config) -> 22 | ?init_bh(Config). 23 | 24 | end_per_suite(Config) -> 25 | ?end_bh(Config). 26 | 27 | height_test(_Config) -> 28 | {ok, {_, _, Json}} = ?json_request("/v1/blocks/height"), 29 | ?assertMatch(#{<<"data">> := #{<<"height">> := _}}, Json), 30 | 31 | ok. 32 | 33 | height_by_time_test(_Config) -> 34 | {ok, {_, _, Json}} = ?json_request("/v1/blocks/height?max_time=2021-04-20"), 35 | ?assertMatch(#{<<"data">> := #{<<"height">> := _}}, Json), 36 | 37 | ok. 38 | 39 | block_for_height_test(_Config) -> 40 | {ok, {_, _, Json}} = ?json_request("/v1/blocks/1"), 41 | ?assertMatch( 42 | #{ 43 | <<"data">> := 44 | #{ 45 | <<"height">> := 1, 46 | <<"transaction_count">> := 70 47 | } 48 | }, 49 | Json 50 | ). 51 | 52 | block_for_invalid_height_test(_Config) -> 53 | ?assertMatch({error, {_, 400, _}}, ?json_request("/v1/blocks/not_int")), 54 | ok. 55 | 56 | block_for_height_txns_test(_Config) -> 57 | {ok, {_, _, Json}} = ?json_request("/v1/blocks/1/transactions"), 58 | #{<<"data">> := Txns, <<"cursor">> := _} = Json, 59 | ?assertEqual(50, length(Txns)). 60 | 61 | block_for_height_txns_cursor_test(_Config) -> 62 | {ok, {_, _, Json}} = ?json_request([ 63 | "/v1/blocks/1109085/transactions", 64 | "?cursor=eyJoYXNoIjoianhRX3BPakdQSGFoTWk5dzBPNW1oZDJ3WGlGTUp3Q3NsNnhyeDNvYTFHUSJ9" 65 | ]), 66 | #{<<"data">> := Txns, <<"cursor">> := _} = Json, 67 | ?assertEqual(50, length(Txns)). 68 | 69 | block_for_invalid_height_txns_test(_Config) -> 70 | ?assertMatch({error, {_, 404, _}}, ?json_request("/v1/blocks/0/transactions")), 71 | ?assertMatch({error, {_, 400, _}}, ?json_request("/v1/blocks/not_int/transactions")), 72 | ok. 
73 | 74 | block_for_hash_test(_Config) -> 75 | {ok, {_, _, Json}} = ?json_request( 76 | "/v1/blocks/hash/La6PuV80Ps9qTP0339Pwm64q3_deMTkv6JOo1251EJI" 77 | ), 78 | ?assertMatch( 79 | #{ 80 | <<"data">> := 81 | #{ 82 | <<"height">> := 1, 83 | <<"transaction_count">> := 70 84 | } 85 | }, 86 | Json 87 | ). 88 | 89 | block_for_hash_txns_test(_Config) -> 90 | {ok, {_, _, Json}} = ?json_request( 91 | "/v1/blocks/hash/La6PuV80Ps9qTP0339Pwm64q3_deMTkv6JOo1251EJI/transactions" 92 | ), 93 | #{<<"data">> := Txns, <<"cursor">> := _} = Json, 94 | ?assertEqual(50, length(Txns)). 95 | 96 | block_for_invalid_hash_txns_test(_Config) -> 97 | ?assertMatch({error, {_, 404, _}}, ?json_request("/v1/blocks/hash/no_such_hash/transactions")), 98 | ok. 99 | 100 | block_stats_test(_Config) -> 101 | {ok, {_, _, Json}} = ?json_request("/v1/blocks/stats"), 102 | #{<<"data">> := Stats} = Json, 103 | ?assert(maps:size(Stats) > 0), 104 | 105 | ok. 106 | -------------------------------------------------------------------------------- /test/bh_route_challenges_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_challenges_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | -include("../src/bh_route_handler.hrl"). 6 | 7 | all() -> [ 8 | challenge_list_test, 9 | hotspot_challenge_list_test, 10 | account_challenge_list_test 11 | ]. 12 | 13 | init_per_suite(Config) -> 14 | ?init_bh(Config). 15 | 16 | end_per_suite(Config) -> 17 | ?end_bh(Config). 18 | 19 | challenge_list_test(_Config) -> 20 | {ok, {_, _, FirstJson}} = ?json_request("/v1/challenges"), 21 | #{ <<"data">> := FirstTxns, 22 | <<"cursor">> := Cursor 23 | } = FirstJson, 24 | 25 | ?assert(length(FirstTxns) =< ?TXN_LIST_LIMIT), 26 | 27 | {ok, {_, _, NextJson}} = ?json_request(["/v1/challenges?cursor=", Cursor]), 28 | #{ <<"data">> := NextTxns, 29 | <<"cursor">> := _ 30 | } = NextJson, 31 | ?assert(length(NextTxns) =< ?TXN_LIST_LIMIT), 32 | 33 | ok. 34 | 35 | hotspot_challenge_list_test(_Config) -> 36 | {ok, {_, _, Json}} = ?json_request("/v1/hotspots/1182nyT3oXZPMztMSww4mzaaQXGXd5T7JwDfEth6obSCwwxxfsB/challenges"), 37 | #{ <<"data">> := Txns } = Json, 38 | ?assert(length(Txns) >= 0). 39 | 40 | account_challenge_list_test(_Config) -> 41 | {ok, {_, _, Json}} = ?json_request("/v1/accounts/13GCcF7oGb6waFBzYDMmydmXx4vNDUZGX4LE3QUh8eSBG53s5bx/challenges"), 42 | #{ <<"data">> := Txns } = Json, 43 | ?assert(length(Txns) >= 0). 44 | -------------------------------------------------------------------------------- /test/bh_route_cities_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_cities_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("bh_route_handler.hrl"). 6 | 7 | -include("ct_utils.hrl"). 8 | 9 | all() -> 10 | [ 11 | city_list_name_test, 12 | city_list_count_test, 13 | city_search_test, 14 | get_test, 15 | city_hotspots_test, 16 | invalid_city_hotspots_test 17 | ]. 18 | 19 | init_per_suite(Config) -> 20 | ?init_bh(Config). 21 | 22 | end_per_suite(Config) -> 23 | ?end_bh(Config). 24 | 25 | city_list_name_test(_Config) -> 26 | {ok, {_, _, Json}} = ?json_request(["/v1/cities"]), 27 | #{ 28 | <<"data">> := Data, 29 | <<"cursor">> := Cursor 30 | } = Json, 31 | ?assert(length(Data) >= 0), 32 | 33 | {ok, {_, _, NextJson}} = ?json_request(["/v1/cities?cursor=", Cursor]), 34 | #{<<"data">> := NextData} = NextJson, 35 | ?assert(length(NextData) >= 0), 36 | ok. 
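%% Note (added commentary, not in the original suite): city ids appear to be
%% unpadded base64 over the lower-cased long city, state and country names
%% concatenated together; the id fetched in get_test below,
%% "dG9yb250b29udGFyaW9jYW5hZGE", decodes to "torontoontariocanada". This is
%% also why invalid_city_hotspots_test further below expects a 404 rather
%% than a 400: any string decodes, the decoded value simply matches no city.
%%
%%   %% padding re-added for base64:decode/1
%%   <<"torontoontariocanada">> =
%%       base64:decode(<<"dG9yb250b29udGFyaW9jYW5hZGE=">>),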
37 | 38 | city_list_count_test(_Config) -> 39 | {ok, {_, _, Json}} = ?json_request(["/v1/cities?order=hotspot_count"]), 40 | #{ 41 | <<"data">> := Data, 42 | <<"cursor">> := Cursor 43 | } = Json, 44 | ?assert(length(Data) >= 0), 45 | 46 | {ok, {_, _, NextJson}} = ?json_request(["/v1/cities?cursor=", Cursor]), 47 | #{<<"data">> := NextData} = NextJson, 48 | ?assert(length(NextData) >= 0), 49 | ok. 50 | 51 | city_search_test(_Config) -> 52 | {ok, {_, _, Json}} = ?json_request(["/v1/cities?search=ma"]), 53 | #{ 54 | <<"data">> := Data, 55 | <<"cursor">> := Cursor 56 | } = Json, 57 | ?assert(length(Data) >= 0), 58 | 59 | {ok, {_, _, NextJson}} = ?json_request(["/v1/cities?cursor=", Cursor]), 60 | #{<<"data">> := NextData} = NextJson, 61 | ?assert(length(NextData) >= 0), 62 | ok. 63 | 64 | get_test(_Config) -> 65 | FetchId = "dG9yb250b29udGFyaW9jYW5hZGE", 66 | {ok, {_, _, Json}} = ?json_request(["/v1/cities/", FetchId]), 67 | #{ 68 | <<"data">> := #{ 69 | <<"city_id">> := CityId, 70 | <<"long_city">> := <<"Toronto">> 71 | } 72 | } = Json, 73 | ?assertEqual(FetchId, binary_to_list(CityId)), 74 | ok. 75 | 76 | city_hotspots_test(_Config) -> 77 | {ok, {_, _, Json}} = ?json_request([ 78 | "/v1/cities/c2FuIGZyYW5jaXNjb2NhbGlmb3JuaWF1bml0ZWQgc3RhdGVz/hotspots" 79 | ]), 80 | #{<<"data">> := Data} = Json, 81 | ?assert(length(Data) >= 0), 82 | 83 | ok. 84 | 85 | invalid_city_hotspots_test(_Config) -> 86 | %% Any string base64-decodes, but the decoded value makes the SQL lookup fail. 87 | %% We ensure here that this failure is always reported as a 404 not_found 88 | ?assertMatch({error, {_, 404, _}}, ?json_request("/v1/cities/not_city/hotspots")), 89 | 90 | ok. 91 | -------------------------------------------------------------------------------- /test/bh_route_dc_burns_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_dc_burns_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("ct_utils.hrl"). 6 | 7 | all() -> 8 | [ 9 | list_test, 10 | stats_test, 11 | sum_test, 12 | bucket_sum_test 13 | ]. 14 | 15 | init_per_suite(Config) -> 16 | ?init_bh(Config). 17 | 18 | end_per_suite(Config) -> 19 | ?end_bh(Config). 20 | 21 | list_test(_Config) -> 22 | {ok, {_, _, Json}} = ?json_request("/v1/dc_burns"), 23 | #{<<"data">> := Data, <<"cursor">> := Cursor} = Json, 24 | ?assert(length(Data) >= 0), 25 | 26 | {ok, {_, _, NextJson}} = ?json_request( 27 | [ 28 | "/v1/dc_burns", 29 | "?cursor=", 30 | Cursor 31 | ] 32 | ), 33 | #{<<"data">> := NextData} = NextJson, 34 | ?assert(length(NextData) >= 0), 35 | 36 | ok. 37 | 38 | stats_test(_Config) -> 39 | {ok, {_, _, Json}} = ?json_request("/v1/dc_burns/stats"), 40 | #{<<"data">> := #{<<"last_day">> := Value}} = Json, 41 | ?assert(Value >= 0), 42 | ok. 43 | 44 | sum_test(_Config) -> 45 | {ok, {_, _, Json}} = 46 | ?json_request(["/v1/dc_burns/sum?min_time=-2%20day"]), 47 | #{<<"data">> := Data} = Json, 48 | ?assert(maps:size(Data) > 0), 49 | 50 | ok. 51 | 52 | bucket_sum_test(_Config) -> 53 | {ok, {_, _, Json}=Res} = 54 | ?json_request(["/v1/dc_burns/sum?min_time=-7%20day&bucket=day"]), 55 | ct:pal("Res ~p", [Res]), 56 | #{<<"data">> := Data} = Json, 57 | ?assertEqual(7, length(Data)), 58 | 59 | ok. 60 | -------------------------------------------------------------------------------- /test/bh_route_elections_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_elections_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl").
5 | -include("../src/bh_route_handler.hrl"). 6 | 7 | all() -> [ 8 | election_list_test, 9 | hotspot_election_list_test, 10 | account_election_list_test 11 | ]. 12 | 13 | init_per_suite(Config) -> 14 | ?init_bh(Config). 15 | 16 | end_per_suite(Config) -> 17 | ?end_bh(Config). 18 | 19 | election_list_test(_Config) -> 20 | {ok, {_, _, FirstJson}} = ?json_request("/v1/elections"), 21 | #{ <<"data">> := FirstTxns, 22 | <<"cursor">> := Cursor 23 | } = FirstJson, 24 | 25 | ?assert(length(FirstTxns) =< ?TXN_LIST_LIMIT), 26 | 27 | {ok, {_, _, NextJson}} = ?json_request(["/v1/elections?cursor=", Cursor]), 28 | #{ <<"data">> := NextTxns, 29 | <<"cursor">> := _ 30 | } = NextJson, 31 | ?assert(length(NextTxns) =< ?TXN_LIST_LIMIT). 32 | 33 | hotspot_election_list_test(_Config) -> 34 | {ok, {_, _, Json}} = ?json_request("/v1/hotspots/1182nyT3oXZPMztMSww4mzaaQXGXd5T7JwDfEth6obSCwwxxfsB/elections"), 35 | #{ <<"data">> := Txns } = Json, 36 | ?assert(length(Txns) >= 0). 37 | 38 | account_election_list_test(_Config) -> 39 | {ok, {_, _, Json}} = ?json_request("/v1/accounts/13GCcF7oGb6waFBzYDMmydmXx4vNDUZGX4LE3QUh8eSBG53s5bx/elections"), 40 | #{ <<"data">> := Txns } = Json, 41 | ?assert(length(Txns) >= 0). 42 | -------------------------------------------------------------------------------- /test/bh_route_locations_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_locations_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("ct_utils.hrl"). 6 | 7 | all() -> 8 | [ 9 | get_test 10 | ]. 11 | 12 | init_per_suite(Config) -> 13 | ?init_bh(Config). 14 | 15 | end_per_suite(Config) -> 16 | ?end_bh(Config). 17 | 18 | get_test(_Config) -> 19 | {ok, {_, _, Json}} = ?json_request("/v1/locations/8c28347213117ff"), 20 | #{<<"data">> := _} = Json, 21 | 22 | ok. 23 | -------------------------------------------------------------------------------- /test/bh_route_oracle_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_oracle_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("ct_utils.hrl"). 6 | 7 | all() -> 8 | [ 9 | price_test, 10 | price_at_block_test, 11 | price_at_invalid_block_test, 12 | list_test, 13 | list_block_test, 14 | activity_list_test, 15 | price_predictions_test, 16 | price_stats_test 17 | ]. 18 | 19 | init_per_suite(Config) -> 20 | ?init_bh(Config). 21 | 22 | end_per_suite(Config) -> 23 | ?end_bh(Config). 24 | 25 | price_test(_Config) -> 26 | {ok, {_, _, Json}} = ?json_request("/v1/oracle/prices/current"), 27 | ?assertMatch( 28 | #{<<"data">> := #{<<"block">> := _, <<"price">> := _, <<"timestamp">> := _}}, 29 | Json 30 | ), 31 | 32 | ok. 33 | 34 | price_at_block_test(_Config) -> 35 | {ok, {_, _, Json}} = ?json_request("/v1/oracle/prices/366920"), 36 | ?assertMatch( 37 | #{<<"data">> := #{<<"block">> := _, <<"price">> := _, <<"timestamp">> := _}}, 38 | Json 39 | ), 40 | 41 | ok. 42 | 43 | price_at_invalid_block_test(_Config) -> 44 | ?assertMatch({error, {_, 400, _}}, ?json_request("/v1/oracle/prices/not_int")), 45 | 46 | ok. 47 | 48 | list_test(_Config) -> 49 | {ok, {_, _, Json}} = ?json_request("/v1/oracle/prices"), 50 | #{<<"data">> := Data} = Json, 51 | ?assert(length(Data) >= 0), 52 | 53 | ok. 54 | 55 | list_block_test(_Config) -> 56 | {ok, {_, _, Json}} = ?json_request("/v1/oracle/prices?max_block=500000"), 57 | #{<<"data">> := Data} = Json, 58 | ?assert(length(Data) >= 0), 59 | 60 | ok. 
61 | 62 | activity_list_test(_Config) -> 63 | {ok, {_, _, AllJson}} = ?json_request("/v1/oracle/activity"), 64 | #{<<"data">> := AllData} = AllJson, 65 | ?assert(length(AllData) >= 0), 66 | 67 | {ok, {_, _, OneJson}} = ?json_request( 68 | "/v1/oracle/13CFFcmPtMvNQCpWQRXCTqXPnXtcsibDWVwiQRKpUCt4nqtF7RE/activity" 69 | ), 70 | #{<<"data">> := OneData} = OneJson, 71 | ?assert(length(OneData) >= 0), 72 | 73 | ok. 74 | 75 | price_predictions_test(_Config) -> 76 | {ok, {_, _, AllJson}} = ?json_request("/v1/oracle/predictions"), 77 | #{<<"data">> := AllData} = AllJson, 78 | ?assert(length(AllData) >= 0), 79 | 80 | ok. 81 | 82 | price_stats_test(_Config) -> 83 | {ok, {_, _, Json}} = ?json_request("/v1/oracle/prices/stats?min_time=-30%20day"), 84 | #{<<"data">> := #{<<"max">> := Max}} = Json, 85 | ?assert(Max >= 0), 86 | ok. 87 | -------------------------------------------------------------------------------- /test/bh_route_oui_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_oui_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("ct_utils.hrl"). 6 | 7 | all() -> 8 | [ 9 | get_test, 10 | get_invalid_test, 11 | list_test, 12 | last_test, 13 | stats_test 14 | ]. 15 | 16 | init_per_suite(Config) -> 17 | ?init_bh(Config). 18 | 19 | end_per_suite(Config) -> 20 | ?end_bh(Config). 21 | 22 | get_test(_Config) -> 23 | {ok, {_, _, Json}} = ?json_request("/v1/ouis/1"), 24 | #{<<"data">> := _} = Json, 25 | 26 | ok. 27 | 28 | get_invalid_test(_Config) -> 29 | ?assertMatch({error, {_, 400, _}}, ?json_request("/v1/ouis/not_int")), 30 | 31 | ok. 32 | 33 | last_test(_Config) -> 34 | {ok, {_, _, Json}} = ?json_request("/v1/ouis/last"), 35 | #{<<"data">> := _} = Json, 36 | 37 | ok. 38 | 39 | list_test(_Config) -> 40 | {ok, {_, _, Json}} = ?json_request("/v1/ouis"), 41 | #{<<"data">> := Data} = Json, 42 | ?assert(length(Data) >= 0), 43 | 44 | ok. 45 | 46 | stats_test(_Config) -> 47 | {ok, {_, _, Json}} = ?json_request(["/v1/ouis/stats"]), 48 | #{<<"data">> := Data} = Json, 49 | ?assert(maps:size(Data) > 0), 50 | 51 | ok. 52 | -------------------------------------------------------------------------------- /test/bh_route_rewards_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_rewards_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("bh_route_handler.hrl"). 6 | -include("ct_utils.hrl"). 7 | 8 | all() -> 9 | [ 10 | rewards_sum_test, 11 | rewards_buckets_test 12 | ]. 13 | 14 | init_per_suite(Config) -> 15 | ?init_bh(Config). 16 | 17 | end_per_suite(Config) -> 18 | ?end_bh(Config). 19 | 20 | rewards_sum_test(_Config) -> 21 | {ok, {_, _, Json}} = 22 | ?json_request(["/v1/rewards/sum?min_time=-2%20day"]), 23 | #{<<"data">> := #{<<"sum">> := Sum}} = Json, 24 | ?assert(Sum >= 0), 25 | 26 | ok. 27 | 28 | rewards_buckets_test(_Config) -> 29 | {ok, {_, _, Json}} = 30 | ?json_request(["/v1/rewards/sum?min_time=-7%20day&bucket=day"]), 31 | #{<<"data">> := Data} = Json, 32 | ?assertEqual(7, length(Data)), 33 | 34 | ok. 35 | -------------------------------------------------------------------------------- /test/bh_route_snapshots_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_snapshots_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | -include("../src/bh_route_handler.hrl"). 6 | 7 | all() -> [ 8 | list_test, 9 | current_test 10 | ]. 
11 | 12 | init_per_suite(Config) -> 13 | ?init_bh(Config). 14 | 15 | end_per_suite(Config) -> 16 | ?end_bh(Config). 17 | 18 | list_test(_Config) -> 19 | {ok, {_, _, FirstJson}} = ?json_request("/v1/snapshots"), 20 | #{ <<"data">> := FirstTxns, 21 | <<"cursor">> := Cursor 22 | } = FirstJson, 23 | 24 | ?assert(length(FirstTxns) =< ?SNAPSHOT_LIST_LIMIT), 25 | 26 | {ok, {_, _, NextJson}} = ?json_request(["/v1/snapshots?cursor=", Cursor]), 27 | #{ <<"data">> := NextTxns 28 | } = NextJson, 29 | ?assert(length(NextTxns) =< ?SNAPSHOT_LIST_LIMIT). 30 | 31 | current_test(_Config) -> 32 | {ok, {_, _, Json}} = ?json_request("/v1/snapshots/current"), 33 | ?assertMatch(#{ <<"data">> := 34 | #{ <<"block">> := _, 35 | <<"snapshot_hash">> := _ 36 | }}, Json). 37 | 38 | -------------------------------------------------------------------------------- /test/bh_route_state_channels_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_state_channels_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | -include("../src/bh_route_handler.hrl"). 6 | 7 | all() -> 8 | [ 9 | list_test, 10 | stats_test 11 | ]. 12 | 13 | init_per_suite(Config) -> 14 | ?init_bh(Config). 15 | 16 | end_per_suite(Config) -> 17 | ?end_bh(Config). 18 | 19 | list_test(_Config) -> 20 | {ok, {_, _, FirstJson}} = ?json_request("/v1/state_channels"), 21 | #{ 22 | <<"data">> := FirstTxns, 23 | <<"cursor">> := Cursor 24 | } = FirstJson, 25 | 26 | ?assert(length(FirstTxns) =< ?STATE_CHANNEL_TXN_LIST_LIMIT), 27 | 28 | {ok, {_, _, NextJson}} = ?json_request(["/v1/state_channels?cursor=", Cursor]), 29 | #{ 30 | <<"data">> := NextTxns, 31 | <<"cursor">> := _ 32 | } = NextJson, 33 | ?assert(length(NextTxns) =< ?STATE_CHANNEL_TXN_LIST_LIMIT), 34 | 35 | ok. 36 | 37 | stats_test(_Config) -> 38 | {ok, {_, _, Json}} = ?json_request("/v1/state_channels/stats"), 39 | #{<<"data">> := #{<<"last_day">> := Value}} = Json, 40 | ?assert(Value >= 0), 41 | ok. 42 | -------------------------------------------------------------------------------- /test/bh_route_stats_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_stats_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("ct_utils.hrl"). 6 | -include("../src/bh_route_handler.hrl"). 7 | 8 | all() -> 9 | [ 10 | stats_test, 11 | token_supply_test 12 | ]. 13 | 14 | init_per_suite(Config) -> 15 | ?init_bh(Config). 16 | 17 | end_per_suite(Config) -> 18 | ?end_bh(Config). 19 | 20 | stats_test(_Config) -> 21 | {ok, {_, _, Json}} = ?json_request("/v1/stats"), 22 | #{ 23 | <<"data">> := #{ 24 | <<"block_times">> := _, 25 | <<"election_times">> := _, 26 | <<"token_supply">> := _ 27 | } 28 | } = Json, 29 | ok. 30 | 31 | token_supply_test(_Config) -> 32 | {ok, {_, _, Json}} = ?json_request("/v1/stats/token_supply"), 33 | #{<<"data">> := #{<<"token_supply">> := _}} = Json, 34 | 35 | {ok, {_, _, Value}} = ?json_request("/v1/stats/token_supply?format=raw"), 36 | ?assert(Value >= 0), 37 | ok. 38 | -------------------------------------------------------------------------------- /test/bh_route_txns_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_txns_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | 6 | all() -> 7 | [ 8 | get_test, 9 | get_actor_test 10 | ]. 11 | 12 | init_per_suite(Config) -> 13 | ?init_bh(Config). 
14 | 15 | end_per_suite(Config) -> 16 | ?end_bh(Config). 17 | 18 | get_test(_Config) -> 19 | TxnHash = "DmTG-Nbp6rLr4ERjqOvBcDC-94G4qsWg-Ii9BeL2qhY", 20 | {ok, {_, _, Json}} = ?json_request([ 21 | "/v1/transactions/", 22 | TxnHash 23 | ]), 24 | ?assertMatch(#{<<"data">> := _}, Json), 25 | ok. 26 | 27 | get_actor_test(_Config) -> 28 | TxnHash = "ks_-16PtsDQo7zWgdFoucKw4AA4pj5dvUzv17gSdU0o", 29 | Actor = "14fkPiFHh6oqecKhkjRWTtcJByy44t5S68SZQG6QExtStSQAZsr", 30 | {ok, {_, _, Json}} = ?json_request([ 31 | "/v1/transactions/", 32 | TxnHash, 33 | "?actor=", 34 | Actor 35 | ]), 36 | #{ 37 | <<"data">> := #{ 38 | <<"rewards">> := Rewards 39 | } 40 | } = Json, 41 | ?assertMatch( 42 | [ 43 | #{ 44 | <<"account">> := 45 | <<"14fkPiFHh6oqecKhkjRWTtcJByy44t5S68SZQG6QExtStSQAZsr">>, 46 | <<"gateway">> := 47 | <<"112jbxsCXERvuu6Xq3cjJUBpoKSwLsUuUGNF5wgh8cgj8Vsf5guV">>, 48 | <<"amount">> := _, 49 | <<"type">> := <<"poc_witnesses">> 50 | } 51 | ], 52 | Rewards 53 | ), 54 | ok. 55 | -------------------------------------------------------------------------------- /test/bh_route_validators_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_validators_SUITE). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include("bh_route_handler.hrl"). 6 | -include("ct_utils.hrl"). 7 | 8 | all() -> 9 | [ 10 | list_test, 11 | get_test, 12 | not_found_test, 13 | activity_count_test, 14 | activity_result_test, 15 | activity_low_block_test, 16 | activity_filter_no_result_test, 17 | roles_count_test, 18 | roles_result_test, 19 | roles_low_block_test, 20 | roles_filter_no_result_test, 21 | elected_test, 22 | elected_block_test, 23 | elected_invalid_block_test, 24 | elected_hash_test, 25 | rewards_test, 26 | rewards_all_sum_test, 27 | rewards_sum_test, 28 | rewards_buckets_test, 29 | name_test, 30 | name_search_test, 31 | stats_test 32 | ]. 33 | 34 | init_per_suite(Config) -> 35 | ?init_bh(Config). 36 | 37 | end_per_suite(Config) -> 38 | ?end_bh(Config). 39 | 40 | list_test(_Config) -> 41 | {ok, {_, _, Json}} = ?json_request(["/v1/validators"]), 42 | #{ 43 | <<"data">> := Data, 44 | <<"cursor">> := Cursor 45 | } = Json, 46 | ?assert(length(Data) > 0), 47 | 48 | {ok, {_, _, NextJson}} = ?json_request(["/v1/validators?cursor=", Cursor]), 49 | #{<<"data">> := NextData} = NextJson, 50 | ?assert(length(NextData) > 0), 51 | ok. 52 | 53 | get_test(_Config) -> 54 | FetchAddress = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 55 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/", FetchAddress]), 56 | #{ 57 | <<"data">> := #{ 58 | <<"address">> := Address 59 | } 60 | } = Json, 61 | ?assertEqual(FetchAddress, binary_to_list(Address)), 62 | ok. 63 | 64 | not_found_test(_Config) -> 65 | ?assertMatch({error, {_, 404, _}}, ?json_request("/v1/validators/no_address")), 66 | ok. 67 | 68 | activity_count_test(_Config) -> 69 | Validator = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 70 | {ok, {_, _, Json}} = ?json_request([ 71 | "/v1/validators/", 72 | Validator, 73 | "/activity/count?filter_types=validator_heartbeat_v1" 74 | ]), 75 | #{ 76 | <<"data">> := Data 77 | } = Json, 78 | ?assertEqual(1, maps:size(Data)), 79 | ?assert(maps:get(<<"validator_heartbeat_v1">>, Data) > 0), 80 | ok. 81 | 82 | activity_result_test(_Config) -> 83 | %% Test activity for a validator. This may or may not have data 84 | %% returned.
Expect a possibly empty array with a start and end block 85 | %% and a cursor for the next block range 86 | {ok, {_, _, Json}} = ?json_request( 87 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/activity" 88 | ), 89 | #{ 90 | <<"data">> := Data, 91 | <<"cursor">> := Cursor 92 | } = Json, 93 | {ok, #{<<"block">> := _}} = ?CURSOR_DECODE(Cursor), 94 | ?assert(length(Data) =< ?TXN_LIST_LIMIT). 95 | 96 | activity_low_block_test(_Config) -> 97 | GetCursor = #{ 98 | block => 50, 99 | max_block => 50, 100 | min_block => 1 101 | }, 102 | {ok, {_, _, Json}} = ?json_request( 103 | [ 104 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/activity", 105 | "?cursor=", 106 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 107 | ] 108 | ), 109 | #{<<"data">> := Data} = Json, 110 | %% This validator has no activity in the low blocks 111 | ?assertEqual(0, length(Data)), 112 | ?assertEqual(undefined, maps:get(<<"cursor">>, Json, undefined)). 113 | 114 | activity_filter_no_result_test(_Config) -> 115 | %% Filter for rewards, which the given validator should not have 116 | GetCursor = #{ 117 | block => 50, 118 | min_block => 1, 119 | max_block => 50, 120 | types => <<"rewards_v1">> 121 | }, 122 | {ok, {_, _, Json}} = ?json_request( 123 | [ 124 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/activity", 125 | "?cursor=", 126 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 127 | ] 128 | ), 129 | #{<<"data">> := Data} = Json, 130 | ?assertEqual(0, length(Data)), 131 | ok. 132 | 133 | roles_count_test(_Config) -> 134 | Validator = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 135 | {ok, {_, _, Json}} = ?json_request([ 136 | "/v1/validators/", 137 | Validator, 138 | "/roles/count?filter_types=validator_heartbeat_v1" 139 | ]), 140 | #{ 141 | <<"data">> := Data 142 | } = Json, 143 | ?assertEqual(1, maps:size(Data)), 144 | ?assert(maps:get(<<"validator_heartbeat_v1">>, Data) > 0), 145 | ok. 146 | 147 | roles_result_test(_Config) -> 148 | %% Test roles for a validator. This may or may not have data 149 | %% returned. Expect a possibly empty array with a start and end block 150 | %% and a cursor for the next block range 151 | {ok, {_, _, Json}} = ?json_request( 152 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/roles" 153 | ), 154 | #{ 155 | <<"data">> := Data, 156 | <<"cursor">> := Cursor 157 | } = Json, 158 | {ok, #{<<"block">> := _}} = ?CURSOR_DECODE(Cursor), 159 | ?assert(length(Data) =< ?TXN_LIST_LIMIT). 160 | 161 | roles_low_block_test(_Config) -> 162 | GetCursor = #{ 163 | block => 50, 164 | max_block => 50, 165 | min_block => 1 166 | }, 167 | {ok, {_, _, Json}} = ?json_request( 168 | [ 169 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/roles", 170 | "?cursor=", 171 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 172 | ] 173 | ), 174 | #{<<"data">> := Data} = Json, 175 | %% This validator has no activity in the low blocks 176 | ?assertEqual(0, length(Data)), 177 | ?assertEqual(undefined, maps:get(<<"cursor">>, Json, undefined)).
178 | 179 | roles_filter_no_result_test(_Config) -> 180 | %% Filter for rewards, which the given validator should not have 181 | GetCursor = #{ 182 | block => 50, 183 | min_block => 1, 184 | max_block => 50, 185 | types => <<"rewards_v1">> 186 | }, 187 | {ok, {_, _, Json}} = ?json_request( 188 | [ 189 | "/v1/validators/11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW/roles", 190 | "?cursor=", 191 | binary_to_list(?CURSOR_ENCODE(GetCursor)) 192 | ] 193 | ), 194 | #{<<"data">> := Data} = Json, 195 | ?assertEqual(0, length(Data)), 196 | ok. 197 | 198 | elected_test(_Config) -> 199 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/elected"]), 200 | #{ 201 | <<"data">> := Data 202 | } = Json, 203 | ?assert(length(Data) >= 0), 204 | 205 | ok. 206 | 207 | elected_block_test(_Config) -> 208 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/elected/910550"]), 209 | #{ 210 | <<"data">> := Data 211 | } = Json, 212 | ?assert(length(Data) > 0), 213 | 214 | ok. 215 | 216 | elected_invalid_block_test(_Config) -> 217 | ?assertMatch({error, {_, 400, _}}, ?json_request("/v1/validators/elected/not_int")), 218 | 219 | ok. 220 | 221 | elected_hash_test(_Config) -> 222 | {ok, {_, _, Json}} = 223 | ?json_request([ 224 | "/v1/validators/elected/hash/icmV8BofCxxVi1aS33zKsodrdngjgilBIsTGRYStd_s" 225 | ]), 226 | #{ 227 | <<"data">> := Data 228 | } = Json, 229 | ?assert(length(Data) > 0), 230 | 231 | ok. 232 | 233 | rewards_test(_Config) -> 234 | Validator = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 235 | {ok, {_, _, Json}} = 236 | ?json_request([ 237 | "/v1/validators/", 238 | Validator, 239 | "/rewards?max_time=2021-07-27&min_time=2021-06-27" 240 | ]), 241 | #{<<"data">> := Data} = Json, 242 | ?assert(length(Data) >= 0), 243 | 244 | case maps:get(<<"cursor">>, Json, undefined) of 245 | undefined -> 246 | ok; 247 | Cursor -> 248 | {ok, {_, _, CursorJson}} = 249 | ?json_request(["/v1/validators/", Validator, "/rewards?cursor=", Cursor]), 250 | #{<<"data">> := CursorData} = CursorJson, 251 | ?assert(length(CursorData) >= 0) 252 | end, 253 | ok. 254 | 255 | rewards_all_sum_test(_Config) -> 256 | {ok, {_, _, Json}} = 257 | ?json_request([ 258 | "/v1/validators/", 259 | "rewards/sum?max_time=2021-07-27&min_time=2021-07-20" 260 | ]), 261 | #{<<"data">> := #{<<"sum">> := Sum}} = Json, 262 | ?assert(Sum >= 0), 263 | 264 | ok. 265 | 266 | rewards_sum_test(_Config) -> 267 | Validator = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 268 | {ok, {_, _, Json}} = 269 | ?json_request([ 270 | "/v1/validators/", 271 | Validator, 272 | "/rewards/sum?max_time=2021-07-27&min_time=2021-07-20" 273 | ]), 274 | #{<<"data">> := #{<<"sum">> := Sum}} = Json, 275 | ?assert(Sum >= 0), 276 | 277 | ok. 278 | 279 | rewards_buckets_test(_Config) -> 280 | Validator = "11Q7Gmwq1fRe7pcEmBBtPatWWnyXrBtJLks65gGh89GLypbBaQW", 281 | {ok, {_, _, Json}} = 282 | ?json_request([ 283 | "/v1/validators/", 284 | Validator, 285 | "/rewards/sum?max_time=2021-07-27&min_time=2021-07-20&bucket=day" 286 | ]), 287 | #{<<"data">> := Data} = Json, 288 | ?assertEqual(7, length(Data)), 289 | 290 | ok. 291 | 292 | name_test(_Config) -> 293 | FetchName = "best-raisin-hare", 294 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/name/", FetchName]), 295 | #{ 296 | <<"data">> := Results 297 | } = Json, 298 | ?assert(length(Results) >= 1), 299 | ok.
300 | 301 | name_search_test(_Config) -> 302 | Search = "raisin", 303 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/name?search=", Search]), 304 | #{ 305 | <<"data">> := Results 306 | } = Json, 307 | ?assert(length(Results) >= 1), 308 | ok. 309 | 310 | stats_test(_Config) -> 311 | {ok, {_, _, Json}} = ?json_request(["/v1/validators/stats"]), 312 | #{ 313 | <<"data">> := #{ 314 | <<"active">> := _, 315 | <<"staked">> := _, 316 | <<"unstaked">> := _ 317 | } 318 | } = Json, 319 | ok. 320 | -------------------------------------------------------------------------------- /test/bh_route_vars_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_vars_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | 6 | all() -> 7 | [ 8 | name_test, 9 | list_test, 10 | list_named_test, 11 | activity_list_test 12 | ]. 13 | 14 | init_per_suite(Config) -> 15 | ?init_bh(Config). 16 | 17 | end_per_suite(Config) -> 18 | ?end_bh(Config). 19 | 20 | name_test(_Config) -> 21 | Names = [ 22 | % integer 23 | "poc_version", 24 | % float 25 | "securities_percent", 26 | % atom 27 | "predicate_callback_mod", 28 | % oracle keys 29 | "price_oracle_public_keys" 30 | ], 31 | lists:foreach( 32 | fun(Name) -> 33 | {ok, {_, _, Json}} = ?json_request(["/v1/vars/", Name]), 34 | ?assertMatch(#{<<"data">> := _}, Json) 35 | end, 36 | Names 37 | ), 38 | ok. 39 | 40 | list_test(_Config) -> 41 | {ok, {_, _, Json}} = ?json_request("/v1/vars"), 42 | #{<<"data">> := Data} = Json, 43 | ?assert(map_size(Data) >= 0), 44 | 45 | ok. 46 | 47 | list_named_test(_Config) -> 48 | {ok, {_, _, Json}} = ?json_request("/v1/vars?keys=poc_version,securities_percent"), 49 | #{<<"data">> := Data} = Json, 50 | ?assertEqual(2, map_size(Data)), 51 | 52 | ok. 53 | 54 | activity_list_test(_Config) -> 55 | {ok, {_, _, AllJson}} = ?json_request("/v1/vars/activity"), 56 | #{<<"data">> := AllData} = AllJson, 57 | ?assert(length(AllData) >= 0), 58 | 59 | ok. 60 | -------------------------------------------------------------------------------- /test/bh_route_versions_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(bh_route_versions_SUITE). 2 | -compile([nowarn_export_all, export_all]). 3 | 4 | -include("ct_utils.hrl"). 5 | 6 | all() -> 7 | [ 8 | get_test 9 | ]. 10 | 11 | init_per_suite(Config) -> 12 | ?init_bh(Config). 13 | 14 | end_per_suite(Config) -> 15 | ?end_bh(Config). 16 | 17 | get_test(_Config) -> 18 | {ok, {_, _, #{<<"data">> := Versions}}} = ?json_request("/v1/versions"), 19 | ?assertMatch(#{<<"blockchain_http">> := _}, Versions), 20 | ok. 21 | -------------------------------------------------------------------------------- /test/ct_utils.erl: -------------------------------------------------------------------------------- 1 | -module(ct_utils). 2 | 3 | -compile([nowarn_export_all, export_all]). 4 | 5 | -include_lib("common_test/include/ct.hrl"). 6 | 7 | init_bh(Config) -> 8 | application:ensure_all_started(lager), 9 | application:ensure_all_started(dispcount), 10 | application:ensure_all_started(throttle), 11 | application:load(blockchain_http), 12 | application:set_env(blockchain_http, throttle, #{ 13 | request_time => 10000000000, 14 | request_interval => 10, 15 | %% how many requests are allowed 16 | request_count => 1000 17 | }), 18 | {ok, Pid} = bh_sup:start_link(), 19 | unlink(Pid), 20 | [{bh_sup, Pid} | Config].
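%% Note on init_bh/1 above (added commentary): the throttle environment is
%% presumably set this generously so the rate-limiting middleware never
%% trips during a test run, and the supervisor is unlinked so that the
%% common_test process exiting between cases does not take the application
%% tree down with it; end_bh/1 below stops it explicitly instead.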
21 | 22 | end_bh(Config) -> 23 | Sup = ?config(bh_sup, Config), 24 | gen_server:stop(Sup), 25 | dispcount:stop_dispatch(ro_pool), 26 | dispcount:stop_dispatch(rw_pool), 27 | Config. 28 | 29 | request(Path) -> 30 | httpc:request(get, {lists:flatten(["http://localhost:8080", Path]), []}, [], [ 31 | {body_format, binary} 32 | ]). 33 | 34 | json_request(Path) -> 35 | case ?MODULE:request(Path) of 36 | {ok, {Status = {_, 200, _}, Headers, Body}} -> 37 | Json = jiffy:decode(Body, [return_maps]), 38 | {ok, {Status, Headers, Json}}; 39 | {ok, {Status, _Headers, _Body}} -> 40 | {error, Status}; 41 | {error, Error} -> 42 | {error, Error} 43 | end. 44 | 45 | fold_json_request(Fun, Base, {ok, {_, _, Json}}, Acc) -> 46 | NewAcc = lists:foldl(Fun, Acc, maps:get(<<"data">>, Json)), 47 | case maps:get(<<"cursor">>, Json, undefined) of 48 | undefined -> 49 | NewAcc; 50 | Cursor -> 51 | fold_json_request(Fun, Base, json_request([Base, "?cursor=", Cursor]), NewAcc) 52 | end. 53 | -------------------------------------------------------------------------------- /test/ct_utils.hrl: -------------------------------------------------------------------------------- 1 | -include_lib("common_test/include/ct.hrl"). 2 | -include_lib("eunit/include/eunit.hrl"). 3 | 4 | -define(init_bh(C), ct_utils:init_bh((C))). 5 | -define(end_bh(C), ct_utils:end_bh((C))). 6 | -define(json_request(P), ct_utils:json_request((P))). 7 | -define(request(P), ct_utils:request((P))). 8 | --------------------------------------------------------------------------------