├── .circleci └── config.yml ├── .dockerignore ├── .eslintignore ├── .eslintrc.js ├── .eslintrc.json ├── .github ├── dependabot.yml └── workflows │ ├── dependency-review.yml │ └── yarn_upgrade.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── charts └── polkadot-watcher-csv-exporter │ ├── Chart.yaml │ ├── templates │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── cronjob.yaml │ ├── secret.yaml │ ├── service.yaml │ └── statefulset.yaml │ └── values.yaml ├── config ├── main.sample.complete.yaml ├── main.sample.eraScanner.yaml ├── main.sample.historic.yaml └── main.sample.live.yaml ├── helmfile.d ├── 100-polkadot-watcher-csv-exporter.yaml └── config │ ├── era-scanner-values.yaml.gotmpl │ └── session-exporter-values.yaml.gotmpl ├── package.json ├── scripts ├── integration-tests.sh └── start-kind-local-registry.sh ├── src ├── actions │ └── start.ts ├── constants.ts ├── csvWriter.ts ├── dataGatherer.ts ├── dataGathererHistoric.ts ├── fileUploader.ts ├── index.ts ├── subscriber │ ├── ISubscriber.ts │ ├── SubscriberFactory.ts │ ├── subscriber.ts │ ├── subscriberEraScanner.ts │ └── subscriberTemplate.ts ├── types.ts └── utils.ts ├── tsconfig.json └── yarn.lock /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | helmLint: 5 | docker: 6 | - image: web3f/ci-commons:v3 7 | steps: 8 | - checkout 9 | - run: 10 | command: | 11 | helm lint ./charts/polkadot-watcher-csv-exporter 12 | 13 | yarnLint: 14 | docker: 15 | - image: web3f/node-dind:v2 16 | steps: 17 | - checkout 18 | - run: yarn 19 | - run: yarn lint 20 | 21 | 22 | buildImage: 23 | docker: 24 | - image: web3f/ci-commons:v3 25 | steps: 26 | - checkout 27 | - setup_remote_docker: 28 | docker_layer_caching: true 29 | version: 20.10.7 30 | - run: 31 | command: | 32 | /scripts/build-image.sh web3f/polkadot-watcher-csv-exporter . 
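# note: build-image.sh and the other /scripts/*.sh helpers invoked in this pipeline are assumed to be provided by the web3f/ci-commons CI image used by these jobs, not by this repository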
33 | 34 | publishImage: 35 | docker: 36 | - image: web3f/ci-commons:v3 37 | steps: 38 | - checkout 39 | - setup_remote_docker: 40 | version: 20.10.7 41 | - run: 42 | command: | 43 | /scripts/publish-image.sh web3f/polkadot-watcher-csv-exporter 44 | 45 | publishChart: 46 | docker: 47 | - image: web3f/ci-commons:v3 48 | steps: 49 | - checkout 50 | - run: 51 | command: | 52 | /scripts/publish-chart.sh 53 | 54 | integrationTests: 55 | docker: 56 | - image: web3f/ci-commons:v3 57 | steps: 58 | - checkout 59 | - setup_remote_docker: 60 | version: 20.10.7 61 | - run: 62 | description: run integration tests 63 | command: | 64 | /scripts/integration-tests.sh kindest/node:v1.21.1 65 | 66 | workflows: 67 | version: 2 68 | test_and_deploy: 69 | jobs: 70 | - helmLint: 71 | filters: 72 | tags: 73 | only: /.*/ 74 | - yarnLint: 75 | filters: 76 | tags: 77 | only: /.*/ 78 | - buildImage: 79 | context: dockerhub-bot 80 | filters: 81 | tags: 82 | only: /.*/ 83 | requires: 84 | - helmLint 85 | - yarnLint 86 | - integrationTests: 87 | filters: 88 | tags: 89 | only: /.*/ 90 | requires: 91 | - buildImage 92 | - publishImage: 93 | context: dockerhub-bot 94 | filters: 95 | branches: 96 | ignore: /.*/ 97 | tags: 98 | only: /^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$/ 99 | requires: 100 | - integrationTests 101 | - publishChart: 102 | context: github-bot 103 | filters: 104 | branches: 105 | ignore: /.*/ 106 | tags: 107 | only: /^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$/ 108 | requires: 109 | - integrationTests 110 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | charts/ 4 | helmfile.d/ 5 | .circleci/ 6 | .github/ 7 | scripts/ 8 | test/ 9 | data-csv/ 10 | .eslintignore 11 | .eslintrc.js 12 | .eslintrc.json 13 | .gitignore 14 | LICENSE 15 | README.md -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | coverage 4 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | parser: '@typescript-eslint/parser', 4 | plugins: [ 5 | '@typescript-eslint', 6 | ], 7 | extends: [ 8 | 'eslint:recommended', 9 | 'plugin:@typescript-eslint/eslint-recommended', 10 | 'plugin:@typescript-eslint/recommended', 11 | ], 12 | "rules": { 13 | "@typescript-eslint/camelcase": ["error", { "properties": "never" } ] 14 | }, 15 | env: { 16 | node: true, 17 | }, 18 | }; 19 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "node": true, 4 | "commonjs": true, 5 | "es6": true, 6 | "mocha": true 7 | }, 8 | "extends": "eslint:recommended", 9 | "globals": { 10 | "Atomics": "readonly", 11 | "SharedArrayBuffer": "readonly" 12 | }, 13 | "parserOptions": { 14 | "ecmaVersion": 2018 15 | }, 16 | "rules": { 17 | "no-console": 0, 18 | "no-useless-escape": 0 19 | } 20 
| } 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: npm 4 | directory: "/" 5 | schedule: 6 | interval: weekly 7 | open-pull-requests-limit: 10 8 | reviewers: 9 | - "w3f/infrastructure" 10 | ignore: 11 | - dependency-name: "*" 12 | update-types: ["version-update:semver-minor","version-update:semver-patch"] 13 | 14 | - package-ecosystem: docker 15 | directory: "/" 16 | schedule: 17 | interval: weekly 18 | open-pull-requests-limit: 10 19 | ignore: 20 | - dependency-name: "*" 21 | update-types: ["version-update:semver-minor","version-update:semver-patch"] 22 | -------------------------------------------------------------------------------- /.github/workflows/dependency-review.yml: -------------------------------------------------------------------------------- 1 | # Dependency Review Action 2 | # 3 | # This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging. 4 | # 5 | # Source repository: https://github.com/actions/dependency-review-action 6 | # Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement 7 | name: 'Dependency Review' 8 | on: [pull_request] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | dependency-review: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: 'Checkout Repository' 18 | uses: actions/checkout@v3 19 | - name: 'Dependency Review' 20 | uses: actions/dependency-review-action@v2 21 | -------------------------------------------------------------------------------- /.github/workflows/yarn_upgrade.yml: -------------------------------------------------------------------------------- 1 | name: 'Yarn Upgrade' 2 | on: 3 | schedule: 4 | - cron: '0 10 * * 1' 5 | workflow_dispatch: 6 | 7 | jobs: 8 | yarn-upgrade: 9 | uses: w3f/base-services-charts/.github/workflows/yarn_upgrade.yml@master 10 | secrets: 11 | PR_PAT: ${{ secrets.BOT_PAT }} 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config/* 2 | !config/*sample* 3 | node_modules 4 | dist 5 | data* -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine 2 | 3 | RUN apk add --no-cache make gcc g++ python3 4 | 5 | WORKDIR /app 6 | 7 | COPY package.json yarn.lock ./ 8 | RUN yarn --ignore-scripts 9 | 10 | COPY . . 11 | RUN yarn && \ 12 | yarn build && \ 13 | apk del make gcc g++ python3 14 | 15 | ENTRYPOINT ["yarn", "start"] 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![CircleCI](https://circleci.com/gh/w3f/polkadot-watcher-csv-exporter.svg?style=svg)](https://circleci.com/gh/w3f/polkadot-watcher-csv-exporter) [![Total alerts](https://img.shields.io/lgtm/alerts/g/w3f/polkadot-watcher-csv-exporter.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/w3f/polkadot-watcher-csv-exporter/alerts/) [![Language grade: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/w3f/polkadot-watcher-csv-exporter.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/w3f/polkadot-watcher-csv-exporter/context:javascript) 2 | 3 | # polkadot-watcher-csv-exporter 4 | 5 | ## Please Note 6 | All the relevant data model code is located in the [csvWriter](src/csvWriter.ts) typescript module. 
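For quick reference, these are the CSV column headers that module currently writes (validator files first, nominator files second):

```csv
era,session,last_session,timestamp,block_number,active,name,stash_address,controller_address,commission_percent,self_stake,total_stake,num_stakers,stakers,num_voters,voters,era_points,total_issuance,validator_rewards_previous_era
era,session,last_session,timestamp,block_number,stash_address,controller_address,bonded_amount,num_targets,targets
```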
7 | 8 | ## How to Run 9 | 10 | ### Requirements 11 | - yarn: https://classic.yarnpkg.com/en/docs/install/ 12 | 13 | ```bash 14 | git clone https://github.com/w3f/polkadot-watcher-csv-exporter.git 15 | cd polkadot-watcher-csv-exporter 16 | cp config/main.sample.complete.yaml config/main.yaml 17 | # just the first time 18 | 19 | yarn 20 | yarn build 21 | yarn start 22 | ``` 23 | 24 | ## Features 25 | 26 | - live 27 | - live + cronjob 28 | - historic 29 | - scanner (historic) 30 | 31 | ## How to configure the application 32 | 33 | Sample files for all the possible configurations can be found [here](config/). 34 | 35 | ### Output 36 | Unless configured otherwise, the application creates a ./data-csv folder by default and populates it with the exported chain data. -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | description: Polkadot Watcher 2 | name: polkadot-watcher-csv-exporter 3 | version: v1.3.4 4 | appVersion: v1.3.4 5 | apiVersion: v2 6 | -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* Returns the app name */}} 2 | {{- define "app.name" -}} 3 | {{- default .Release.Name .Values.nameOverride -}} 4 | {{- end }} -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "app.name" . }} 5 | data: 6 | main.yaml: |- 7 | {{ toYaml .Values.config | indent 4 }} 8 | -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/cronjob.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Values.config.cronjob.enabled true }} 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: {{ include "app.name" . }} 6 | labels: 7 | app: {{ include "app.name" . }} 8 | spec: 9 | schedule: {{ .Values.cronjob.schedule | quote }} 10 | failedJobsHistoryLimit: 0 11 | concurrencyPolicy: Forbid 12 | jobTemplate: 13 | spec: 14 | backoffLimit: 0 15 | template: 16 | metadata: 17 | labels: 18 | app: {{ include "app.name" . }} 19 | annotations: 20 | checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} 21 | spec: 22 | {{- if .Values.cronjob.affinity }} 23 | affinity: 24 | {{ toYaml .Values.cronjob.affinity | indent 12 }} 25 | {{- end }} 26 | {{- if .Values.cronjob.tolerations }} 27 | tolerations: 28 | {{ toYaml .Values.cronjob.tolerations | indent 12 }} 29 | {{- end }} 30 | restartPolicy: Never 31 | containers: 32 | - name: {{ include "app.name" .
}} 33 | image: {{ .Values.image.repo }}:{{ .Values.image.tag | default .Chart.AppVersion }} 34 | imagePullPolicy: {{ .Values.image.pullPolicy | default "Always" }} 35 | {{ if eq .Values.config.cronjob.timeout true }} 36 | command: ["timeout", "1200", "yarn", "start"] 37 | {{ end }} 38 | args: 39 | - -c 40 | - /app/config/main.yaml 41 | ports: 42 | - name: metrics 43 | containerPort: {{ .Values.config.port }} 44 | livenessProbe: 45 | httpGet: 46 | path: /healthcheck 47 | port: {{ .Values.config.port }} 48 | initialDelaySeconds: 10 49 | timeoutSeconds: 200 50 | {{ if ne .Values.environment "ci" }} 51 | resources: 52 | {{- toYaml .Values.resources | nindent 14 }} 53 | {{ end }} 54 | volumeMounts: 55 | - name: config 56 | mountPath: /app/config 57 | - name: service-account 58 | mountPath: {{ dir .Values.config.bucketUpload.gcpServiceAccount }} 59 | readOnly: true 60 | {{ if ne .Values.environment "ci" }} 61 | - name: data-csv 62 | mountPath: {{ .Values.config.exportDir }} 63 | {{ end }} 64 | volumes: 65 | - name: config 66 | configMap: 67 | name: {{ include "app.name" . }} 68 | - name: service-account 69 | secret: 70 | secretName: {{ include "app.name" . }} 71 | items: 72 | - key: service_account_json 73 | path: {{ base .Values.config.bucketUpload.gcpServiceAccount }} 74 | {{ if ne .Values.environment "ci" }} 75 | - name: data-csv 76 | persistentVolumeClaim: 77 | claimName: {{ include "app.name" . }}-data-csv 78 | {{ end }} 79 | 80 | --- 81 | 82 | {{ if ne .Values.environment "ci" }} 83 | kind: PersistentVolumeClaim 84 | apiVersion: v1 85 | metadata: 86 | name: {{ include "app.name" . }}-data-csv 87 | spec: 88 | accessModes: 89 | - ReadWriteOnce 90 | resources: 91 | requests: 92 | storage: 1Gi 93 | {{ end }} 94 | 95 | 96 | {{ end }} 97 | -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ include "app.name" . }} 5 | type: Opaque 6 | data: 7 | service_account_json: {{ .Values.secret.gcpServiceAccountValue | b64enc | quote }} -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "app.name" . }} 5 | labels: 6 | app: {{ include "app.name" . }} 7 | spec: 8 | ports: 9 | - name: metrics 10 | port: {{ .Values.config.port }} 11 | selector: 12 | app: {{ include "app.name" . }} 13 | -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | {{ if ne .Values.config.cronjob.enabled true }} 2 | 3 | 4 | apiVersion: apps/v1 5 | kind: StatefulSet 6 | metadata: 7 | name: {{ include "app.name" . }} 8 | labels: 9 | app: {{ include "app.name" . }} 10 | spec: 11 | replicas: 1 12 | revisionHistoryLimit: 3 13 | updateStrategy: 14 | type: RollingUpdate 15 | selector: 16 | matchLabels: 17 | app: {{ include "app.name" . }} 18 | serviceName: {{ include "app.name" . }} 19 | template: 20 | metadata: 21 | labels: 22 | app: {{ include "app.name" . }} 23 | annotations: 24 | checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} 25 | spec: 26 | containers: 27 | - name: {{ include "app.name" . }} 28 | image: {{ .Values.image.repo }}:{{ .Values.image.tag | default .Chart.AppVersion }} 29 | imagePullPolicy: {{ .Values.image.pullPolicy | default "Always" }} 30 | args: 31 | - -c 32 | - /app/config/main.yaml 33 | ports: 34 | - name: metrics 35 | containerPort: {{ .Values.config.port }} 36 | livenessProbe: 37 | httpGet: 38 | path: /healthcheck 39 | port: {{ .Values.config.port }} 40 | initialDelaySeconds: 10 41 | timeoutSeconds: 200 42 | {{ if ne .Values.environment "ci" }} 43 | resources: 44 | {{- toYaml .Values.resources | nindent 10 }} 45 | {{ end }} 46 | volumeMounts: 47 | - name: config 48 | mountPath: /app/config 49 | - name: service-account 50 | mountPath: {{ dir .Values.config.bucketUpload.gcpServiceAccount }} 51 | readOnly: true 52 | {{ if ne .Values.environment "ci" }} 53 | - name: data-csv 54 | mountPath: {{ .Values.config.exportDir }} 55 | {{ if eq .Values.config.eraScanner.enabled true }} 56 | - name: data-scanner 57 | mountPath: {{ .Values.config.eraScanner.dataDir }} 58 | {{ end }} 59 | {{ end }} 60 | volumes: 61 | - name: config 62 | configMap: 63 | name: {{ include "app.name" . }} 64 | - name: service-account 65 | secret: 66 | secretName: {{ include "app.name" . }} 67 | items: 68 | - key: service_account_json 69 | path: {{ base .Values.config.bucketUpload.gcpServiceAccount }} 70 | {{ if ne .Values.environment "production" }} 71 | - name: data-csv 72 | emptyDir: {} 73 | {{ if eq .Values.config.eraScanner.enabled true }} 74 | - name: data-scanner 75 | emptyDir: {} 76 | {{ end }} 77 | {{ else }} 78 | volumeClaimTemplates: 79 | - metadata: 80 | name: data-csv 81 | spec: 82 | accessModes: [ "ReadWriteOnce" ] 83 | resources: 84 | requests: 85 | storage: 1Gi 86 | {{ if eq .Values.config.eraScanner.enabled true }} 87 | - metadata: 88 | name: data-scanner 89 | spec: 90 | accessModes: [ "ReadWriteOnce" ] 91 | resources: 92 | requests: 93 | storage: 1Gi 94 | {{ end }} 95 | {{ end }} 96 | 97 | 98 | {{ end }} -------------------------------------------------------------------------------- /charts/polkadot-watcher-csv-exporter/values.yaml: -------------------------------------------------------------------------------- 1 | environment: production 2 | 3 | image: 4 | repo: web3f/polkadot-watcher-csv-exporter 5 | #tag: latest 6 | 7 | config: 8 | endpoint: "wss://kusama-rpc.polkadot.io" 9 | port: 3000 10 | logLevel: info 11 | debug: 12 | enabled: false 13 | forceInitialWrite: false 14 | exportDir: "/app/data-csv" 15 | endSessionBlockDistance: 20 16 | apiChunkSize: 2000 17 | apiTimeoutMs: 180000 18 | sessionOnly: false 19 | bucketUpload: 20 | enabled: false 21 | gcpServiceAccount: '/app/service-account/credentials.json' 22 | gcpProject: '' 23 | gcpBucketName: '' 24 | cronjob: 25 | enabled: false 26 | timeout: true 27 | historic: 28 | enabled: false 29 | historySize: 5 30 | eraScanner: 31 | enabled: false 32 | dataDir: "/app/data-scanner" 33 | #startFromEra: 3309 34 | 35 | 36 | secret: 37 | gcpServiceAccountValue: | 38 | {"type":"service_account","project_id":"xxx","private_key_id":"xxx","private_key":"xxx"} 39 | 40 | cronjob: 41 | schedule: "* * * * *" 42 | tolerations: [] 43 | affinity: {} 44 | 45 | resources: 46 | requests: 47 | cpu: "600m" 48 | memory: "1Gi" 49 | limits: 50 | cpu: "2000m" 51 | memory: "4Gi" 52 | -------------------------------------------------------------------------------- /config/main.sample.complete.yaml: -------------------------------------------------------------------------------- 
1 | endpoint: "wss://kusama-rpc.polkadot.io/" 2 | port: 3000 3 | logLevel: info 4 | debug: 5 | enabled: false 6 | forceInitialWrite: false 7 | exportDir: "./data-csv" 8 | endSessionBlockDistance: 20 9 | apiChunkSize: 3000 10 | apiTimeoutMs: 180000 11 | sessionOnly: false 12 | bucketUpload: 13 | enabled: false 14 | gcpServiceAccount: '~/Documents/serviceAccount.json' 15 | gcpProject: 'project-12345' 16 | gcpBucketName: 'my_bucket' 17 | cronjob: 18 | enabled: false 19 | historic: 20 | enabled: false 21 | historySize: 5 22 | eraScanner: 23 | enabled: false 24 | dataDir: "./data-scanner" 25 | #startFromEra: 3309 -------------------------------------------------------------------------------- /config/main.sample.eraScanner.yaml: -------------------------------------------------------------------------------- 1 | endpoint: "wss://kusama-rpc.polkadot.io/" 2 | port: 3000 3 | logLevel: info 4 | exportDir: "./data-csv" 5 | eraScanner: 6 | enabled: true 7 | dataDir: "./data-scanner" 8 | #startFromEra: 3309 -------------------------------------------------------------------------------- /config/main.sample.historic.yaml: -------------------------------------------------------------------------------- 1 | endpoint: "wss://kusama-rpc.polkadot.io/" 2 | port: 3000 3 | logLevel: info 4 | exportDir: "./data-csv" 5 | historic: 6 | enabled: true 7 | historySize: 5 -------------------------------------------------------------------------------- /config/main.sample.live.yaml: -------------------------------------------------------------------------------- 1 | endpoint: "wss://kusama-rpc.polkadot.io/" 2 | port: 3000 3 | logLevel: info 4 | exportDir: "./data-csv" 5 | endSessionBlockDistance: 20 6 | apiChunkSize: 3000 7 | cronjob: 8 | enabled: false -------------------------------------------------------------------------------- /helmfile.d/100-polkadot-watcher-csv-exporter.yaml: -------------------------------------------------------------------------------- 1 | environments: 2 | ci: 3 | local: 4 | 5 | repositories: 6 | - name: w3f 7 | url: https://w3f.github.io/helm-charts/ 8 | 9 | releases: 10 | 11 | {{- if or (eq .Environment.Name "ci") (eq .Environment.Name "local") }} 12 | 13 | - name: session-exporter 14 | chart: ../charts/polkadot-watcher-csv-exporter 15 | values: 16 | - ./config/session-exporter-values.yaml.gotmpl 17 | 18 | - name: era-scanner 19 | chart: ../charts/polkadot-watcher-csv-exporter 20 | values: 21 | - ./config/era-scanner-values.yaml.gotmpl 22 | 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helmfile.d/config/era-scanner-values.yaml.gotmpl: -------------------------------------------------------------------------------- 1 | environment: {{ .Environment.Name }} 2 | 3 | {{ if eq .Environment.Name "ci" }} 4 | image: 5 | tag: {{ env "CIRCLE_SHA1" | default "kind" }} 6 | {{ else if eq .Environment.Name "local" }} 7 | image: 8 | repo: localhost:5000/polkadot-watcher-csv-exporter 9 | tag: latest 10 | {{ end }} 11 | 12 | config: 13 | debug: 14 | enabled: true 15 | eraScanner: 16 | enabled: true 17 | -------------------------------------------------------------------------------- /helmfile.d/config/session-exporter-values.yaml.gotmpl: -------------------------------------------------------------------------------- 1 | environment: {{ .Environment.Name }} 2 | 3 | {{ if eq .Environment.Name "ci" }} 4 | image: 5 | tag: {{ env "CIRCLE_SHA1" | default "kind" }} 6 | {{ else if eq .Environment.Name "local" }} 7 | image: 8 | repo: 
localhost:5000/polkadot-watcher-csv-exporter 9 | tag: latest 10 | {{ end }} 11 | 12 | config: 13 | debug: 14 | enabled: true 15 | forceInitialWrite: true 16 | cronjob: 17 | enabled: false 18 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "polkadot-watcher-csv-exporter", 3 | "version": "1.3.4", 4 | "description": "Monitor events on Polkadot networks and export a csv dataset", 5 | "repository": "git@github.com:w3f/polkadot-watcher-csv-exporter.git", 6 | "author": "W3F Infrastructure Team ", 7 | "license": "Apache-2.0", 8 | "main": "./dist/index.js", 9 | "types": "./dist/index.d.ts", 10 | "files": [ 11 | "dist/**/*" 12 | ], 13 | "scripts": { 14 | "lint": "yarn eslint . --ext .js,.jsx,.ts,.tsx", 15 | "build": "tsc --build tsconfig.json", 16 | "prepare": "yarn build", 17 | "pretest": "yarn lint", 18 | "start": "node --max-old-space-size=4096 ./dist/index.js start" 19 | }, 20 | "dependencies": { 21 | "@google-cloud/storage": "^5.20.5", 22 | "@polkadot/api": "^10.9.1", 23 | "@w3f/config": "^0.1.1", 24 | "@w3f/logger": "^0.4.2", 25 | "commander": "^4.0.0", 26 | "express": "^4.17.1", 27 | "fast-crc32c": "^2.0.0", 28 | "got": "^10.3.0", 29 | "prom-client": "^11.5.3", 30 | "ws": "^6.1.2" 31 | }, 32 | "devDependencies": { 33 | "@types/chai": "^4.3.1", 34 | "@types/express": "^4.17.13", 35 | "@types/fs-extra": "^8.1.2", 36 | "@types/lodash": "^4.14.182", 37 | "@types/mocha": "^9.1.1", 38 | "@types/node": "^14.18.21", 39 | "@types/tmp": "^0.2.3", 40 | "@typescript-eslint/eslint-plugin": "^2.34.0", 41 | "@typescript-eslint/parser": "^2.34.0", 42 | "chai": "^4.3.6", 43 | "eslint": "^7.32.0", 44 | "fs-extra": "^9.1.0", 45 | "lodash": "^4.17.21", 46 | "mocha": "^10.0.0", 47 | "nock": "^12.0.3", 48 | "sinon": "^8.1.1", 49 | "tmp": "^0.2.1", 50 | "ts-node": "^10.9.1", 51 | "typescript": "^4.6.4" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /scripts/integration-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /scripts/common.sh 4 | source /scripts/bootstrap-helm.sh 5 | 6 | 7 | run_tests() { 8 | echo Running tests... 
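# assumption: wait_pod_ready comes from the /scripts/common.sh helper sourced above (shipped with the web3f/ci-commons CI image, not this repository) and blocks until the release's pod reports Ready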
9 | 10 | wait_pod_ready session-exporter 11 | wait_pod_ready era-scanner 12 | } 13 | 14 | teardown() { 15 | helm delete session-exporter 16 | helm delete era-scanner 17 | } 18 | 19 | main(){ 20 | if [ -z "$KEEP_W3F_POLKADOT_WATCHER" ]; then 21 | trap teardown EXIT 22 | fi 23 | 24 | /scripts/build-helmfile.sh 25 | 26 | run_tests 27 | } 28 | 29 | main 30 | -------------------------------------------------------------------------------- /scripts/start-kind-local-registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -o errexit 3 | 4 | #here the guide: https://kind.sigs.k8s.io/docs/user/local-registry/ 5 | 6 | # create registry container unless it already exists 7 | reg_name='kind-registry' 8 | reg_port='5000' 9 | running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" 10 | if [ "${running}" != 'true' ]; then 11 | docker run \ 12 | -d --restart=always -p "${reg_port}:5000" --name "${reg_name}" \ 13 | registry:2 14 | fi 15 | 16 | # create a cluster with the local registry enabled in containerd (heredoc reconstructed from the guide linked above) 17 | cat <<EOF | kind create cluster --config=- 18 | kind: Cluster 19 | apiVersion: kind.x-k8s.io/v1alpha4 20 | containerdConfigPatches: 21 | - |- 22 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] 23 | endpoint = ["http://${reg_name}:${reg_port}"] 24 | EOF 25 | 26 | # connect the registry container to the cluster network (also per the linked guide) 27 | docker network connect "kind" "${reg_name}" || true -------------------------------------------------------------------------------- /src/actions/start.ts: -------------------------------------------------------------------------------- 1 | import express from 'express'; 2 | import { Config } from '@w3f/config'; 3 | import { Logger, createLogger } from '@w3f/logger'; 4 | import { InputConfig } from '../types'; 5 | import { SubscriberFactory } from '../subscriber/SubscriberFactory'; 6 | 7 | 8 | const _createLogger = (cfg: InputConfig): Logger => { 9 | 10 | let logLevel = cfg.logLevel 11 | if(cfg.debug?.enabled) logLevel = 'debug' 12 | 13 | return createLogger(logLevel); 14 | } 15 | 16 | export const startAction = async (cmd): Promise<void> =>{ 17 | const cfg = new Config().parse(cmd.config); 18 | 19 | const server = express(); 20 | server.get('/healthcheck', 21 | async (req: express.Request, res: express.Response): Promise<void> => { 22 | res.status(200).send('OK!') 23 | }) 24 | server.listen(cfg.port); 25 | 26 | const logger = _createLogger(cfg); 27 | const subscriber = new SubscriberFactory(cfg,logger).makeSubscriber() 28 | 29 | try { 30 | await subscriber.start(); 31 | } catch (e) { 32 | logger.error(`During subscriber run: ${JSON.stringify(e)}`); 33 | process.exit(-1); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/constants.ts: -------------------------------------------------------------------------------- 1 | export const apiChunkSize = 3000 2 | export const apiTimeoutMs = 180000 //3 minutes 3 | export const dataFileName = "lastChecked.txt" -------------------------------------------------------------------------------- /src/csvWriter.ts: -------------------------------------------------------------------------------- 1 | /*eslint @typescript-eslint/no-use-before-define: ["error", { "variables": false }]*/ 2 | 3 | import { WriteStream } from 'fs'; 4 | import { initFile, closeFile } from './utils'; 5 | import { WriteCSVRequest, WriteValidatorCSVRequest, WriteNominatorCSVRequest, ChainData, WriteCSVHistoricalRequest, WriteValidatorHistoricCSVRequest } from "./types"; 6 | import { Logger } from '@w3f/logger'; 7 | 8 | export const writeSessionCSV = async (request: WriteCSVRequest, chainData: ChainData, logger: Logger): Promise<void> =>{ 9 | await _writeNominatorSessionCSV({...request,...chainData} as WriteNominatorCSVRequest, logger) 10 | await _writeValidatorSessionCSV({...request,...chainData} as WriteValidatorCSVRequest, logger) 11 | } 12 | 13 | export const writeEraCSV = async (request: WriteCSVRequest, chainData: ChainData, logger: Logger): Promise<void> =>{ 14 | await _writeValidatorEraCSV({...request,...chainData} as WriteValidatorCSVRequest, logger) 15 | } 16 | 17 | export const writeHistoricErasCSV = async (request: WriteCSVHistoricalRequest, chainData: ChainData[], logger: Logger): Promise<void> =>{ 18 | await _writeValidatorHistoricEraCSV({...request, erasData: chainData} as
WriteValidatorHistoricCSVRequest, logger) 19 | } 20 | 21 | const _writeValidatorSessionCSV = async (request: WriteValidatorCSVRequest, logger: Logger): Promise<void> => { 22 | const { network, exportDir, sessionIndex } = request 23 | 24 | logger.info(`Writing validators CSV for session ${sessionIndex}`) 25 | 26 | const fileName = `${network}_validators_session_${sessionIndex}.csv` 27 | const file = initFile(exportDir, fileName, logger) 28 | 29 | _writeFileValidatorSession(file,request) 30 | 31 | await closeFile(file) 32 | 33 | logger.info(`Finished writing validators CSV for session ${sessionIndex}`) 34 | } 35 | 36 | const _writeValidatorHistoricEraCSV = async (request: WriteValidatorHistoricCSVRequest, logger: Logger): Promise<void> => { 37 | const { network, exportDir, erasData } = request 38 | 39 | for (const eraData of erasData) { 40 | logger.info(`Writing validators CSV for era ${eraData.eraIndex}`) 41 | 42 | const fileName = `${network}_validators_era_${eraData.eraIndex}.csv` 43 | const file = initFile(exportDir, fileName, logger) 44 | 45 | const requestTranslation = { 46 | api: request.api, 47 | network: request.network, 48 | exportDir: request.exportDir, 49 | eraIndex: eraData.eraIndex, 50 | sessionIndex: eraData.sessionIndex, 51 | blockNumber: eraData.blockNumber, 52 | totalIssuance: eraData.totalIssuance, 53 | validatorRewardsPreviousEra: eraData.validatorRewardsPreviousEra, 54 | myValidatorStaking: eraData.myValidatorStaking, 55 | 56 | } as WriteValidatorCSVRequest 57 | 58 | _writeFileValidatorSession(file,requestTranslation) 59 | 60 | await closeFile(file) 61 | 62 | logger.info(`Finished writing validators CSV for era ${eraData.eraIndex}`) 63 | } 64 | } 65 | 66 | const _writeValidatorEraCSV = async (request: WriteValidatorCSVRequest, logger: Logger): Promise<void> => { 67 | const { network, exportDir, eraIndex } = request 68 | 69 | logger.info(`Writing validators CSV for era ${eraIndex}`) 70 | 71 | const fileName = `${network}_validators_era_${eraIndex}.csv` 72 | const file = initFile(exportDir, fileName, logger) 73 | 74 | _writeFileValidatorSession(file,request) 75 | 76 | await closeFile(file) 77 | 78 | logger.info(`Finished writing validators CSV for era ${eraIndex}`) 79 | } 80 | 81 | const _writeFileNominatorSession = (file: WriteStream, request: WriteNominatorCSVRequest): void => { 82 | const { timestamp, eraIndex, sessionIndex, isEndEraBlock, blockNumber, nominatorStaking } = request 83 | file.write(`era,session,last_session,timestamp,block_number,stash_address,controller_address,bonded_amount,num_targets,targets\n`); 84 | for (const staking of nominatorStaking) { 85 | const numTargets = staking.nominators ? staking.nominators.length : 0; 86 | file.write(`${eraIndex},${sessionIndex},${isEndEraBlock ?
1 : 0},${timestamp},${blockNumber},${staking.accountId},${staking.controllerId},${staking.stakingLedger.total},${numTargets},"${staking.nominators.join(`,`)}"\n`); 87 | } 88 | } 89 | 90 | const _writeNominatorSessionCSV = async (request: WriteNominatorCSVRequest, logger: Logger): Promise<void> =>{ 91 | const { network, exportDir, sessionIndex } = request 92 | 93 | logger.info(`Writing nominators CSV for session ${sessionIndex}`) 94 | 95 | const fileName = `${network}_nominators_session_${sessionIndex}.csv` 96 | const file = initFile(exportDir, fileName, logger) 97 | 98 | _writeFileNominatorSession(file,request) 99 | 100 | await closeFile(file) 101 | 102 | logger.info(`Finished writing nominators CSV for session ${sessionIndex}`) 103 | } 104 | 105 | const _writeFileValidatorSession = (file: WriteStream, request: WriteValidatorCSVRequest): void => { 106 | const { timestamp,eraIndex, sessionIndex, isEndEraBlock, blockNumber, myValidatorStaking, myWaitingValidatorStaking, totalIssuance, validatorRewardsPreviousEra } = request 107 | file.write(`era,session,last_session,timestamp,block_number,active,name,stash_address,controller_address,commission_percent,self_stake,total_stake,num_stakers,stakers,num_voters,voters,era_points,total_issuance,validator_rewards_previous_era\n`); 108 | for (const staking of myValidatorStaking) { 109 | file.write(`${eraIndex},${sessionIndex ? sessionIndex : -1},${isEndEraBlock ? 1 : 0},${timestamp},${blockNumber ? blockNumber : -1},${1},${staking.displayName},${staking.accountId},${staking.controllerId},${(parseInt(staking.validatorPrefs.commission.toString()) / 10000000).toFixed(2)},${staking.exposure.own},${staking.exposure.total},${staking.exposure.others.length},"${staking.exposure.others.map(staker=>staker.who+';'+staker.value).join(`,`)}",${staking.voters.length},"${staking.voters.map(staker=>staker.address+';'+staker.value).join(`,`)}",${staking.eraPoints},${totalIssuance},${validatorRewardsPreviousEra}\n`); 110 | } 111 | if(myWaitingValidatorStaking){ 112 | // total vs active: polkadot-js displays total as both the total and the own stake for validators in the waiting set 113 | for (const staking of myWaitingValidatorStaking) { 114 | file.write(`${eraIndex},${sessionIndex ? sessionIndex : -1},${isEndEraBlock ? 1 : 0},${timestamp},${blockNumber ?
blockNumber : -1},${0},${staking.displayName},${staking.accountId},${staking.controllerId},${(parseInt(staking.validatorPrefs.commission.toString()) / 10000000).toFixed(2)},${staking.stakingLedger.total},${staking.stakingLedger.total},${staking.exposure.others.length},"${staking.exposure.others.map(staker=>staker.who+';'+staker.value).join(`,`)}",${staking.voters.length},"${staking.voters.map(staker=>staker.address+';'+staker.value).join(`,`)}",${staking.eraPoints},${totalIssuance},${validatorRewardsPreviousEra}\n`); 115 | } 116 | } 117 | } -------------------------------------------------------------------------------- /src/dataGatherer.ts: -------------------------------------------------------------------------------- 1 | /*eslint @typescript-eslint/no-use-before-define: ["error", { "variables": false }]*/ 2 | 3 | import { DeriveStakingAccount, DeriveEraExposure } from '@polkadot/api-derive/staking/types'; 4 | import { MyDeriveStakingAccount, WriteCSVRequest, ChainData, Voter, VotersMap } from "./types"; 5 | import { Logger } from '@w3f/logger'; 6 | import { ApiPromise } from '@polkadot/api'; 7 | import { EraRewardPoints } from '@polkadot/types/interfaces'; 8 | import { delay, getDisplayName, getErrorMessage } from './utils'; 9 | import BN from 'bn.js'; 10 | 11 | export const gatherChainData = async (request: WriteCSVRequest, logger: Logger): Promise<ChainData> =>{ 12 | 13 | logger.info(`Data gathering triggered...`) 14 | const data = await _handleConnectionRetries(_gatherData,request,logger) 15 | logger.info(`Data has been gathered.`) 16 | return data 17 | } 18 | 19 | /* eslint-disable @typescript-eslint/no-explicit-any */ 20 | const _handleConnectionRetries = async (f: { (request: WriteCSVRequest, logger: Logger): Promise<any> }, request: WriteCSVRequest, logger: Logger): Promise<any> => { 21 | let attempts = 0 22 | for(;;){ 23 | try { 24 | const data = await f(request,logger) 25 | return data 26 | } catch (error) { 27 | logger.error(`Could not process the Data gathering...`); 28 | const errorMessage = getErrorMessage(error) 29 | logger.error(errorMessage) 30 | if( 31 | !errorMessage.includes("Unable to decode using the supplied passphrase") && //there is no way to recover from this 32 | ++attempts < 5 33 | ){ 34 | logger.warn(`Retrying...`) 35 | await delay(5000) //wait 5 seconds before retrying 36 | } 37 | else{ 38 | process.exit(-1); 39 | } 40 | } 41 | } 42 | } 43 | /* eslint-enable @typescript-eslint/no-explicit-any */ 44 | 45 | const _gatherData = async (request: WriteCSVRequest, logger: Logger): Promise<ChainData> =>{ 46 | console.time('_gatherData'); 47 | logger.debug(`gathering some data from the chain...`) 48 | const {api,apiChunkSize,eraIndex} = request 49 | const eraPointsPromise = api.query.staking.erasRewardPoints(eraIndex); 50 | const eraExposures = await api.derive.staking.eraExposure(eraIndex) 51 | const totalIssuance = await api.query.balances.totalIssuance() 52 | const validatorRewardsPreviousEra = (await api.query.staking.erasValidatorReward(eraIndex.sub(new BN(1)))).unwrap(); 53 | 54 | console.time('get nominators'); 55 | logger.debug(`nominators...`); 56 | const nominatorStakingPromise = _getNominatorStaking(api,apiChunkSize,logger) 57 | const [nominatorStaking,eraPoints] = [await nominatorStakingPromise, await eraPointsPromise] 58 | console.timeEnd('get nominators') 59 | 60 | console.time('build voters map'); 61 | logger.debug(`voters map...`); 62 | const votersMap = _buildVotersMap(nominatorStaking) 63 | console.timeEnd('build voters map') 64 | 65 | console.time('get validators') 66 |
logger.debug(`validators...`); 67 | const myValidatorStaking = await _getMyValidatorStaking(api,apiChunkSize,votersMap,eraPoints, eraExposures, logger) 68 | console.timeEnd('get validators') 69 | 70 | console.time('get waiting validators') 71 | logger.debug(`waiting validators...`); 72 | const myWaitingValidatorStaking = await _getMyWaitingValidatorStaking(api,apiChunkSize,votersMap,eraPoints, eraExposures, logger) 73 | console.timeEnd('get waiting validators') 74 | 75 | console.timeEnd('_gatherData') 76 | return { 77 | eraPoints, 78 | totalIssuance, 79 | validatorRewardsPreviousEra, 80 | nominatorStaking, 81 | myValidatorStaking, 82 | myWaitingValidatorStaking 83 | } as ChainData 84 | } 85 | 86 | const _getNominatorStaking = async (api: ApiPromise, apiChunkSize: number, logger: Logger): Promise<DeriveStakingAccount[]> =>{ 87 | 88 | logger.debug(`getting the nominator entries...`) 89 | const nominators = await api.query.staking.nominators.entries(); 90 | logger.debug(`got ${nominators.length} entries!`) 91 | const nominatorAddresses = nominators.map(([address]) => ""+address.toHuman()[0]); 92 | 93 | logger.debug(`the nominator addresses size is ${nominatorAddresses.length}`) 94 | 95 | //A nominator set that is too big could crash the API => chunk splitting 96 | const size = apiChunkSize 97 | const nominatorAddressesChunked = [] 98 | for (let i = 0; i < nominatorAddresses.length; i += size) { 99 | const chunk = nominatorAddresses.slice(i, i + size) 100 | nominatorAddressesChunked.push(chunk) 101 | } 102 | 103 | const nominatorsStakings: DeriveStakingAccount[] = [] 104 | for (const chunk of nominatorAddressesChunked) { 105 | logger.debug(`the handled chunk size is ${chunk.length}`) 106 | nominatorsStakings.push(...await api.derive.staking.accounts(chunk)) 107 | } 108 | 109 | return nominatorsStakings 110 | } 111 | 112 | const _getMyValidatorStaking = async (api: ApiPromise, apiChunkSize: number, voters: VotersMap, eraPoints: EraRewardPoints, eraExposures: DeriveEraExposure, logger: Logger): Promise<MyDeriveStakingAccount[]> =>{ 113 | const validatorsAddresses = await api.query.session.validators(); 114 | logger.debug(`the validator addresses size is ${validatorsAddresses.length}`) 115 | 116 | //A validator set that is too big could crash the API => chunk splitting 117 | const size = apiChunkSize 118 | const validatorsAddressesChunked = [] 119 | for (let i = 0; i < validatorsAddresses.length; i += size) { 120 | const chunk = validatorsAddresses.slice(i, i + size) 121 | validatorsAddressesChunked.push(chunk) 122 | } 123 | 124 | const validatorsStakings: DeriveStakingAccount[] = [] 125 | for (const chunk of validatorsAddressesChunked) { 126 | logger.debug(`the handled chunk size is ${chunk.length}`) 127 | validatorsStakings.push(...await api.derive.staking.accounts(chunk)) 128 | } 129 | 130 | return await _buildMyValidatorStaking(api,validatorsStakings,voters,eraPoints,eraExposures) 131 | } 132 | 133 | const _getMyWaitingValidatorStaking = async (api: ApiPromise, apiChunkSize: number, voters: VotersMap, eraPoints: EraRewardPoints, eraExposures: DeriveEraExposure, logger: Logger): Promise<MyDeriveStakingAccount[]> => { 134 | const validatorsAddresses = await _getWaitingValidatorsAccountId(api) 135 | logger.debug(`the waiting validator addresses size is ${validatorsAddresses.length}`) 136 | 137 | //A validator set that is too big could crash the API => chunk splitting 138 | const size = apiChunkSize 139 | const validatorsAddressesChunked = [] 140 | for (let i = 0; i < validatorsAddresses.length; i += size) { 141 | const chunk = validatorsAddresses.slice(i, i + size) 142 |
validatorsAddressesChunked.push(chunk) 143 | } 144 | 145 | const validatorsStakings: DeriveStakingAccount[] = [] 146 | for (const chunk of validatorsAddressesChunked) { 147 | logger.debug(`the handled chunk size is ${chunk.length}`) 148 | validatorsStakings.push(...await api.derive.staking.accounts(chunk)) 149 | } 150 | 151 | return await _buildMyValidatorStaking(api,validatorsStakings,voters,eraPoints,eraExposures) 152 | } 153 | 154 | const _buildMyValidatorStaking = async (api: ApiPromise, validatorsStakings: DeriveStakingAccount[], votersMap: VotersMap, eraPoints: EraRewardPoints, eraExposures: DeriveEraExposure): Promise<MyDeriveStakingAccount[]> =>{ 155 | const myValidatorStaking = Promise.all ( validatorsStakings.map( async validatorStaking => { 156 | 157 | const validatorAddress = validatorStaking.accountId 158 | const infoPromise = api.derive.accounts.info(validatorAddress); 159 | 160 | const validatorEraPoints = eraPoints.toJSON()['individual'][validatorAddress.toHuman()] ? eraPoints.toJSON()['individual'][validatorAddress.toHuman()] : 0 161 | 162 | const exposure = eraExposures.validators[validatorAddress.toHuman()] ? eraExposures.validators[validatorAddress.toHuman()] : {total:0,own:0,others:[]} 163 | 164 | const voters: Voter[] = votersMap.has(validatorAddress.toHuman()) ? votersMap.get(validatorAddress.toHuman()) : [] 165 | 166 | const {identity} = await infoPromise 167 | return { 168 | ...validatorStaking, 169 | displayName: getDisplayName(identity), 170 | voters: voters, 171 | exposure: exposure, 172 | eraPoints: validatorEraPoints, 173 | } as MyDeriveStakingAccount 174 | 175 | })) 176 | return myValidatorStaking 177 | } 178 | 179 | const _getWaitingValidatorsAccountId = async (api: ApiPromise): Promise<string[]> => { 180 | const skStashes = await api.query.staking.validators.keys() 181 | const stashes = skStashes.map(sk => sk.args[0].toString()) 182 | const active = (await api.query.session.validators()).map(a => a.toString()); 183 | const waiting = stashes.filter((s) => !active.includes(s)); 184 | return waiting.map(account => account.toString()) 185 | } 186 | 187 | const _buildVotersMap = (nominatorsStakings: DeriveStakingAccount[]): VotersMap => { 188 | 189 | const voters: VotersMap = new Map() 190 | nominatorsStakings.forEach( nominator => { 191 | nominator.nominators.forEach ( nominated => { 192 | const key = nominated.toHuman() 193 | const value = { 194 | address: nominator.accountId.toHuman(), 195 | value: nominator.stakingLedger.total 196 | } 197 | if (voters.has(key)){ 198 | voters.get(key).push(value) 199 | } 200 | else{ 201 | voters.set(key,[value]) 202 | } 203 | }) 204 | }) 205 | 206 | return voters 207 | } -------------------------------------------------------------------------------- /src/dataGathererHistoric.ts: -------------------------------------------------------------------------------- 1 | /*eslint @typescript-eslint/no-use-before-define: ["error", { "variables": false }]*/ 2 | 3 | import { DeriveEraPoints } from '@polkadot/api-derive/staking/types'; 4 | import { MyDeriveStakingAccount, ChainData, WriteCSVHistoricalRequest, EraLastBlock, Voter } from "./types"; 5 | import { Logger } from '@w3f/logger'; 6 | import { ApiPromise } from '@polkadot/api'; 7 | import { getDisplayName, erasLastBlock as erasLastBlockFunction } from './utils'; 8 | import { DeriveEraExposure, DeriveStakingAccount } from '@polkadot/api-derive/staking/types' 9 | import { DeriveAccountInfo } from '@polkadot/api-derive/accounts/types' 10 | import BN from 'bn.js'; 11 | import type { StakingLedger, Nominations } from
'@polkadot/types/interfaces'; 12 | import type { PalletStakingNominations, PalletStakingStakingLedger, PalletStakingExposure } from '@polkadot/types/lookup'; 13 | 14 | export const gatherChainDataHistorical = async (request: WriteCSVHistoricalRequest, logger: Logger): Promise<ChainData[]> =>{ 15 | logger.info(`Historical Data gathering triggered...`) 16 | const data = await _gatherDataHistorical(request, logger) 17 | logger.info(`Historical Data has been gathered.`) 18 | return data 19 | } 20 | 21 | const _gatherDataHistorical = async (request: WriteCSVHistoricalRequest, logger: Logger): Promise<ChainData[]> =>{ 22 | logger.debug(`gathering some data from the chain...`) 23 | const {api,eraIndexes} = request 24 | 25 | logger.info(`Requested eras: ${eraIndexes.map(era => era.toString()).join(', ')}`); 26 | logger.debug(`Gathering data ...`); 27 | 28 | const [ 29 | erasPoints, 30 | erasExposures, 31 | erasLastBlock 32 | ] = await Promise.all([ 33 | api.derive.staking._erasPoints(eraIndexes,false), 34 | api.derive.staking._erasExposure(eraIndexes,false), 35 | erasLastBlockFunction(eraIndexes,api) 36 | ]); 37 | 38 | const chainDataEras = Promise.all( eraIndexes.map( async index => { 39 | 40 | const eraBlockReference = erasLastBlock.find(({ era }) => era.eq(index)) 41 | const hashReference = await api.rpc.chain.getBlockHash(eraBlockReference.block) 42 | const apiAt = await api.at(hashReference) 43 | const sessionIndex = await apiAt.query.session.currentIndex() 44 | 45 | logger.debug(`nominators...`) 46 | const nominators = await _getNominatorStaking(api,eraBlockReference,logger) 47 | logger.debug(`got nominators...`) 48 | logger.debug(`validators...`) 49 | const myValidatorStaking = await _getEraHistoricValidatorStakingInfo( 50 | api, 51 | erasPoints.find(({ era }) => era.eq(index)), 52 | erasExposures.find(({ era }) => era.eq(index)), 53 | nominators, 54 | ); 55 | logger.debug(`got validators...`) 56 | 57 | return { 58 | eraIndex: index, 59 | sessionIndex: api.createType('SessionIndex',sessionIndex), 60 | blockNumber: api.createType('Compact<BlockNumber>', eraBlockReference.block), 61 | eraPoints: await api.query.staking.erasRewardPoints(index), 62 | totalIssuance: await apiAt.query.balances.totalIssuance(), 63 | validatorRewardsPreviousEra: (await api.query.staking.erasValidatorReward(index.sub(new BN(1)))).unwrap(), 64 | nominatorStaking: null, 65 | myValidatorStaking: myValidatorStaking 66 | } as ChainData 67 | })) 68 | 69 | return chainDataEras 70 | } 71 | 72 | interface MyNominator { 73 | address: string; 74 | nominations: Nominations; 75 | ledger: StakingLedger; 76 | } 77 | 78 | const _getNominatorStaking = async (api: ApiPromise, eraLastBlock: EraLastBlock, logger: Logger): Promise<MyNominator[]> =>{ 79 | 80 | const lastBlockHash = await api.rpc.chain.getBlockHash(eraLastBlock.block) 81 | const apiAt = await api.at(lastBlockHash) 82 | logger.debug(`getting the nominator entries...`) 83 | const nominators = await apiAt.query.staking.nominators.entries() //this call requires a node connection with a high --ws-max-out-buffer-capacity 84 | logger.debug(`got ${nominators.length} nominator entries!`) 85 | const stakingLedgers = await apiAt.query.staking.ledger.entries() //this call requires a node connection with a high --ws-max-out-buffer-capacity 86 | logger.debug(`got ${stakingLedgers.length} ledger entries!`) 87 | 88 | const nominatorsMap = new Map<string, PalletStakingNominations[]>(); 89 | for (const nominator of nominators) { 90 | const key = nominator[0].toHuman().toString() 91 | const value = nominator[1].unwrap() 92 | if(!nominatorsMap.has(key)){ 93 |
93 |       nominatorsMap.set(key,[])
94 |     }
95 |     else{
96 |       logger.debug("more attention needed, multiple nominators")
97 |     }
98 |     nominatorsMap.get(key).push(value)
99 |   }
100 | 
101 |   const ledgersMap = new Map<string, PalletStakingStakingLedger[]>();
102 |   for (const ledger of stakingLedgers) {
103 |     const key = ledger[0].toHuman().toString()
104 |     const value = ledger[1].unwrap()
105 |     if(!ledgersMap.has(key)){
106 |       ledgersMap.set(key,[])
107 |     }
108 |     else{
109 |       logger.debug("more attention needed, multiple ledgers")
110 |     }
111 |     ledgersMap.get(key).push(value)
112 |   }
113 | 
114 |   const nominatorsStakings: MyNominator[] = []
115 |   nominatorsMap.forEach((nominator,address)=>{
116 |     if(ledgersMap.has(address)){
117 |       nominatorsStakings.push({
118 |         "address": address,
119 |         "nominations": nominator[0],
120 |         "ledger": ledgersMap.get(address)[0]
121 |       })
122 |     }
123 |   })
124 | 
125 |   return nominatorsStakings
126 | }
127 | 
128 | const _getEraHistoricValidatorStakingInfo = async (api: ApiPromise, eraPoints: DeriveEraPoints, eraExposure: DeriveEraExposure, nominators: MyNominator[]): Promise<MyDeriveStakingAccount[]> => {
129 |   const eraValidatorAddresses = Object.keys(eraExposure['validators']);
130 | 
131 |   console.time('validatorStakings')
132 |   const validatorStakings = await api.derive.staking.accounts(eraValidatorAddresses)
133 |   const validatorStakingsMap = new Map<string, DeriveStakingAccount>();
134 |   for (const vs of validatorStakings) {
135 |     const key = vs.accountId.toHuman().toString()
136 |     const value = vs
137 |     validatorStakingsMap.set(key,value)
138 |   }
139 |   console.timeEnd('validatorStakings')
140 | 
141 |   console.time('infoMap')
142 |   const infoMap = new Map<string, DeriveAccountInfo>();
143 |   for (const address of validatorStakingsMap.keys()) {
144 |     // room for improvement: these sequential account-info lookups could be parallelized
145 |     const info = await api.derive.accounts.info(address);
146 |     infoMap.set(address,info)
147 |   }
148 |   console.timeEnd('infoMap')
149 | 
150 |   console.time('votersMap')
151 |   const votersMap = new Map<string, Voter[]>();
152 |   // init validators with no nominations
153 |   for (const address of validatorStakingsMap.keys()) {
154 |     votersMap.set(address,[])
155 |   }
156 |   for (const nominator of nominators) {
157 |     // key: validatorAddress
158 |     // value: array of {nominatorAddress,amount}
159 | 
160 |     for (const validatorAddress of nominator.nominations.targets) {
161 |       const key = validatorAddress.toHuman().toString()
162 |       const value = {address: nominator.address, value: nominator.ledger.total}
163 | 
164 |       if(!votersMap.has(key)){
165 |         votersMap.set(key,[])
166 |       }
167 |       votersMap.get(key).push(value)
168 |     }
169 | 
170 |   }
171 |   console.timeEnd('votersMap')
172 |   //votersMap.forEach((value,key)=>console.log(`valAddress: ${key} | voters: ${JSON.stringify(value)}`))
173 | 
174 | 
175 |   console.time('deriveStakingAccounts')
176 |   const deriveStakingAccounts: MyDeriveStakingAccount[] = []
177 |   for (const address of validatorStakingsMap.keys()) {
178 | 
179 |     const validatorEraPoints = api.createType('RewardPoint', eraPoints.validators[address]);
180 |     const exposure: PalletStakingExposure = api.createType('PalletStakingExposure', eraExposure.validators[address]);
181 | 
182 |     let displayName = ""
183 |     if(infoMap.has(address)){
184 |       const {identity} = infoMap.get(address)
185 |       displayName = getDisplayName(identity)
186 |     }
187 |     else{
188 |       console.log("no info map entry for "+address)
189 |     }
190 | 
191 |     //console.log(`valAddress: ${address} | numVoters: ${votersMap.get(address).length} | voters: ${JSON.stringify(votersMap.get(address))}`)
192 |     deriveStakingAccounts.push({
193 |       ...validatorStakingsMap.get(address),
194 |       displayName: displayName,
195 |       voters: votersMap.get(address),
196 |       exposure: exposure,
197 |       eraPoints: validatorEraPoints.toNumber(),
198 |     })
199 |   }
200 |   console.timeEnd('deriveStakingAccounts')
201 |   return deriveStakingAccounts
202 | }
203 | 
--------------------------------------------------------------------------------
/src/fileUploader.ts:
--------------------------------------------------------------------------------
1 | import {Storage, Bucket, StorageOptions} from '@google-cloud/storage'
2 | import { BucketUploadConfig } from './types'
3 | import { Logger } from '@w3f/logger';
4 | import { getFileNames, deleteFile } from './utils';
5 | 
6 | export class BucketGCP {
7 |   private storageOptions: StorageOptions;
8 |   private storage: Storage;
9 |   private bucket: Bucket;
10 | 
11 |   constructor(bucketUploadConfig: BucketUploadConfig, private readonly logger: Logger) {
12 |     this.storageOptions = {
13 |       keyFilename: bucketUploadConfig.gcpServiceAccount,
14 |       projectId: bucketUploadConfig.gcpProject
15 |     }
16 | 
17 |     this.storage = new Storage(this.storageOptions);
18 |     this.bucket = this.storage.bucket(bucketUploadConfig.gcpBucketName);
19 |   }
20 | 
21 |   public uploadCSVFiles = async (sourceDir: string): Promise<void> =>{
22 | 
23 |     const fileNames = getFileNames(sourceDir, this.logger)
24 |     for (const name of fileNames) {
25 |       this.logger.debug(`processing ${name} upload... ( only if csv file )`)
26 |       name.includes('.csv') && await this._handleUploadFileToBucket(sourceDir+'/'+name)
27 |     }
28 |   }
29 | 
30 |   private _handleUploadFileToBucket = async (filePath: string): Promise<void> =>{
31 |     try {
32 |       const response = await this.bucket.upload(filePath)
33 |       this.logger.info('uploaded '+response[0].metadata.name+' to '+response[1].mediaLink)
34 |       deleteFile(filePath, this.logger)
35 |     } catch (error) {
36 |       this.logger.error(`Unable to upload ${filePath} because: ` + error)
37 |     }
38 |   }
39 | 
40 | }
41 | 
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | import process from 'process';
2 | import program from 'commander';
3 | import { startAction } from './actions/start';
4 | import '@polkadot/api-augment'; //https://github.com/polkadot-js/api/issues/4450
5 | 
6 | 
7 | program
8 |   .command('start')
9 |   .description('Starts the watcher.')
10 |   .option('-c, --config [path]', 'Path to config file.', './config/main.yaml')
11 |   .action(startAction);
12 | 
13 | program.allowUnknownOption(false);
14 | 
15 | program.parse(process.argv);
16 | 
--------------------------------------------------------------------------------
/src/subscriber/ISubscriber.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable @typescript-eslint/interface-name-prefix */
2 | 
3 | export interface ISubscriber{
4 |   start(): Promise<void>;
5 | }
6 | 
--------------------------------------------------------------------------------
/src/subscriber/SubscriberFactory.ts:
--------------------------------------------------------------------------------
1 | import { Logger } from "@w3f/logger";
2 | import { InputConfig } from "../types";
3 | import { ISubscriber } from "./ISubscriber";
4 | import { Subscriber } from "./subscriber";
5 | import { SubscriberEraScanner } from "./subscriberEraScanner";
6 | 
7 | export class SubscriberFactory {
8 |   constructor(private readonly cfg: InputConfig, private readonly logger: Logger){}
9 |   makeSubscriber = (): ISubscriber => {
10 | 
11 |     if(this.cfg.eraScanner?.enabled)
12 |       return new SubscriberEraScanner(this.cfg,this.logger)
13 |     else
14 |       return new Subscriber(this.cfg,this.logger)
15 |   }
16 | }
--------------------------------------------------------------------------------
/src/subscriber/subscriber.ts:
--------------------------------------------------------------------------------
1 | import { BlockNumber, Header, SessionIndex, EraIndex } from '@polkadot/types/interfaces';
2 | import { Compact } from '@polkadot/types/codec';
3 | import { Logger } from '@w3f/logger';
4 | import { gatherChainData } from '../dataGatherer'
5 | import { DeriveSessionProgress } from '@polkadot/api-derive/session/types'
6 | import { apiChunkSize } from '../constants'
7 | 
8 | import {
9 |   InputConfig,
10 | } from '../types';
11 | import { writeEraCSV, writeHistoricErasCSV, writeSessionCSV } from '../csvWriter';
12 | import { gatherChainDataHistorical } from '../dataGathererHistoric';
13 | import { SubscriberTemplate } from './subscriberTemplate';
14 | import { ISubscriber } from './ISubscriber';
15 | 
16 | export class Subscriber extends SubscriberTemplate implements ISubscriber {
17 | 
18 |   private config: InputConfig
19 | 
20 |   private timestamp: string;
21 | 
22 |   private apiChunkSize: number;
23 | 
24 |   private isInitialWriteForced: boolean;
25 |   private isDebugEnabled: boolean;
26 |   private isCronjobEnabled: boolean;
27 | 
28 |   private sessionIndex: SessionIndex;
29 |   private eraIndex: EraIndex;
30 |   private isCSVWriting: boolean;
31 |   private isCSVUploadable: boolean;
32 | 
33 |   private progress_delta: number //20 = two minutes before the ending of the session/era
34 | 
35 |   private historySize: number
36 |   private isHistoricEnabled: boolean
37 | 
38 |   constructor(
39 |     cfg: InputConfig,
40 |     protected readonly logger: Logger) {
41 |     super(cfg,logger)
42 |     this.config = cfg
43 |     this.endpoint = cfg.endpoint;
44 |     this.exportDir = cfg.exportDir;
45 |     this.isDebugEnabled = cfg.debug?.enabled ? cfg.debug.enabled : false
46 |     this.isInitialWriteForced = cfg.debug?.forceInitialWrite ? cfg.debug.forceInitialWrite : false
47 |     this.isBucketEnabled = cfg.bucketUpload?.enabled ? cfg.bucketUpload.enabled : false;
48 |     this.isCronjobEnabled = cfg.cronjob?.enabled ? cfg.cronjob.enabled : false;
49 |     this.progress_delta = cfg.endSessionBlockDistance
50 |     this.apiChunkSize = cfg.apiChunkSize ? cfg.apiChunkSize : apiChunkSize
51 |     this.historySize = cfg.historic?.historySize ? cfg.historic.historySize : 5 //default
52 |     this.isHistoricEnabled = cfg.historic?.enabled ? cfg.historic.enabled : false
53 |     if(this.isBucketEnabled) this._initBucket(cfg.bucketUpload);
54 |   }
55 | 
56 |   public start = async (): Promise<void> => {
57 | 
58 |     await this._initAPI();
59 |     await this._initInstanceVariables();
60 |     this._initExportDir();
61 | 
62 |     this.isDebugEnabled && await this._triggerDebugActions()
63 | 
64 |     this.isHistoricEnabled && await this._triggerHistoricActions()
65 | 
66 |     await this._handleNewHeadSubscriptions();
67 |   }
68 | 
69 |   private _initInstanceVariables = async (): Promise<void> =>{
70 |     this.timestamp = (await this.api.query.timestamp.now()).toString()
71 |     this.sessionIndex = await this.api.query.session.currentIndex();
72 |     this.eraIndex = (await this.api.query.staking.activeEra()).unwrap().index;
73 |     this._setCSVUploadable(false)
74 |     this._unlockCSVWrite()
75 |   }
76 | 
77 |   private _triggerDebugActions = async (): Promise<void> => {
78 |     this.logger.info('debug mode active')
79 |     this.isInitialWriteForced && await this._triggerDebugCSVWrite();
80 |   }
81 | 
82 |   private _triggerDebugCSVWrite = async (): Promise<void> =>{
83 |     await this._writeEraCSV(this.eraIndex,this.sessionIndex,(await this.api.rpc.chain.getHeader()).number)
84 |     this._setCSVUploadable(true)
85 |   }
86 | 
87 |   private _handleNewHeadSubscriptions = async (): Promise<void> =>{
88 | 
89 |     this.api.rpc.chain.subscribeNewHeads(async (header) => {
90 | 
91 | 
92 |       await this._writeCSVHandler(header)
93 | 
94 |       await this._uploadCSVHandler()
95 | 
96 |     })
97 |   }
98 | 
99 |   private _triggerHistoricActions = async (): Promise<void> => {
100 |     this.logger.info('Historic mode active')
101 | 
102 |     this.logger.info(`starting the CSV writing for the last ${this.historySize} eras`)
103 | 
104 |     this._lockCSVWrite()
105 |     await this._writeEraCSVHistorical()
106 |     this._setCSVUploadable(true)
107 |   }
108 | 
109 |   private _uploadCSVHandler = async (): Promise<void> => {
110 |     if(!this.isCSVUploadable) return
111 |     this._setCSVUploadable(false)
112 | 
113 |     await this._uploadToBucket()
114 |     this.isCronjobEnabled && await this._handleCronJob()
115 |     this.isHistoricEnabled && await this._handleHistoricJob()
116 |   }
117 | 
118 |   private _handleCronJob = async(): Promise<void> =>{
119 |     this.logger.info(`cronjob successfully ending...`)
120 |     process.exit()
121 |   }
122 | 
123 |   private _handleHistoricJob = async(): Promise<void> =>{
124 |     this.logger.info(`historic era gathering successfully ending...`)
125 |     process.exit()
126 |   }
127 | 
128 |   private _writeCSVHandler = async (header: Header): Promise<void> =>{
129 |     if(this._isCSVWriteLocked()) return
130 | 
131 |     const deriveSessionProgress = await this.api.derive.session.progress();
132 | 
133 |     const isEndEraBlock = await this._isEndEraBlock(deriveSessionProgress)
134 |     if (isEndEraBlock) {
135 |       this.logger.info(`starting the CSV writing for the session ${deriveSessionProgress.currentIndex}, ending of the era ${deriveSessionProgress.activeEra}`)
136 | 
137 |       this._lockCSVWrite()
138 |       this.config.sessionOnly ?
139 |         await this._writeSessionCSV(deriveSessionProgress.activeEra, deriveSessionProgress.currentIndex, header.number, isEndEraBlock) :
140 |         await this._writeEraCSV(deriveSessionProgress.activeEra, deriveSessionProgress.currentIndex, header.number)
141 |       this._setCSVUploadable(true)
142 |     }
143 | 
144 |     else if (await this._isEndSessionBlock(deriveSessionProgress)) {
145 | 
146 |       this.logger.info(`starting the CSV writing for the session ${deriveSessionProgress.currentIndex}`)
147 | 
148 |       this._lockCSVWrite()
149 |       await this._writeSessionCSV(deriveSessionProgress.activeEra, deriveSessionProgress.currentIndex, header.number);
150 |       this._setCSVUploadable(true)
151 |     }
152 |   }
153 | 
154 |   private _writeEraCSV = async (eraIndex: EraIndex, sessionIndex: SessionIndex, blockNumber: Compact<BlockNumber>): Promise<void> => {
155 |     const network = this.chain.toString().toLowerCase()
156 |     const request = {timestamp:this.timestamp,api:this.api,network,apiChunkSize:this.apiChunkSize,exportDir:this.exportDir,eraIndex,sessionIndex,blockNumber}
157 |     const chainData = await gatherChainData(request, this.logger)
158 |     await writeSessionCSV(request, chainData, this.logger)
159 |     await writeEraCSV(request, chainData, this.logger)
160 |   }
161 | 
162 |   private _writeEraCSVHistorical = async (): Promise<void> => {
163 |     const network = this.chain.toString().toLowerCase()
164 | 
165 |     const erasHistoric = await this.api.derive.staking.erasHistoric(false);
166 |     const eraIndexes = erasHistoric.slice(
167 |       Math.max(erasHistoric.length - this.historySize, 0)
168 |     )
169 |     this.logger.info(`Requested Historical data for eras: ${eraIndexes.map(era => era.toString()).join(', ')}`);
170 | 
171 |     //Too big a number of era indexes could crash the API => chunk splitting
172 |     const size = 10
173 |     const eraIndexesChunked: EraIndex[][] = []
174 |     for (let i = 0; i < eraIndexes.length; i += size) {
175 |       const chunk = eraIndexes.slice(i, i + size)
176 |       eraIndexesChunked.push(chunk)
177 |     }
178 | 
179 |     for (const chunk of eraIndexesChunked) {
180 |       this.logger.debug(`the handled chunk size is ${chunk.length}`)
181 |       const request = {timestamp:this.timestamp,api:this.api,network,exportDir:this.exportDir,eraIndexes:chunk}
182 |       const chainData = await gatherChainDataHistorical(request, this.logger)
183 |       await writeHistoricErasCSV(request, chainData, this.logger)
184 |     }
185 | 
186 |   }
187 | 
188 |   private _writeSessionCSV = async (eraIndex: EraIndex, sessionIndex: SessionIndex, blockNumber: Compact<BlockNumber>, isEndEraBlock = false): Promise<void> => {
189 |     const network = this.chain.toString().toLowerCase()
190 |     const request = {timestamp:this.timestamp,api:this.api,network,apiChunkSize:this.apiChunkSize,exportDir:this.exportDir,eraIndex,sessionIndex,blockNumber}
191 |     const chainData = await gatherChainData(request, this.logger)
192 |     chainData.isEndEraBlock = isEndEraBlock
193 |     await writeSessionCSV(request, chainData, this.logger)
194 |   }
195 | 
196 |   private _isEndEraBlock = async (deriveSessionProgress: DeriveSessionProgress): Promise<boolean> =>{
197 | 
198 |     if (await this._isEraOutOfSync(deriveSessionProgress)) return false
199 | 
200 |     return deriveSessionProgress.eraLength.toNumber() - deriveSessionProgress.eraProgress.toNumber() < this.progress_delta
201 |   }
202 | 
203 |   private _isEraOutOfSync = async (deriveSessionProgress: DeriveSessionProgress): Promise<boolean> =>{
204 |     if (deriveSessionProgress.activeEra > this.eraIndex){
205 |       await this._handleEraChange(deriveSessionProgress.activeEra, deriveSessionProgress.currentIndex)
206 |       return true
207 |     }
208 |     return false
209 |   }
210 | 
211 |   private _handleEraChange = async (newEra: EraIndex, newSession: SessionIndex): Promise<void> =>{
212 |     this.eraIndex = newEra
213 |     await this._handleSessionChange(newSession)
214 |   }
215 | 
216 |   private _isEndSessionBlock = async (deriveSessionProgress: DeriveSessionProgress): Promise<boolean> =>{
217 | 
218 |     if(await this._isSessionOutOfSync(deriveSessionProgress)) return false
219 | 
220 |     //It starts to write during the last few blocks of the session, to be sure not to lose any session data, since deriveSessionProgress.sessionProgress is not fully reliable.
221 |     //Unfortunately it does not always reach the very last block and may jump directly to the next session.
222 |     return deriveSessionProgress.sessionLength.toNumber() - deriveSessionProgress.sessionProgress.toNumber() < this.progress_delta
223 |   }
224 | 
225 |   private _isSessionOutOfSync = async (deriveSessionProgress: DeriveSessionProgress): Promise<boolean> =>{
226 |     if(deriveSessionProgress.currentIndex > this.sessionIndex) {
227 |       await this._handleSessionChange(deriveSessionProgress.currentIndex)
228 |       return true
229 |     }
230 |     return false
231 |   }
232 | 
233 |   private _handleSessionChange = async (newSession: SessionIndex): Promise<void> =>{
234 |     this.sessionIndex = newSession
235 |     this._unlockCSVWrite()
236 |   }
237 | 
238 |   private _lockCSVWrite = (): void =>{
239 |     this.isCSVWriting = true
240 |   }
241 | 
242 |   private _unlockCSVWrite = (): void =>{
243 |     this.isCSVWriting = false
244 |   }
245 | 
246 |   private _isCSVWriteLocked = (): boolean =>{
247 |     return this.isCSVWriting
248 |   }
249 | 
250 |   private _setCSVUploadable = (status: boolean): void =>{
251 |     this.isCSVUploadable = status
252 |   }
253 | 
254 | }
255 | 
--------------------------------------------------------------------------------
/src/subscriber/subscriberEraScanner.ts:
--------------------------------------------------------------------------------
1 | import { EraIndex } from '@polkadot/types/interfaces';
2 | import { Logger } from '@w3f/logger';
3 | import { dataFileName } from '../constants'
4 | import readline from 'readline';
5 | import {
6 |   InputConfig,
7 | } from '../types';
8 | import { closeFile, getFileNames, initReadFileStream, initWriteFileStream, isDirEmpty, isDirExistent, isNewEraEvent, makeDir } from '../utils';
9 | import { writeHistoricErasCSV } from '../csvWriter';
10 | import { gatherChainDataHistorical } from '../dataGathererHistoric';
11 | import { ISubscriber } from './ISubscriber';
12 | import { SubscriberTemplate } from './subscriberTemplate';
13 | 
14 | export class SubscriberEraScanner extends SubscriberTemplate implements ISubscriber {
15 |   private config: InputConfig;
16 | 
17 |   private eraIndex: EraIndex;
18 | 
19 |   private dataDir: string
20 |   private dataFileName = dataFileName
21 | 
22 |   private isScanOngoing = false //lock for concurrency
23 |   private isNewScanRequired = false
24 | 
25 |   constructor(
26 |     cfg: InputConfig,
27 |     protected readonly logger: Logger) {
28 |     super(cfg,logger)
29 |     this.config=cfg
30 |     this.dataDir = cfg.eraScanner?.dataDir
31 |   }
32 | 
33 |   public start = async (): Promise<void> => {
34 | 
35 |     this.logger.info('Era Scanner mode active')
36 | 
37 |     await this._initAPI();
38 |     await this._initInstanceVariables();
39 |     this._initExportDir();
40 |     await this._initDataDir()
41 | 
42 |     await this._handleEventsSubscriptions() // scan immediately after an event detection
43 |     this.logger.info(`Event Scanner Based Module subscribed...`)
44 | 
45 |     this._requestNewScan() //first scan after a restart
46 |   }
47 | 
48 |   private _initDataDir = async (): Promise<void> =>{
49 |     if ( ! isDirExistent(this.dataDir) ) {
50 |       makeDir(this.dataDir)
51 |     }
52 | 
53 |     if( isDirEmpty(this.dataDir) || !getFileNames(this.dataDir,this.logger).includes(this.dataFileName) || ! await this._getLastCheckedEra()){
54 |       const firstEraToScan = this.config.eraScanner?.startFromEra ? this.config.eraScanner?.startFromEra : this.eraIndex.toNumber()-2 // from config or current era -2
55 |       const file = initWriteFileStream(this.dataDir,this.dataFileName,this.logger)
56 |       file.write(`${firstEraToScan}`)
57 |       await closeFile(file)
58 |     }
59 |   }
60 | 
61 |   private _initInstanceVariables = async (): Promise<void> =>{
62 |     this.eraIndex = (await this.api.query.staking.activeEra()).unwrap().index;
63 |     this.logger.info(`Current Era: ${this.eraIndex}`)
64 |   }
65 | 
66 |   private _handleEventsSubscriptions = async (): Promise<void> => {
67 |     this.api.query.system.events((events) => {
68 |       events.forEach(async (record) => {
69 |         const { event } = record;
70 |         if(isNewEraEvent(event,this.api)){
71 |           const era = (await this.api.query.staking.activeEra()).unwrap().index
72 |           if(era != this.eraIndex) this._handleEraChange(era)
73 |         }
74 |       })
75 |     })
76 |   }
77 | 
78 |   private _requestNewScan = async (): Promise<void> => {
79 |     if(this.isScanOngoing){
80 |       /*
81 |       A new scan can be triggered asynchronously for various reasons (see the subscribe function above).
82 |       To ensure exactly-once detection and delivery, only one scan is allowed at a time.
83 |       */
84 |       this.isNewScanRequired = true
85 |       this.logger.info(`new scan queued...`)
86 |     }
87 |     else{
88 |       try {
89 |         do {
90 |           this.isScanOngoing = true
91 |           this.isNewScanRequired = false
92 |           await this._triggerEraScannerActions()
93 |           /*
94 |           An additional scan will be processed immediately if queued by any of the triggers.
95 |           */
96 |         } while (this.isNewScanRequired);
97 |       } catch (error) {
98 |         this.logger.error(`the SCAN had an issue ! last checked era: ${await this._getLastCheckedEra()}: ${error}`)
99 |         this.logger.warn('quitting...')
100 |         process.exit(-1);
101 |       } finally {
102 |         this.isScanOngoing = false
103 |       }
104 |     }
105 |   }
106 | 
107 |   private _triggerEraScannerActions = async (): Promise<void> => {
108 |     while(await this._getLastCheckedEra() < this.eraIndex.toNumber()-1){
109 |       const eraToScan = await this._getLastCheckedEra() + 1
110 |       this.logger.info(`starting the CSV writing for the era ${eraToScan}`)
111 |       await this._writeEraCSVHistorical(eraToScan)
112 |       await this._updateLastCheckedEra(eraToScan)
113 |       await this._uploadToBucket()
114 |     }
115 |   }
116 | 
117 |   private _writeEraCSVHistorical = async (era: number): Promise<void> => {
118 |     const network = this.chain.toString().toLowerCase()
119 |     const eraIndex = this.api.createType("EraIndex",era)
120 |     const timestamp = (await this.api.query.timestamp.now()).toString()
121 |     const request = {timestamp,api:this.api,network,exportDir:this.exportDir,eraIndexes:[eraIndex]}
122 |     const chainData = await gatherChainDataHistorical(request, this.logger)
123 |     await writeHistoricErasCSV(request, chainData, this.logger)
124 |   }
125 | 
126 |   private _handleEraChange = async (newEra: EraIndex): Promise<void> =>{
127 |     this.eraIndex = newEra
128 |     this._requestNewScan()
129 |   }
130 | 
131 |   private _getLastCheckedEra = async (): Promise<number> => {
132 |     const file = initReadFileStream(this.dataDir,this.dataFileName,this.logger)
133 |     const rl = readline.createInterface({
134 |       input: file,
135 |       crlfDelay: Infinity
136 |     });
137 | 
138 |     let lastCheckedEra: number
139 |     for await (const line of rl) {
140 |       // Each line of the data file will be successively available here as `line`.
141 |       //console.log(`Line from file: ${line}`);
142 |       lastCheckedEra = Number.parseInt(line)
143 |     }
144 |     await closeFile(file)
145 | 
146 |     return lastCheckedEra
147 |   }
148 | 
149 |   private _updateLastCheckedEra = async (eraIndex: number): Promise<boolean> => {
150 |     const file = initWriteFileStream(this.dataDir,this.dataFileName,this.logger)
151 |     const result = file.write(eraIndex.toString())
152 |     await closeFile(file)
153 |     return result
154 |   }
155 | 
156 | }
157 | 
--------------------------------------------------------------------------------
/src/subscriber/subscriberTemplate.ts:
--------------------------------------------------------------------------------
1 | import { ApiPromise, WsProvider } from '@polkadot/api';
2 | import { Logger } from '@w3f/logger';
3 | import { Text } from '@polkadot/types/primitive';
4 | import { BucketGCP } from '../fileUploader'
5 | import {
6 |   InputConfig, BucketUploadConfig,
7 | } from '../types';
8 | import { isDirEmpty, isDirExistent, makeDir } from '../utils';
9 | import { apiTimeoutMs } from '../constants';
10 | 
11 | export abstract class SubscriberTemplate {
12 |   protected chain: Text;
13 |   protected api: ApiPromise;
14 |   protected apiTimeoutMs: number;
15 |   protected endpoint: string;
16 | 
17 |   protected exportDir: string;
18 |   protected isBucketEnabled: boolean;
19 |   protected bucket: BucketGCP;
20 | 
21 |   constructor(
22 |     cfg: InputConfig,
23 |     protected readonly logger: Logger) {
24 |     this.endpoint = cfg.endpoint;
25 |     this.apiTimeoutMs = cfg.apiTimeoutMs ? cfg.apiTimeoutMs : apiTimeoutMs
26 |     this.exportDir = cfg.exportDir;
27 |     this.isBucketEnabled = cfg.bucketUpload?.enabled ? cfg.bucketUpload.enabled : false;
28 |     if(this.isBucketEnabled) this._initBucket(cfg.bucketUpload);
29 |   }
30 | 
31 |   protected _initBucket = (config: BucketUploadConfig): void =>{
32 |     this.bucket = new BucketGCP(config,this.logger)
33 |   }
34 | 
35 |   protected _initAPI = async (): Promise<void> =>{
36 | 
37 |     const endpoints = [this.endpoint] //one could define more than one endpoint
38 |     const provider = new WsProvider(endpoints,undefined,undefined,this.apiTimeoutMs);
39 |     this.api = await ApiPromise.create({provider,throwOnConnect:true,throwOnUnknown:true})
40 |     this.api.on('error', (error) => {this.logger.warn("The API has an error"); console.log(error)})
41 | 
42 |     this.chain = await this.api.rpc.system.chain();
43 |     const [nodeName, nodeVersion] = await Promise.all([
44 |       this.api.rpc.system.name(),
45 |       this.api.rpc.system.version()
46 |     ]);
47 |     this.logger.info(
48 |       `You are connected to chain ${this.chain} using ${nodeName} v${nodeVersion}`
49 |     );
50 |   }
51 | 
52 |   protected _initExportDir = (): void =>{
53 |     if ( ! isDirExistent(this.exportDir) ) {
54 |       makeDir(this.exportDir)
55 |     }
56 | 
57 |     if( ! isDirEmpty(this.exportDir)){
58 |       this._uploadToBucket()
59 |     }
60 |   }
61 | 
62 |   protected _uploadToBucket = async (): Promise<void> =>{
63 |     this.isBucketEnabled && await this.bucket.uploadCSVFiles(this.exportDir)
64 |   }
65 | 
66 | }
67 | 
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | import { ApiPromise } from '@polkadot/api';
2 | import { EraIndex, SessionIndex, BlockNumber, EraRewardPoints, Balance, BalanceOf } from '@polkadot/types/interfaces';
3 | import { Compact } from '@polkadot/types';
4 | import { DeriveStakingAccount } from '@polkadot/api-derive/staking/types';
5 | import type { PalletStakingExposure } from '@polkadot/types/lookup';
6 | 
7 | export interface InputConfig {
8 |   logLevel: string;
9 |   debug?: DebugConfig;
10 |   port: number;
11 |   endpoint: string;
12 |   exportDir: string;
13 |   sessionOnly: boolean;
14 |   endSessionBlockDistance: number;
15 |   bucketUpload?: BucketUploadConfig;
16 |   cronjob?: CronJobConfig;
17 |   apiChunkSize?: number;
18 |   apiTimeoutMs?: number;
19 |   historic?: {
20 |     enabled: boolean;
21 |     historySize: number;
22 |   };
23 |   eraScanner?: {
24 |     enabled: boolean;
25 |     dataDir: string;
26 |     startFromEra?: number;
27 |   };
28 | }
29 | 
30 | interface DebugConfig{
31 |   enabled: boolean;
32 |   forceInitialWrite: boolean;
33 | }
34 | 
35 | export interface CronJobConfig{
36 |   enabled: boolean;
37 | }
38 | 
39 | export interface BucketUploadConfig{
40 |   enabled: boolean;
41 |   gcpServiceAccount: string;
42 |   gcpProject: string;
43 |   gcpBucketName: string;
44 | }
45 | 
46 | export interface MyDeriveStakingAccount extends DeriveStakingAccount {
47 |   displayName: string;
48 |   voters: Voter[];
49 |   eraPoints?: number;
50 |   exposure: PalletStakingExposure;
51 | }
52 | 
53 | export interface Voter {
54 |   address: string;
55 |   value: Compact<Balance>;
56 | }
57 | export type VotersMap = Map<string, Voter[]>
58 | 
59 | export interface WriteCSVRequest{
60 |   timestamp: string;
61 |   api: ApiPromise;
62 |   apiChunkSize: number;
63 |   network: string;
64 |   exportDir: string;
65 |   eraIndex: EraIndex;
66 |   sessionIndex: SessionIndex;
67 |   isEndEraBlock?: boolean;
68 |   blockNumber: Compact<BlockNumber>;
69 |   totalIssuance?: Balance;
70 |   validatorRewardsPreviousEra?: BalanceOf;
71 | }
72 | 
73 | export interface WriteCSVHistoricalRequest{
74 |   timestamp: string;
75 |   api: ApiPromise;
76 |   network: string;
77 |   exportDir: string;
78 |   totalIssuance?: Balance;
79 |   validatorRewardsPreviousEra?: BalanceOf;
80 |   eraIndexes: EraIndex[];
81 | }
82 | 
83 | export interface WriteNominatorCSVRequest extends WriteCSVRequest{
84 |   nominatorStaking: DeriveStakingAccount[];
85 | }
86 | 
87 | export interface WriteValidatorCSVRequest extends WriteCSVRequest{
88 |   myValidatorStaking: MyDeriveStakingAccount[];
89 |   myWaitingValidatorStaking?: MyDeriveStakingAccount[];
90 | }
91 | 
92 | export interface WriteValidatorHistoricCSVRequest extends WriteCSVHistoricalRequest{
93 |   erasData: ChainData[];
94 | }
95 | 
96 | export interface ChainData {
97 |   eraIndex?: EraIndex;
98 |   sessionIndex?: SessionIndex;
99 |   blockNumber?: Compact<BlockNumber>;
100 |   isEndEraBlock?: boolean;
101 |   eraPoints: EraRewardPoints;
102 |   totalIssuance: Balance;
103 |   validatorRewardsPreviousEra: BalanceOf;
104 |   nominatorStaking: DeriveStakingAccount[];
105 |   myValidatorStaking: MyDeriveStakingAccount[];
106 |   myWaitingValidatorStaking?: MyDeriveStakingAccount[];
107 | }
108 | 
109 | export interface EraLastBlock {
110 |   era: EraIndex;
111 |   block: number;
112 | }
--------------------------------------------------------------------------------
/src/utils.ts:
--------------------------------------------------------------------------------
1 | import fs, { ReadStream, WriteStream } from 'fs';
2 | import { Logger } from '@w3f/logger';
3 | import { DeriveAccountRegistration } from '@polkadot/api-derive/accounts/types';
4 | import { EraIndex, Event } from '@polkadot/types/interfaces';
5 | import { ApiPromise } from '@polkadot/api';
6 | import { EraLastBlock } from './types';
7 | 
8 | export const isDirEmpty = (path: string): boolean =>{
9 |   return fs.readdirSync(path).length === 0
10 | }
11 | 
12 | export const isDirExistent = (path: string): boolean =>{
13 |   return fs.existsSync(path)
14 | }
15 | 
16 | export const makeDir = (path: string): void =>{
17 |   fs.mkdirSync(path)
18 | }
19 | 
20 | export const getFileNames = (sourceDir: string, logger: Logger): string[] =>{
21 | 
22 |   let names: string[] = []
23 |   try {
24 |     names = fs.readdirSync(sourceDir)
25 |   } catch (error) {
26 |     logger.error(error)
27 |   }
28 |   return names
29 | }
30 | 
31 | export const deleteFile = (filePath: string, logger: Logger): void =>{
32 | 
33 |   try {
34 |     fs.unlinkSync(filePath)
35 |     logger.info('deleted ' + filePath)
36 |   } catch(err) {
37 |     logger.error(err)
38 |   }
39 | }
40 | 
41 | export const initFile = (exportDir: string,fileName: string,logger: Logger): WriteStream => {
42 | 
43 |   const filePath = `${exportDir}/${fileName}`;
44 |   const file = fs.createWriteStream(filePath);
45 |   file.on('error', (err) => { logger.error(err.stack) });
46 | 
47 |   return file
48 | }
49 | 
50 | export const closeFile = (file: WriteStream|ReadStream): Promise<void> => {
51 |   return new Promise(resolve => {
52 |     file.on("close", resolve);
53 |     file.close();
54 |   });
55 | }
56 | 
57 | export const getDisplayName = (identity: DeriveAccountRegistration): string =>{
58 |   /* TODO
59 |   This code is coming from https://github.com/mariopino/substrate-data-csv/blob/master/utils.js
60 |   and needs to be refactored
61 |   */
62 | 
63 |   if (
64 |     identity.displayParent &&
65 |     identity.displayParent !== `` &&
66 |     identity.display &&
67 |     identity.display !== ``
68 |   ) {
69 |     return `${identity.displayParent.replace(/\n/g, '')} / ${identity.display.replace(/\n/g, '')}`;
70 |   } else {
71 |     return identity.display || ``;
72 |   }
73 | }
74 | 
75 | const firstBlockCurrentEra = async (api: ApiPromise): Promise<number> => {
76 | 
77 |   const last = await api.rpc.chain.getHeader()
78 |   const deriveSessionProgress = await api.derive.session.progress();
79 |   //the api progress numbers are not fully accurate => guess a block inside the current era and correct it via babe.epochStart below
80 |   const guessedFirstBlockCurrentEra = last.number.unwrap().toNumber() - deriveSessionProgress.eraProgress.toNumber() + 50
81 | 
82 |   const hash = await api.rpc.chain.getBlockHash(guessedFirstBlockCurrentEra)
83 |   const apiAt = await api.at(hash)
84 |   const [_,firstBlockCurrentEra] = await apiAt.query.babe.epochStart()
85 | 
86 |   return firstBlockCurrentEra.toNumber()
87 | }
88 | 
89 | const howManyErasAgo = async (eraIndex: EraIndex, api: ApiPromise): Promise<number> => {
90 | 
91 |   const currentEraIndex = (await api.query.staking.activeEra()).unwrap().index;
92 |   return currentEraIndex.toNumber() - eraIndex.toNumber()
93 | 
94 | }
95 | 
96 | const lastBlockOf = async (eraIndex: EraIndex, api: ApiPromise): Promise<number> => {
97 | 
98 |   const howManyErasAgoVar = await howManyErasAgo(eraIndex, api)
99 |   if (howManyErasAgoVar == 0) return (await api.rpc.chain.getHeader()).number.unwrap().toNumber()
100 | 
101 |   const lastBlockPreviousEra = await firstBlockCurrentEra(api) - 1
102 | 
103 |   const deriveSessionProgress = await api.derive.session.progress();
104 | 
105 |   // the api result is still not reliable => guess a block inside the target era and correct it via babe.epochStart below
106 |   const guessedResult = lastBlockPreviousEra - ( ( howManyErasAgoVar - 1 ) * deriveSessionProgress.eraLength.toNumber() )
107 | 
108 |   const hash = await api.rpc.chain.getBlockHash(guessedResult + 50)
109 |   const apiAt = await api.at(hash)
110 |   const [_,firstBlockNextTargetEra] = await apiAt.query.babe.epochStart()
111 | 
112 |   return firstBlockNextTargetEra.toNumber() - 1
113 | 
114 | }
115 | 
116 | export const erasLastBlock = async (indexes: EraIndex[], api: ApiPromise): Promise<EraLastBlock[]> => {
117 | 
118 |   const result = await Promise.all(indexes.map(async index => {
119 |     return {era: index, block: await lastBlockOf(index,api)}
120 |   }))
121 | 
122 |   return result
123 | 
124 | }
125 | 
126 | export const getErrorMessage = (error: unknown): string => {
127 |   let errorString: string = String(error) // fallback for values that are neither strings nor Errors
128 |   if (typeof error === "string") {
129 |     errorString = error
130 |   } else if (error instanceof Error) {
131 |     errorString = error.message
132 |   }
133 |   return errorString
134 | }
135 | 
136 | export const delay = (ms: number): Promise<void> =>{
137 |   return new Promise( resolve => setTimeout(resolve, ms) );
138 | }
139 | 
140 | export const initWriteFileStream = (dirPath: string,fileName: string,logger: Logger): WriteStream => {
141 | 
142 |   const filePath = `${dirPath}/${fileName}`;
143 |   const file = fs.createWriteStream(filePath);
144 |   file.on('error', function(err) { logger.error(err.stack) });
145 | 
146 |   return file
147 | }
148 | 
149 | export const initReadFileStream = (dirPath: string,fileName: string,logger: Logger): ReadStream => {
150 | 
151 |   const filePath = `${dirPath}/${fileName}`;
152 |   const file = fs.createReadStream(filePath);
153 |   file.on('error', function(err) { logger.error(err.stack) });
154 | 
155 |   return file
156 | }
157 | 
158 | export const isNewEraEvent = (event: Event, api: ApiPromise): boolean => {
159 |   return api.events.session.NewSession.is(event)
160 | }
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "compilerOptions": {
3 |     "esModuleInterop": true,
4 |     "declaration": true,
5 |     "noImplicitAny": false,
6 |     "module": "commonjs",
7 |     "target": "es6",
8 |     "skipLibCheck": true,
9 |     "outDir": "./dist",
10 |     "experimentalDecorators": true,
11 |     "emitDecoratorMetadata": true
12 |   },
13 |   "exclude": [
14 |     "node_modules",
15 |     "**/*.spec.ts",
16 |     "test/**/*",
17 |     "e2e-test/**/*",
18 |     "dist/**/*"
19 |   ],
20 |   "_comment": "The polkadot-watcher app contains typescript errors"
21 | }
22 | 
--------------------------------------------------------------------------------
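For orientation, the sketch below shows how the pieces in this dump compose at startup. It is not the repository's actual entry point (that lives in src/actions/start.ts, which is not reproduced here): the config parsing via @w3f/config and the createLogger helper are assumptions, while SubscriberFactory, ISubscriber and InputConfig come straight from the sources above. The factory is the only decision point: it inspects cfg.eraScanner?.enabled and hands back either the event-driven era scanner or the live session/era subscriber, both behind the same ISubscriber interface.

// Hypothetical wiring sketch, NOT the actual src/actions/start.ts.
// Assumptions: @w3f/config exposes Config<T>.parse() and @w3f/logger exposes createLogger().
import { Config } from '@w3f/config';
import { createLogger } from '@w3f/logger';
import { SubscriberFactory } from './subscriber/SubscriberFactory';
import { InputConfig } from './types';

export async function startAction(cmd: { config: string }): Promise<void> {
  // Parse the YAML config file into the InputConfig shape defined in src/types.ts.
  const cfg = new Config<InputConfig>().parse(cmd.config);
  const logger = createLogger(cfg.logLevel);

  // The factory returns SubscriberEraScanner when cfg.eraScanner?.enabled is set,
  // and the live Subscriber otherwise; both implement ISubscriber.start().
  const subscriber = new SubscriberFactory(cfg, logger).makeSubscriber();
  await subscriber.start();
}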