├── .gitignore ├── README.md ├── .github └── workflows │ └── main.yml ├── LICENSE ├── addendum.json └── update_pools.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.nar 17 | *.ear 18 | *.zip 19 | *.tar.gz 20 | *.rar 21 | 22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 23 | hs_err_pid* 24 | adastat.json 25 | balanceanalytics.json -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | A common place to identify and serve definitions of groups/clusters for pools in the Cardano Ecosystem. 2 | The definition of group/cluster is usually very loosely held, allowing many to leverage the wordings. Essentially a pool that may be sharing Core/Relays, Marketing, provisioning a pool as a part of a service - are all eligible to be reported to the `addendum.json` file on this repository. 3 | 4 | The repo in itself could also further be helpful for dispute resolution/sharing metadata-related information or rules across various tools. 5 | 6 | Anyone is free to add commits, we'd expect at least 3 of the existing collaborators to be on the same page before it is merged. 7 | 8 | In order to add a known pool cluster that's not already in the list, you can directly use [this link](https://github.com/cardano-community/pool_groups/edit/main/descrepancy.json). 
9 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Update Pool Defs 2 | 3 | on: 4 | schedule: 5 | - cron: '30 */12 * * *' 6 | workflow_dispatch: 7 | inputs: 8 | tags: 9 | description: 'Trigger workflow manually' 10 | 11 | jobs: 12 | update_pools: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | token: ${{ secrets.GIT_CC }} 18 | - uses: actions/setup-python@v2 19 | with: 20 | python-version: 3.9 21 | - name: Update JSON files 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install urllib3 jsbeautifier 25 | python update_pools.py 26 | - name: Commit to repo 27 | run: | 28 | git add . 29 | git config --global user.name 'cardano-bot' 30 | git config --global user.email '${{ secrets.GIT_EMAIL }}' 31 | git commit -am "Scheduled update" 32 | git push 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 cardano-community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /addendum.json: -------------------------------------------------------------------------------- 1 | { 2 | "cardanians": { 3 | "pools": { 4 | "pool1l3nglypcrr46sjclnvcekc7z8qq9hym792zgtx8xqxxqy6g5pkm":{ "ticker": "CRDN"}, 5 | "pool1ukeqarukv9gxwy9pt8jkr4qvad2yf87758g3xzemmvmtk8lrynp":{ "ticker": "CRDN1"}, 6 | "pool189lsf6c2upyhmrzddyvyfjxxkqnte9pw8aqx7f4cuf85sjxlm02":{ "ticker": "CRDN2"}, 7 | "pool1kmsr376syqkluhxvv8k2axugqw7unjmlkwhfxjxtj7cw7qcn0jn":{ "ticker": "CRDN3"}, 8 | "pool1vx9tzlkgafernd9vpjpxkenutx2gncj4yn88fpq69823qlwcqrt":{ "ticker": "CRDNS"}, 9 | "pool16fyda57p3c8gp5r6gmcq5tvqsp6mnzwvkxswgznkuh8wztrp6vv":{ "ticker": "HRMS"}, 10 | "pool1hcg8sa642l0xeygkzvpgn3sfj5s2yeuzpws7a0gypyy7grrjcje":{ "ticker": "HRMS2"}, 11 | "pool1cduc7ut6grhqyavrmm3gzcdf4qfdsjk7vdwcxy9c0r5fyaxh2sq":{ "ticker": "POOLS"}, 12 | "pool1d2cl6yayuxp0wv0v7dtvu0s52vglg0x4z2lz27d6x4ewj72nk2a":{ "ticker": "WHALE"}, 13 | "pool1c3fjkls7d2aujud8y5xy5e0azu0ueatwn34u7jy3ql85ze3xya8":{ "ticker": "MANDA"} 14 | }, 15 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 16 | }, 17 | "cardano24": { 18 | "pools": { 19 | "pool1heks0vmxkfka2amg9c8yn8qdsxvgmzelfx6jhpfkj8ht5mn0r2m":{ "ticker": "ADA"}, 20 | "pool1rhq7qyw4y2s80gstyevm6rhz2lsdsrcstawunxx5eld8csvuvlu":{ "ticker": "ART"} 21 | }, 22 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 23 | }, 24 | "ZETETIC": { 25 | "pools": { 26 | 
"pool1lskcfz80qg2kr2p6776kl9emjn8ulkqsqse7um7skz4y2lwa0cu":{ "ticker": "CERO"}, 27 | "pool10jd7mphm0mmrnfvyytzs3tfrmjmtkvalfx8nql7tae06v0rkjk3":{ "ticker": "DOS"}, 28 | "pool15zrkyr0f80hxlt4scv72tej8l8zwrcphmrega9wutqchjekceal":{ "ticker": "ZETE"}, 29 | "pool12fzerww0hmk0fyvaujz5t2kg6hl4n8daz85rqy5jk8qk58r59y6":{ "ticker": "ZETE2"} 30 | }, 31 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 32 | }, 33 | "ATADA": { 34 | "pools": { 35 | "pool1xnaxushl47aeha3mr3uwnxcfkvzj0um6c4zxdr064d3y68z646c":{ "ticker": "ALPEN"}, 36 | "pool19w5khsnmu27au0kprw0kjm8jr7knneysj7lfkqvnu66hyz0jxsx":{ "ticker": "ATAD2"}, 37 | "pool1qqqqqdk4zhsjuxxd8jyvwncf5eucfskz0xjjj64fdmlgj735lr9":{ "ticker": "ATADA"} 38 | }, 39 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 40 | }, 41 | "5BINARIES": { 42 | "pools": { 43 | "pool1398lzhvtaa0hgz305d2jz4urfkwkkt66yv476wqe6att2f7dphh":{ "ticker": "n/a"}, 44 | "pool1z9m2kxeat06t30yf6ar7sqpert0cjdgxzcv2dv36dcwcqcqtgk4":{ "ticker": "n/a"}, 45 | "pool1nhhsh9yhlrvj9e3nce0zpx0xm2xlt6tx7gqakhr6hjc8wy84sat":{ "ticker": "n/a"}, 46 | "pool1k2qhlrrweu8fecd4hx4hn22lv00nrd3rjdxj6durax7m78q7ynu":{ "ticker": "n/a"}, 47 | "pool1z5rt6kn6yvuczj44qla73mfyln9l55lw0jkz6x4kjw00u32zec3":{ "ticker": "n/a"}, 48 | "pool1pu5jlj4q9w9jlxeu370a3c9myx47md5j5m2str0naunn2q3lkdy":{ "ticker": "NUTS"} 49 | }, 50 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 51 | }, 52 | "HETZFIN": { 53 | "pools": { 54 | "pool1zkdaju2rjefa52uh6yh6etsxla0x6aqs6p6wm245y5szk7k3msd":{ "ticker": "A3C"}, 55 | "pool1fh4sh2telea0tfdy39h0drx6uq566yt9gf24edpz7335sclx39z":{ "ticker": "CANUK"}, 56 | "pool1c0p84nykx0m4yqs9pkvlgagh3nlnrn77d5zed9kv9ktljh2cjcy":{ "ticker": "GOKEY"} 57 | }, 58 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 59 | }, 60 | "SAFEBLOCK": { 61 | "pools": { 62 | "pool1cc76kmtcpf6vht32ya5ke9er74dnpy4jh5qpy4klqwp87ygdsu6":{ "ticker": "BLOCK"}, 63 | "pool1wjsshqjpl3n6zlscnfvyy9gxklkav2dvfyprfye6l0kewyvv6yj":{ 
"ticker": "SAFE"} 64 | }, 65 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 66 | }, 67 | "RNODE": { 68 | "pools": { 69 | "pool10ldeq0mccp8t4gfmvfkwmfcvh2pvz44hj2hmewfpk9udjxug58y":{ "ticker": "BREAD"}, 70 | "pool1dhf4dt395f0c0ujnscazvzvw73ynzgwvtf5k4y9c907hwjdyaq4":{ "ticker": "JSP"}, 71 | "pool1tj3u8pz6732uy2ut3s90e0j880jyu9j0ve9vtu9zj5xzglhdlqy":{ "ticker": "RPSP"} 72 | }, 73 | "comment": "Shared Relays (lookup on dns names resolve to same IP)" 74 | } 75 | } 76 | 77 | -------------------------------------------------------------------------------- /update_pools.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python3 2 | # Objective: 3 | # - Identify pool as individual vs clusters without relying on self-declared elections 4 | # (which can easily be rigged using social nuances) from more than one source 5 | # - Have strict tolerance: This may mean that a pool whose metadata is updated might take 6 | # a bit to show up due to the way offline-pool metadata information is designed. All 7 | # professional SPOs are expected to be aware of the time it takes for this update in metadata 8 | # aggregators and are expected to accept risk or keep both version of metadata valid for a period. 9 | # - Any mismatch between providers is controlled at source and atleast raised via github issue by relevant pool operator. 10 | # 11 | # High-Level Flow: 12 | # - Download a list of all registered pools frm koios 13 | # - BalanceAnalytics and ADAStat provide their pool group information over API, download pool group information for each to adastat_js and balance_js respectively. 14 | # Any manual additions to the list is in addendum.json, load it to manual_js. 15 | # - balance_js : Those not recognised as part of a group have balance_js[].group = null 16 | # - adastat_js : Those not recognised as part of a group are not in the list 17 | # - manual_js : Those reported manually as cluster, SPaaS participant, etc. 
# - Process each pool in balance_js
#   - If the pool is also present in adastat_js (which is list of pools who are also groups):
#     - Add pool to group, preferring naming convention from balanceanalytics list
#     - If adastat and balanceanalytics dont match, add it as discrepancy
#   - If they agree on pool being single pool operator, add it to spo
# - Process each pool in adastat_js
#   - Add any pools not listed in balance_js as discrepancy
# - Process addendum.json

import json, os, sys, traceback
import jsbeautifier
import urllib3

http = urllib3.PoolManager()
urllib3.disable_warnings()

# Output/input file names.
clustersf = 'pool_clusters.json'      # pools that belong to a multi-pool group/cluster
spof = 'singlepooloperators.json'     # pools considered single-pool operators
addendumf = 'addendum.json'           # manually curated group memberships (input)
allf = 'spos.json'                    # every registered pool with its derived grouping


def load_json(url):
    """Download *url* and return the parsed JSON body.

    Raises:
        RuntimeError: on any non-200 response. (The original referenced the
        bare builtin ``exit`` -- a no-op expression -- and then implicitly
        returned None, producing confusing failures downstream.)
    """
    resp = http.request('GET', url, redirect=True)
    if resp.status != 200:
        raise RuntimeError(
            "An error occurred while downloading group definition from url: "
            + url + "\n  " + str(resp.data))
    return json.loads(resp.data)


def save_json(obj, jsonfile):
    """Save Object argument to JSON file argument."""
    if obj == "":
        return
    with open(jsonfile, 'w', encoding='utf-8') as f:
        options = jsbeautifier.default_options()
        options.indent_size = 2
        options.preserve_newlines = False
        f.write(jsbeautifier.beautify(json.dumps(obj, indent=2, sort_keys=True), options))


def open_json(jsonfile):
    """Open JSON file argument and return it as object (created empty if missing)."""
    if not os.path.isfile(jsonfile):
        save_json({}, jsonfile)
    with open(jsonfile, 'r', encoding='utf-8') as f:
        return json.load(f)


def main():
    try:
        # Page through Koios 1000 pools at a time; a short page marks the end.
        koios_pool_list = []
        for offset in range(0, 100000, 1000):
            fetched = load_json(
                'https://api.koios.rest/api/v1/pool_list?select=pool_id_bech32,ticker,pool_status'
                '&pool_status=eq.registered&offset=' + str(offset)
                + '&limit=1000&order=pool_id_bech32.asc')
            koios_pool_list.extend(fetched)
            if len(fetched) < 1000:
                break

        # Let download failures propagate to the outer handler (which exits
        # non-zero) instead of swallowing them here and crashing later with a
        # NameError on the undefined variables, as the original code did.
        adastat_js = load_json('https://api.adastat.net/rest/v1/poolclusters.json')
        balance_js = load_json('https://www.balanceanalytics.io/api/groupdata.json')
        # Offline debugging alternatives:
        #adastat_js = open_json('adastat.json')
        #balance_js = open_json('balanceanalytics.json')

        manual_js = open_json(addendumf)

        # Index the upstream feeds once (the original filtered the full lists
        # for every pool, which is O(n^2) over thousands of pools).
        bal_by_pool, bal_group_sizes = {}, {}
        for entry in balance_js[0]['pool_group_json']:
            bal_by_pool[entry['pool_hash']] = entry
            bal_group_sizes[entry['pool_group']] = bal_group_sizes.get(entry['pool_group'], 0) + 1
        as_by_pool = {row['pool_id_bech32']: row for row in adastat_js['rows']}

        spos, as_groups = {}, {}
        grplist, singlepooloperatorlist, spolist = [], [], []

        # For now start off assuming every pool is single-operator, until discovered to be otherwise.
        for koios_pool in koios_pool_list:
            poolid = koios_pool['pool_id_bech32']
            spos[poolid] = {"pool_id_bech32": poolid, "ticker": koios_pool['ticker'], "group": None}

            # Process Balance Analytics entry (if any) for this pool.
            bal_pool = bal_by_pool.get(poolid)
            if bal_pool is None:
                spos[poolid]["balanceanalytics_group"] = None
            else:
                bal_grp = bal_pool['pool_group']
                # Special logic to treat groups with just 1 pool as SINGLEPOOL e.g. ADASTAT
                if bal_group_sizes.get(bal_grp, 0) == 1:
                    bal_grp = 'SINGLEPOOL'
                spos[poolid]["balanceanalytics_group"] = bal_grp
                if bal_grp != 'SINGLEPOOL':
                    spos[poolid]["group"] = bal_grp

            # Process ADAStat entry (if any) for this pool.
            as_pool = as_by_pool.get(poolid)
            if as_pool is None:
                spos[poolid]["adastat_group"] = None
            else:
                spos[poolid]["adastat_group"] = as_pool['cluster_name']
                as_groups.setdefault(as_pool['cluster_name'], []).append(poolid)
                if spos[poolid]["group"] is None:
                    spos[poolid]["group"] = as_pool['cluster_name']

        # Process addendum file: manual overrides for pools that both upstream
        # sources believe are single-pool operators.
        for poolgrp in manual_js:
            for pool in manual_js[poolgrp]['pools']:
                if pool in spos and spos[pool]["group"] is None:
                    spos[pool]["group"] = poolgrp

        # Loop through every pool in adastat group list, to fill in groupings
        # from balance analytics (preferring the balanceanalytics group name).
        for as_grp, members in as_groups.items():
            bal_grpname = None
            for member in members:
                ba_grp = spos[member]["balanceanalytics_group"]
                if ba_grp not in (None, "SINGLEPOOL"):
                    bal_grpname = ba_grp
            if bal_grpname is not None:
                for member in members:
                    spos[member]["group"] = bal_grpname

        for spo in sorted(spos):
            spolist.append(spos[spo])
            if spos[spo]["group"] is not None:
                grplist.append(spos[spo])
            else:
                singlepooloperatorlist.append(spos[spo])

        # Sanity check: a tiny cluster list almost certainly means a broken feed.
        if len(grplist) <= 100:
            print("Something went wrong, pool_group size was unexpectedly too low: " + str(len(grplist)))
            sys.exit(1)

        save_json(spolist, allf)
        save_json(grplist, clustersf)
        save_json(singlepooloperatorlist, spof)
    except Exception as e:
        # format_exc() returns the traceback text; the original concatenated
        # print_exc()'s None return value into the message.
        print("Exception: " + str(e) + "\n" + traceback.format_exc())
        sys.exit(1)


# Guard the entry point so importing this module (e.g. for testing) does not
# immediately fire off network calls.
if __name__ == "__main__":
    main()