├── .dockerignore ├── .gitignore ├── COINGECKO.py ├── CONFIG.py ├── CONNECT_WEBSOCKET.py ├── Dockerfile ├── Dockerfile.rest ├── HELPERS.py ├── HELPERS_TYPES.py ├── MIGRATIONS └── v0.0.8-0.0.10.md ├── Makefile ├── README.md ├── RequestsHandler.py ├── akash └── deploy.yaml ├── configs ├── .env └── cache_times.json ├── docker-compose.yml ├── docker ├── nginx.conf ├── start.sh └── uwsgi.ini ├── docs ├── AKASH.md ├── CONFIG_VALUES.md ├── SUPPORT.md ├── SYSTEMD_FILES.md ├── WEBSOCKET.md └── logo │ ├── CosmosCacheLogo-Slim.png │ └── CosmosCacheLogo.png ├── requirements └── requirements.txt ├── rest.py ├── rpc.py ├── run_rest.sh ├── run_rpc.sh ├── static ├── README.md └── favicon.png └── test ├── README.md ├── cw_template.wasm └── uni.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | /.mypy_cache 2 | /__pycache__ 3 | 4 | /.env 5 | /cache_times.json 6 | /README.md -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .mypy*/* 3 | 4 | # ignores project root directory configs 5 | .env 6 | /cache_times.json 7 | 8 | kvstores/*.json 9 | 10 | static/openapi.yml -------------------------------------------------------------------------------- /COINGECKO.py: -------------------------------------------------------------------------------- 1 | import json 2 | from time import time 3 | 4 | from pycoingecko import CoinGeckoAPI 5 | 6 | import CONFIG 7 | from CONFIG import KV_STORE 8 | from HELPERS import ttl_block_only 9 | from HELPERS_TYPES import Mode 10 | 11 | 12 | class Coingecko: 13 | # https://www.coingecko.com/en/api/documentation 14 | # 10-30 calls per minute. So we do 6 seconds to be on the safe side by default. 
15 | # If you use a paid plan, you can do 500+ -> https://www.coingecko.com/en/api/pricing 16 | def __init__(self): 17 | api_key = CONFIG.COINGECKO_API_KEY 18 | if len(api_key) > 0: 19 | self.cg = CoinGeckoAPI(api_key=api_key) 20 | else: 21 | self.cg = CoinGeckoAPI() 22 | 23 | def get_symbols(self): 24 | ids = CONFIG.COINGECKO_IDS 25 | 26 | key = f"coingecko_symbols;{ids}" 27 | values = KV_STORE.get(key) 28 | if values is not None: 29 | return json.loads(values) 30 | 31 | values = {} 32 | for _id in ids: 33 | data = self.cg.get_coin_by_id(_id) 34 | symbol = data.get("symbol", "") 35 | values[_id] = symbol 36 | 37 | KV_STORE.set(key, json.dumps(values), timeout=86400) 38 | return values 39 | 40 | def get_price(self): 41 | ids = CONFIG.COINGECKO_IDS 42 | vs_currencies = CONFIG.COINGECKO_FIAT 43 | 44 | cache_seconds = int(CONFIG.COINGECKO_CACHE.get("seconds", 7)) 45 | key = f"coingecko;{ttl_block_only(cache_seconds)};{ids};{vs_currencies}" 46 | 47 | value = KV_STORE.get(key) 48 | if value is not None: 49 | return json.loads(value) 50 | 51 | symbols = self.get_symbols() # cached 1 day 52 | coins = self.cg.get_price(ids=ids, vs_currencies=vs_currencies) 53 | # print(symbols) 54 | 55 | updated_coins = {} 56 | for k, v in coins.items(): 57 | symbol = str(symbols.get(k, k)).upper() 58 | updated_coins[symbol] = {"coingecko-id": k, "prices": v} 59 | 60 | data = { 61 | "coins": updated_coins, 62 | "last_update": int(time()), 63 | } 64 | 65 | if cache_seconds == Mode.FOR_BLOCK_TIME.value: # -2 66 | cache_seconds = int(CONFIG.DEFAULT_CACHE_SECONDS) 67 | 68 | KV_STORE.set(key, json.dumps(data), timeout=int(cache_seconds)) 69 | return data 70 | 71 | 72 | if __name__ == "__main__": 73 | p = Coingecko() 74 | # v = p.get_price() 75 | # print(v) 76 | 77 | # print(p.get_symbols()) 78 | print(p.get_price()) 79 | -------------------------------------------------------------------------------- /CONFIG.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | from os import getenv 5 | 6 | import requests 7 | from dotenv import load_dotenv 8 | from py_kvstore import KVStore 9 | 10 | HEADERS = { 11 | "accept": "application/json", 12 | "Content-Type": "application/json", 13 | } 14 | 15 | PROJECT_DIR = os.path.dirname(os.path.realpath(__file__)) 16 | 17 | KV_DIR = os.path.join(PROJECT_DIR, "kvstores") 18 | os.makedirs(KV_DIR, exist_ok=True) 19 | 20 | env_file = os.path.join(PROJECT_DIR, ".env") 21 | 22 | 23 | load_dotenv(env_file) 24 | USE_BACKUP_AS_PRIMARY = getenv("USE_BACKUP_AS_PRIMARY", "false").lower().startswith("t") 25 | 26 | ## == Helper == ## 27 | REMOTE_CONFIG_TIME_FILE = getenv("REMOTE_CONFIG_TIME_FILE", "") 28 | if not os.path.exists(env_file): 29 | if len(REMOTE_CONFIG_TIME_FILE) == 0: 30 | # error as we are not using docker 31 | print("No .env file found. Please copy it and edit. `cp configs/.env .env`") 32 | exit(1) 33 | 34 | 35 | def get_config_file(filename: str): 36 | """ 37 | Gets the custom config file if it exist. If not, uses the custom one if allowed. 
38 | 39 | If it is the cache time, we allow it to be downloaded from a remote source so that docker/akash is easier to use 40 | """ 41 | if filename == "cache_times.json" and len(REMOTE_CONFIG_TIME_FILE) > 0: 42 | if os.path.exists(filename): 43 | return os.path.join(PROJECT_DIR, filename) 44 | else: 45 | print("Downloading remote config file...") 46 | r = requests.get(REMOTE_CONFIG_TIME_FILE).text 47 | with open(os.path.join(PROJECT_DIR, filename), "w") as f: 48 | f.write(r) 49 | 50 | # custom file if they moved to the project root dir 51 | custom_config = os.path.join(PROJECT_DIR, filename) 52 | if os.path.exists(custom_config): 53 | return custom_config 54 | 55 | return os.path.join(PROJECT_DIR, "configs", filename) # default 56 | 57 | 58 | DEBUGGING = getenv("DEBUGGING", "false").lower().startswith("t") 59 | 60 | # KVStore 61 | KV_STORE_NAME = getenv("STORE_NAME", "node_store") 62 | KV_STORE = KVStore(name=KV_STORE_NAME, dump_dir=KV_DIR) 63 | KV_STORE.load() 64 | 65 | ENABLE_COUNTER = getenv("ENABLE_COUNTER", "true").lower().startswith("t") 66 | INC_EVERY = int(getenv("INCREASE_COUNTER_EVERY", 250)) 67 | STATS_PASSWORD = getenv("STATS_PASSWORD", "") 68 | 69 | # === Coingecko === 70 | COINGECKO_ENABLED = getenv("COINGECKO_ENABLED", "true").lower().startswith("t") 71 | COINGECKO_API_KEY = getenv("COINGECKO_API_KEY", "") 72 | COINGECKO_IDS = getenv("COINGECKO_IDS", "cosmos,juno-network,osmosis").split(",") 73 | COINGECKO_FIAT = getenv("COINGECKO_FIAT", "usd,eur").split(",") 74 | 75 | # =========== 76 | # === RPC === 77 | # =========== 78 | RPC_PORT = int(getenv("RPC_PORT", 5001)) 79 | 80 | 81 | RPC_URL = getenv("RPC_URL", "https://juno-rpc.polkachu.com:443") 82 | BACKUP_RPC_URL = getenv("BACKUP_RPC_URL", "https://rpc.juno.strange.love:443") 83 | if USE_BACKUP_AS_PRIMARY: 84 | RPC_URL = BACKUP_RPC_URL 85 | 86 | RPC_WEBSOCKET = getenv("RPC_WEBSOCKET", "ws://15.204.143.232:26657/websocket") 87 | BACKUP_RPC_WEBSOCKET = getenv( 88 | "BACKUP_RPC_WEBSOCKET", "ws://rpc.juno.strange.love:443/websocket" 89 | ) 90 | if USE_BACKUP_AS_PRIMARY: 91 | RPC_WEBSOCKET = BACKUP_RPC_WEBSOCKET 92 | 93 | # ============ 94 | # === REST === 95 | # ============ 96 | REST_PORT = int(getenv("REST_PORT", 5000)) 97 | 98 | API_TITLE = getenv("API_TITLE", "Swagger API") 99 | 100 | REST_URL = getenv("REST_URL", "https://juno-api.polkachu.com") 101 | BACKUP_REST_URL = getenv("BACKUP_REST_URL", f"https://api.juno.strange.love") 102 | if USE_BACKUP_AS_PRIMARY: 103 | REST_URL = BACKUP_REST_URL 104 | 105 | OPEN_API = f"{REST_URL}/static/openapi.yml" 106 | 107 | DISABLE_SWAGGER_UI = getenv("DISABLE_SWAGGER_UI", "false").lower().startswith("t") 108 | 109 | # Security 110 | RPC_LISTEN_ADDRESS = getenv("RPC_LISTEN_ADDRESS", "") 111 | NODE_MONIKER = getenv("NODE_MONIKER", "") 112 | 113 | # === Cache Times === 114 | DEFAULT_CACHE_SECONDS: int = 6 115 | 116 | cache_times: dict = {} 117 | RPC_ENDPOINTS: dict = {} 118 | REST_ENDPOINTS: dict = {} 119 | COINGECKO_CACHE: dict = {} 120 | 121 | 122 | # === CACHE HELPER === 123 | def update_cache_times(): 124 | """ 125 | Updates any config variables which can be changed without restarting the server. 
126 | Useful for the /cache_info endpoint & actually applying said cache changes at any time 127 | """ 128 | global cache_times, DEFAULT_CACHE_SECONDS, RPC_ENDPOINTS, REST_ENDPOINTS, COINGECKO_CACHE 129 | 130 | cache_times_config = get_config_file("cache_times.json") 131 | cache_times = json.loads(open(cache_times_config, "r").read()) 132 | 133 | DEFAULT_CACHE_SECONDS = cache_times.get("DEFAULT", 6) 134 | RPC_ENDPOINTS = cache_times.get("rpc", {}) 135 | REST_ENDPOINTS = cache_times.get("rest", {}) 136 | COINGECKO_CACHE = cache_times.get("coingecko", {}) 137 | 138 | 139 | def get_cache_time_seconds(path: str, is_rpc: bool) -> int: 140 | """ 141 | Returns an endpoints time to cache in seconds 142 | """ 143 | endpoints = RPC_ENDPOINTS if is_rpc else REST_ENDPOINTS 144 | 145 | cache_seconds = DEFAULT_CACHE_SECONDS 146 | for k, seconds in endpoints.items(): 147 | k.replace("*", ".+") 148 | if re.match(k, path): 149 | cache_seconds = seconds 150 | break 151 | 152 | return cache_seconds 153 | -------------------------------------------------------------------------------- /CONNECT_WEBSOCKET.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | import rel 5 | import websocket 6 | 7 | from CONFIG import KV_STORE, RPC_WEBSOCKET 8 | 9 | SUBSCRIBE_MSG = '{"jsonrpc": "2.0", "method": "subscribe", "params": ["tm.event=\'NewBlock\'"], "id": 1}' 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | CONNECTED = False 14 | 15 | 16 | # on a new block message, we will clear in the KV Store of any values which the config set to -2 17 | # Use this for an indexer in the future?? :D 18 | def on_message(ws, message): 19 | msg = json.loads(message) 20 | 21 | if msg.get("result") == {}: 22 | logger.info("Subscribed to New Block with TendermintRPC...") 23 | return 24 | 25 | # block_height = msg["result"]["data"]["value"]["block"]["header"]["height"] 26 | block_height = ( 27 | msg.get("result", {}) 28 | .get("data", {}) 29 | .get("value", {}) 30 | .get("block", {}) 31 | .get("header", {}) 32 | .get("height", -1) 33 | ) 34 | 35 | if block_height == -1: 36 | logger.error("Error: block height not found") 37 | return 38 | 39 | logger.debug(f"""New Block: {block_height}""") 40 | 41 | del_keys = KV_STORE.get_keys("*;IsBlockOnly;*") 42 | if len(del_keys) > 0: 43 | res: bool = KV_STORE.delete(del_keys) 44 | if res: 45 | logger.debug(f"Deleting {len(del_keys)} keys...") 46 | # KV_STORE.dump() 47 | 48 | 49 | def on_error(ws, error): 50 | logger.error(error) 51 | 52 | 53 | def on_close(ws, close_status_code, close_msg): 54 | logger.info("Closed connection") 55 | 56 | 57 | def on_open(ws): 58 | logger.info("Opened connection") 59 | ws.send(SUBSCRIBE_MSG) 60 | logger.info("Sent subscribe request") 61 | 62 | 63 | class TendermintRPCWebSocket: 64 | def __init__( 65 | self, 66 | enableSignal: bool = False, 67 | enableTrace: bool = False, 68 | logLevel: int = logging.DEBUG, 69 | ): 70 | self.enableSignal = enableSignal 71 | 72 | websocket.enableTrace(enableTrace) # toggle to show or hide output 73 | self.ws = websocket.WebSocketApp( 74 | f"{RPC_WEBSOCKET}", 75 | on_open=on_open, 76 | on_message=on_message, 77 | on_error=on_error, 78 | on_close=on_close, 79 | ) 80 | 81 | logger.setLevel(logLevel) 82 | logger.addHandler(logging.StreamHandler()) 83 | 84 | def start(self): 85 | if self.enableSignal: 86 | self.ws.run_forever(dispatcher=rel, reconnect=5) 87 | self.signal(2, rel.abort) 88 | self.dispatch() 89 | else: 90 | self.run_forever() 91 | 92 | def 
signal(self, sig, func): 93 | rel.signal(sig, func) 94 | 95 | def dispatch(self): 96 | rel.dispatch() 97 | 98 | 99 | if __name__ == "__main__": 100 | tmrpc = TendermintRPCWebSocket(enableSignal=True) # so we can ctrl+c 101 | tmrpc.start() 102 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t reecepbcups/rpc-cache:latest . 2 | # docker run -e RPC_WORKER_THREADS=2 -e REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json -p 5001:5001 reecepbcups/rpc-cache:latest 3 | 4 | FROM python:3.11-slim 5 | 6 | RUN apt-get clean \ 7 | && apt-get -y update 8 | 9 | RUN apt-get -y install nginx \ 10 | && apt-get -y install python3-dev \ 11 | && apt-get -y install build-essential 12 | 13 | COPY requirements/requirements.txt /srv/flask_app/requirements/requirements.txt 14 | RUN pip install -r /srv/flask_app/requirements/requirements.txt --src /usr/local/src 15 | 16 | COPY . /srv/flask_app 17 | WORKDIR /srv/flask_app 18 | 19 | EXPOSE 5001 20 | 21 | # You can set this at run time with -e 22 | ENV RPC_WORKER_THREADS=1 23 | 24 | # CMD ["gunicorn", "-w", "echo ${WORKER_THREADS}", "-b", "0.0.0.0:5001", "rpc:rpc_app"] 25 | CMD gunicorn -w ${RPC_WORKER_THREADS} -b 0.0.0.0:5001 rpc:rpc_app -------------------------------------------------------------------------------- /Dockerfile.rest: -------------------------------------------------------------------------------- 1 | # docker build . -f Dockerfile.rest -t reecepbcups/api-cache:latest 2 | # docker run -e REST_URL=http://15.204.143.232:1317 -e DISABLE_SWAGGER_UI=false -e REST_WORKER_THREADS=1 -e REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json -p 5000:5000 reecepbcups/api-cache:latest 3 | 4 | FROM python:3.11 5 | 6 | RUN apt-get clean \ 7 | && apt-get -y update 8 | 9 | RUN apt-get -y install nginx \ 10 | && apt-get -y install python3-dev \ 11 | && apt-get -y install build-essential 12 | 13 | COPY requirements/requirements.txt /srv/flask_app/requirements/requirements.txt 14 | RUN pip install -r /srv/flask_app/requirements/requirements.txt --src /usr/local/src 15 | 16 | COPY . 
/srv/flask_app 17 | WORKDIR /srv/flask_app 18 | 19 | EXPOSE 5000 20 | 21 | # You can set this at run time with -e 22 | ENV REST_WORKER_THREADS=1 23 | 24 | CMD gunicorn -w ${REST_WORKER_THREADS} -b 0.0.0.0:5000 rest:app -------------------------------------------------------------------------------- /HELPERS.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | from os import getenv 4 | 5 | import httpx 6 | 7 | import CONFIG 8 | from CONFIG import KV_STORE 9 | from HELPERS_TYPES import CallType, Mode 10 | 11 | 12 | def ttl_block_only(cache_seconds: int = 0): 13 | # this way on a new block, we delete all *;IsBlockOnly;* keys 14 | return ( 15 | "IsBlockOnly" 16 | if cache_seconds == Mode.FOR_BLOCK_TIME.value 17 | else f"{cache_seconds}s" 18 | ) 19 | 20 | 21 | total_calls = { 22 | # RPC: 23 | CallType.RPC_GET_CACHE.value: 0, 24 | CallType.RPC_GET_OUTBOUND.value: 0, 25 | # RPC Post: 26 | CallType.RPC_POST_CACHE.value: 0, 27 | CallType.RPC_POST_OUTBOUND.value: 0, 28 | # REST: 29 | CallType.REST_GET_CACHE.value: 0, 30 | CallType.REST_GET_OUTBOUND.value: 0, 31 | } 32 | 33 | 34 | def increment_call_value(key: str, amount: int = 1): 35 | global total_calls 36 | 37 | if CONFIG.ENABLE_COUNTER == False: 38 | return 39 | 40 | if key not in total_calls: 41 | total_calls[str(key)] = 0 42 | 43 | if total_calls[key] >= CONFIG.INC_EVERY: 44 | KV_STORE.incr(f"{key}", amount=total_calls[key]) 45 | total_calls[key] = 0 46 | else: 47 | total_calls[key] += amount 48 | 49 | if CONFIG.DEBUGGING: 50 | print(f"incremented {key} to {total_calls[key]}") 51 | 52 | # NOTE: testing only 53 | # print("testing only dump here") 54 | # KV_STORE.dump() 55 | 56 | 57 | def download_openapi_locally(): 58 | # TODO: What if there is no swagger API? 59 | r = httpx.get(CONFIG.OPEN_API) 60 | if r.status_code != 200: 61 | return 62 | file_loc = f"{CONFIG.PROJECT_DIR}/static/openapi.yml" 63 | with open(file_loc, "w") as f: 64 | f.write(r.text) 65 | 66 | 67 | def get_swagger_code_from_source(): 68 | req = httpx.get(f"{CONFIG.REST_URL}") 69 | 70 | html = req.text.replace( 71 | "//unpkg.com/swagger-ui-dist@3.40.0/favicon-16x16.png", 72 | "/static/favicon.png", 73 | ) 74 | html = re.sub(r".*", f"{CONFIG.API_TITLE}", html) 75 | return html 76 | 77 | 78 | def replace_rpc_text() -> str: 79 | # we replace after on requests of the user, then replace this text to our cache endpoint at time of requests to root endpoint 80 | try: 81 | RPC_ROOT_HTML = httpx.get(f"{CONFIG.RPC_URL}/").text 82 | except: 83 | RPC_ROOT_HTML = httpx.get(f"{CONFIG.BACKUP_RPC_URL}/").text 84 | 85 | RPC_TITLE = getenv("RPC_TITLE", "") 86 | if len(RPC_TITLE) > 0: 87 | RPC_ROOT_HTML = RPC_ROOT_HTML.replace( 88 | "", 89 | f"{RPC_TITLE}", 90 | ) 91 | 92 | # Puts text at the bottom, maybe put at the top in the future? 93 | RPC_CUSTOM_TEXT = getenv("RPC_CUSTOM_TEXT", "") 94 | if len(RPC_CUSTOM_TEXT) > 0: 95 | RPC_ROOT_HTML = RPC_ROOT_HTML.replace( 96 | "Available endpoints:

", 97 | f"{RPC_CUSTOM_TEXT}
<br>Available endpoints:<br><br>", 98 | ) 99 | 100 | # add cache_info endpoint. THIS REMOVES BLANK 'Available endpoints:<br><br>' 101 | RPC_ROOT_HTML = RPC_ROOT_HTML.replace( 102 | "Available endpoints:<br><br>", 103 | f'//{{BASE_URL}}/cache_info<br><br>', 104 | # we replace the BASE_URL on the call to the root endpoint 105 | ) 106 | 107 | RPC_ROOT_HTML = RPC_ROOT_HTML.replace( 108 | "/cache_info<br><br>", 109 | f'/cache_info<br>//{{BASE_URL}}/prices<br><br>', 110 | # we replace the BASE_URL on the call to the root endpoint 111 | ) 112 | 113 | # Set RPC favicon to nothing 114 | RPC_ROOT_HTML = RPC_ROOT_HTML.replace( 115 | "", 116 | f'', 117 | ) 118 | 119 | return RPC_ROOT_HTML 120 | 121 | 122 | INITIAL_HTML = """Cache Stats""" 123 | CLOSING_HTML = """""" 124 | 125 | 126 | def get_config_values(): 127 | KVs = [item for item in dir(CONFIG) if not item.startswith("__")] 128 | items = {item: getattr(CONFIG, item) for item in KVs} 129 | 130 | return f""" 131 | {INITIAL_HTML} 132 | <br><br>Config Values<br><br> 133 | <br><br>{items}<br><br> 134 | {CLOSING_HTML} 135 | """ 136 | 137 | 138 | def get_stats_html(): 139 | updates_every = CONFIG.INC_EVERY 140 | 141 | # gets information about the kv store 142 | rpc_get_cache = KV_STORE.get(CallType.RPC_GET_CACHE.value) 143 | rpc_get_outbound = KV_STORE.get(CallType.RPC_GET_OUTBOUND.value) 144 | 145 | rpc_post_cache = KV_STORE.get(CallType.RPC_POST_CACHE.value) 146 | rpc_post_outbound = KV_STORE.get(CallType.RPC_POST_OUTBOUND.value) 147 | 148 | rest_cache = KV_STORE.get(CallType.REST_GET_CACHE.value) 149 | rest_outbound = KV_STORE.get(CallType.REST_GET_OUTBOUND.value) 150 | # no rest post yet, not added. 151 | 152 | # converts (1 so no div / 0 errors) 153 | rpc_get_cache = 1 if rpc_get_cache == None else int(rpc_get_cache.decode("utf-8")) 154 | rpc_get_outbound = ( 155 | 1 if rpc_get_outbound == None else int(rpc_get_outbound.decode("utf-8")) 156 | ) 157 | 158 | rpc_post_cache = ( 159 | 1 if rpc_post_cache == None else int(rpc_post_cache.decode("utf-8")) 160 | ) 161 | rpc_post_outbound = ( 162 | 1 if rpc_post_outbound == None else int(rpc_post_outbound.decode("utf-8")) 163 | ) 164 | 165 | rest_cache = 1 if rest_cache == None else int(rest_cache.decode("utf-8")) 166 | rest_outbound = 1 if rest_outbound == None else int(rest_outbound.decode("utf-8")) 167 | 168 | return f""" 169 | {INITIAL_HTML} 170 | <br><br>Updates every {updates_every} calls<br><br> 171 | 172 | <br><br>RPC GET Cache Stats<br><br> 173 | <br><br>RPC Cache Hits: {rpc_get_cache}<br><br> 174 | <br><br>RPC outbound: {rpc_get_outbound}<br><br> 175 | <br><br>Percent Cached: {round((rpc_get_cache / (rpc_get_cache + rpc_get_outbound)) * 100, 2)}%<br><br> 176 | <br> 177 | <br><br>RPC POST Cache Stats<br><br> 178 | <br><br>RPC Cache Hits: {rpc_post_cache}<br><br> 179 | <br><br>RPC outbound: {rpc_post_outbound}<br><br> 180 | <br><br>Percent Cached: {round((rpc_post_cache / (rpc_post_cache + rpc_post_outbound)) * 100, 2)}%<br><br> 181 | <br> 182 | <br><br>REST GET Cache Stats<br><br> 183 | <br><br>REST Cache Hits: {rest_cache}<br><br> 184 | <br><br>REST outbound: {rest_outbound}<br><br> 185 | <br><br>Percent Cached: {round((rest_cache / (rest_cache + rest_outbound)) * 100, 2)}%<br><br>
186 | {CLOSING_HTML} 187 | """ 188 | 189 | 190 | def _hide_data(json: dict, str_path: str, cfg_value: str) -> dict: 191 | """ 192 | cfg_value is some string 193 | path is the json path in string form. For example, ['result']['node_info'] is result.node_info 194 | json is teh default json response 195 | 196 | Given this, if the path exist in the json, edit said path and update it to be the cfg_value 197 | Then return the updated JSON 198 | 199 | else: 200 | return the original JSON 201 | """ 202 | if len(str_path) == 0 or len(cfg_value) == 0: 203 | return json 204 | 205 | path = str_path.split(".") 206 | parent = json 207 | for key in path[:-1]: 208 | parent = parent.get(key, {}) 209 | if not parent: 210 | return json 211 | parent[path[-1]] = cfg_value 212 | return json 213 | 214 | 215 | def hide_rpc_data(res: dict, endpoint_path: str): 216 | if endpoint_path.lower().startswith("status"): 217 | res = _hide_data(res, "result.node_info.listen_addr", CONFIG.RPC_LISTEN_ADDRESS) 218 | res = _hide_data( 219 | res, "result.node_info.other.rpc_address", CONFIG.RPC_LISTEN_ADDRESS 220 | ) 221 | res = _hide_data(res, "result.node_info.moniker", CONFIG.NODE_MONIKER) 222 | 223 | return res 224 | 225 | 226 | def hide_rest_data(res: dict, endpoint_path: str): 227 | if endpoint_path.lower().endswith("v1beta1/node_info"): 228 | res = _hide_data( 229 | res, "default_node_info.listen_addr", CONFIG.RPC_LISTEN_ADDRESS 230 | ) 231 | res = _hide_data( 232 | res, "default_node_info.other.rpc_address", CONFIG.RPC_LISTEN_ADDRESS 233 | ) 234 | res = _hide_data(res, "default_node_info.moniker", CONFIG.NODE_MONIKER) 235 | 236 | # hide application_version.build_deps? 237 | 238 | return res 239 | -------------------------------------------------------------------------------- /HELPERS_TYPES.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | import CONFIG 4 | from CONFIG import KV_STORE 5 | 6 | 7 | class Mode(Enum): 8 | NO_CACHE = 0 9 | DISABLED = -1 10 | FOR_BLOCK_TIME = -2 11 | 12 | 13 | class CallType(Enum): 14 | # RPC 15 | RPC_GET_CACHE = f"rpc;amt;cache;rpc_get" 16 | RPC_GET_OUTBOUND = f"rpc;amt;outbound;rpc_get" 17 | 18 | # RPC POST 19 | RPC_POST_CACHE = f"rpc;amt;cache;rpc_post" 20 | RPC_POST_OUTBOUND = f"rpc;amt;outbound;rpc_post" 21 | 22 | # REST GET 23 | REST_GET_CACHE = f"rest;amt;cache;rest_get" 24 | REST_GET_OUTBOUND = f"rest;amt;outbound;rest_get" 25 | 26 | 27 | if __name__ == "__main__": 28 | print(CallType.RPC_GET_CACHE) 29 | print(CallType.RPC_GET_OUTBOUND) 30 | 31 | print(CallType.RPC_POST_CACHE) 32 | print(CallType.RPC_POST_OUTBOUND) 33 | 34 | print(CallType.REST_GET_CACHE) 35 | print(CallType.REST_GET_OUTBOUND) 36 | 37 | v = KV_STORE.get(CallType.RPC_GET_CACHE.value) 38 | print(1 if v == None else int(v.decode("utf-8"))) 39 | -------------------------------------------------------------------------------- /MIGRATIONS/v0.0.8-0.0.10.md: -------------------------------------------------------------------------------- 1 | # v0.0.8 -> v0.0.10 2 | 3 | This upgrade brings a new .env file & removes the redis dependency. Please modify your .env file to take effect. 4 | 5 | ```sh 6 | # Install the latest dependenies. 7 | python3 -m pip install -r requirements/requirements.txt --upgrade 8 | ``` 9 | 10 | ## Config Changes 11 | 12 | ```toml 13 | # Remove 14 | REDIS_URL=... 15 | REDIS_RPC_PREFIX=... 16 | REDIS_REST_PREFIX=... 17 | 18 | # Add 19 | DEBUGGING=false 20 | # Saves to a file in this dir on close / open for the KV values. 
21 | # Set this to any unique name 22 | STORE_NAME="reeces_juno-1" 23 | ``` 24 | 25 | ## The same goes for the akash / compose image env files, if you use them. 26 | 27 | --- 28 | 29 | ## (Docker) Worker Threads 30 | 31 | You can now set the number of threads you want in docker. Useful for akash deployments with multiple cores. 32 | 33 | By default, only 1 thread is used. To use more threads, set the following: 34 | 35 | ```env 36 | RPC_WORKER_THREADS=2 37 | 38 | and 39 | 40 | REST_WORKER_THREADS=2 41 | ``` 42 | 43 | Where "2" launches 2 threads for each process, each with its own cache. 44 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VERSION=0.0.10 2 | 3 | run: 4 | docker-compose up 5 | 6 | build: 7 | docker build . -f Dockerfile -t reecepbcups/rpc-cache:$(VERSION) 8 | docker build . -f Dockerfile.rest -t reecepbcups/api-cache:$(VERSION) 9 | 10 | push: 11 | docker push reecepbcups/api-cache:$(VERSION) 12 | docker push reecepbcups/rpc-cache:$(VERSION) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 5 | Optimize Cosmos query calls with this chain-synchronized caching layer. 6 | 7 | This program sits on top of another server and acts as a middleware between the requesting client and the actual cosmos RPC/API server. 8 | 9 | It supports: 10 | 11 | - Variable length cache times (for both RPC methods & REST URL endpoints) 12 | - Disable specific endpoints entirely from being queried (ex: REST API /accounts) 13 | - Enable cache only until the next block (via Tendermint RPC event subscription) 14 | 15 | - Cached RPC request 16 | - Cached REST request 17 | 18 | - Swagger + OpenAPI support (openapi.yml cached) 19 | - HttpBatchClient (for RPC with Tendermint 0.34 client) 20 | - Statistics (optional /stats endpoint with password) 21 | - 22 | - Websocket basic passthrough support for Keplr wallet 23 | - Index blocks (TODO?) 24 | 25 | ## Public Endpoints 26 | 27 | ### Juno 28 | - 29 | - 30 | 31 | ### Akash 32 | - 33 | 34 | ### CosmosHub 35 | - 36 | - 37 | 38 | ### Comdex 39 | - 40 | 41 | ### Chihuahua 42 | - 43 | 44 | ### Injective 45 | - 46 | 47 | ## Pre-Requirements 48 | 49 | - A Cosmos RPC / REST server endpoint (state synced, full node, or archive). 50 | - A reverse proxy (to forward a subdomain -> the endpoint cache on a machine) 51 | 52 | **NOTE** In the past, Redis was used. If you still wish to use Redis, it can be found in [v0.0.8](https://github.com/Reecepbcups/cosmos-endpoint-cache/releases/tag/v0.0.8) 53 | 54 | ## Where to run 55 | 56 | Ideally, you should run this on your RPC/REST node for localhost queries. However, you can also run it on other infra, including your reverse proxy itself or another separate node. 57 | This makes it possible to run on cloud providers like Akash, AWS, GCP, Azure, etc. 58 | 59 | --- 60 | 61 | ## Setup 62 | 63 | ```bash 64 | python3 -m pip install -r requirements/requirements.txt --upgrade 65 | 66 | # Edit the ENV file to your needs 67 | cp configs/.env .env 68 | 69 | # Update which endpoints you want to disable / allow (regex) & how long to cache each for. 70 | cp configs/cache_times.json cache_times.json 71 | 72 | # Then run to ensure it was set up correctly 73 | python3 rest.py 74 | # ctrl + c 75 | python3 rpc.py 76 | # ctrl + c 77 | 78 | # If all is good, continue on. 79 | # NOTE: You can only run 1 of each locally at a time because WSGI is a pain. Requires Systemd as a service to run both in parallel.
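# (Optional) example sanity checks while one of the apps is running, assuming the default
# ports from configs/.env (5001 = RPC cache, 5000 = REST cache):
#   curl localhost:5001/cache_info   # cache times the RPC app loaded
#   curl localhost:5000/debug        # contents of the REST app's KV store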
80 | 81 | # Then point your NGINX / CADDY config to this port rather than the default 26657 / 1317 endpoints 82 | ``` 83 | 84 | ## Running in Production 85 | 86 | - [Systemd Files](./docs/SYSTEMD_FILES.md) 87 | - [Tendermint Websocket Support](./docs/WEBSOCKET.md) 88 | - [Akash](./docs/AKASH.md) 89 | 90 | ## Documentation 91 | 92 | - [Configuration Values](./docs/CONFIG_VALUES.md) 93 | -------------------------------------------------------------------------------- /RequestsHandler.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import httpx 4 | 5 | import CONFIG 6 | from CONFIG import KV_STORE 7 | from HELPERS import hide_rest_data, hide_rpc_data, increment_call_value 8 | from HELPERS_TYPES import CallType, Mode 9 | 10 | timeout = httpx.Timeout(30.0, connect=5.0, read=4.0) 11 | 12 | 13 | def set_cache_for_time_if_valid( 14 | status_code: int, 15 | call_key: str, 16 | cache_seconds: int, 17 | redis_key: str, 18 | res: dict, 19 | use_hset: bool = False, 20 | second_key: str = "", # the params / args 21 | ): 22 | increment_call_value(call_key) 23 | 24 | if cache_seconds == Mode.NO_CACHE.value: 25 | # useful for broadcasted txs 26 | return 27 | 28 | if status_code == 200: 29 | # -2 = clear when a new block is minted 30 | if cache_seconds == Mode.FOR_BLOCK_TIME.value: 31 | if CONFIG.DEFAULT_CACHE_SECONDS > 0: 32 | cache_seconds = CONFIG.DEFAULT_CACHE_SECONDS 33 | else: 34 | cache_seconds = 6 35 | 36 | if use_hset: 37 | # Expire timeout is only changed on creation. 38 | # Future: per sub key timeouts? 39 | KV_STORE.hset(redis_key, second_key, json.dumps(res), cache_seconds) 40 | # KV_STORE.delete(redis_key) # Why was this here? 41 | else: 42 | KV_STORE.set(redis_key, json.dumps(res), cache_seconds) 43 | 44 | 45 | class RestApiHandler: 46 | def handle_single_rest_get_requests( 47 | self, path, key, cache_seconds: int, param_args, headers: dict 48 | ) -> dict: 49 | try: 50 | req = httpx.get( 51 | f"{CONFIG.REST_URL}/{path}", params=param_args, headers=headers 52 | ) 53 | except: 54 | req = httpx.get( 55 | f"{CONFIG.BACKUP_REST_URL}/{path}", params=param_args, headers=headers 56 | ) 57 | 58 | res = hide_rest_data(req.json(), path) 59 | 60 | set_cache_for_time_if_valid( 61 | req.status_code, 62 | CallType.REST_GET_OUTBOUND.value, 63 | cache_seconds, 64 | key, 65 | res, 66 | use_hset=True, 67 | second_key=str(param_args), 68 | ) 69 | 70 | return res 71 | 72 | # This breaks right now, very few ever will do this. Needs to be done in the future though, but not a priority 73 | # def handle_single_rest_post_requests(self, path, data: dict) -> dict: 74 | # # simulate, txs 75 | # try: 76 | # req = httpx.post( 77 | # f"{CONFIG.RPC_URL}/{path}", headers=CONFIG.HEADERS, data=data 78 | # ) 79 | # except: 80 | # req = httpx.post( 81 | # f"{CONFIG.BACKUP_RPC_URL}/{path}", 82 | # headers=CONFIG.HEADERS, 83 | # data=data, 84 | # ) 85 | # return req.json() 86 | 87 | 88 | class RPCHandler: 89 | def handle_batch_http_request(self, REQ_DATA: list) -> dict: 90 | """ 91 | This function handles batch http requests from TendermintClient34.create client 92 | """ 93 | # TODO: add cache here in the future possible? 
since each elem in the list has a method and params like below 94 | # TODO: add hide_rpc_data here for each if they req the status method 95 | try: 96 | req = httpx.post(f"{CONFIG.RPC_URL}", json=REQ_DATA) 97 | except: 98 | req = httpx.post( 99 | f"{CONFIG.BACKUP_RPC_URL}", 100 | json=REQ_DATA, 101 | ) 102 | 103 | return req.json() 104 | 105 | def handle_single_rpc_post_request( 106 | self, data, key, method, cache_seconds, use_hset: bool = False 107 | ) -> dict: 108 | # TODO: add round robin query here for multiple RPC nodes. If a node errors, save to cache for X period to not use (unless its the only 1) 109 | try: 110 | req = httpx.post(f"{CONFIG.RPC_URL}", data=data, timeout=timeout) 111 | except: 112 | req = httpx.post(f"{CONFIG.BACKUP_RPC_URL}", data=data, timeout=timeout) 113 | 114 | # only saves to cache if the request was successful 115 | res = hide_rpc_data(req.json(), method) 116 | 117 | set_cache_for_time_if_valid( 118 | req.status_code, 119 | CallType.RPC_POST_OUTBOUND.value, 120 | cache_seconds, 121 | key, 122 | res, 123 | use_hset, 124 | second_key=str(data), 125 | ) 126 | 127 | return res 128 | 129 | def handle_single_rpc_get_requests( 130 | self, path, key, param_args, cache_seconds: int, use_hset: bool = False 131 | ) -> dict: 132 | try: 133 | req = httpx.get( 134 | f"{CONFIG.RPC_URL}/{path}", params=param_args, timeout=timeout 135 | ) 136 | except Exception as e: 137 | req = httpx.get( 138 | f"{CONFIG.BACKUP_RPC_URL}/{path}", params=param_args, timeout=timeout 139 | ) 140 | 141 | res = hide_rpc_data(req.json(), path) 142 | 143 | set_cache_for_time_if_valid( 144 | req.status_code, 145 | CallType.RPC_GET_OUTBOUND.value, 146 | cache_seconds, 147 | key, 148 | res, 149 | use_hset, 150 | second_key=str(param_args), 151 | ) 152 | 153 | return res 154 | -------------------------------------------------------------------------------- /akash/deploy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "2.0" 3 | 4 | # Juno Mainnet Example using public providers. 5 | 6 | services: 7 | rpc-cache: 8 | image: reecepbcups/rpc-cache:0.0.10 9 | env: 10 | # NOTE - most up to date will be in the docker-compose.yml file 11 | - REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json 12 | - RPC_PREFIX=junorpc_1 13 | - ENABLE_COUNTER=false 14 | - USE_BACKUP_AS_PRIMARY=false 15 | # Use your own node IPs here as http:// 16 | - RPC_URL=https://rpc.juno.strange.love:443 17 | - BACKUP_RPC_URL=https://juno-rpc.polkachu.com 18 | - RPC_WEBSOCKET=ws://15.204.143.232:26657/websocket 19 | - NODE_MONIKER=reecepbcups 20 | - RPC_LISTEN_ADDRESS=HIDDEN 21 | - COINGECKO_ENABLED=true 22 | # - COINGECKO_API_KEY=HERE 23 | - COINGECKO_CACHE_SECONDS=6 # do not change if you do not have an API key. 24 | - COINGECKO_IDS=cosmos,juno-network,osmosis,canto 25 | - COINGECKO_FIAT=usd,eur,gbp 26 | - RPC_TITLE=Juno Docker RPC 27 | - RPC_CUSTOM_TEXT=Custom caching solution active
<br>My Juno REST API<br>
28 | expose: 29 | - port: 5001 30 | to: 31 | - global: true 32 | - port: 80 33 | to: 34 | - global: true 35 | 36 | api-cache: 37 | image: reecepbcups/api-cache:0.0.10 38 | env: 39 | # NOTE - most up to date will be in the docker-compose.yml file 40 | - REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json 41 | - REST_PREFIX=junoapi 42 | - USE_BACKUP_AS_PRIMARY=false 43 | # Use your own node IPs here as http:// 44 | - REST_URL=https://api.juno.strange.love:443 45 | - BACKUP_REST_URL=https://juno-api.polkachu.com 46 | - NODE_MONIKER=reecepbcups 47 | - ENABLE_COUNTER=false 48 | - API_TITLE=Juno Docker API 49 | - DISABLE_SWAGGER_UI=false 50 | expose: 51 | - port: 5000 52 | to: 53 | - global: true 54 | 55 | profiles: 56 | compute: 57 | rpc-cache: 58 | resources: 59 | cpu: 60 | # Must use 2+ CPU (1 for websocket, one for processing requests) 61 | units: 2.0 62 | memory: 63 | size: 1Gi 64 | storage: 65 | size: 1Gi 66 | api-cache: 67 | resources: 68 | cpu: 69 | units: 1.25 70 | memory: 71 | size: 1Gi 72 | storage: 73 | size: 1Gi 74 | placement: 75 | akash: 76 | attributes: 77 | host: akash 78 | signedBy: 79 | anyOf: 80 | - "akash1365yvmc4s7awdyj3n2sav7xfx76adc6dnmlx63" 81 | - "akash18qa2a2ltfyvkyj0ggj3hkvuj6twzyumuaru9s4" 82 | pricing: 83 | rpc-cache: 84 | denom: uakt 85 | amount: 10000 86 | api-cache: 87 | denom: uakt 88 | amount: 10000 89 | 90 | 91 | deployment: 92 | rpc-cache: 93 | akash: 94 | profile: rpc-cache 95 | count: 1 96 | api-cache: 97 | akash: 98 | profile: api-cache 99 | count: 1 -------------------------------------------------------------------------------- /configs/.env: -------------------------------------------------------------------------------- 1 | DEBUGGING=false 2 | 3 | # Saves to a file in this dir on close / open for the KV values. 4 | STORE_NAME="juno_node1" 5 | 6 | 7 | # == QUERY INCREMENT LOGGING === 8 | ENABLE_COUNTER=true 9 | INCREASE_COUNTER_EVERY=250 10 | STATS_PASSWORD="" # blank = no password for https://network.rest.website.com/stats?password=123 . 11 | 12 | 13 | # ==================================== 14 | # = RPCS & REST ENDPOINTS (TO QUERY) = 15 | # ==================================== 16 | USE_BACKUP_AS_PRIMARY=false 17 | # Note: RPC_URL can be localhost if you run on the machine itself. Or a direct IP address & port. 18 | RPC_URL="http://127.0.0.1:26657" 19 | BACKUP_RPC_URL="https://rpc.juno.strange.love" 20 | 21 | # set to "" if you do not wish to use the websocket 22 | RPC_WEBSOCKET="ws://15.204.143.232:26657/websocket" 23 | BACKUP_RPC_WEBSOCKET="ws://rpc.juno.strange.love:443/websocket" 24 | 25 | # REST API 26 | REST_URL="http://127.0.0.1:1317" 27 | BACKUP_REST_URL="https://lcd.juno.strange.love" 28 | DISABLE_SWAGGER_UI=false 29 | 30 | 31 | # === Security === 32 | # Hides value in the /status endpoint of the RPC 33 | # "" = normal value shown on query - https://youtu.be/5MKV7EDJiS4 34 | RPC_LISTEN_ADDRESS="" 35 | NODE_MONIKER="" 36 | 37 | 38 | # === Coingecko === 39 | # https://rpc/prices 40 | COINGECKO_ENABLED=true 41 | COINGECKO_API_KEY="" 42 | COINGECKO_IDS="cosmos,juno-network,osmosis,canto" 43 | COINGECKO_FIAT="usd,eur,gbp" 44 | 45 | 46 | # === Cosmetic === 47 | RPC_TITLE="Juno Network RPC" 48 | API_TITLE="Juno Network REST API" 49 | RPC_CUSTOM_TEXT='Custom caching solution active
<br>My Juno REST API<br>
' 50 | 51 | 52 | # === TESTING APPLICATION PORTS === 53 | # Only local with `python3 rpc.py`. Systemd services use ports defined in .service files. 54 | REST_PORT=5000 55 | RPC_PORT=5001 -------------------------------------------------------------------------------- /configs/cache_times.json: -------------------------------------------------------------------------------- 1 | { 2 | "DEFAULT": -2, 3 | "coingecko": { 4 | "seconds": 6 5 | }, 6 | "rpc": { 7 | "health.*": -2, 8 | "abci_info.*": -2, 9 | "status.*": -2, 10 | 11 | "broadcast_tx_commit.*": 0, 12 | "broadcast_tx_sync.*": 0, 13 | 14 | "unconfirmed_txs": 3, 15 | 16 | "genesis.*": 259200, 17 | 18 | "block": -2, 19 | "block?height=": 3600, 20 | "tx.*": 3600 21 | }, 22 | "rest": { 23 | "cosmos\/auth\/v1beta1\/accounts": -1, 24 | 25 | ".*\/blocks/latest": -2, 26 | 27 | ".*\/params": 3600, 28 | ".*\/minimum_gas_prices": 300, 29 | ".*\/fee_shares": 30, 30 | ".*delegations": 300, 31 | ".*slashes": 60, 32 | ".*commission": 30, 33 | ".*outstanding_rewards": 30, 34 | "cosmos\/gov\/v1beta1\/proposals.*": 60, 35 | "cosmos\/staking\/v1beta1\/historical_info.*": 3600, 36 | "cosmos\/bank\/v1beta1\/supply": 60, 37 | "cosmos\/staking\/v1beta1\/pool": 30, 38 | "cosmos\/staking\/v1beta1\/validators": 120, 39 | "ibc\/apps\/transfer\/v1\/denom_traces": 30, 40 | "tendermint\/v1beta1\/node_info": 60 41 | } 42 | } -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | services: 4 | # Juno testnet endpoints 5 | rpc: 6 | image: "reecepbcups/rpc-cache:0.0.10" 7 | network_mode: "host" 8 | environment: 9 | - REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json 10 | - RPC_WORKER_THREADS=2 11 | - RPC_PREFIX=unirpc 12 | - ENABLE_COUNTER=false 13 | - USE_BACKUP_AS_PRIMARY=false 14 | - RPC_URL=http://5.161.80.115:26657 15 | - BACKUP_RPC_URL=http://5.161.80.115:26657 16 | - RPC_WEBSOCKET=ws://5.161.80.115:26657/websocket 17 | - BACKUP_RPC_WEBSOCKET=ws://5.161.80.115:26657/websocket 18 | - RPC_LISTEN_ADDRESS=HIDDEN 19 | - NODE_MONIKER="testing" 20 | - COINGECKO_ENABLED=true 21 | # - COINGECKO_API_KEY="HERE" 22 | - COINGECKO_CACHE_SECONDS=6 # do not change if you do not have an API key. 23 | - COINGECKO_IDS=cosmos,juno-network,osmosis,canto 24 | - COINGECKO_FIAT=usd,eur,gbp 25 | - RPC_TITLE=Docker RPC cache 26 | - RPC_CUSTOM_TEXT=Custom caching solution active
<br>My Juno REST API<br>
27 | ports: 28 | - "5001:5001" 29 | 30 | # Juno mainnet endpoints 31 | api: 32 | image: "reecepbcups/api-cache:0.0.10" 33 | network_mode: "host" 34 | environment: 35 | - REMOTE_CONFIG_TIME_FILE=https://raw.githubusercontent.com/Reecepbcups/cosmos-endpoint-cache/main/configs/cache_times.json 36 | - REST_PREFIX=unirest 37 | - REST_WORKER_THREADS=1 38 | - REST_URL=http://15.204.143.232:1317 39 | - BACKUP_REST_URL=https://api.juno.strange.love 40 | - ENABLE_COUNTER=false 41 | - NODE_MONIKER="testing" 42 | - API_TITLE=Docker REST 43 | - DISABLE_SWAGGER_UI=false 44 | ports: 45 | - "5000:5000" -------------------------------------------------------------------------------- /docker/nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data; 2 | worker_processes auto; 3 | pid /run/nginx.pid; 4 | 5 | events { 6 | worker_connections 1024; 7 | use epoll; 8 | multi_accept on; 9 | } 10 | 11 | http { 12 | access_log /dev/stdout; 13 | error_log /dev/stdout; 14 | 15 | sendfile on; 16 | tcp_nopush on; 17 | tcp_nodelay on; 18 | keepalive_timeout 65; 19 | types_hash_max_size 2048; 20 | 21 | include /etc/nginx/mime.types; 22 | default_type application/octet-stream; 23 | 24 | index index.html index.htm; 25 | 26 | server { 27 | listen 80 default_server; 28 | listen [::]:80 default_server; 29 | server_name localhost; 30 | root /var/www/html; 31 | 32 | location / { 33 | include uwsgi_params; 34 | uwsgi_pass unix:/tmp/uwsgi.socket; 35 | } 36 | } 37 | } -------------------------------------------------------------------------------- /docker/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | service nginx start 3 | uwsgi --ini uwsgi.ini -------------------------------------------------------------------------------- /docker/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | module = hello:app 3 | uid = www-data 4 | gid = www-data 5 | master = true 6 | processes = 5 7 | 8 | socket = /tmp/uwsgi.socket 9 | chmod-sock = 664 10 | vacuum = true 11 | 12 | die-on-term = true -------------------------------------------------------------------------------- /docs/AKASH.md: -------------------------------------------------------------------------------- 1 | # Akash 2 | 3 | **NOTE** At the moment, you can not use multiple workers without compiling the docker image yourself. In the future, I would like to find a way to make this variable. By default, it is 1 worker and 1 thread in the WSGI server. 4 | 5 | You can utilize Akash to deploy your application to the decentralized cloud for cheaper than AWS, GCP, etc. This document will outline considerations you need to account for when deploying infrastructure. 6 | 7 | > [Deploy Tool](https://deploy.cloudmos.io/) 8 | 9 | --- 10 | 11 | # Setup 12 | 13 | You can find the deploy.yaml file [here](../akash/deploy.yaml). This file does not require the `"` string to prefix in the environment variables. Open this file in the [Cloudmos deploy tool](https://deploy.cloudmos.io/). 14 | After running, you must edit the `deploy.yaml` file to to your needs. For now, leave the redis url lines default (currently localhost Redis connections do not work for some reason) 15 | 16 | Be sure to update the `REMOTE_CONFIG_TIME_FILE` env variable for your needs. The default configuration is to only keep data for the length of the block, then wipe it if you have the Websocket RPC set. If not, it will default to 6 seconds of time to live. 
This means at max your data is 5 seconds old on most Tendermint chains. Even so, we recommend you use -2 for this option. To see other options, review the [Config Values](./CONFIG_VALUES.md) file. 17 | 18 | After uploading and deploying to a provider, your instances will fail. To solve this, we must now update the config with the URI and forwarded ports from the provider. These are found in the `Leases` page in the deploy tool online. It will look something like this `http://l94r5hl2itcct9f1vpkd88fno4.ingress.palmito.duckdns.org/` and forwarded ports like so `6379:31652` for example. 19 | 20 | Take the URI and Redis URL forwarded port, then put it in the config by clicking on the `update` tag in the deploy tool. This is how it will look: 21 | 22 | ```bash 23 | ... 24 | 25 | env: 26 | REDIS_URL=redis://l94r5hl2itcct9f1vpkd88fn04.ingress.palmito.duckdns.org:31622/0 27 | ... 28 | # NOTE: if you click on the forwarded ports link, it will return a cleaner URL like so: 29 | # provider.palmito.duckdns.org:31622 30 | ``` 31 | 32 | Then press the `update deployment` button on the right-hand side of the screen, and approve the transaction. Once this has been completed, you can use the cache as intended. 33 | -------------------------------------------------------------------------------- /docs/CONFIG_VALUES.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | This section contains information about the different configuration files used by this program. 4 | 5 | ## Workers vs Threads 6 | 7 | This program uses a combination of workers and threads to handle the requests. Increase the number of threads beyond the number of workers to handle more request throughput. 8 | Ideally, you should set this to `(CPUS/2) * (THREADS*2)`. Though you may find that `(CPU/4) * (THREADS*4)` is more optimal. More threads are better. 9 | 10 | ## .env 11 | 12 | For the best performance, disable the counter and/or increase the interval to a high number (say 10,000) like so. Doing this drastically reduces the number of logging increment requests to the Redis server instance. (Every query adds +1 in memory, which is then dumped at call 10,000 in a single incr(10000) call to be most efficient.) 13 | 14 | ```env 15 | # == QUERY INCREMENT LOGGING === 16 | ENABLE_COUNTER=false 17 | INCREASE_COUNTER_EVERY=10000 18 | ``` 19 | 20 | --- 21 | 22 | ## Variable Length Cache 23 | 24 | In the `cache_times.json` file, you can specify which endpoints to match and how long said queries should persist in the cache. 25 | This is useful for large queries such as /validators, which may return 100+ validators. This data does not change often, making it useful to cache for longer periods. 26 | 27 | There are 4 options: 28 | 29 | - > -2: Cache for the duration of the block (Subscribes to RPC_WEBSOCKET in .env file) 30 | - > -1: Disable this query entirely (prevent DoS attacks on the node) 31 | - > 0: No cache 32 | - > 1+: Cache for the specified number of seconds 33 | 34 | This file uses regex pattern matching as keys, with values as the number of seconds to cache once it has been called. 35 | For Python regex strings, you must prefix any `*` with a `.` (period), i.e. use `.*`. So to match "random" in "my 8 random11 string", you would use `.*random.*` to match everything before and after, as sketched below.
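A minimal sketch of that lookup (illustrative only, not part of the codebase; the example patterns and values are simply shaped like entries in `configs/cache_times.json`), mirroring how `get_cache_time_seconds` in `CONFIG.py` scans its patterns:

```python
import re

# Hypothetical entries shaped like the "rpc" / "rest" sections of cache_times.json.
endpoints = {
    "genesis.*": 259200,  # heavy, rarely-changing query -> cache for 3 days
    ".*random.*": 30,     # the ".*random.*" example from the paragraph above
}

def cache_seconds_for(path: str, default: int = 6) -> int:
    # The first pattern that matches the path wins, like CONFIG.get_cache_time_seconds.
    for pattern, seconds in endpoints.items():
        if re.match(pattern, path):
            return seconds
    return default

print(cache_seconds_for("genesis?chunk=0"))       # 259200
print(cache_seconds_for("my 8 random11 string"))  # 30
print(cache_seconds_for("status"))                # 6 (falls back to the default)
```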
36 | -------------------------------------------------------------------------------- /docs/SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Contact & Support 2 | 3 | > If something is confusing, I am happy to create further documentation / feature requests within reason. 4 | 5 | - Github Issues / PRs accepted. 6 | 7 | - Twitter: [https://twitter.com/Reecepbcups_](https://twitter.com/Reecepbcups_) 8 | -------------------------------------------------------------------------------- /docs/SYSTEMD_FILES.md: -------------------------------------------------------------------------------- 1 | # System Files 2 | 3 | Make sure you replace **NETWORK_RPC / NETWORK_REST** with your value. Ex: **juno_rpc** 4 | 5 | ## RPC 6 | 7 | ```bash 8 | # allow the file to be executed 9 | chmod +x run_rpc.sh 10 | 11 | # If you are running as Group=root : 12 | # `sudo python -m pip install -r requirements/requirements.txt --upgrade` 13 | sudo nano /lib/systemd/system/NETWORK_RPC.service 14 | 15 | # Ensure to change WorkingDirectory and ExecStart to your folder locations 16 | # [ Threads are more important than Workers. Should be roughly <= (2xTHREADS)+1 ] 17 | 18 | # ============ 19 | [Unit] 20 | Description=gunicorn rpc 21 | After=network.target 22 | PartOf=gunicorn.target 23 | ReloadPropagatedFrom=gunicorn.target 24 | [Service] 25 | User=root 26 | Group=root 27 | WorkingDirectory=/root/cosmos-endpoint-cache/%i 28 | ExecStart=/root/cosmos-endpoint-cache/run_rpc.sh 29 | Environment=WORKERS=4 30 | Environment=THREADS=6 31 | Environment=W_CONN=1000 32 | Environment=BACKLOG=2048 33 | Environment=PORT=5001 34 | [Install] 35 | WantedBy=gunicorn.target 36 | # ============ 37 | 38 | # Then you can start / stop / restart the service 39 | sudo systemctl daemon-reload 40 | 41 | sudo systemctl start NETWORK_RPC.service 42 | sudo systemctl enable NETWORK_RPC.service # start after reboot 43 | 44 | # And restart it 1 time every night (~0s of downtime) 45 | # 46 | # crontab -e 47 | # 0 8 * * * systemctl restart NETWORK_RPC 48 | ``` 49 | 50 | ## REST 51 | 52 | ```bash 53 | # allow the file to be executed 54 | chmod +x run_rest.sh 55 | 56 | # If you are running as Group=root : 57 | # `sudo python3 -m pip install -r requirements/requirements.txt --upgrade` 58 | sudo nano /lib/systemd/system/NETWORK_REST.service 59 | 60 | # Ensure to change WorkingDirectory and ExecStart to your folder locations 61 | # REST will get way less requests. 
So give as lot less resources 62 | 63 | # ============ 64 | [Unit] 65 | Description=gunicorn rest 66 | After=network.target 67 | PartOf=gunicorn.target 68 | ReloadPropagatedFrom=gunicorn.target 69 | [Service] 70 | User=root 71 | Group=root 72 | WorkingDirectory=/root/cosmos-endpoint-cache/%i 73 | ExecStart=/root/cosmos-endpoint-cache/run_rest.sh 74 | Environment=WORKERS=2 75 | Environment=THREADS=2 76 | Environment=W_CONN=1000 77 | Environment=BACKLOG=2048 78 | Environment=PORT=5000 79 | [Install] 80 | WantedBy=gunicorn.target 81 | # ============ 82 | 83 | # Then you can start / stop / restart the service 84 | sudo systemctl daemon-reload 85 | 86 | sudo systemctl start NETWORK_REST.service 87 | sudo systemctl enable NETWORK_REST.service 88 | ``` 89 | -------------------------------------------------------------------------------- /docs/WEBSOCKET.md: -------------------------------------------------------------------------------- 1 | # Tendermint WebSocket 2 | 3 | In Cosmos, some application use Tendermint's websocket to subscribe to events (Specifically keplr to check for when a Tx has completed in their UI). The following is an example for how to add WebSocket support to your cached RPC via NGINX. 4 | 5 | ## NGINX Config 6 | 7 | ```conf 8 | http { 9 | upstream juno_rpc_cache { 10 | server 11.123.123.123:5001; 11 | } 12 | 13 | upstream juno-ws-backend { 14 | ip_hash; 15 | # Juno direct RPC address 16 | server 11.123.123.123:26657; 17 | } 18 | 19 | # ... 20 | 21 | server { 22 | listen 80; 23 | server_name juno-rpc.reece.sh; 24 | 25 | # websocket connections only 26 | # (We convert wss to ws so we do not have to deal with certificates) 27 | location /websocket { 28 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 29 | proxy_set_header Host $host; 30 | 31 | proxy_pass http://juno-ws-backend/websocket; 32 | 33 | proxy_http_version 1.1; 34 | proxy_set_header Upgrade $http_upgrade; 35 | proxy_set_header Connection "upgrade"; 36 | } 37 | 38 | # every other requests through juno-rpc.reece.sh goes here to the standard RPC 39 | location / { 40 | add_header Access-Control-Max-Age 3600; 41 | add_header Access-Control-Expose-Headers Content-Length; 42 | proxy_set_header X-Real-IP $remote_addr; 43 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 44 | proxy_set_header Host $host; 45 | proxy_set_header X-NginX-Proxy true; 46 | 47 | add_header Referrer-Policy 'origin'; 48 | 49 | proxy_pass http://juno_rpc_cache; 50 | 51 | # WebSocket support 52 | proxy_http_version 1.1; 53 | proxy_set_header Upgrade $http_upgrade; 54 | proxy_set_header Connection "upgrade"; 55 | } 56 | } 57 | } 58 | 59 | ``` 60 | 61 | ## Caddy Config 62 | 63 | - TODO -------------------------------------------------------------------------------- /docs/logo/CosmosCacheLogo-Slim.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Reecepbcups/cosmos-cache/a56ff1a80a3a3d7ff9e6c12f2e1c85655c019d8b/docs/logo/CosmosCacheLogo-Slim.png -------------------------------------------------------------------------------- /docs/logo/CosmosCacheLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Reecepbcups/cosmos-cache/a56ff1a80a3a3d7ff9e6c12f2e1c85655c019d8b/docs/logo/CosmosCacheLogo.png -------------------------------------------------------------------------------- /requirements/requirements.txt: -------------------------------------------------------------------------------- 1 | # 
python3 -m pip install -r requirements/requirements.txt 2 | # sudo python3 -m pip install -r requirements/requirements.txt 3 | python-dotenv 4 | 5 | websocket-client # import websocket 6 | rel 7 | 8 | httpx 9 | 10 | Flask==2.2.4 11 | flask_caching 12 | flask-sock 13 | Flask-SocketIO 14 | flask-cors 15 | 16 | uwsgi 17 | gunicorn 18 | 19 | # REMOVED; no longer used afaik 20 | # breaks older python installs. Is this needed for docker? 21 | # uWSGI==2.0.17.1 22 | 23 | pycoingecko 24 | 25 | # https://pypi.org/project/py-kvstore/ / https://github.com/Reecepbcups/py_kvstore 26 | py-kvstore>=0.0.7 -------------------------------------------------------------------------------- /rest.py: -------------------------------------------------------------------------------- 1 | # Reece Williams | https://reece.sh | Jan 2023 2 | 3 | import json 4 | 5 | from flask import Flask, jsonify, request 6 | from flask_cors import CORS, cross_origin 7 | 8 | import CONFIG as CONFIG 9 | from CONFIG import KV_STORE 10 | from HELPERS import ( 11 | Mode, 12 | download_openapi_locally, 13 | get_config_values, 14 | get_stats_html, 15 | get_swagger_code_from_source, 16 | increment_call_value, 17 | ttl_block_only, 18 | ) 19 | from HELPERS_TYPES import CallType 20 | from RequestsHandler import RestApiHandler 21 | 22 | app = Flask(__name__) 23 | cors = CORS(app, resources={r"/*": {"origins": "*"}}) 24 | 25 | 26 | REST_SWAGGER_HTML = "" 27 | REST_HANDLER: RestApiHandler 28 | 29 | 30 | @app.before_first_request 31 | def before_first_request(): 32 | global REST_HANDLER 33 | CONFIG.update_cache_times() 34 | download_openapi_locally() 35 | REST_HANDLER = RestApiHandler() 36 | 37 | # not used yet here 38 | # if len(CONFIG.RPC_WEBSOCKET) > 0: 39 | # tmrpc = TendermintRPCWebSocket(enableSignal=False, logLevel=logging.DEBUG) 40 | # t = threading.Thread(target=tmrpc.ws.run_forever) 41 | # t.daemon = True 42 | # t.start() 43 | 44 | 45 | @app.route("/", methods=["GET"]) 46 | @cross_origin() 47 | def root(): 48 | global REST_SWAGGER_HTML 49 | 50 | if CONFIG.DISABLE_SWAGGER_UI: 51 | return jsonify({"code": 12, "message": "Not Implemented", "details": []}) 52 | 53 | if len(REST_SWAGGER_HTML) > 0: 54 | return REST_SWAGGER_HTML 55 | 56 | REST_SWAGGER_HTML = get_swagger_code_from_source() 57 | return REST_SWAGGER_HTML 58 | 59 | 60 | @app.route("/", methods=["GET"]) 61 | @cross_origin() 62 | def get_rest(path): 63 | if path == "stats": 64 | # https://url/stats?password=123 65 | if ( 66 | len(CONFIG.STATS_PASSWORD) > 0 67 | and request.args.get("password") != CONFIG.STATS_PASSWORD 68 | ): 69 | return "Invalid password" 70 | 71 | return get_stats_html() 72 | 73 | if path == "config": 74 | # https://url/config?password=123 75 | if ( 76 | len(CONFIG.STATS_PASSWORD) > 0 77 | and request.args.get("password") != CONFIG.STATS_PASSWORD 78 | ): 79 | return "Invalid password" 80 | 81 | return get_config_values() 82 | 83 | if path == "debug": 84 | return jsonify(KV_STORE.to_json()) 85 | 86 | args = request.args 87 | 88 | cache_seconds = CONFIG.get_cache_time_seconds(path, is_rpc=False) 89 | if cache_seconds == Mode.DISABLED.value: 90 | return jsonify( 91 | { 92 | "error": f"cosmos endpoint cache: The path '{path}' is disabled on this node..." 93 | } 94 | ) 95 | 96 | # print(cache_seconds) 97 | 98 | # Every rest requests is an hset because of diff arguments 99 | key = f"rest;{ttl_block_only(cache_seconds)};{path};" 100 | 101 | v = KV_STORE.hget(key, str(args)) 102 | if CONFIG.DEBUGGING: 103 | print(f"get_rest. 
Key: {key} | value: {v}") 104 | if v: 105 | increment_call_value(CallType.REST_GET_CACHE.value) 106 | return jsonify(json.loads(v)) 107 | 108 | return jsonify( 109 | REST_HANDLER.handle_single_rest_get_requests( 110 | path, key, cache_seconds, args, request.headers 111 | ) 112 | ) 113 | 114 | 115 | @app.route("/", methods=["POST"]) 116 | @cross_origin() 117 | def post_rest(path): 118 | # REQ_DATA = json.loads(json.dumps(request.get_json(), separators=(",", ":"))) 119 | # print(type(REQ_DATA)) 120 | # return jsonify(REST_HANDLER.handle_single_rest_post_requests(path, REQ_DATA)) 121 | return jsonify( 122 | { 123 | "error": f"cosmos endpoint cache: The path '{path}' does not yet have support on this REST API..." 124 | } 125 | ) 126 | 127 | 128 | if __name__ == "__main__": 129 | before_first_request() 130 | app.run(debug=True, host="0.0.0.0", port=CONFIG.REST_PORT) 131 | -------------------------------------------------------------------------------- /rpc.py: -------------------------------------------------------------------------------- 1 | # Reece Williams | https://reece.sh | Jan 2023 2 | 3 | import json 4 | import logging 5 | import os 6 | import re 7 | import threading 8 | 9 | import CONFIG as CONFIG 10 | from COINGECKO import Coingecko 11 | from CONFIG import KV_STORE 12 | from CONNECT_WEBSOCKET import TendermintRPCWebSocket 13 | from flask import Flask, jsonify, request, send_from_directory 14 | from flask_cors import CORS, cross_origin 15 | from flask_sock import Sock 16 | from HELPERS import ( 17 | Mode, 18 | hide_rpc_data, 19 | increment_call_value, 20 | replace_rpc_text, 21 | ttl_block_only, 22 | ) 23 | from HELPERS_TYPES import CallType 24 | from RequestsHandler import RPCHandler 25 | 26 | # === FLASK === 27 | rpc_app = Flask(__name__) 28 | 29 | sock = Sock(rpc_app) 30 | cors = CORS(rpc_app, resources={r"/*": {"origins": "*"}}) 31 | 32 | RPC_ROOT_HTML: str 33 | RPC_HANDLER: RPCHandler 34 | 35 | GECKO: Coingecko 36 | 37 | 38 | @rpc_app.before_first_request 39 | def before_first_request(): 40 | global RPC_ROOT_HTML, RPC_HANDLER, GECKO 41 | CONFIG.update_cache_times() 42 | RPC_ROOT_HTML = replace_rpc_text() 43 | RPC_HANDLER = RPCHandler() 44 | GECKO = Coingecko() 45 | 46 | # future: https://stackoverflow.com/questions/24101724/gunicorn-with-multiple-workers-is-there-an-easy-way-to-execute-certain-code-onl 47 | 48 | if len(CONFIG.RPC_WEBSOCKET) > 0: 49 | tmrpc = TendermintRPCWebSocket(enableSignal=False, logLevel=logging.DEBUG) 50 | t = threading.Thread(target=tmrpc.ws.run_forever, kwargs={"reconnect": 5}) 51 | t.daemon = True 52 | t.start() 53 | 54 | 55 | # === ROUTES === 56 | @rpc_app.route("/", methods=["GET"]) 57 | @cross_origin() 58 | def root(): 59 | # get the data between :// and the final / 60 | base = re.search(r"\/\/.*\/", request.base_url).group(0) 61 | # remove any /'s 62 | base = base.replace("/", "") 63 | 64 | #

 grab the chunk of the upstream RPC's root HTML that embeds its address so it can be re-pointed at this cache's base URL 65 | rpc_url = re.search( 66 | r'(Endpoints that require arguments:.*
)', RPC_ROOT_HTML 67 | ).group(0) 68 | 69 | return RPC_ROOT_HTML.replace(rpc_url, base).replace("{BASE_URL}", base) 70 | 71 | 72 | @rpc_app.route("/cache_info", methods=["GET"]) 73 | @cross_origin() 74 | def cache_info(): 75 | """ 76 | Updates viewable cache times (seconds) at DOMAIN/cache_info. 77 | Auto updates for this program on update/change automatically without restart. 78 | 79 | We only store the data so any time its requested every X minutes, we regenerate the data. 80 | """ 81 | key = f"rpc;cache_times" 82 | 83 | # v = REDIS_DB.get(key) 84 | # if v: 85 | # return jsonify(json.loads(v)) 86 | v = KV_STORE.get(key) 87 | if v: 88 | # we can just return v right? (if we save it as json) 89 | return jsonify(v) 90 | 91 | CONFIG.update_cache_times() 92 | 93 | # REDIS_DB.setex(key, 15 * 60, json.dumps(CONFIG.cache_times)) 94 | KV_STORE.set(key, CONFIG.cache_times, 15 * 60) 95 | return jsonify(CONFIG.cache_times) 96 | 97 | 98 | @rpc_app.route("/prices", methods=["GET"]) 99 | @cross_origin() 100 | def coingecko(): 101 | """ 102 | Gets the prices from coingecko as defined in the .env file. 103 | """ 104 | if CONFIG.COINGECKO_ENABLED: 105 | # caching handled in the class 106 | return jsonify(GECKO.get_price()) 107 | else: 108 | return jsonify({"error": "prices are not enabled on this node..."}) 109 | 110 | 111 | def use_redis_hashset(path, args): 112 | if any( 113 | path.startswith(x) 114 | for x in [ 115 | "block", 116 | "tx_search", 117 | ] 118 | ): 119 | return len(args) > 0 120 | return False 121 | 122 | 123 | @rpc_app.route("/favicon.ico") 124 | def favicon(): 125 | return send_from_directory( 126 | os.path.join(rpc_app.root_path, "static"), 127 | "favicon.png", 128 | mimetype="image/vnd.microsoft.icon", 129 | ) 130 | 131 | 132 | @rpc_app.route("/", methods=["GET"]) 133 | @cross_origin() 134 | def get_rpc_endpoint(path: str): 135 | global total_calls 136 | 137 | if path == "debug": 138 | return jsonify(KV_STORE.to_json()) 139 | 140 | args = request.args 141 | 142 | cache_seconds = CONFIG.get_cache_time_seconds(path, is_rpc=True) 143 | if cache_seconds == Mode.DISABLED.value: 144 | return jsonify( 145 | { 146 | "error": f"cosmos endpoint cache: The path '{path}' is disabled on this node..." 147 | } 148 | ) 149 | 150 | use_hset = use_redis_hashset(path, args) 151 | key = f"rpc;{ttl_block_only(cache_seconds)};{path}" 152 | 153 | if CONFIG.DEBUGGING: 154 | print(f"checking if {path} is in the hashset ({use_hset})...") 155 | 156 | if use_hset: 157 | # v = REDIS_DB.hget(key, str(args)) 158 | v = KV_STORE.hget(key, str(args)) 159 | else: 160 | key = f"{key};{args}" 161 | v = KV_STORE.get(key) 162 | 163 | if v: 164 | increment_call_value(CallType.RPC_GET_CACHE.value) 165 | return jsonify(json.loads(v)) 166 | 167 | res = RPC_HANDLER.handle_single_rpc_get_requests( 168 | path, key, args, cache_seconds, use_hset 169 | ) 170 | 171 | return jsonify(res) 172 | 173 | 174 | @rpc_app.route("/", methods=["POST"]) 175 | @cross_origin() 176 | def post_rpc_endpoint(): 177 | REQ_DATA = request.get_json() 178 | 179 | # BatchHTTPClient's send in a list of JSONRPCRequests 180 | if isinstance(REQ_DATA, list): 181 | increment_call_value(CallType.RPC_POST_OUTBOUND.value, len(REQ_DATA)) 182 | return jsonify(RPC_HANDLER.handle_batch_http_request(REQ_DATA)) 183 | 184 | # If its a single RPC request, the following is used. 
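# For illustration (not taken from this file), a bare status query would arrive here as the JSON body:
#   {"jsonrpc": "2.0", "method": "status", "id": 1}
# and the "method" field pulled out below is what drives the per-endpoint cache-time lookup.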
185 | method = REQ_DATA.get("method", None) 186 | 187 | cache_seconds = CONFIG.get_cache_time_seconds(method, is_rpc=True) 188 | if cache_seconds == Mode.DISABLED.value: 189 | return jsonify( 190 | { 191 | "error": f"cosmos endpoint cache: The RPC method '{method}' is disabled on this node..." 192 | } 193 | ) 194 | 195 | use_hset = use_redis_hashset(method, request.args) 196 | key = f"rpc;{ttl_block_only(cache_seconds)};{method}" 197 | # We save/get requests data since it also has the id of said requests from json RPC. 198 | 199 | modified_data = dict(REQ_DATA) 200 | 201 | # This could also be a UUID 202 | original_req_id = dict(REQ_DATA).get("id", 0) 203 | 204 | # we set the save key as -1 id since that is not real. This way on requests we are forced to change it back to the original requests 205 | # this ensures we cache things such as status independent of the requested id. 206 | modified_data["id"] = -1 207 | 208 | if use_hset: 209 | v = KV_STORE.hget(key, str(modified_data)) 210 | else: 211 | key = f"{key};{modified_data}" 212 | v = KV_STORE.get(key) 213 | 214 | if v: 215 | increment_call_value(CallType.RPC_POST_CACHE.value) 216 | # replace the id with the original id so the requests is valid and in the order requested. 217 | # else we get: Error: wrong ID: response ID (0) does not match request ID (1) 218 | v = json.loads(v) 219 | v["id"] = original_req_id 220 | return jsonify(v) 221 | 222 | res = RPC_HANDLER.handle_single_rpc_post_request( 223 | json.dumps(REQ_DATA), key, method, cache_seconds, use_hset 224 | ) 225 | res = hide_rpc_data(res, method) 226 | 227 | return jsonify(res) 228 | 229 | 230 | # === socket bridge === 231 | 232 | # return JSONRPC/websockets 233 | # JSONRPC requests can be also made via websocket. The websocket endpoint is at /websocket, e.g. localhost:26657/websocket. Asynchronous RPC functions like event subscribe and unsubscribe are only available via websockets. 
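# A rough sketch of such a subscribe call using the websocket-client package from requirements.txt
# (untested here; the port and event query are the usual Tendermint defaults, not values defined in this file):
#   import json, websocket
#   ws = websocket.create_connection("ws://localhost:26657/websocket")
#   ws.send(json.dumps({"jsonrpc": "2.0", "method": "subscribe", "id": 1, "params": {"query": "tm.event='NewBlock'"}}))
#   print(ws.recv())  # the first reply acknowledges the subscription; later frames carry the block events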
234 | # https://github.com/hashrocket/ws 235 | # grpcurl -plaintext -d "{\"address\":\"juno10r39fueph9fq7a6lgswu4zdsg8t3gxlq670lt0\"}" wss://juno-rpc.reece.sh/websocket cosmos.bank.v1beta1.Query/AllBalances 236 | # grpcurl -plaintext -d "{\"address\":\"juno10r39fueph9fq7a6lgswu4zdsg8t3gxlq670lt0\"}" 15.204.143.232:9090 cosmos.bank.v1beta1.Query/AllBalances 237 | # curl -X GET -H "Content-Type: application/json" -H "x-cosmos-block-height: 6619410" http://15.204.143.232:1317/cosmos/bank/v1beta1/balances/juno10r39fueph9fq7a6lgswu4zdsg8t3gxlq670lt0 238 | 239 | 240 | # @sock.route("/websocket") 241 | # def websocket(ws): 242 | # print("websocket connected") 243 | # async def handle_subscribe(): 244 | # async with websockets.connect(CONFIG.RPC_WEBSOCKET) as websocket: 245 | # while True: 246 | # # receive data from the websocket 247 | # data = await websocket.recv() 248 | # if data == "close" or data == None: 249 | # emit("close", data) 250 | # await websocket.close() 251 | # break 252 | # emit("message", data) 253 | # asyncio.run(handle_subscribe()) 254 | 255 | 256 | if __name__ == "__main__": 257 | before_first_request() 258 | 259 | # setting to True runs 2 processes 260 | rpc_app.run(debug=True, host="0.0.0.0", port=CONFIG.RPC_PORT) 261 | -------------------------------------------------------------------------------- /run_rest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # chmod +x run_rest.sh 4 | # 5 | # Directions ./docs/SYSTEMD_FILES.md 6 | # 7 | # sudo systemctl daemon-reload 8 | # sudo systemctl start juno_rest.service 9 | 10 | PORT=${PORT:-5000} 11 | 12 | WORKERS=${WORKERS:-2} 13 | THREADS=${THREADS:-4} 14 | W_CONN=${W_CONN:-1000} 15 | BACKLOG=${BACKLOG:-2048} 16 | 17 | THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 18 | cd $THIS_DIR 19 | 20 | python3 -m pip install -r requirements/requirements.txt --upgrade 21 | 22 | gunicorn --workers $WORKERS --threads $THREADS --worker-connections $W_CONN --backlog $BACKLOG --bind 0.0.0.0:$PORT --preload rest:app -------------------------------------------------------------------------------- /run_rpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Directions ./docs/SYSTEMD_FILES.md 4 | # 5 | # sudo systemctl daemon-reload 6 | # sudo systemctl start juno_rpc.service 7 | # 8 | # Restart nightly: 9 | # crontab -e 10 | # 0 8 * * * systemctl restart juno_rpc 11 | 12 | PORT=${PORT:-5001} 13 | 14 | WORKERS=${WORKERS:-4} 15 | THREADS=${THREADS:-6} 16 | W_CONN=${W_CONN:-1000} 17 | BACKLOG=${BACKLOG:-2048} 18 | 19 | THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 20 | cd $THIS_DIR 21 | 22 | python3 -m pip install -r requirements/requirements.txt --upgrade 23 | 24 | gunicorn --workers $WORKERS --threads $THREADS --worker-connections $W_CONN --backlog $BACKLOG --bind 0.0.0.0:$PORT --preload rpc:rpc_app -------------------------------------------------------------------------------- /static/README.md: -------------------------------------------------------------------------------- 1 | # OpenAPI / Swagger 2 | 3 | This folder contains static data for the swagger API from the REST API. 
4 | 5 | openapi.yml 6 | 7 | - auto-downloaded on the start of this program 8 | 9 | favicon.png 10 | 11 | - the image to show in the top tab if using the swagger UI on port 1317 (default) 12 | -------------------------------------------------------------------------------- /static/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Reecepbcups/cosmos-cache/a56ff1a80a3a3d7ff9e6c12f2e1c85655c019d8b/static/favicon.png -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Test 2 | 3 | misc test against Juno to ensure this program works as expected with queries & Txs 4 | -------------------------------------------------------------------------------- /test/cw_template.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Reecepbcups/cosmos-cache/a56ff1a80a3a3d7ff9e6c12f2e1c85655c019d8b/test/cw_template.wasm -------------------------------------------------------------------------------- /test/uni.sh: -------------------------------------------------------------------------------- 1 | # git checkout v12.0.0-beta.1 && make install 2 | CHAIN_ID="uni-6" 3 | # NODE="--node https://uni-rpc.reece.sh:443" 4 | NODE="--node http://127.0.0.1:5001" 5 | KEY="juno2" 6 | export KEYRING=${KEYRING:-"test"} 7 | export KEYALGO="secp256k1" 8 | 9 | # This is a test account found in juno/scripts/test_node.sh :) 10 | # juno1efd63aw40lxf3n4mhf7dzhjkr453axurv2zdzk 11 | echo "wealth flavor believe regret funny network recall kiss grape useless pepper cram hint member few certain unveil rather brick bargain curious require crowd raise" | junod keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO --recover 12 | 13 | junod q gov params $NODE 14 | 15 | junod q bank balances juno1efd63aw40lxf3n4mhf7dzhjkr453axurv2zdzk $NODE 16 | 17 | junod tx bank send $KEY juno1efd63aw40lxf3n4mhf7dzhjkr453axurv2zdzk 1ujunox --keyring-backend $KEYRING --chain-id $CHAIN_ID $NODE --fees 500ujunox 18 | # junod q tx $NODE 19 | 20 | export JUNOD_COMMAND_ARGS="--from $KEY $NODE -b block --output json --yes --chain-id $CHAIN_ID --gas 1000000 --fees 2500ujunox --keyring-backend $KEYRING" 21 | 22 | # junod tx wasm store ./test/cw_template.wasm --from juno1 --node http://localhost:5001 --keyring-backend test --chain-id uni-6 23 | 24 | function upload_and_init () { 25 | ADMIN=$1 26 | 27 | # cw_template = the basic counter contract 28 | echo "Uploading example contract to chain store" 29 | junod tx wasm store ./test/cw_template.wasm $JUNOD_COMMAND_ARGS 30 | } --------------------------------------------------------------------------------