333 |
334 | | 🐛 [Report Bug](../../issues/new?template=bug_report.md) | 🔀 [Pull Request](../../pulls) | 💡 [Suggest Feature](../../issues/new?template=feature_request.md) |
335 | |-------------------|-----------------|------------------------|
336 |
337 |
338 |
339 | ## 📈 Resource Usage
340 |
341 | ```
342 | System Impact:
343 | CPU: < 1% average usage
344 | RAM: ~ 50MB memory usage
345 | Disk: ~ 10MB for logs
346 | Network: ~ 1MB/hour
347 | ```
348 |
349 | ## ⭐ Support
350 |
351 | If you find this useful:
352 | - 🌟 Star this repository
353 | - 🔄 Fork and contribute
354 | - 📢 Share with others
355 |
356 | ## 📜 License
357 |
358 |
359 |
360 | [MIT License](LICENSE) • Free and Open Source
361 |
362 | ---
363 |
364 |
365 |
366 | Made with ❤️ by Quasar 🤖🧬🧠 validator
367 |
368 | [🔝 Back to Top](#top)
369 |
370 |
371 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # 🔒 Basic Security Settings
2 |
3 | ### Firewall Configuration
4 | ```bash
5 | # Reset and configure UFW
6 | sudo ufw disable
7 | sudo ufw reset
8 |
9 | # Allow essential ports
10 | sudo ufw allow 22/tcp # SSH
11 | sudo ufw allow 26656/tcp # Cosmos P2P
12 |
13 | # Set default policy and enable
14 | sudo ufw default deny incoming
15 | sudo ufw enable
16 | ```
17 |
18 | ### SSH & User Security
19 |
20 | First, you need to add your SSH public key to authorize access to the server:
21 | ```bash
22 | # Create .ssh directory if it doesn't exist
23 | mkdir -p ~/.ssh
24 |
25 | # Create or edit authorized_keys file
26 | nano ~/.ssh/authorized_keys
27 |
28 | # Add your public key (example format):
29 | # ssh-rsa AAAAB3NzaC1yc2EAAAADA... your.email@example.com
30 |
31 | # Set correct permissions
32 | chmod 700 ~/.ssh
33 | chmod 600 ~/.ssh/authorized_keys
34 | ```
35 |
36 | After adding your SSH key, secure the SSH configuration:
37 | ```bash
38 | # Disable password authentication completely (only SSH keys allowed)
39 | # This prevents brute force attacks by requiring SSH key authentication
40 | sudo sed -i 's/#PasswordAuthentication yes/PasswordAuthentication no/' /etc/ssh/sshd_config
41 |
42 | # Restrict root login to only allow SSH key authentication
43 | # Prevents direct root login attempts and adds additional security layer
44 | sudo sed -i 's/PermitRootLogin yes/PermitRootLogin prohibit-password/' /etc/ssh/sshd_config
45 |
46 | # Restart SSH service to apply changes
47 | sudo systemctl restart ssh
48 |
49 | # Edit sudoers file to prevent user 'ubuntu' from using 'sudo su'
50 | sudo visudo
51 | # Add line:
ubuntu ALL=(ALL:ALL) ALL, !/usr/bin/su
53 | ```
54 |
55 | ### Check Settings
56 | ```bash
57 | # Check UFW status
58 | sudo ufw status
59 |
60 | # Verify SSH configuration
61 | sudo sshd -T | grep -E 'passwordauthentication|permitrootlogin'
62 |
63 | # Check current user
64 | whoami
65 | ```
66 |
--------------------------------------------------------------------------------
/assets/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
15 |
16 |
17 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
29 |
30 |
31 |
36 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
50 |
51 |
52 | 🛡️
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
64 |
65 | RAM
66 |
67 |
68 |
69 |
70 |
71 |
75 |
76 | DISK
77 |
78 |
79 |
80 |
81 |
82 |
86 |
87 | BLOCKS
88 |
89 |
90 |
91 |
97 | COSMOS VALIDATOR MONITORING
98 |
99 |
100 |
--------------------------------------------------------------------------------
/chain_endpoints.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 | import json
4 | from typing import List, Dict
5 | import logging
6 | import os
7 | from endpoint_storage import EndpointStorage
8 | import time
9 |
10 | logging.basicConfig(level=logging.INFO)
11 | logger = logging.getLogger(__name__)
12 |
class ChainEndpoints:
    """Manager for chain RPC endpoints backed by a local file cache.

    Endpoint lists come from the ping.pub registry on GitHub and are
    persisted through EndpointStorage so repeated lookups can skip the
    network entirely.
    """

    GITHUB_RAW_BASE = "https://raw.githubusercontent.com/ping-pub/ping.pub/main/chains/mainnet/"
    # Timeout (seconds) for registry/GitHub HTTP requests; without one a
    # dead host would block the monitor indefinitely.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        # GitHub API headers; note they are not passed to the requests.get
        # calls below — kept for backward compatibility with external users.
        self.headers = {
            'Accept': 'application/vnd.github.v3+json'
        }
        self.storage = EndpointStorage()

    def _extract_api_endpoints(self, api_data) -> List[str]:
        """
        Extract API endpoints from different JSON formats

        Handles formats:
        1. List of strings: ["url1", "url2"]
        2. List of dicts with address: [{"address": "url1"}, {"address": "url2"}]
        3. Mixed list: ["url1", {"address": "url2", "provider": "name"}]
        4. Single string: "url"

        Unrecognized items (e.g. dicts without an "address" key) are skipped.
        """
        endpoints = []

        if isinstance(api_data, list):
            for item in api_data:
                if isinstance(item, dict) and 'address' in item:
                    endpoints.append(item['address'])
                elif isinstance(item, str):
                    endpoints.append(item)
        elif isinstance(api_data, str):
            endpoints.append(api_data)

        return endpoints

    def _check_endpoint(self, endpoint: str) -> bool:
        """Return True if the RPC endpoint answers GET /status with HTTP 200."""
        try:
            response = requests.get(
                f"{endpoint}/status",  # Tendermint RPC status route
                timeout=5
            )
            return response.status_code == 200
        except requests.RequestException:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; any network-level failure marks it dead.
            return False

    def get_endpoints(self, chain_id: str) -> List[str]:
        """Get list of working RPC endpoints for a chain.

        Order of attempts:
        1. Local cache (validated by probing the first stored endpoint).
        2. Direct registry file ``<chain_id>.json`` on GitHub.
        3. Scan of all registry files matching registry_name/chain_name.

        Returns [] when nothing works; never raises.
        """
        # Try getting from local storage first
        endpoints = self.storage.get_endpoints(chain_id)
        if endpoints:
            # Verify first endpoint is working before trusting the cache
            if self._check_endpoint(endpoints[0]):
                return endpoints
            logger.info(f"Stored endpoints for {chain_id} not working, fetching new ones")

        # If not in storage or not working, get from GitHub
        try:
            # First try direct chain_id
            logger.info(f"Fetching config for {chain_id}")
            response = requests.get(
                f"{self.GITHUB_RAW_BASE}{chain_id}.json",
                timeout=self.REQUEST_TIMEOUT
            )

            if response.status_code == 404:
                # If not found, try to find by registry_name in all files
                logger.info("Chain not found, searching in all configurations...")
                all_chains = requests.get(
                    "https://api.github.com/repos/ping-pub/ping.pub/contents/chains/mainnet",
                    timeout=self.REQUEST_TIMEOUT
                )
                if all_chains.status_code == 200:
                    for item in all_chains.json():
                        if item['name'].endswith('.json'):
                            config_response = requests.get(
                                item['download_url'],
                                timeout=self.REQUEST_TIMEOUT
                            )
                            if config_response.status_code == 200:
                                config = config_response.json()
                                if (config.get('registry_name') == chain_id or
                                        config.get('chain_name') == chain_id):
                                    response = config_response
                                    break

            if response.status_code == 200:
                config = response.json()
                rpc_data = config.get('rpc', [])  # RPC endpoints, not REST API
                endpoints = self._extract_api_endpoints(rpc_data)

                # Keep only endpoints that answer /status right now
                working_endpoints = [
                    endpoint for endpoint in endpoints
                    if self._check_endpoint(endpoint)
                ]

                if working_endpoints:
                    logger.info(f"Found {len(working_endpoints)} working endpoints for {chain_id}")
                    # Save to local storage
                    self.storage.save_endpoints(chain_id, working_endpoints)
                    return working_endpoints
                else:
                    logger.warning(f"No working endpoints found for {chain_id}")
            else:
                logger.error(f"Failed to get config for {chain_id}: {response.status_code}")
        except Exception as e:
            logger.error(f"Error fetching config for {chain_id}: {e}")

        return []
113 |
# Singleton instance shared by the CLI entry point and the test helpers
# below.  NOTE: constructing it here runs EndpointStorage() at import time,
# which creates the cache directory/file as a filesystem side effect.
chain_endpoints = ChainEndpoints()
116 |
def test_endpoints():
    """Demonstrate endpoint discovery, caching and health checks.

    Network-dependent demo: talks to GitHub and public RPC nodes, printing
    results to stdout.  Returns nothing.
    """

    # Test 1: Get all available chain configs
    print("\n=== Test 1: Available Chains ===")
    try:
        response = requests.get(
            "https://api.github.com/repos/ping-pub/ping.pub/contents/chains/mainnet",
            timeout=10,  # don't hang the demo if GitHub is unreachable
        )
        if response.status_code == 200:
            chains = [
                item['name'].replace('.json', '')
                for item in response.json()
                if item['name'].endswith('.json') and not item['name'].endswith('.disabled')
            ]
            print(f"Total chains available: {len(chains)}")
            print("\nFirst 10 chains:")
            for chain in sorted(chains[:10]):
                print(f" - {chain}")
            print("...")
    except Exception as e:
        print(f"Error getting chain list: {e}")

    # Test 2: Test popular chains RPC endpoints
    print("\n=== Test 2: Popular Chains RPC Endpoints ===")
    test_chains = ['cosmoshub', 'osmosis', 'celestia', 'juno', 'stargaze']
    for chain in test_chains:
        print(f"\nTesting {chain}:")
        endpoints = chain_endpoints.get_endpoints(chain)
        print(f"Found {len(endpoints)} working RPC endpoints:")
        for endpoint in endpoints:
            print(f" - {endpoint}")

    # Test 3: Test cache functionality
    print("\n=== Test 3: Cache Test ===")
    chain = 'cosmoshub'
    print(f"First request for {chain} (might fetch from GitHub):")
    start_time = time.time()
    endpoints1 = chain_endpoints.get_endpoints(chain)
    time1 = time.time() - start_time

    print(f"Second request for {chain} (should use cache):")
    start_time = time.time()
    endpoints2 = chain_endpoints.get_endpoints(chain)
    time2 = time.time() - start_time

    print(f"\nFirst request time: {time1:.2f}s")
    print(f"Second request time: {time2:.2f}s")
    # Guard against ZeroDivisionError: the cached request can finish in less
    # than the clock resolution, leaving time2 == 0.0.
    if time2 > 0:
        print(f"Cache speedup: {time1/time2:.1f}x")
    else:
        print("Cache speedup: n/a (cached request below timer resolution)")

    # Test 4: Test RPC endpoint health check
    print("\n=== Test 4: RPC Endpoint Health Check ===")
    chain = 'osmosis'
    endpoints = chain_endpoints.get_endpoints(chain)
    print(f"\nChecking {len(endpoints)} RPC endpoints for {chain}:")
    for endpoint in endpoints:
        start_time = time.time()
        is_working = chain_endpoints._check_endpoint(endpoint)
        response_time = time.time() - start_time
        status = "✅" if is_working else "❌"
        print(f"{status} {endpoint} - Response time: {response_time:.2f}s")
176 |
def test_endpoint_extraction():
    """Exercise _extract_api_endpoints on the JSON shapes seen in the
    ping.pub registry, then fetch live endpoints for a few real chains."""
    print("\n=== Test 6: JSON Format Handling ===")

    def report(found):
        # Shared printer so every format case reports identically.
        print(f"Found {len(found)} RPC endpoints:")
        for url in found:
            print(f" - {url}")

    sample_formats = [
        # Mixed list of strings and objects, as published by Axelar.
        ("Testing Format 1 (Axelar-style):", [
            "https://rpc-axelar.imperator.co:443",
            "https://axelar-rpc.quickapi.com:443",
            {
                "address": "https://tm.axelar.lava.build",
                "provider": "Lava network"
            },
        ]),
        # Objects only, as published by Andromeda.
        ("Testing Format 2 (Andromeda-style):", [
            {
                "address": "https://rpc.andromeda-1.andromeda.aviaone.com",
                "provider": "AVIAONE"
            },
            {
                "address": "https://andromeda-rpc.lavenderfive.com:443",
                "provider": "Lavender.Five Nodes 🐝"
            },
        ]),
        # Objects with provider listed first, as published for Cosmos Hub.
        ("Testing Format 3 (Cosmos-style):", [
            {"provider": "cosmos.directory", "address": "https://rpc.cosmos.directory/cosmoshub"},
            {"provider": "Lava network", "address": "https://cosmoshub.tendermintrpc.lava.build"},
        ]),
    ]

    for header, payload in sample_formats:
        print(f"\n{header}")
        report(chain_endpoints._extract_api_endpoints(payload))

    # Live lookups against chains known to use the formats above.
    print("\nTesting real chains with different formats:")
    for name in ('axelar', 'andromeda', 'cosmoshub'):
        print(f"\n{name}:")
        live = chain_endpoints.get_endpoints(name)
        print(f"Found {len(live)} working RPC endpoints:")
        for url in live:
            print(f" - {url}")
233 |
234 | if __name__ == "__main__":
235 | import sys
236 | import time
237 |
238 | if len(sys.argv) > 1:
239 | # Get endpoints for specific chain
240 | chain_id = sys.argv[1]
241 | logger.info(f"Getting endpoints for {chain_id}...")
242 | endpoints = chain_endpoints.get_endpoints(chain_id)
243 | if endpoints:
244 | # Print endpoints space-separated for bash script
245 | print(" ".join(endpoints))
246 | else:
247 | logger.warning("No working endpoints found")
248 | print("") # Print empty line for bash script
249 | else:
250 | # Run tests
251 | test_endpoints()
252 | test_endpoint_extraction()
--------------------------------------------------------------------------------
/check.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Missed-block monitor entry point: validates arguments, loads shared
# configuration and prepares the log destination.

# Get script directory so config.sh is found regardless of caller's cwd
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Source configuration file (Telegram credentials, thresholds, log paths)
source "${SCRIPT_DIR}/config.sh"

# Check if all required parameters are provided
# Usage: ./check.sh <RPC_ENDPOINT> <VALIDATOR_ADDRESS> <CHAIN_ID> <CHAIN_NAME> [LOG_DIR]
if [ "$#" -lt 4 ]; then
    # Placeholders restored below: the dump had lost the argument names from
    # this usage line (reconstructed from the Example invocations).
    echo "Usage: $0 <RPC_ENDPOINT> <VALIDATOR_ADDRESS> <CHAIN_ID> <CHAIN_NAME> [LOG_DIR]"
    echo "Example: $0 http://localhost:26657 76A8A9A8151255E9E69E89499CFE9CB86F cosmoshub-4 cosmos"
    echo "Example: $0 http://localhost:26657 76A8A9A8151255E9E69E89499CFE9CB86F cosmoshub-4 cosmos ./logs"
    exit 1
fi

# Script parameters
RPC_ENDPOINT="$1"        # Primary RPC endpoint (usually local)
VALIDATOR_ADDRESS="$2"   # Validator address in hex format 849ASD.....
CHAIN_ID="$3"            # Chain ID (e.g., cosmoshub-4, osmosis-1)
CHAIN_NAME="$4"          # Chain name for endpoint lookup (e.g., cosmos, osmosis)
LOG_DIR="${5:-$LOG_DIR}" # Use provided LOG_DIR or default from config

# Create log directory and file if they don't exist
mkdir -p "${LOG_DIR}"    # Create logs directory if it doesn't exist
touch "${LOG_FILE}"      # Create log file if it doesn't exist (LOG_FILE from config.sh)
28 |
# Deliver a notification to the configured Telegram chat/thread and report
# whether the Bot API accepted it.
#   $1 - message text to send
send_tg_message() {
    local text="$1"
    local api_reply

    api_reply=$(curl -s -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
        -d "chat_id=${CHAT_ID}" \
        -d "message_thread_id=${THREAD_ID}" \
        -d "text=${text}" \
        -d "parse_mode=HTML")

    # The Bot API answers with JSON containing "ok":true on success.
    case "$api_reply" in
        *'"ok":true'*)
            echo "Message sent successfully to Telegram"
            ;;
        *)
            echo "Failed to send message to Telegram. Error response:"
            echo "$api_reply"
            ;;
    esac
}
45 |
# Main function to check missed blocks
# Checks last N blocks and counts how many were missed by the validator.
# Falls back to registry-provided RPC endpoints (via chain_endpoints.py)
# when the primary endpoint is unreachable; alerts over Telegram.
check_missed_blocks_in_last_n() {
    local N=10                              # Number of blocks to check
    local missed_blocks=0                   # Counter for missed blocks
    local current_endpoint="$RPC_ENDPOINT"  # Current working endpoint
    local failed_attempts=0                 # Counter for failed endpoint attempts

    # Get latest block height from current endpoint
    LATEST_HEIGHT=$(curl -s "${current_endpoint}/status" | jq -r '.result.sync_info.latest_block_height')

    # If primary endpoint fails, get and try backup endpoints
    if [[ -z "$LATEST_HEIGHT" || "$LATEST_HEIGHT" == "null" ]]; then
        echo "Local endpoint not responding, searching for backup endpoints..."

        # Get backup endpoints from chain_endpoints.py (space-separated list)
        echo "Running: python3 ${SCRIPT_DIR}/chain_endpoints.py $CHAIN_NAME"
        if ! BACKUP_ENDPOINTS=$(python3 "${SCRIPT_DIR}/chain_endpoints.py" "$CHAIN_NAME" 2>/dev/null); then
            echo "Error running chain_endpoints.py"
            BACKUP_ENDPOINTS=""
        fi
        # NOTE(review): $? here reads the exit status of the completed `if`
        # statement above (effectively always 0), NOT the python3 call --
        # that status was already consumed by `if !`.  Confirm intent; as
        # written the -eq 0 check below is near-vacuous.
        SCRIPT_EXIT_CODE=$?
        echo "Script exit code: $SCRIPT_EXIT_CODE"
        echo "Backup endpoints: $BACKUP_ENDPOINTS"

        if [ $SCRIPT_EXIT_CODE -eq 0 ] && [ ! -z "$BACKUP_ENDPOINTS" ]; then
            IFS=' ' read -r -a endpoints <<< "$BACKUP_ENDPOINTS"
            echo "Loaded ${#endpoints[@]} backup endpoints for ${CHAIN_NAME}"

            # Try each backup endpoint until one reports a block height
            for endpoint in "${endpoints[@]}"; do
                echo "Trying endpoint: $endpoint"
                LATEST_HEIGHT=$(curl -s "${endpoint}/status" | jq -r '.result.sync_info.latest_block_height')
                if [[ ! -z "$LATEST_HEIGHT" && "$LATEST_HEIGHT" != "null" ]]; then
                    current_endpoint="$endpoint"
                    echo "Found working endpoint: $endpoint"
                    break
                fi
                ((failed_attempts++))
                echo "Endpoint failed: $endpoint"
            done
        else
            echo "Failed to get backup endpoints"
            declare -a endpoints=()
        fi
    fi

    # Handle case when all endpoints failed: log, alert, and bail out
    if [[ -z "$LATEST_HEIGHT" || "$LATEST_HEIGHT" == "null" ]]; then
        local msg="🔴 $CHAIN_ID - no response from $RPC_ENDPOINT or backup endpoints (tried ${failed_attempts} additional endpoints) on $(hostname)"
        local log_msg="$(date): $CHAIN_ID - all endpoints failed after ${failed_attempts} attempts!"
        echo "$log_msg" >> "$LOG_FILE"
        send_tg_message "$msg"
        return 1
    fi

    # Check last N blocks for missed signatures
    # NOTE(review): the next line is truncated in this copy of the source --
    # the loop body that fetched each of the last N blocks, counted missing
    # validator signatures into ${missed_blocks}, defined THRESHOLD and built
    # ALERT_MSG is missing between `i=0; i` and `= THRESHOLD`.  Left
    # byte-for-byte as found; restore from the upstream repository before
    # relying on this script.
    for ((i=0; i= THRESHOLD )); then
        LOG_ENTRY="$(date): Validator missed ${missed_blocks} blocks on $(hostname)! for ${CHAIN_ID} (using endpoint: ${current_endpoint})"
        echo "$LOG_ENTRY" | tee -a "$LOG_FILE"

        send_tg_message "$ALERT_MSG"
    fi
}

# Run the check
check_missed_blocks_in_last_n
129 |
--------------------------------------------------------------------------------
/config.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Shared configuration sourced by check.sh, disk_check.sh and ram_check.sh.
# Every value is exported so it is visible to those scripts after sourcing.

# Telegram configuration
# Bot credentials/destination -- the monitoring scripts exit early while
# these are empty.
export BOT_TOKEN=""
export CHAT_ID=""
export THREAD_ID=""

# Monitoring thresholds
export DISK_THRESHOLD=300 # Free space threshold in GB
export RAM_THRESHOLD=100 # RAM usage threshold in GB
export BLOCKS_THRESHOLD=10 # Missed blocks threshold

# Log configuration
export LOG_DIR="./logs"
export LOG_FILE="${LOG_DIR}/cosmos_monitor.log"

# Alert messages
# NOTE: $(hostname) and the ${*_THRESHOLD} references expand once, when this
# file is sourced (hence thresholds are defined above).  The %d / %s
# placeholders are filled in later by the consuming scripts via printf.
export DISK_ALERT_MSG="⚠️ Critical low disk space on $(hostname)!
Free space: %dGB
Threshold: ${DISK_THRESHOLD}GB

❗️ Disk cleanup required!"

export RAM_ALERT_MSG="⚠️ High RAM usage on $(hostname)!
Used: %dGB
Threshold: ${RAM_THRESHOLD}GB

🔄 Server restart required."

export BLOCKS_ALERT_MSG="⚠️ Validator for %s missed more than ${BLOCKS_THRESHOLD} blocks on $(hostname)."
--------------------------------------------------------------------------------
/disk_check.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Disk-space monitor: sends a Telegram alert when free space on / drops
# below DISK_THRESHOLD (GB) from config.sh.

# Get the directory where the script is located
# This ensures we can find the config file regardless of where the script is called from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Load configuration variables from config.sh (thresholds, Telegram settings, etc.)
source "${SCRIPT_DIR}/config.sh"

# Fail fast if the configuration is incomplete
if [ -z "$DISK_THRESHOLD" ] || [ -z "$BOT_TOKEN" ] || [ -z "$CHAT_ID" ] || [ -z "$THREAD_ID" ]; then
    logger "Disk Monitor: Configuration error - missing required variables"
    exit 1
fi

# Function to send notifications via Telegram
# Parameters:
#   $1 - message text to send
send_tg_message() {
    local message=$1
    curl -s -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
        -d chat_id="${CHAT_ID}" \
        -d message_thread_id="${THREAD_ID}" \
        -d text="${message}" \
        -d parse_mode="HTML"
}

# Get available disk space in gigabytes:
#   df -BG /               - disk space for the root filesystem in GB units
#   awk 'NR==2 {print $4}' - 4th column (available space) of the data line
#   sed 's/G//'            - strip the trailing 'G' to leave a bare integer
FREE_SPACE=$(df -BG / | awk 'NR==2 {print $4}' | sed 's/G//')

# Bail out if df/awk produced nothing (unexpected df output format)
if [ -z "$FREE_SPACE" ]; then
    logger "Disk Monitor: Error getting disk space information"
    exit 1
fi

# Check if free space is below the threshold.
# Operands are quoted so an unexpected empty/multi-word value cannot break
# the test expression (the original left them unquoted).
if [ "${FREE_SPACE}" -lt "${DISK_THRESHOLD}" ]; then
    # Fill the %d placeholder in the alert template with the current value
    FORMATTED_ALERT=$(printf "$DISK_ALERT_MSG" "$FREE_SPACE")

    # Send notification to Telegram channel/group
    send_tg_message "$FORMATTED_ALERT"

    # Write event to system log for future reference
    logger "Disk Monitor: Free space is below ${DISK_THRESHOLD}GB (Current: ${FREE_SPACE}GB)"
fi
--------------------------------------------------------------------------------
/endpoint_storage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import json
3 | import os
4 | from datetime import datetime, timedelta
5 |
class EndpointStorage:
    """
    File-backed cache of per-chain endpoint lists.

    Entries older than ``cache_duration`` are considered stale and ignored.
    """
    def __init__(self, cache_dir="cache"):
        self.cache_dir = cache_dir
        self.cache_file = os.path.join(cache_dir, "endpoints_cache.json")
        self.cache_duration = timedelta(hours=24)  # entries expire after a day

        # Make sure the backing directory and file exist before first use
        os.makedirs(cache_dir, exist_ok=True)
        if not os.path.exists(self.cache_file):
            with open(self.cache_file, 'w') as fh:
                json.dump({}, fh)

    def save_endpoints(self, chain_id: str, endpoints: list):
        """Persist *endpoints* for *chain_id*, stamping the current time."""
        data = self.load_cache()
        data[chain_id] = {
            "endpoints": endpoints,
            "timestamp": datetime.now().isoformat(),
        }
        with open(self.cache_file, 'w') as fh:
            json.dump(data, fh, indent=2)

    def get_endpoints(self, chain_id: str) -> list:
        """Return cached endpoints for *chain_id*, or [] if absent or stale."""
        entry = self.load_cache().get(chain_id)
        if entry is not None:
            saved_at = datetime.fromisoformat(entry["timestamp"])
            if datetime.now() - saved_at < self.cache_duration:
                return entry["endpoints"]
        return []

    def load_cache(self) -> dict:
        """Read the whole cache file, tolerating empty or corrupt content."""
        try:
            with open(self.cache_file) as fh:
                raw = fh.read().strip()
        except Exception as e:
            print(f"Error loading storage: {e}")
            return {}
        if not raw:  # empty file counts as an empty cache
            return {}
        try:
            return json.loads(raw)
        except json.JSONDecodeError:  # corrupt JSON counts as an empty cache
            return {}
--------------------------------------------------------------------------------
/ram_check.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# RAM monitor: sends a Telegram alert and reboots the host when used memory
# exceeds RAM_THRESHOLD (GB) from config.sh.

# Get the directory where the script is located
# This ensures we can find the config file regardless of where the script is called from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Load configuration variables from config.sh (thresholds, Telegram settings, etc.)
source "${SCRIPT_DIR}/config.sh"

# Fail fast if the configuration is incomplete
if [ -z "$RAM_THRESHOLD" ] || [ -z "$BOT_TOKEN" ] || [ -z "$CHAT_ID" ] || [ -z "$THREAD_ID" ]; then
    logger "RAM Monitor: Configuration error - missing required variables"
    exit 1
fi

# Function to send notifications via Telegram
# Parameters:
#   $1 - message text to send
send_tg_message() {
    local message=$1
    curl -s -X POST "https://api.telegram.org/bot${BOT_TOKEN}/sendMessage" \
        -d chat_id="${CHAT_ID}" \
        -d message_thread_id="${THREAD_ID}" \
        -d text="${message}" \
        -d parse_mode="HTML"
}

# Get current RAM usage in gigabytes:
#   free -g               - memory info in GB
#   awk '/^Mem/ ...'      - pick the 3rd column (used) from the Mem: line
# (awk does the matching itself; the original's extra `grep Mem` stage was
# redundant)
USED_RAM=$(free -g | awk '/^Mem/ {print $3}')

# Bail out if free/awk produced nothing (unexpected output format)
if [ -z "$USED_RAM" ]; then
    logger "RAM Monitor: Error getting RAM usage information"
    exit 1
fi

# Check if RAM usage is at or above the threshold.
# Operands are quoted so an unexpected empty/multi-word value cannot break
# the test expression (the original left them unquoted).
if [ "${USED_RAM}" -ge "${RAM_THRESHOLD}" ]; then
    # Fill the %d placeholder in the alert template with the current value
    FORMATTED_ALERT=$(printf "$RAM_ALERT_MSG" "$USED_RAM")

    # Send notification to Telegram channel/group
    send_tg_message "$FORMATTED_ALERT"

    # Write event to system log for future reference
    logger "RAM Monitor: Memory usage exceeded ${RAM_THRESHOLD}GB (Current: ${USED_RAM}GB)"

    # Reboot the system when RAM usage is too high
    # Using full path to reboot command for security
    # Requires sudo privileges for the user running the script
    sudo /sbin/reboot
fi
--------------------------------------------------------------------------------