├── .gitignore ├── 1_provision_s3.py ├── 2_hash_upload_files.py ├── 3_download_files.py ├── README.md ├── aws_ranges_parser.py ├── blackhole.py ├── build-linux-tkg-docker.sh ├── default.json ├── draytek_dhcp_options.py ├── exif_df.py ├── ext_dir_scanner.py ├── fast_portscanner.py ├── file_monitor.py ├── my_certbot_renewal.sh ├── nhs_upvoter.py ├── numpy_nn.py ├── oci_free_tf ├── README.md ├── cloud_init.yaml ├── main.tf ├── terraform.tfvars └── variables.tf ├── optimize_imports.py ├── privacy_bluetooth.py ├── proxmox_recovery_scripts_reddit.md ├── recover-lxc-configs.sh ├── recover-qemu-configs.sh ├── reddit_shadowban_check.py ├── reddit_spam_detection.py ├── remove_metadata.py ├── scrub_git.sh ├── setup_docker.sh ├── subnet_calculator.py ├── subscriptions.xml ├── tech_companies_summary.txt ├── text_image_processor.py ├── trading_functions.py ├── tweaks.sysctl └── update_sysctl.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /1_provision_s3.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | 3 | # Create an S3 client 4 | s3 = boto3.client('s3') 5 | 6 | # Create a new bucket 7 | response = s3.create_bucket( 8 | Bucket='my-secure-sql-backup-bucket' 9 | ) 10 | 11 | # Set bucket policy to allow only the S3 bucket owner to access the bucket 12 | s3.put_bucket_policy( 13 | Bucket='my-secure-sql-backup-bucket', 14 | Policy=""" 15 | { 16 | "Version": "2012-10-17", 17 | "Statement": [ 18 | { 19 | "Sid": "AllowBucketOwner", 20 | "Effect": "Allow", 21 | "Principal": { 22 | "AWS": "arn:aws:iam:::root" 23 | }, 24 | "Action": [ 25 | "s3:GetObject", 26 | "s3:PutObject" 27 | ], 28 | "Resource": "arn:aws:s3:::my-secure-sql-backup-bucket/*" 29 | } 30 | ] 31 | } 32 | """ 33 | ) 34 | -------------------------------------------------------------------------------- /2_hash_upload_files.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os 3 | 4 | import boto3 5 | 6 | # Set the directory to search for BAK files 7 | directory = '/path/to/bak/files' 8 | 9 | # Create a list to store the BAK files and hashes 10 | bak_files = [] 11 | file_hashes = [] 12 | 13 | # Iterate over the files in the directory 14 | for filename in os.listdir(directory): 15 | # Check if the file is a BAK file 16 | if filename.endswith('.bak'): 17 | # Calculate the hash of the BAK file 18 | with open(os.path.join(directory, filename), 'rb') as f: 19 | file_hash = hashlib.sha256(f.read()).hexdigest() 20 | bak_files.append(filename) 21 | file_hashes.append(file_hash) 22 | 23 | # Print the list of BAK files and hashes 24 | for bak_file, file_hash in zip(bak_files, file_hashes): 25 | print(f'{bak_file}: {file_hash}') 26 | 27 | 28 | # Set the name of the S3 bucket 29 | bucket_name = 'my-secure-sql-backup-bucket' 30 | 31 | # Create an S3 client 32 | s3 = boto3.client('s3') 33 | 34 | # Iterate over the files in the directory 35 | for filename in os.listdir(directory): 36 | # Check if the file is a BAK file 37 | if filename.endswith('.bak'): 38 | # Upload the BAK file to the S3 bucket 39 | s3.upload_file(os.path.join(directory, filename), bucket_name, filename) 40 | -------------------------------------------------------------------------------- /3_download_files.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os 3 | 4 | import boto3 5 | 6 | # Set the name of the S3 bucket 7 | bucket_name = 'my-secure-sql-backup-bucket' 8 | 9 | # Set the directory where the BAK files will be saved 10 | directory = '/path/to/bak/files' 11 | 12 | # Create an S3 client 13 | s3 = boto3.client('s3') 14 | 15 | # Iterate over the objects in the S3 bucket 16 | for obj in s3.list_objects(Bucket=bucket_name)['Contents']: 17 | # Check if the object is a BAK 
file 18 | if obj['Key'].endswith('.bak'): 19 | # Download the BAK file to the specified directory 20 | s3.download_file(bucket_name, obj['Key'], os.path.join(directory, obj['Key'])) 21 | 22 | 23 | # Set the directory where the BAK files are saved 24 | directory = '/path/to/bak/files' 25 | 26 | # Read the hash text file 27 | with open('/path/to/hash/text/file.txt') as f: 28 | hashes = f.readlines() 29 | 30 | # Create a dictionary to store the original file hashes 31 | original_hashes = {} 32 | 33 | # Iterate over the hashes in the file 34 | for h in hashes: 35 | # Split the hash and filename 36 | filename, file_hash = h.split(': ') 37 | 38 | # Strip the newline character from the hash 39 | file_hash = file_hash.strip() 40 | 41 | # Add the hash to the dictionary 42 | original_hashes[filename] = file_hash 43 | 44 | # Iterate over the files in the directory 45 | for filename in os.listdir(directory): 46 | # Check if the file is a BAK file 47 | if filename.endswith('.bak'): 48 | # Calculate the hash of the downloaded BAK file 49 | with open(os.path.join(directory, filename), 'rb') as f: 50 | file_hash = hashlib.sha256(f.read()).hexdigest() 51 | 52 | # Check if the calculated hash matches the original hash 53 | if file_hash == original_hashes[filename]: 54 | print(f'{filename} is verified') 55 | else: 56 | print(f'{filename} is NOT verified') 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # script-toolbox 2 | 3 | This repo holds a set of scripts and tools I’ve built to solve real-world problems I've run into. 4 | 5 | Open to feedback, ideas, or improvements — especially if they help solve more of the messy stuff. 6 | 7 | ## Updates 8 | 9 | - **Reddit Shadowban Check** 10 | [reddit_shadowban_check.py](https://github.com/tg12/script-toolbox/blob/main/reddit_shadowban_check.py) 11 | 12 | - **Tech Company IP/ASN Summary** 13 | [tech_companies_summary.txt](https://github.com/tg12/script-toolbox/blob/main/tech_companies_summary.txt) 14 | 15 | - **Black Hole – Arcade-Style Python Game** 16 | A fast-paced game built with Pygame. 
17 | [Reddit post](https://www.reddit.com/r/pygame/comments/1lh7if6/python_game_black_hole_a_fun_arcadestyle_game/) 18 | 19 | --- 20 | 21 | If you found something useful, here's my BTC address: 22 | `3QjWqhQbHdHgWeYHTpmorP8Pe1wgDjJy54` 23 | 24 | Bitcoin QR Code 25 | -------------------------------------------------------------------------------- /aws_ranges_parser.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import requests 3 | import pandas as pd 4 | from tabulate import tabulate 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, 8 | format="%(asctime)s [%(levelname)s] %(message)s", 9 | datefmt="%Y-%m-%d %H:%M:%S") 10 | 11 | def download_json_file(url): 12 | try: 13 | response = requests.get(url, verify=False) 14 | response.raise_for_status() 15 | except requests.exceptions.RequestException as err: 16 | logging.error(f"HTTP request failed: {err}") 17 | return None 18 | 19 | try: 20 | data = response.json() 21 | except ValueError: 22 | logging.error("Failed to parse JSON") 23 | return None 24 | 25 | return data 26 | 27 | def process_data(data): 28 | # Extract the 'prefixes' and 'ipv6_prefixes' into separate dataframes 29 | prefixes_df = pd.DataFrame(data['prefixes']) 30 | ipv6_prefixes_df = pd.DataFrame(data['ipv6_prefixes']) 31 | 32 | # Combine both into a single DataFrame 33 | combined_df = pd.concat([prefixes_df, ipv6_prefixes_df]) 34 | 35 | return combined_df 36 | 37 | def filter_and_sort(df): 38 | # Filter rows in the London region 39 | london_df = df[df['region'] == 'eu-west-2'] 40 | 41 | # Filter rows with the AMAZON service 42 | amazon_df = london_df[london_df['service'] == 'AMAZON'] 43 | 44 | # Sort by the column 'ip_prefix' 45 | amazon_df.sort_values(by=['ip_prefix'], inplace=True) 46 | 47 | # Reset the index 48 | amazon_df.reset_index(drop=True, inplace=True) 49 | 50 | return amazon_df 51 | 52 | def main(): 53 | url = "https://ip-ranges.amazonaws.com/ip-ranges.json" 54 | logging.info(f"Downloading JSON file from {url}") 55 | data = download_json_file(url) 56 | 57 | if data is None: 58 | logging.error("Failed to download or parse JSON file") 59 | return 60 | 61 | logging.info("Processing data") 62 | df = process_data(data) 63 | 64 | logging.info("Filtering and sorting data") 65 | final_df = filter_and_sort(df) 66 | 67 | # Print the dataframe in a nice format 68 | print(tabulate(final_df, headers='keys', tablefmt='psql')) 69 | 70 | if __name__ == "__main__": 71 | main() 72 | -------------------------------------------------------------------------------- /blackhole.py: -------------------------------------------------------------------------------- 1 | # BLACK HOLE - A fun arcade-style game! 2 | # 3 | # Click to place black holes and suck in all the planets. 4 | # You have a limited number of black holes and time. 5 | # Black holes last a few seconds and show a countdown. 6 | # Planets bounce around - plan your shots! 7 | # Can you clear the galaxy? Good luck! 8 | # 9 | # Disclaimer: This game is for entertainment and educational purposes only. 10 | # No real planets were harmed in the making of this code. 11 | # 12 | 13 | """Copyright (C) 2025 James Sawyer 14 | All rights reserved. 15 | 16 | This script and the associated files are private 17 | and confidential property. Unauthorized copying of 18 | this file, via any medium, and the divulgence of any 19 | contained information without express written consent 20 | is strictly prohibited. 
21 | 22 | This script is intended for personal use only and should 23 | not be distributed or used in any commercial or public 24 | setting unless otherwise authorized by the copyright holder. 25 | By using this script, you agree to abide by these terms. 26 | 27 | DISCLAIMER: This script is provided 'as is' without warranty 28 | of any kind, either express or implied, including, but not 29 | limited to, the implied warranties of merchantability, 30 | fitness for a particular purpose, or non-infringement. In no 31 | event shall the authors or copyright holders be liable for 32 | any claim, damages, or other liability, whether in an action 33 | of contract, tort or otherwise, arising from, out of, or in 34 | connection with the script or the use or other dealings in 35 | the script. 36 | """ 37 | 38 | # -*- coding: utf-8 -*- 39 | # pylint: disable=C0116, W0621, W1203, C0103, C0301, W1201, W0511, E0401, E1101, E0606 40 | # C0116: Missing function or method docstring 41 | # W0621: Redefining name %r from outer scope (line %s) 42 | # W1203: Use % formatting in logging functions and pass the % parameters as arguments 43 | # C0103: Constant name "%s" doesn't conform to UPPER_CASE naming style 44 | # C0301: Line too long (%s/%s) 45 | # W1201: Specify string format arguments as logging function parameters 46 | # W0511: TODOs 47 | # E1101: Module 'holidays' has no 'US' member (no-member) ... it does, so ignore this 48 | # E0606: possibly-used-before-assignment, ignore this 49 | # UP018: native-literals (UP018) 50 | 51 | import math 52 | import random 53 | import sys 54 | import time 55 | 56 | import numpy as np 57 | import pygame 58 | 59 | # --- CONFIG --- 60 | WIDTH, HEIGHT = 1280, 720 61 | FPS = 90 62 | 63 | PLANET_RADIUS = 24 64 | BLACKHOLE_RADIUS_RANGE = (90, 120) # min, max 65 | BLACKHOLE_LIFETIME_RANGE = (2.0, 3.5) # seconds 66 | PLANET_COLORS = [ 67 | (255, 0, 0), 68 | (0, 255, 0), 69 | (0, 0, 255), 70 | (255, 255, 0), 71 | (0, 255, 255), 72 | (255, 0, 255), 73 | (127, 255, 0), 74 | (128, 0, 255), 75 | ] 76 | FONT_NAME = "freesansbold.ttf" 77 | 78 | # --- DIFFICULTY SETTINGS --- 79 | DIFFICULTY_LEVELS = { 80 | "Easy": {"planets": 10, "holes": 4, "speed": 2.5, "timer": 40}, 81 | "Medium": {"planets": 16, "holes": 3, "speed": 3.5, "timer": 32}, 82 | "Hard": {"planets": 22, "holes": 2, "speed": 4.5, "timer": 24}, 83 | } 84 | 85 | 86 | # --- STARFIELD --- 87 | def make_starfield(num=120): 88 | return [ 89 | (random.randint(0, WIDTH), random.randint(0, HEIGHT), random.randint(1, 3)) 90 | for _ in range(num) 91 | ] 92 | 93 | 94 | def draw_starfield(screen, stars): 95 | for x, y, r in stars: 96 | pygame.draw.circle(screen, (255, 255, 255), (x, y), r) 97 | 98 | 99 | # --- CLASSES --- 100 | 101 | 102 | class Planet: 103 | def __init__(self): 104 | # Use numpy for random positions and velocities 105 | self.x = np.random.randint(PLANET_RADIUS, WIDTH - PLANET_RADIUS) 106 | self.y = np.random.randint(PLANET_RADIUS + 70, HEIGHT - PLANET_RADIUS) 107 | angle = np.random.uniform(0, 2 * np.pi) 108 | speed = np.random.uniform(self.speed_min, self.speed_max) 109 | self.vx = np.cos(angle) * speed 110 | self.vy = np.sin(angle) * speed 111 | self.color = random.choice(PLANET_COLORS) 112 | self.alive = True 113 | self.sucked = False 114 | self.suck_target = None 115 | self.suck_progress = 0 116 | 117 | @classmethod 118 | def set_speed_range(cls, minv, maxv): 119 | cls.speed_min = minv 120 | cls.speed_max = maxv 121 | 122 | def move(self): 123 | if self.sucked and self.suck_target: 124 | dx = self.suck_target[0] - 
self.x 125 | dy = self.suck_target[1] - self.y 126 | dist = math.hypot(dx, dy) 127 | if dist < 5: 128 | self.alive = False 129 | else: 130 | angle = math.atan2(dy, dx) 131 | spiral = angle + 0.2 132 | self.x += math.cos(spiral) * max(dist * 0.18, 2) 133 | self.y += math.sin(spiral) * max(dist * 0.18, 2) 134 | self.suck_progress += 1 135 | else: 136 | self.x += self.vx 137 | self.y += self.vy 138 | # Bounce 139 | if self.x < PLANET_RADIUS or self.x > WIDTH - PLANET_RADIUS: 140 | self.vx *= -1 141 | if self.y < PLANET_RADIUS + 60 or self.y > HEIGHT - PLANET_RADIUS: 142 | self.vy *= -1 143 | 144 | def draw(self, screen): 145 | if self.alive: 146 | glow_color = tuple(min(255, c + 120) for c in self.color) 147 | pygame.draw.circle( 148 | screen, glow_color, (int(self.x), int(self.y)), PLANET_RADIUS + 7 149 | ) 150 | pygame.draw.circle( 151 | screen, 152 | (220, 220, 220), 153 | (int(self.x), int(self.y)), 154 | PLANET_RADIUS + 2, 155 | 2, 156 | ) 157 | pygame.draw.circle( 158 | screen, self.color, (int(self.x), int(self.y)), PLANET_RADIUS 159 | ) 160 | if self.sucked: 161 | shrink = max(0, PLANET_RADIUS - self.suck_progress // 3) 162 | if shrink > 0: 163 | pygame.draw.circle( 164 | screen, self.color, (int(self.x), int(self.y)), shrink 165 | ) 166 | 167 | 168 | class BlackHole: 169 | def __init__(self, pos): 170 | self.x, self.y = pos 171 | self.radius = random.randint(*BLACKHOLE_RADIUS_RANGE) 172 | self.lifetime = random.uniform(*BLACKHOLE_LIFETIME_RANGE) 173 | self.created = time.time() 174 | self.active = True 175 | self.angle = np.random.uniform(0, 2 * np.pi) 176 | 177 | def update(self): 178 | if time.time() - self.created > self.lifetime: 179 | self.active = False 180 | self.angle += 0.09 181 | 182 | def draw(self, screen): 183 | t = (time.time() - self.created) / self.lifetime 184 | alpha = max(0, 255 - int(200 * t)) 185 | s = pygame.Surface((self.radius * 2, self.radius * 2), pygame.SRCALPHA) 186 | for i in range(6): 187 | a = self.angle + i * math.pi / 3 188 | r = int(self.radius * 0.85) 189 | x = int(self.radius + math.cos(a) * r * 0.7) 190 | y = int(self.radius + math.sin(a) * r * 0.7) 191 | pygame.draw.circle( 192 | s, (40, 40, 90, int(alpha * 0.25)), (x, y), int(self.radius * 0.35) 193 | ) 194 | pygame.draw.circle( 195 | s, (10, 10, 10, alpha), (self.radius, self.radius), self.radius 196 | ) 197 | pygame.draw.circle( 198 | s, 199 | (40, 40, 90, int(alpha * 0.4)), 200 | (self.radius, self.radius), 201 | int(self.radius * 0.75), 202 | ) 203 | pygame.draw.circle( 204 | s, 205 | (120, 120, 255, int(alpha * 0.18)), 206 | (self.radius, self.radius), 207 | int(self.radius * 0.97), 208 | 3, 209 | ) 210 | screen.blit(s, (self.x - self.radius, self.y - self.radius)) 211 | # Draw countdown seconds 212 | seconds_left = max( 213 | 0, int(math.ceil(self.lifetime - (time.time() - self.created))) 214 | ) 215 | if seconds_left > 0: 216 | font = pygame.font.Font(FONT_NAME, 32) 217 | txt = font.render(str(seconds_left), True, (255, 255, 180)) 218 | txt_rect = txt.get_rect(center=(self.x, self.y)) 219 | screen.blit(txt, txt_rect) 220 | 221 | 222 | def draw_text(screen, text, size, x, y, color=(255, 255, 255), center=True): 223 | font = pygame.font.Font(FONT_NAME, size) 224 | txt_surf = font.render(text, True, color) 225 | rect = txt_surf.get_rect() 226 | if center: 227 | rect.center = (x, y) 228 | else: 229 | rect.topleft = (x, y) 230 | screen.blit(txt_surf, rect) 231 | 232 | 233 | def countdown(screen, seconds, stars): 234 | for i in range(seconds, 0, -1): 235 | screen.fill((0, 0, 0)) 236 | 
draw_starfield(screen, stars) 237 | draw_text(screen, f"{i}", 160, WIDTH // 2, HEIGHT // 2) 238 | pygame.display.flip() 239 | pygame.time.wait(800) 240 | screen.fill((0, 0, 0)) 241 | draw_starfield(screen, stars) 242 | draw_text(screen, "GO!", 120, WIDTH // 2, HEIGHT // 2, (90, 255, 90)) 243 | pygame.display.flip() 244 | pygame.time.wait(700) 245 | 246 | 247 | def main(): 248 | pygame.init() 249 | screen = pygame.display.set_mode((WIDTH, HEIGHT)) 250 | pygame.display.set_caption("Black Hole") 251 | clock = pygame.time.Clock() 252 | stars = make_starfield() 253 | best_score = 0 254 | 255 | # --- DIFFICULTY MENU --- 256 | selected = 0 257 | difficulties = list(DIFFICULTY_LEVELS.keys()) 258 | running = True 259 | while running: 260 | screen.fill((12, 17, 37)) 261 | draw_starfield(screen, stars) 262 | draw_text(screen, "BLACK HOLE", 72, WIDTH // 2, 70, (255, 255, 255)) 263 | draw_text(screen, "Select Difficulty", 50, WIDTH // 2, 340, (180, 220, 255)) 264 | for i, level in enumerate(difficulties): 265 | color = (140, 255, 180) if i == selected else (255, 255, 255) 266 | draw_text(screen, f"{level}", 42, WIDTH // 2, 420 + i * 70, color) 267 | pygame.display.flip() 268 | for event in pygame.event.get(): 269 | if event.type == pygame.QUIT: 270 | sys.exit() 271 | elif event.type == pygame.KEYDOWN: 272 | if event.key == pygame.K_UP: 273 | selected = (selected - 1) % len(difficulties) 274 | elif event.key == pygame.K_DOWN: 275 | selected = (selected + 1) % len(difficulties) 276 | elif event.key in [pygame.K_RETURN, pygame.K_SPACE]: 277 | running = False 278 | clock.tick(24) 279 | 280 | diff = difficulties[selected] 281 | params = DIFFICULTY_LEVELS[diff] 282 | Planet.set_speed_range(params["speed"] * 0.75, params["speed"] * 1.15) 283 | 284 | while True: 285 | # --- PREPARE GAME --- 286 | planets = [Planet() for _ in range(params["planets"])] 287 | blackholes = [] 288 | blackholes_left = params["holes"] 289 | timer = params["timer"] 290 | start_time = time.time() 291 | sucked_count = 0 292 | 293 | # --- COUNTDOWN --- 294 | countdown(screen, 3, stars) 295 | 296 | # --- MAIN GAME LOOP --- 297 | game_over = False 298 | win = False 299 | 300 | while True: 301 | dt = clock.tick(FPS) / 1000.0 302 | elapsed = int(time.time() - start_time) 303 | time_left = max(0, timer - elapsed) 304 | 305 | for event in pygame.event.get(): 306 | if event.type == pygame.QUIT: 307 | pygame.quit() 308 | sys.exit() 309 | elif event.type == pygame.MOUSEBUTTONDOWN and not game_over: 310 | if blackholes_left > 0: 311 | mx, my = pygame.mouse.get_pos() 312 | blackholes.append(BlackHole((mx, my))) 313 | blackholes_left -= 1 314 | 315 | # Update black holes 316 | for bh in blackholes: 317 | bh.update() 318 | blackholes = [bh for bh in blackholes if bh.active] 319 | 320 | # Improved collision detection: use squared distance and sum of radii 321 | for p in planets: 322 | if not p.alive or p.sucked: 323 | continue 324 | for bh in blackholes: 325 | dx = bh.x - p.x 326 | dy = bh.y - p.y 327 | sum_r = bh.radius + PLANET_RADIUS 328 | if dx * dx + dy * dy < sum_r * sum_r: 329 | p.sucked = True 330 | p.suck_target = (bh.x, bh.y) 331 | break 332 | 333 | # Update planets 334 | for p in planets: 335 | if p.alive: 336 | prev_alive = p.alive 337 | p.move() 338 | if prev_alive and not p.alive: 339 | sucked_count += 1 340 | alive_planets = [p for p in planets if p.alive] 341 | 342 | # --- DRAW --- 343 | screen.fill((0, 0, 0)) 344 | draw_starfield(screen, stars) 345 | draw_text( 346 | screen, f"Time: {time_left}", 32, 80, 30, (255, 255, 180), 
center=False 347 | ) 348 | draw_text( 349 | screen, 350 | f"Black Holes: {blackholes_left}", 351 | 32, 352 | WIDTH - 270, 353 | 30, 354 | (180, 255, 255), 355 | center=False, 356 | ) 357 | draw_text( 358 | screen, 359 | f"Planets Left: {len(alive_planets)}", 360 | 32, 361 | WIDTH // 2 - 80, 362 | 30, 363 | (255, 180, 255), 364 | center=False, 365 | ) 366 | for bh in blackholes: 367 | bh.draw(screen) 368 | for p in planets: 369 | p.draw(screen) 370 | 371 | if game_over: 372 | # Freeze time and blackholes_left for bonus/score calculation at game over 373 | if not hasattr(main, "_final_bonus"): 374 | # Only calculate once at game over 375 | main._final_blackholes_left = blackholes_left 376 | main._final_time_left = time_left 377 | main._final_bonus = max( 378 | 0, main._final_blackholes_left * 3 + main._final_time_left 379 | ) 380 | main._final_score = sucked_count + main._final_bonus 381 | bonus = main._final_bonus 382 | score = main._final_score 383 | if win: 384 | draw_text( 385 | screen, 386 | "YOU WIN!", 387 | 88, 388 | WIDTH // 2, 389 | HEIGHT // 2 - 60, 390 | (100, 255, 100), 391 | ) 392 | else: 393 | draw_text( 394 | screen, 395 | "TIME'S UP!", 396 | 74, 397 | WIDTH // 2, 398 | HEIGHT // 2 - 60, 399 | (255, 90, 90), 400 | ) 401 | draw_text( 402 | screen, 403 | f"Planets Sucked In: {sucked_count}", 404 | 52, 405 | WIDTH // 2, 406 | HEIGHT // 2 + 10, 407 | (255, 255, 180), 408 | ) 409 | draw_text( 410 | screen, 411 | f"Bonus: {bonus}", 412 | 40, 413 | WIDTH // 2, 414 | HEIGHT // 2 + 60, 415 | (180, 255, 255), 416 | ) 417 | draw_text( 418 | screen, 419 | f"Final Score: {score}", 420 | 54, 421 | WIDTH // 2, 422 | HEIGHT // 2 + 120, 423 | (255, 255, 255), 424 | ) 425 | if score > best_score: 426 | best_score = score 427 | draw_text( 428 | screen, 429 | f"Best Score: {best_score}", 430 | 38, 431 | WIDTH // 2, 432 | HEIGHT // 2 + 180, 433 | (255, 255, 180), 434 | ) 435 | draw_text( 436 | screen, 437 | "Press R to Restart", 438 | 38, 439 | WIDTH // 2, 440 | HEIGHT // 2 + 240, 441 | (180, 220, 255), 442 | ) 443 | else: 444 | # Reset the final bonus/score cache if not game over 445 | if hasattr(main, "_final_bonus"): 446 | del main._final_bonus 447 | del main._final_score 448 | del main._final_blackholes_left 449 | del main._final_time_left 450 | pygame.display.flip() 451 | 452 | # --- WIN/LOSE LOGIC --- 453 | if not game_over: 454 | if not alive_planets: 455 | win, game_over = True, True 456 | elif time_left == 0: 457 | win, game_over = False, True 458 | # Instant game over if no black holes left and planets remain 459 | elif ( 460 | blackholes_left == 0 461 | and len(alive_planets) > 0 462 | and not any(bh.active for bh in blackholes) 463 | ): 464 | win, game_over = False, True 465 | else: 466 | keys = pygame.key.get_pressed() 467 | if keys[pygame.K_r]: 468 | break # Restart with same difficulty 469 | 470 | 471 | if __name__ == "__main__": 472 | main() 473 | -------------------------------------------------------------------------------- /build-linux-tkg-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Configuration 5 | IMAGE_NAME="tkg-builder" 6 | SCRIPT_DIR="$(pwd)" 7 | OUT_DIR_HOST="$SCRIPT_DIR/tkg-out" 8 | EXT_CFG_HOST="$SCRIPT_DIR/linux-tkg.cfg" 9 | DOCKERFILE_HOST="$SCRIPT_DIR/Dockerfile" 10 | 11 | # 1) Write external linux-tkg config 12 | cat > "$EXT_CFG_HOST" < "$DOCKERFILE_HOST" <<'EOF' 25 | FROM gcc:latest 26 | 27 | # Install build dependencies 28 | RUN apt-get update && 
DEBIAN_FRONTEND=noninteractive \ 29 | apt-get install -y --no-install-recommends \ 30 | bc bison build-essential ccache cpio flex git kmod \ 31 | libelf-dev libncurses5-dev libssl-dev lz4 qtbase5-dev \ 32 | rsync schedtool wget zstd && \ 33 | rm -rf /var/lib/apt/lists/* 34 | 35 | # Shallow-clone linux-tkg 36 | RUN git clone --depth 1 https://github.com/Frogging-Family/linux-tkg.git /linux-tkg 37 | 38 | # Copy in external config and set env var 39 | COPY linux-tkg.cfg /root/.config/frogminer/linux-tkg.cfg 40 | ENV _EXT_CONFIG_PATH=/root/.config/frogminer/linux-tkg.cfg 41 | 42 | WORKDIR /linux-tkg 43 | EOF 44 | 45 | echo "Wrote Dockerfile to $DOCKERFILE_HOST" 46 | 47 | # 3) Build the Docker image 48 | docker build -t "$IMAGE_NAME" . 49 | echo "Built Docker image: $IMAGE_NAME" 50 | 51 | # 4) Prepare host output directory 52 | mkdir -p "$OUT_DIR_HOST" 53 | echo "Host output directory ready: $OUT_DIR_HOST" 54 | 55 | # 5) Run the container interactively to build and export linux-src-git 56 | echo "Launching container for interactive build..." 57 | echo " • In the prompts, choose 'Generic' and answer 'n' when asked to install inside the container." 58 | docker run --rm -it \ 59 | -v "$OUT_DIR_HOST":/out \ 60 | "$IMAGE_NAME" \ 61 | bash -euo pipefail -c ' 62 | cd /linux-tkg 63 | ./install.sh install 64 | cp -R linux-src-git /out/linux-src-git 65 | ' 66 | 67 | # 6) On host: install the kernel 68 | SRC_DIR="$OUT_DIR_HOST/linux-src-git" 69 | if [ ! -d "$SRC_DIR" ]; then 70 | echo "Error: build tree not found at $SRC_DIR" >&2 71 | exit 1 72 | fi 73 | 74 | # Detect kernel version 75 | KVER=$(make -s -C "$SRC_DIR" kernelrelease) 76 | echo "Detected kernel version: $KVER" 77 | 78 | # Prepare /usr/src and remove old tree 79 | sudo mkdir -p /usr/src 80 | sudo rm -rf "/usr/src/linux-tkg-$KVER" 81 | 82 | # Copy build tree into place 83 | echo "Copying build tree to /usr/src/linux-tkg-$KVER" 84 | sudo cp -R "$SRC_DIR" "/usr/src/linux-tkg-$KVER" 85 | 86 | # Install modules and kernel 87 | echo "Installing modules and kernel..." 88 | cd "/usr/src/linux-tkg-$KVER" 89 | sudo make modules_install 90 | sudo make install 91 | 92 | echo "linux-tkg $KVER has been installed on your host." 
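# Note (not part of the original script; distro behaviour varies): on many
# systems `make install` triggers the installkernel hook, which regenerates
# the initramfs and bootloader entries for you. If yours does not, update
# them manually (e.g. `sudo update-grub` on Debian/Ubuntu) before rebooting
# into the new kernel.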
93 | -------------------------------------------------------------------------------- /default.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "distribution": "Angstrom", 4 | "default_username": "root", 5 | "default_password": "no password (empty)" 6 | }, 7 | { 8 | "distribution": "BalenaOS", 9 | "default_username": "root", 10 | "default_password": "no password (empty)" 11 | }, 12 | { 13 | "distribution": "Yocto Project", 14 | "default_username": "root", 15 | "default_password": "no password (empty)" 16 | }, 17 | { 18 | "distribution": "Tiny Core Linux", 19 | "default_username": "tc", 20 | "default_password": "no password (empty)" 21 | }, 22 | { 23 | "distribution": "Buildroot", 24 | "default_username": "root", 25 | "default_password": "no password (empty)" 26 | }, 27 | { 28 | "distribution": "Photon OS", 29 | "default_username": "root", 30 | "default_password": "changeme" 31 | }, 32 | { 33 | "distribution": "Armbian", 34 | "default_username": "root", 35 | "default_password": "1234" 36 | }, 37 | { 38 | "distribution": "OpenWRT", 39 | "default_username": "root", 40 | "default_password": "no password (empty)" 41 | }, 42 | { 43 | "distribution": "LEDE", 44 | "default_username": "root", 45 | "default_password": "no password (empty)" 46 | }, 47 | { 48 | "distribution": "TorizonCore", 49 | "default_username": "torizon", 50 | "default_password": "torizon" 51 | }, 52 | { 53 | "distribution": "Volumio", 54 | "default_username": "volumio", 55 | "default_password": "volumio" 56 | }, 57 | { 58 | "distribution": "Moode Audio", 59 | "default_username": "pi", 60 | "default_password": "moodeaudio" 61 | }, 62 | { 63 | "distribution": "HassOS", 64 | "default_username": "root", 65 | "default_password": "no password (empty)" 66 | }, 67 | { 68 | "distribution": "Jeedom", 69 | "default_username": "root", 70 | "default_password": "Mjeedom96" 71 | }, 72 | { 73 | "distribution": "FreePBX", 74 | "default_username": "root", 75 | "default_password": "admin" 76 | }, 77 | { 78 | "distribution": "Pi-hole", 79 | "default_username": "admin", 80 | "default_password": "user-created-during-setup" 81 | }, 82 | { 83 | "distribution": "NextCloudPi", 84 | "default_username": "ncp", 85 | "default_password": "ownyourbits" 86 | }, 87 | { 88 | "distribution": "IoT Core OS", 89 | "default_username": "root", 90 | "default_password": "no password (empty)" 91 | }, 92 | { 93 | "distribution": "Alpine Linux IoT", 94 | "default_username": "root", 95 | "default_password": "no password (empty)" 96 | }, 97 | { 98 | "distribution": "Nebula Linux", 99 | "default_username": "nebula", 100 | "default_password": "nebula" 101 | }, 102 | { 103 | "distribution": "Zynthian", 104 | "default_username": "pi", 105 | "default_password": "raspberry" 106 | }, 107 | { 108 | "distribution": "Snappy Ubuntu Core", 109 | "default_username": "user-created-during-setup", 110 | "default_password": "user-specified-during-setup" 111 | }, 112 | { 113 | "distribution": "MeshCentral", 114 | "default_username": "admin", 115 | "default_password": "meshpassword" 116 | }, 117 | { 118 | "distribution": "BalenaSound", 119 | "default_username": "root", 120 | "default_password": "no password (empty)" 121 | }, 122 | { 123 | "distribution": "Parrot Security OS", 124 | "default_username": "user", 125 | "default_password": "toor" 126 | }, 127 | { 128 | "distribution": "Slitaz", 129 | "default_username": "tux", 130 | "default_password": "root" 131 | }, 132 | { 133 | "distribution": "Puppy Linux", 134 | "default_username": "root", 
135 | "default_password": "woofwoof" 136 | }, 137 | { 138 | "distribution": "DragonOS", 139 | "default_username": "dragon", 140 | "default_password": "dragon" 141 | }, 142 | { 143 | "distribution": "Cumulus Linux", 144 | "default_username": "cumulus", 145 | "default_password": "CumulusLinux!" 146 | }, 147 | { 148 | "distribution": "OpenMediaVault", 149 | "default_username": "admin", 150 | "default_password": "openmediavault" 151 | } 152 | ] 153 | -------------------------------------------------------------------------------- /draytek_dhcp_options.py: -------------------------------------------------------------------------------- 1 | # Description: This script is used to convert domain suffixes to hexadecimal for use in DHCP option 43 2 | # Author: James Sawyer 3 | # Email: githubtools@jamessawyer.co.uk 4 | # Website: http://www.jamessawyer.co.uk/ 5 | 6 | 7 | """ 8 | The domain suffixes presented in hexadecimal with a number indicate the length in front of it. For example, "draytek.com" 9 | should be presented as "076472617974656b03636f6d" where 07 means there are 7 characters followed, 6472617974656b is "draytek", 10 | 03 means there are 3 characters followed, and 636f6d is "com""" 11 | 12 | 13 | """ This tool is specifically designed for DreyTek routers. It allows users to easily configure and manage their DreyTek routers, 14 | including setting up network security, parental controls, and other advanced features. 15 | 16 | Disclaimer: This tool is not affiliated with or endorsed by DreyTek. Use of this tool is at your own risk. 17 | We are not responsible for any damage or data loss that may result from using this tool. 18 | Please make sure to backup your router settings before using this tool. """ 19 | 20 | 21 | # https://www.draytek.com/support/knowledge-base/5314 22 | 23 | def domain_to_hex(domain): 24 | # Split the domain into its individual segments 25 | segments = domain.split(".") 26 | 27 | # Initialize a list to store the hexadecimal representation of each segment 28 | hex_segments = [] 29 | 30 | # Loop through each segment of the domain 31 | for segment in segments: 32 | # Convert the segment to its hexadecimal representation 33 | hex_segment = segment.encode("utf-8").hex() 34 | 35 | # Add the length of the segment (in hexadecimal) to the beginning of 36 | # the hexadecimal representation 37 | hex_segment = "{:02x}".format(len(segment)) + hex_segment 38 | 39 | # Add the segment to the list of hexadecimal segments 40 | hex_segments.append(hex_segment) 41 | 42 | # Join the hexadecimal segments together with a "." 
to create the final 43 | # hexadecimal representation of the domain 44 | hex_domain = "".join(hex_segments) 45 | 46 | return hex_domain 47 | 48 | 49 | def create_dhcp_43(domain_suffixes): 50 | # Convert the domain suffixes to their hexadecimal representation 51 | hex_suffixes = domain_to_hex(domain_suffixes) 52 | 53 | # Create the 6-byte DHCP option 43 by concatenating the hexadecimal representation of the domain suffixes 54 | # with the necessary padding to make the total length 6 bytes 55 | dhcp_43 = hex_suffixes + "00" * (6 - (len(hex_suffixes) // 2)) 56 | 57 | return dhcp_43 58 | 59 | 60 | # Test the function with the domain "local.lan" 61 | print(domain_to_hex("local.lan")) # should output "056c6f63616c036c616e" 62 | 63 | print(domain_to_hex("192.168.1.250")) 64 | -------------------------------------------------------------------------------- /exif_df.py: -------------------------------------------------------------------------------- 1 | """ Description: 2 | This script extracts metadata from image files and returns it as a pandas dataframe. It uses the piexif library to extract metadata from the images, and the geopy library to convert GPS coordinates to place names. 3 | 4 | Disclaimer: The information and content provided by me, is for informational purposes only. All content provided is the property of @James12396379, and any use or distribution of this content should include proper attribution to @James12396379. 5 | 6 | """ 7 | 8 | # Author: James Sawyer 9 | # Email: githubtools@jamessawyer.co.uk 10 | # Website: http://www.jamessawyer.co.uk/ 11 | # Twitter: https://twitter.com/James12396379 12 | 13 | import os 14 | 15 | import pandas as pd 16 | import piexif 17 | from geopy.geocoders import Nominatim 18 | from PIL import Image 19 | from tabulate import tabulate 20 | 21 | 22 | # allows Pillow to open and manipulate images in the HEIF (i.e. HEIC) format 23 | from pillow_heif import register_heif_opener 24 | register_heif_opener() 25 | 26 | 27 | def extract_metadata(dir_path): 28 | """ 29 | Extracts the metadata from all the image files in the specified directory and 30 | returns it as a pandas dataframe. 31 | 32 | Args: 33 | dir_path (str): The path to the directory containing the image files. 34 | 35 | Returns: 36 | pandas.DataFrame: A dataframe containing the metadata of all the image files. 
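    Note:
        Images with no EXIF block or missing GPS tags raise an exception inside
        the try block; the file is logged with an error message and skipped, so
        one bad image does not abort the whole run.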
37 | """ 38 | metadata_list = [] 39 | for file in os.listdir(dir_path): 40 | print("[+] Processing {}".format(file)) 41 | if file.endswith(".jpg") or file.endswith(".jpeg") or file.endswith(".png") or file.endswith(".tiff") or file.endswith(".bmp") or file.endswith(".gif") or file.endswith( 42 | ".webp") or file.endswith(".psd") or file.endswith(".raw") or file.endswith(".cr2") or file.endswith(".nef") or file.endswith(".heic") or file.endswith(".sr2"): 43 | print("[+] Extracting metadata from {}".format(file)) 44 | try: 45 | with Image.open(os.path.join(dir_path, file)) as img: 46 | exif_data = piexif.load(img.info["exif"]) 47 | print("[+] Metadata extracted from {}".format(file)) 48 | 49 | # Extract GPS latitude, longitude, and altitude data 50 | gps_latitude = exif_data['GPS'][piexif.GPSIFD.GPSLatitude] 51 | gps_latitude_ref = exif_data['GPS'][piexif.GPSIFD.GPSLatitudeRef] 52 | gps_longitude = exif_data['GPS'][piexif.GPSIFD.GPSLongitude] 53 | gps_longitude_ref = exif_data['GPS'][piexif.GPSIFD.GPSLongitudeRef] 54 | gps_altitude = exif_data['GPS'][piexif.GPSIFD.GPSAltitude] 55 | gps_altitude_ref = exif_data['GPS'][piexif.GPSIFD.GPSAltitudeRef] 56 | 57 | # Convert GPS latitude and longitude data to decimal degrees 58 | gps_latitude_decimal = gps_to_decimal( 59 | gps_latitude, gps_latitude_ref) 60 | gps_longitude_decimal = gps_to_decimal( 61 | gps_longitude, gps_longitude_ref) 62 | 63 | metadata = { 64 | 'filename': file, 65 | 'gps_latitude': gps_latitude_decimal, 66 | 'gps_longitude': gps_longitude_decimal, 67 | 'gps_altitude': gps_altitude, 68 | 'gps_altitude_ref': gps_altitude_ref, 69 | 'make': exif_data['0th'][piexif.ImageIFD.Make], 70 | 'model': exif_data['0th'][piexif.ImageIFD.Model], 71 | 'software': exif_data['0th'][piexif.ImageIFD.Software], 72 | 'datetime': exif_data['0th'][piexif.ImageIFD.DateTime], 73 | # 'exposure_time': exif_data['Exif'][piexif.ExifIFD.ExposureTime], 74 | # 'f_number': exif_data['Exif'][piexif.ExifIFD.FNumber], 75 | # 'iso_speed_ratings': exif_data['Exif'][piexif.ExifIFD.ISOSpeedRatings], 76 | # 'focal_length': exif_data['Exif'][piexif.ExifIFD.FocalLength], 77 | # 'focal_length_in_35mm_film': exif_data['Exif'][piexif.ExifIFD.FocalLengthIn35mmFilm], 78 | # 'exposure_mode': exif_data['Exif'][piexif.ExifIFD.ExposureMode], 79 | # 'white_balance': exif_data['Exif'][piexif.ExifIFD.WhiteBalance], 80 | # 'metering_mode': exif_data['Exif'][piexif.ExifIFD.MeteringMode], 81 | # 'flash': exif_data['Exif'][piexif.ExifIFD.Flash], 82 | # 'exposure_program': exif_data['Exif'][piexif.ExifIFD.ExposureProgram], 83 | # 'exif_version': exif_data['Exif'][piexif.ExifIFD.ExifVersion], 84 | # 'date_time_original': exif_data['Exif'][piexif.ExifIFD.DateTimeOriginal], 85 | # 'date_time_digitized': exif_data['Exif'][piexif.ExifIFD.DateTimeDigitized], 86 | # 'components_configuration': exif_data['Exif'][piexif.ExifIFD.ComponentsConfiguration], 87 | # 'compressed_bits_per_pixel': exif_data['Exif'][piexif.ExifIFD.CompressedBitsPerPixel], 88 | # 'shutter_speed_value': exif_data['Exif'][piexif.ExifIFD.ShutterSpeedValue], 89 | # 'aperture_value': exif_data['Exif'][piexif.ExifIFD.ApertureValue], 90 | # 'brightness_value': exif_data['Exif'][piexif.ExifIFD.BrightnessValue], 91 | # 'exposure_bias_value': exif_data['Exif'][piexif.ExifIFD.ExposureBiasValue], 92 | # 'max_aperture_value': exif_data['Exif'][piexif.ExifIFD.MaxApertureValue], 93 | # 'subject_distance': exif_data['Exif'][piexif.ExifIFD.SubjectDistance], 94 | # 'metering_mode': exif_data['Exif'][piexif.ExifIFD.MeteringMode], 95 | # 
'light_source': exif_data['Exif'][piexif.ExifIFD.LightSource], 96 | # 'flash': exif_data['Exif'][piexif.ExifIFD.Flash], 97 | # 'focal_length': exif_data['Exif'][piexif.ExifIFD.FocalLength], 98 | # 'subject_area': exif_data['Exif'][piexif.ExifIFD.SubjectArea], 99 | 100 | } 101 | 102 | print("-----------------------------") 103 | 104 | metadata_list.append(metadata) 105 | except Exception as e: 106 | print("[!] Error processing {}: {}".format(file, str(e))) 107 | 108 | # Convert the metadata list to a pandas dataframe 109 | metadata_df = pd.DataFrame(metadata_list) 110 | 111 | return metadata_df 112 | 113 | 114 | def gps_to_decimal(coord, ref): 115 | """ 116 | Converts GPS coordinates to decimal degrees. 117 | 118 | Args: 119 | coord (tuple): A tuple containing the GPS coordinates. 120 | ref (str): The reference direction (e.g., N, S, E, W). 121 | 122 | Returns: 123 | float: The GPS coordinates in decimal degrees. 124 | """ 125 | decimal = coord[0][0] / coord[0][1] + coord[1][0] / \ 126 | (60 * coord[1][1]) + coord[2][0] / (3600 * coord[2][1]) 127 | if ref in ['S', 'W']: 128 | decimal *= -1 129 | return decimal 130 | 131 | 132 | def get_place_name(latitude, longitude): 133 | location = geolocator.reverse(f"{latitude}, {longitude}", exactly_one=True) 134 | if location is None: 135 | return None 136 | else: 137 | return location.address 138 | 139 | 140 | if __name__ == "__main__": 141 | # Define the directory containing the image files 142 | dir_path = "PATH_TO_IMAGE_FILES" 143 | # Extract the metadata from the image files 144 | metadata_df = extract_metadata(dir_path) 145 | print("[+] Metadata extracted from all image files") 146 | print(metadata_df.columns) 147 | geolocator = Nominatim(user_agent="exif_location") 148 | metadata_df['place_name'] = metadata_df.apply( 149 | lambda row: get_place_name( 150 | row['gps_latitude'], row['gps_longitude']), axis=1) 151 | 152 | # Print the dataframe 153 | print(tabulate(metadata_df, headers="keys", tablefmt="psql")) 154 | -------------------------------------------------------------------------------- /ext_dir_scanner.py: -------------------------------------------------------------------------------- 1 | # Description: This script is used to scan a directory and count the number of files in each directory 2 | # Author: James Sawyer 3 | # Email: githubtools@jamessawyer.co.uk 4 | # Website: http://www.jamessawyer.co.uk/ 5 | 6 | from tabulate import tabulate 7 | import os 8 | import pandas as pd 9 | 10 | # Set the directory to index 11 | directory = "/Users/james/github-archive/repos/" 12 | 13 | # Create a dictionary to store the file counts 14 | file_counts = {} 15 | 16 | # Recursively iterate through all directories and files in the directory 17 | for root, dirs, files in os.walk(directory): 18 | for filename in files: 19 | # Get the file extension 20 | file_extension = os.path.splitext(filename)[1] 21 | 22 | # If the file extension is not in the dictionary, add it with a count 23 | # of 1 24 | if file_extension not in file_counts: 25 | file_counts[file_extension] = 1 26 | # Otherwise, increment the count for that file extension 27 | else: 28 | file_counts[file_extension] += 1 29 | 30 | # Create a dataframe from the file counts dictionary 31 | file_counts_df = pd.DataFrame.from_dict( 32 | file_counts, orient="index", columns=["Count"]) 33 | 34 | # Sort the dataframe by the file counts in descending order 35 | file_counts_df = file_counts_df.sort_values(by="Count", ascending=False) 36 | 37 | # print the entire dataframe using python tabulate 38 | 39 | 
print(tabulate(file_counts_df, headers="keys", tablefmt="psql")) 40 | -------------------------------------------------------------------------------- /fast_portscanner.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import socket 3 | from concurrent.futures import ThreadPoolExecutor 4 | 5 | # Define a function to scan a single host and port 6 | def scan_port(host, port): 7 | try: 8 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 9 | s.settimeout(1) 10 | s.connect((host, port)) 11 | return True 12 | except: 13 | return False 14 | 15 | # Define a list of hosts and ports to scan 16 | hosts = ['google.com', 'facebook.com', 'amazon.com'] 17 | ports = [80, 443, 8080] 18 | 19 | # Scan the hosts and ports using multithreading 20 | results = [] 21 | with ThreadPoolExecutor(max_workers=10) as executor: 22 | for host in hosts: 23 | for port in ports: 24 | results.append({'Host': host, 'Port': port, 'Open': executor.submit(scan_port, host, port).result()}) 25 | 26 | # Create a Pandas DataFrame from the results 27 | df = pd.DataFrame(results) 28 | 29 | # Print the results 30 | print(df) 31 | -------------------------------------------------------------------------------- /file_monitor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2024 James Sawyer 3 | All rights reserved. 4 | 5 | This script and the associated files are private 6 | and confidential property. Unauthorized copying of 7 | this file, via any medium, and the divulgence of any 8 | contained information without express written consent 9 | is strictly prohibited. 10 | 11 | This script is intended for personal use only and should 12 | not be distributed or used in any commercial or public 13 | setting unless otherwise authorized by the copyright holder. 14 | By using this script, you agree to abide by these terms. 15 | 16 | DISCLAIMER: This script is provided 'as is' without warranty 17 | of any kind, either express or implied, including, but not 18 | limited to, the implied warranties of merchantability, 19 | fitness for a particular purpose, or non-infringement. In no 20 | event shall the authors or copyright holders be liable for 21 | any claim, damages, or other liability, whether in an action 22 | of contract, tort or otherwise, arising from, out of, or in 23 | connection with the script or the use or other dealings in 24 | the script. 
25 | """ 26 | 27 | # -*- coding: utf-8 -*- 28 | 29 | import os 30 | import platform 31 | import time 32 | import logging 33 | import inotify.adapters 34 | 35 | logging.basicConfig(filename='file_monitor.log', level=logging.INFO, 36 | format='%(asctime)s - %(levelname)s - %(message)s') 37 | 38 | 39 | def monitor_file_close(directory): 40 | i = inotify.adapters.Inotify() 41 | i.add_watch(directory) 42 | 43 | for event in i.event_gen(yield_nones=False): 44 | (_, type_names, path, filename) = event 45 | if 'IN_CLOSE_WRITE' in type_names: 46 | logging.info(f"File closed: {os.path.join(path, filename)}") 47 | 48 | 49 | def main(): 50 | directory = '/path/to/monitor' 51 | current_os = platform.system() 52 | 53 | logging.info(f"Operating System: {current_os}") 54 | 55 | while True: 56 | monitor_file_close(directory) 57 | time.sleep(1) 58 | 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /my_certbot_renewal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Define variables 4 | INTERFACE="eth0" 5 | PORT="80" 6 | WWW_ROOT="/var/www/html" 7 | HTACCESS_FILE="${WWW_ROOT}/.htaccess" 8 | TEMP_HTACCESS_FILE="${HTACCESS_FILE}.backup" 9 | 10 | # Insert iptables rule to allow traffic on port 80 11 | echo "Adding iptables rule to allow traffic on port ${PORT} through ${INTERFACE}..." 12 | sudo iptables -I INPUT 1 -i ${INTERFACE} -p tcp --dport ${PORT} -j ACCEPT 13 | 14 | # Check if .htaccess file exists and rename it 15 | if [ -f "${HTACCESS_FILE}" ]; then 16 | echo "Renaming .htaccess to ${TEMP_HTACCESS_FILE}..." 17 | sudo mv "${HTACCESS_FILE}" "${TEMP_HTACCESS_FILE}" 18 | else 19 | echo ".htaccess file not found in ${WWW_ROOT}." 20 | fi 21 | 22 | # Run certbot renew 23 | echo "Running certbot renew..." 24 | sudo certbot renew 25 | 26 | # Cleanup: Restore the .htaccess file and remove the iptables rule 27 | 28 | # Remove the iptables rule 29 | echo "Removing iptables rule..." 30 | sudo iptables -D INPUT -i ${INTERFACE} -p tcp --dport ${PORT} -j ACCEPT 31 | 32 | # Restore the .htaccess file 33 | if [ -f "${TEMP_HTACCESS_FILE}" ]; then 34 | echo "Restoring original .htaccess file..." 35 | sudo mv "${TEMP_HTACCESS_FILE}" "${HTACCESS_FILE}" 36 | else 37 | echo "Temporary .htaccess file not found. Nothing to restore." 38 | fi 39 | 40 | echo "Cleanup complete." 41 | -------------------------------------------------------------------------------- /nhs_upvoter.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Streams comments from specified subreddits on Reddit using the Python Reddit API Wrapper (PRAW). 4 | 5 | Analyzes the sentiment of each comment using the VADER sentiment analysis tool. 6 | 7 | Upvotes comments with a positive sentiment. 8 | 9 | Prints a message for each upvoted comment and each comment that was not upvoted. 10 | 11 | Uses a regular expression pattern to match keywords in comments. 
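For example, with the 0.5 VADER compound-score threshold used below, a comment such as
"The NHS staff were brilliant and saved my life" would typically be upvoted, while a
neutral or critical mention of the NHS would be left alone. (Illustrative example only;
the exact score depends on VADER's lexicon.)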
12 | 13 | """ 14 | 15 | import datetime 16 | import re 17 | 18 | import praw 19 | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer 20 | 21 | 22 | def analyze_and_upvote(post): 23 | # Set up the sentiment analyzer 24 | analyzer = SentimentIntensityAnalyzer() 25 | 26 | # Analyze the sentiment of the comment 27 | sentiment = analyzer.polarity_scores(post.body) 28 | 29 | # Check if the post is positive (i.e., has a high compound score) 30 | if sentiment["compound"] > 0.5: 31 | post.upvote() 32 | # Print upvoted post plus a timestamp 33 | print(f"[+]Upvoted post at {datetime.datetime.now()}") 34 | else: 35 | print(f"[-]Post not upvoted: {post.title}") 36 | 37 | 38 | # Create a Reddit instance 39 | reddit = praw.Reddit( 40 | client_id="", 41 | client_secret="", 42 | password="", 43 | user_agent="", 44 | username="", 45 | ) 46 | 47 | # Define the subreddits to stream from 48 | subreddits = ["ukpolitics", "LabourUK", "unitedkingdom", "GreenAndPleasant"] 49 | 50 | # Compile a regular expression pattern to match keywords 51 | keyword_pattern = re.compile( 52 | r"\b(NHS|National Health Service|NHSUK)\b", 53 | re.IGNORECASE, 54 | ) 55 | 56 | # Stream posts from the specified subreddits 57 | for subreddit in subreddits: 58 | subreddit = reddit.subreddit(subreddit) 59 | for comment in subreddit.stream.comments(skip_existing=True): 60 | # Check if the comment contains the keyword 61 | if keyword_pattern.search(comment.body): 62 | # Analyze the sentiment of the comment 63 | analyze_and_upvote(comment) 64 | -------------------------------------------------------------------------------- /numpy_nn.py: -------------------------------------------------------------------------------- 1 | # ============================================================================== 2 | # DISCLAIMER 3 | # ---------------------------------------------------------------------------- 4 | # This script is for educational and research purposes only. 5 | # It is NOT intended for production use, financial decision-making, or medical 6 | # applications. The neural network implementation here is minimal, explanatory, 7 | # and intentionally leaves out many features and safeguards found in real-world 8 | # machine learning frameworks. 9 | # 10 | # You are free to use, modify, or share this code at your own risk. 11 | # THERE ARE NO WARRANTIES OF ANY KIND—EXPRESS OR IMPLIED. 12 | # By using this script, you accept full responsibility for any consequences, 13 | # bugs, losses, or catastrophic quantum singularities that may result. 14 | # 15 | # If you break it, you get to keep both pieces. 16 | # ============================================================================== 17 | 18 | 19 | """ 20 | numpy_nn.py 21 | 22 | A minimal neural network implementation using NumPy for regression on time series data (e.g., stock prices). 23 | - Demonstrates synthetic data generation and real CSV data loading. 24 | - Implements a simple feedforward neural network with one hidden layer. 25 | - Includes manual normalization, training loop, early stopping, and prediction plotting. 26 | - Useful for educational purposes, prototyping, and understanding neural network fundamentals without external ML libraries. 27 | 28 | Typical use cases: 29 | - Predicting next-day prices based on historical OHLC data. 30 | - Experimenting with neural network training and inference using only NumPy. 31 | - Visualizing model predictions and trends. 
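Usage (as the script is written):
    python numpy_nn.py
The synthetic demo runs first; the real-data section then expects a
'backtest_prices.csv' file in the working directory with a 'snapshotTime'
column (format %Y:%m:%d-%H:%M:%S) and 'open', 'high', 'low', 'close'
columns, and exits with an error if the file cannot be loaded.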
32 | 33 | Requirements: 34 | - numpy 35 | - pandas 36 | - matplotlib 37 | """ 38 | 39 | import datetime 40 | import logging 41 | import sys 42 | 43 | import matplotlib.pyplot as plt 44 | import numpy as np 45 | import pandas as pd 46 | 47 | # Configure verbose logging 48 | logging.basicConfig( 49 | level=logging.INFO, 50 | format="%(asctime)s [%(levelname)s] %(message)s", 51 | handlers=[logging.StreamHandler()], 52 | ) 53 | logger = logging.getLogger(__name__) 54 | 55 | # --- Colored Logging Setup --- 56 | 57 | 58 | class ColorFormatter(logging.Formatter): 59 | COLORS = { 60 | logging.DEBUG: "\033[94m", # Blue 61 | logging.INFO: "\033[92m", # Green 62 | logging.WARNING: "\033[93m", # Yellow 63 | logging.ERROR: "\033[91m", # Red 64 | logging.CRITICAL: "\033[95m", # Magenta 65 | } 66 | RESET = "\033[0m" 67 | 68 | def format(self, record): 69 | color = self.COLORS.get(record.levelno, self.RESET) 70 | message = super().format(record) 71 | return f"{color}{message}{self.RESET}" 72 | 73 | 74 | # Replace default handler with colored formatter 75 | for handler in logger.handlers: 76 | handler.setFormatter(ColorFormatter("%(asctime)s [%(levelname)s] %(message)s")) 77 | 78 | # Ignore E731 warning 79 | # flake8: noqa 80 | # -*- coding: utf-8 -*- 81 | # pylint: disable=C0116, W0621, W1203, C0103, C0301, W1201, W0511, E0401, E1101, E0606, E731 82 | 83 | # --- Synthetic Data Generation for Testing --- 84 | np.random.seed(42) # For reproducibility 85 | 86 | 87 | def generate_synthetic_data(n_samples): 88 | """ 89 | Generate synthetic historical price data and targets for demonstration. 90 | Each sample consists of 5 noisy price points and a target (next day's price). 91 | """ 92 | historical_prices = [] 93 | target_prices = [] 94 | for i in range(n_samples): 95 | base_price = 100 + i * 0.1 # Simulate an increasing trend 96 | noise = np.random.normal(0, 0.5, 5) # Add random noise 97 | prices = base_price + noise 98 | target_price = base_price + 0.1 + np.random.normal(0, 0.5) # Next day's price 99 | historical_prices.append(prices) 100 | target_prices.append(target_price) 101 | return np.array(historical_prices), np.array(target_prices) 102 | 103 | 104 | n_samples = 100 105 | historical_prices, target_prices = generate_synthetic_data(n_samples) 106 | 107 | # --- Neural Network Layer Definition --- 108 | 109 | 110 | class Layer: 111 | """ 112 | Represents a fully connected neural network layer. 113 | """ 114 | 115 | def __init__(self, input_size, output_size): 116 | # Xavier/Glorot uniform initialization for weights 117 | limit = np.sqrt(6 / (input_size + output_size)) 118 | self.weights = np.random.uniform(-limit, limit, (output_size, input_size)) 119 | self.biases = np.zeros((output_size, 1)) 120 | logger.debug( 121 | f"Initialized Layer: weights shape {self.weights.shape}, biases shape {self.biases.shape}" 122 | ) 123 | 124 | def forward(self, inputs): 125 | """ 126 | Forward pass: compute weighted sum plus bias. 127 | """ 128 | logger.debug(f"Layer forward: input shape {inputs.shape}") 129 | return np.matmul(self.weights, inputs) + self.biases 130 | 131 | def backward(self, previous_inputs, output_grad, learning_rate): 132 | """ 133 | Backward pass: update weights and biases using gradients. 
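        Concretely, matching the code below: grad_W = output_grad @ previous_inputs.T
        and grad_b is output_grad summed over the batch axis; both are clipped to
        [-1, 1] before the updates W -= learning_rate * grad_W and
        b -= learning_rate * grad_b. The returned value, W.T @ output_grad, is the
        gradient handed back to the previous layer (note it is computed with the
        already-updated weights).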
134 | """ 135 | grad_w = np.matmul(output_grad, previous_inputs.T) 136 | grad_b = np.sum(output_grad, axis=1, keepdims=True) 137 | # Clip gradients to avoid exploding gradients 138 | grad_w = np.clip(grad_w, -1, 1) 139 | grad_b = np.clip(grad_b, -1, 1) 140 | self.weights -= learning_rate * grad_w 141 | self.biases -= learning_rate * grad_b 142 | logger.debug(f"Layer backward: updated weights and biases") 143 | # Return gradient for previous layer 144 | return np.matmul(self.weights.T, output_grad) 145 | 146 | 147 | # --- Activation Functions --- 148 | 149 | 150 | def relu(x): 151 | """ReLU activation (elementwise).""" 152 | return np.maximum(0, x) 153 | 154 | 155 | def relu_derivative(x): 156 | """Derivative of ReLU.""" 157 | return (x > 0).astype(float) 158 | 159 | 160 | # --- Loss Functions --- 161 | 162 | 163 | def mse_loss(pred, true): 164 | """Mean squared error loss.""" 165 | return np.mean((pred - true) ** 2) 166 | 167 | 168 | def mse_loss_derivative(pred, true): 169 | """Derivative of MSE loss.""" 170 | return 2 * (pred - true) / true.size 171 | 172 | 173 | # --- Utility Functions --- 174 | 175 | 176 | def normalize(data): 177 | """Standard score normalization.""" 178 | mean = np.mean(data, axis=0) 179 | std = np.std(data, axis=0) 180 | return (data - mean) / std, mean, std 181 | 182 | 183 | def denormalize(data, mean, std): 184 | """Reverse normalization.""" 185 | return data * std + mean 186 | 187 | 188 | def custom_date_parser(date_str): 189 | """Parse custom date format from CSV.""" 190 | return datetime.datetime.strptime(date_str, "%Y:%m:%d-%H:%M:%S") 191 | 192 | 193 | def early_stopping(validation_losses, patience=10): 194 | """ 195 | Early stopping: stop if validation loss hasn't improved for 'patience' epochs. 196 | """ 197 | if len(validation_losses) > patience and all( 198 | validation_losses[-i] > validation_losses[-(i + 1)] 199 | for i in range(1, patience + 1) 200 | ): 201 | return True 202 | return False 203 | 204 | 205 | def log_message(message): 206 | """Log a message at INFO level.""" 207 | logger.info(message) 208 | 209 | 210 | # --- Main Execution --- 211 | if __name__ == "__main__": 212 | # --- Plain English Explanation for Students --- 213 | # This script demonstrates how to build, train, and evaluate a simple neural network using only NumPy. 214 | # It covers: 215 | # 1. Generating synthetic data to simulate a real-world regression problem. 216 | # 2. Defining a neural network layer from scratch. 217 | # 3. Implementing forward and backward passes (core of neural network training). 218 | # 4. Training the network on both synthetic and real data. 219 | # 5. Using validation data to monitor overfitting. 220 | # 6. Dynamically adjusting learning rate and restoring best weights (self-healing). 221 | # 7. Visualizing predictions and trends. 
222 | logger.info("=== Neural Network Training Exercise ===") 223 | logger.info("Step 1: Generating synthetic data for demonstration.") 224 | # Prepare data for training (synthetic) 225 | inputs = historical_prices.T # shape: (features, samples) 226 | y = target_prices.reshape(1, -1) # shape: (1, samples) 227 | hidden_layer = Layer(inputs.shape[0], 10) 228 | output_layer = Layer(10, 1) 229 | learning_rate = 0.001 230 | 231 | # Training loop for synthetic data 232 | logger.info("Step 3: Training on synthetic data (observe loss every 100 epochs).") 233 | for epoch in range(1000): 234 | hidden_output = hidden_layer.forward(inputs) 235 | activation_output = relu(hidden_output) 236 | output = output_layer.forward(activation_output) 237 | loss = mse_loss(output, y) 238 | loss_grad = mse_loss_derivative(output, y) 239 | indirect_loss = output_layer.backward( 240 | activation_output, loss_grad, learning_rate 241 | ) 242 | hidden_output_grad = indirect_loss * relu_derivative(hidden_output) 243 | hidden_layer.backward(inputs, hidden_output_grad, learning_rate) 244 | if epoch % 100 == 0: 245 | logger.info(f"[Synthetic] Epoch {epoch}, Loss: {loss:.4f}") 246 | 247 | logger.info("Step 4: Predicting on synthetic data and plotting results.") 248 | 249 | def predict(x): 250 | """Run forward pass for prediction.""" 251 | return output_layer.forward(relu(hidden_layer.forward(x))) 252 | 253 | predictions = predict(inputs).flatten() 254 | 255 | # Plot actual vs predicted prices (synthetic) 256 | plt.figure(figsize=(10, 5)) 257 | plt.plot(target_prices, label="Actual Prices", marker="o") 258 | plt.plot(predictions, label="Predicted Prices", marker="x") 259 | plt.xlabel("Sample") 260 | plt.ylabel("Price") 261 | plt.title("Actual vs Predicted Prices (Synthetic Data)") 262 | plt.legend() 263 | plt.show() 264 | 265 | # --- Real Data Section --- 266 | logger.info( 267 | "Step 5: Loading real data from CSV (make sure 'backtest_prices.csv' exists)." 
268 | ) 269 | try: 270 | data = pd.read_csv( 271 | "backtest_prices.csv", 272 | parse_dates=["snapshotTime"], 273 | date_parser=custom_date_parser, 274 | ) 275 | logger.info("Successfully loaded real data.") 276 | except Exception as e: 277 | logger.error(f"Failed to load CSV: {e}") 278 | sys.exit(1) 279 | 280 | # Extract OHLC features and target (next day's close) 281 | historical_prices = data[["open", "high", "low", "close"]].values 282 | target_prices = data["close"].shift(-1).dropna().values 283 | historical_prices = historical_prices[:-1] # Align lengths 284 | 285 | logger.info("Step 6: Extracting features and targets from real data.") 286 | 287 | # Normalize features and targets 288 | historical_prices_norm, mean_prices, std_prices = normalize(historical_prices) 289 | target_prices_norm, mean_target, std_target = normalize( 290 | target_prices.reshape(-1, 1) 291 | ) 292 | 293 | logger.info("Step 7: Normalizing features and targets for stable training.") 294 | 295 | # Prepare data for training (real) 296 | inputs = historical_prices_norm.T 297 | y = target_prices_norm.T 298 | hidden_layer = Layer(inputs.shape[0], 10) 299 | output_layer = Layer(10, 1) 300 | learning_rate = 0.001 301 | min_learning_rate = 1e-6 302 | epochs = 70000 303 | patience = 10 304 | lr_patience = 5 # patience for learning rate reduction 305 | validation_split = 0.2 306 | validation_index = int(inputs.shape[1] * (1 - validation_split)) 307 | train_inputs, val_inputs = ( 308 | inputs[:, :validation_index], 309 | inputs[:, validation_index:], 310 | ) 311 | train_y, val_y = y[:, :validation_index], y[:, validation_index:] 312 | validation_losses = [] 313 | 314 | logger.info( 315 | "Step 9: Starting training on real data with self-healing feedback loop." 316 | ) 317 | logger.info(" - The model will reduce learning rate if validation loss plateaus.") 318 | logger.info(" - Early stopping will occur if no improvement for several epochs.") 319 | logger.info(" - Best weights are restored at the end to avoid overfitting.") 320 | 321 | # --- Self-healing feedback loop variables --- 322 | best_val_loss = float("inf") 323 | best_weights = None 324 | best_biases = None 325 | best_output_weights = None 326 | best_output_biases = None 327 | epochs_since_improvement = 0 328 | epochs_since_lr_reduce = 0 329 | 330 | for epoch in range(epochs): 331 | # Forward pass 332 | hidden_output = hidden_layer.forward(train_inputs) 333 | activation_output = relu(hidden_output) 334 | output = output_layer.forward(activation_output) 335 | loss = mse_loss(output, train_y) 336 | # Backward pass 337 | loss_grad = mse_loss_derivative(output, train_y) 338 | indirect_loss = output_layer.backward( 339 | activation_output, loss_grad, learning_rate 340 | ) 341 | hidden_output_grad = indirect_loss * relu_derivative(hidden_output) 342 | hidden_layer.backward(train_inputs, hidden_output_grad, learning_rate) 343 | # Validation loss 344 | val_hidden_output = hidden_layer.forward(val_inputs) 345 | val_activation_output = relu(val_hidden_output) 346 | val_output = output_layer.forward(val_activation_output) 347 | val_loss = mse_loss(val_output, val_y) 348 | validation_losses.append(val_loss) 349 | 350 | # --- Feedback loop: self-healing fine-tuning --- 351 | if val_loss < best_val_loss - 1e-6: # Significant improvement 352 | best_val_loss = val_loss 353 | # Save best weights and biases 354 | best_weights = hidden_layer.weights.copy() 355 | best_biases = hidden_layer.biases.copy() 356 | best_output_weights = output_layer.weights.copy() 357 | best_output_biases = 
output_layer.biases.copy() 358 | epochs_since_improvement = 0 359 | epochs_since_lr_reduce = 0 360 | else: 361 | epochs_since_improvement += 1 362 | epochs_since_lr_reduce += 1 363 | 364 | # Reduce learning rate if no improvement for lr_patience epochs 365 | if epochs_since_lr_reduce >= lr_patience: 366 | old_lr = learning_rate 367 | learning_rate = max(learning_rate * 0.5, min_learning_rate) 368 | logger.info( 369 | f"Reducing learning rate from {old_lr:.6f} to {learning_rate:.6f} at epoch {epoch}" 370 | ) 371 | epochs_since_lr_reduce = 0 372 | 373 | # Early stopping if no improvement for 'patience' epochs 374 | if epochs_since_improvement >= patience: 375 | logger.info( 376 | f"Early stopping at epoch {epoch} (no val improvement for {patience} epochs)" 377 | ) 378 | break 379 | 380 | if epoch % 10 == 0: 381 | logger.debug( 382 | f"Epoch {epoch}: Training loss {loss:.6f}, Validation loss {val_loss:.6f}" 383 | ) 384 | if epoch % 100 == 0: 385 | log_message( 386 | f"Epoch {epoch}, Loss: {loss:.4f}, Val Loss: {val_loss:.4f}, LR: {learning_rate:.6f}" 387 | ) 388 | 389 | # Restore best weights (self-healing) 390 | if best_weights is not None: 391 | logger.info( 392 | "Restoring best model weights based on validation loss to avoid overfitting." 393 | ) 394 | hidden_layer.weights = best_weights 395 | hidden_layer.biases = best_biases 396 | output_layer.weights = best_output_weights 397 | output_layer.biases = best_output_biases 398 | 399 | logger.info("Step 10: Predicting on all data and denormalizing predictions.") 400 | 401 | def predict(x): 402 | """Run forward pass for prediction.""" 403 | return output_layer.forward(relu(hidden_layer.forward(x))) 404 | 405 | predictions_norm = predict(inputs).flatten() 406 | predictions = denormalize(predictions_norm, mean_target, std_target) 407 | 408 | logger.info("Step 11: Predicting the next few days based on the last known input.") 409 | # Predict next few days' prices based on last input 410 | next_days_predictions_norm = [] 411 | num_days = 5 412 | last_input = historical_prices_norm[-1].reshape(-1, 1) 413 | for _ in range(num_days): 414 | next_day_prediction_norm = predict(last_input) 415 | next_days_predictions_norm.append(next_day_prediction_norm.mean()) 416 | # Roll input and append prediction for next step 417 | last_input = np.roll(last_input, -1) 418 | last_input[-1] = next_day_prediction_norm.mean() 419 | 420 | next_days_predictions = denormalize( 421 | np.array(next_days_predictions_norm), mean_target, std_target 422 | ).flatten() 423 | next_days_means = np.array(next_days_predictions) 424 | next_days_stds = np.std(next_days_means) 425 | 426 | # Generate future dates for plotting 427 | last_date = pd.to_datetime(data["snapshotTime"].iloc[-1]) 428 | future_dates = [last_date + datetime.timedelta(days=i + 1) for i in range(num_days)] 429 | 430 | logger.info( 431 | "Step 12: Plotting actual prices and predicted price range for the next few days." 
432 | ) 433 | # Plot actual prices and predicted next few days' price range 434 | plt.figure(figsize=(12, 6)) 435 | plt.plot( 436 | pd.to_datetime(data["snapshotTime"][:-1]), 437 | target_prices, 438 | label="Actual Prices", 439 | marker="o", 440 | color="black", 441 | ) 442 | for i in range(num_days): 443 | color = "green" if next_days_means[i] > target_prices[-1] else "red" 444 | plt.fill_between( 445 | [future_dates[i] - datetime.timedelta(days=1), future_dates[i]], 446 | next_days_means[i] - next_days_stds, 447 | next_days_means[i] + next_days_stds, 448 | color=color, 449 | alpha=0.3, 450 | label="Predicted Range" if i == 0 else "", 451 | ) 452 | plt.plot(future_dates[i], next_days_means[i], "ro" if color == "red" else "go") 453 | plt.xlabel("Date") 454 | plt.ylabel("Price") 455 | plt.title("Actual Prices and Predicted Next Few Days Price Range") 456 | plt.legend() 457 | plt.xticks(rotation=45) 458 | plt.tight_layout() 459 | plt.show() 460 | 461 | logger.info( 462 | "Step 13: Printing a summary of the predicted trend for the next few days." 463 | ) 464 | # Print a summary of the predicted trend for the next few days 465 | for i, price in enumerate(next_days_means): 466 | trend = "up" if price > target_prices[-1] else "down" 467 | logger.info(f"Day {i + 1}: Predicted mean price = {price:.2f} ({trend})") 468 | 469 | # --- End of Training Exercise --- 470 | logger.info("=== End of Neural Network Training Exercise ===") 471 | -------------------------------------------------------------------------------- /oci_free_tf/README.md: -------------------------------------------------------------------------------- 1 | # OCI Free VM with Volume 2 | 3 | This Terraform project provisions a free-tier Oracle Cloud Infrastructure (OCI) virtual machine (VM) with an attached block volume. It is designed to help users quickly deploy and manage a free-tier VM instance with persistent storage. 4 | 5 | ## Features 6 | - Provisions a free-tier VM instance in OCI. 7 | - Attaches a block volume for persistent storage. 8 | - Configurable through `terraform.tfvars`. 9 | - Uses `cloud_init.yaml` for instance initialization. 10 | 11 | ## Prerequisites 12 | 1. Install [Terraform](https://www.terraform.io/downloads). 13 | 2. Set up an OCI account and generate API keys. 14 | 3. Configure your OCI CLI with the required credentials. 15 | 16 | ## Usage 17 | 18 | 1. **Clone the Repository** 19 | ```bash 20 | git clone 21 | cd oci_free_vm_with_volume 22 | ``` 23 | 24 | 2. **Configure Variables** 25 | Update the `terraform.tfvars` file with your OCI credentials and desired configuration: 26 | ```hcl 27 | tenancy_ocid = "" 28 | user_ocid = "" 29 | region = "" 30 | compartment_ocid = "" 31 | ssh_public_key = "" 32 | ``` 33 | 34 | 3. **Initialize Terraform** 35 | Run the following command to download the required providers: 36 | ```bash 37 | terraform init 38 | ``` 39 | 40 | 4. **Plan the Deployment** 41 | Preview the changes that Terraform will make: 42 | ```bash 43 | terraform plan 44 | ``` 45 | 46 | 5. **Apply the Configuration** 47 | Deploy the resources to OCI: 48 | ```bash 49 | terraform apply 50 | ``` 51 | Confirm the prompt with `yes`. 52 | 53 | 6. **Access the VM** 54 | Use the public IP address of the VM to SSH into it: 55 | ```bash 56 | ssh -i opc@ 57 | ``` 58 | 59 | ## Cleanup 60 | To destroy the resources created by this project, run: 61 | ```bash 62 | terraform destroy 63 | ``` 64 | Confirm the prompt with `yes`. 65 | 66 | ## File Structure 67 | - `main.tf`: Defines the OCI resources to be provisioned. 
68 | - `variables.tf`: Declares input variables for the project. 69 | - `terraform.tfvars`: Contains user-specific variable values. 70 | - `cloud_init.yaml`: Specifies initialization scripts for the VM. 71 | - `.terraform.lock.hcl`: Tracks provider versions. 72 | 73 | ## Notes 74 | - Ensure that your OCI account has sufficient free-tier resources available. 75 | - Modify `cloud_init.yaml` to customize the VM initialization process. 76 | -------------------------------------------------------------------------------- /oci_free_tf/cloud_init.yaml: -------------------------------------------------------------------------------- 1 | 2 | #cloud-config 3 | package_update: true 4 | package_upgrade: true 5 | packages: 6 | - fail2ban 7 | - ufw 8 | - htop 9 | - net-tools 10 | - curl 11 | runcmd: 12 | - mkdir -p /mnt/persistent 13 | - mkfs.ext4 /dev/oracleoci/oraclevdb 14 | - mount /dev/oracleoci/oraclevdb /mnt/persistent 15 | - echo '/dev/oracleoci/oraclevdb /mnt/persistent ext4 defaults,nofail 0 2' >> /etc/fstab 16 | - ufw default deny incoming 17 | - ufw default allow outgoing 18 | - ufw allow 22/tcp 19 | - ufw --force enable 20 | - systemctl enable fail2ban 21 | - sed -i 's/^PermitRootLogin.*/PermitRootLogin no/' /etc/ssh/sshd_config 22 | - sed -i 's/^#PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config 23 | - systemctl restart sshd 24 | final_message: "Setup complete. Persistent volume mounted at /mnt/persistent." 25 | -------------------------------------------------------------------------------- /oci_free_tf/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | oci = { 4 | source = "oracle/oci" 5 | version = ">= 7.10.0" 6 | } 7 | } 8 | } 9 | 10 | provider "oci" { 11 | region = var.region 12 | tenancy_ocid = var.tenancy_ocid 13 | user_ocid = var.user_ocid 14 | fingerprint = var.fingerprint 15 | private_key = file(var.private_key_path) 16 | } 17 | 18 | data "oci_identity_availability_domains" "ads" { 19 | compartment_id = var.tenancy_ocid 20 | } 21 | 22 | data "oci_core_images" "ubuntu_image" { 23 | compartment_id = var.compartment_ocid 24 | operating_system = "Canonical Ubuntu" 25 | operating_system_version = "24.04" 26 | shape = var.shape 27 | } 28 | 29 | output "ubuntu_images" { 30 | value = data.oci_core_images.ubuntu_image.images 31 | } 32 | 33 | resource "oci_core_virtual_network" "vcn" { 34 | compartment_id = var.compartment_ocid 35 | display_name = "free-tier-vcn" 36 | cidr_block = "10.0.0.0/16" 37 | dns_label = "vcn" 38 | } 39 | 40 | resource "oci_core_internet_gateway" "igw" { 41 | compartment_id = var.compartment_ocid 42 | vcn_id = oci_core_virtual_network.vcn.id 43 | display_name = "free-tier-igw" 44 | } 45 | 46 | resource "oci_core_route_table" "rt" { 47 | compartment_id = var.compartment_ocid 48 | vcn_id = oci_core_virtual_network.vcn.id 49 | display_name = "free-tier-rt" 50 | 51 | route_rules { 52 | destination = "0.0.0.0/0" 53 | destination_type = "CIDR_BLOCK" 54 | network_entity_id = oci_core_internet_gateway.igw.id 55 | } 56 | } 57 | 58 | resource "oci_core_security_list" "sec_list" { 59 | compartment_id = var.compartment_ocid 60 | vcn_id = oci_core_virtual_network.vcn.id 61 | display_name = "free-tier-sec-list" 62 | 63 | ingress_security_rules { 64 | protocol = "6" 65 | source = "0.0.0.0/0" 66 | source_type = "CIDR_BLOCK" 67 | 68 | tcp_options { 69 | min = 22 70 | max = 22 71 | } 72 | } 73 | 74 | egress_security_rules { 75 | protocol = "all" 76 | destination = "0.0.0.0/0" 77 
| } 78 | } 79 | 80 | resource "oci_core_subnet" "subnet" { 81 | compartment_id = var.compartment_ocid 82 | vcn_id = oci_core_virtual_network.vcn.id 83 | cidr_block = "10.0.1.0/24" 84 | display_name = "free-tier-subnet" 85 | dns_label = "subnet" 86 | route_table_id = oci_core_route_table.rt.id 87 | security_list_ids = [oci_core_security_list.sec_list.id] 88 | prohibit_public_ip_on_vnic = false 89 | } 90 | 91 | 92 | resource "oci_core_instance" "vm" { 93 | availability_domain = data.oci_identity_availability_domains.ads.availability_domains[0].name 94 | compartment_id = var.compartment_ocid 95 | shape = var.shape 96 | display_name = "free-tier-vm" 97 | 98 | shape_config { 99 | ocpus = var.ocpus 100 | memory_in_gbs = var.memory 101 | } 102 | 103 | create_vnic_details { 104 | subnet_id = oci_core_subnet.subnet.id 105 | assign_public_ip = true 106 | display_name = "primaryvnic" 107 | hostname_label = "freevm" 108 | } 109 | 110 | metadata = { 111 | ssh_authorized_keys = file(var.ssh_public_key_path) 112 | user_data = base64encode(templatefile("cloud_init.yaml", {})) 113 | } 114 | 115 | source_details { 116 | source_type = "image" 117 | source_id = data.oci_core_images.ubuntu_image.images[0].id 118 | boot_volume_size_in_gbs = 50 119 | } 120 | } 121 | 122 | # resource "oci_core_volume" "persistent_volume" { 123 | # availability_domain = data.oci_identity_availability_domains.ads.availability_domains[0].name 124 | # compartment_id = var.compartment_ocid 125 | # display_name = "free-tier-volume" 126 | # size_in_gbs = 50 127 | # } 128 | 129 | # resource "oci_core_volume_attachment" "attach_persistent_volume" { 130 | # instance_id = oci_core_instance.vm.id 131 | # volume_id = oci_core_volume.persistent_volume.id 132 | # attachment_type = "iscsi" 133 | # device = "/dev/oracleoci/oraclevdb" 134 | # } 135 | 136 | output "vm_public_ip" { 137 | value = oci_core_instance.vm.public_ip 138 | } 139 | 140 | // To SSH into the instance, use the following command: 141 | // ssh -i opc@${oci_core_instance.vm.public_ip} 142 | 143 | -------------------------------------------------------------------------------- /oci_free_tf/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # Fill this with your OCI credentials 2 | tenancy_ocid = "" 3 | user_ocid = "" 4 | fingerprint = "" 5 | private_key_path = "" 6 | compartment_ocid = "" 7 | region = "uk-london-1" 8 | ssh_public_key_path = "" 9 | -------------------------------------------------------------------------------- /oci_free_tf/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "tenancy_ocid" {} 3 | variable "user_ocid" {} 4 | variable "fingerprint" {} 5 | variable "private_key_path" {} 6 | variable "compartment_ocid" {} 7 | variable "region" {} 8 | variable "ssh_public_key_path" {} 9 | 10 | # Available shapes for the free tier: 11 | # VM.Standard.E2.1.Micro 12 | # VM.Standard.A1.Flex 13 | 14 | variable "shape" { 15 | default = "VM.Standard.E2.1.Micro" 16 | } 17 | 18 | variable "ocpus" { 19 | default = 1 20 | } 21 | 22 | variable "memory" { 23 | default = 1 24 | } 25 | -------------------------------------------------------------------------------- /optimize_imports.py: -------------------------------------------------------------------------------- 1 | """Copyright (C) 2025 James Sawyer 2 | All rights reserved. 3 | 4 | This script and the associated files are private 5 | and confidential property. 
Unauthorized copying of 6 | this file, via any medium, and the divulgence of any 7 | contained information without express written consent 8 | is strictly prohibited. 9 | 10 | This script is intended for personal use only and should 11 | not be distributed or used in any commercial or public 12 | setting unless otherwise authorized by the copyright holder. 13 | By using this script, you agree to abide by these terms. 14 | 15 | DISCLAIMER: This script is provided 'as is' without warranty 16 | of any kind, either express or implied, including, but not 17 | limited to, the implied warranties of merchantability, 18 | fitness for a particular purpose, or non-infringement. In no 19 | event shall the authors or copyright holders be liable for 20 | any claim, damages, or other liability, whether in an action 21 | of contract, tort or otherwise, arising from, out of, or in 22 | connection with the script or the use or other dealings in 23 | the script. 24 | """ 25 | 26 | # -*- coding: utf-8 -*- 27 | # pylint: disable=C0116, W0621, W1203, C0103, C0301, W1201, W0511, E0401, E1101, E0606 28 | # C0116: Missing function or method docstring 29 | # W0621: Redefining name %r from outer scope (line %s) 30 | # W1203: Use % formatting in logging functions and pass the % parameters as arguments 31 | # C0103: Constant name "%s" doesn't conform to UPPER_CASE naming style 32 | # C0301: Line too long (%s/%s) 33 | # W1201: Specify string format arguments as logging function parameters 34 | # W0511: TODOs 35 | # E1101: Module 'holidays' has no 'US' member (no-member) ... it does, so ignore this 36 | # E0606: possibly-used-before-assignment, ignore this 37 | # UP018: native-literals (UP018) 38 | 39 | """I got annoyed and decided to hack this tool together after seeing too many imports 40 | buried inside functions. It scans Python files and moves these imports to the top of 41 | the module where they belong. 42 | 43 | This tool finds import statements declared inside functions and moves them to the top 44 | of the module. Imports within functions should only be used to prevent circular imports, 45 | for optional dependencies, or if an import is slow. 46 | 47 | Run it on a directory and watch the magic happen, or use --dry-run to see what it would do 48 | without making any changes. 49 | """ 50 | 51 | import argparse 52 | import ast 53 | import importlib 54 | import logging 55 | import os 56 | import sys 57 | from ast import NodeVisitor 58 | from typing import List, Set, Tuple 59 | 60 | # These library imports will not be moved to the top of the module 61 | EXCLUDE_LIBS: Set[str] = { 62 | "urllib.request", 63 | "xlrd", 64 | "xlsxwriter", 65 | } 66 | 67 | EXCLUDE_FILES: Set[str] = { 68 | "__init__.py", 69 | } 70 | 71 | 72 | def setup_logging(verbose: bool = False) -> None: 73 | """Configure logging based on verbosity level. 74 | 75 | Args: 76 | verbose: If True, set log level to DEBUG, otherwise INFO 77 | 78 | """ 79 | log_level = logging.DEBUG if verbose else logging.INFO 80 | log_format = "%(levelname)s: %(message)s" 81 | logging.basicConfig(level=log_level, format=log_format) 82 | 83 | 84 | class ImportVisitor(NodeVisitor): 85 | """Visitor to find import statements inside function definitions.""" 86 | 87 | def __init__(self, file_path: str) -> None: 88 | """Initialize the visitor. 
89 | 90 | Args: 91 | file_path: Path to the file being analyzed 92 | 93 | """ 94 | self.ret = 0 # Return code (0 = success, 1 = issues found) 95 | self.file_path = file_path 96 | self.line_numbers: List[int] = [] 97 | self.imports_found: List[Tuple[int, str, str]] = [] # line_number, import_name, full_line 98 | 99 | def visit_FunctionDef(self, node: ast.FunctionDef) -> None: 100 | """Visit a function definition and look for imports inside it.""" 101 | for sub_node in ast.walk(node): 102 | if self._is_movable_import_from(sub_node): 103 | self._process_import_from(sub_node) 104 | elif self._is_movable_import(sub_node): 105 | self._process_import(sub_node) 106 | 107 | self.generic_visit(node) 108 | 109 | def _is_movable_import_from(self, node: ast.AST) -> bool: 110 | """Check if the node is an ImportFrom that should be moved.""" 111 | return isinstance(node, ast.ImportFrom) and node.module != "__main__" and node.module not in EXCLUDE_LIBS and node.module.split(".")[0] not in EXCLUDE_LIBS 112 | 113 | def _is_movable_import(self, node: ast.AST) -> bool: 114 | """Check if the node is an Import that should be moved.""" 115 | return isinstance(node, ast.Import) 116 | 117 | def _process_import_from(self, node: ast.ImportFrom) -> None: 118 | """Process an ImportFrom node to determine if it should be moved.""" 119 | try: 120 | importlib.import_module(node.module) 121 | except Exception: 122 | # If the module can't be imported, it's probably not a standard library 123 | pass 124 | else: 125 | message = f"{self.file_path}:{node.lineno}:{node.col_offset} {node.end_lineno} standard library import '{node.module}' should be at the top of the file" 126 | logging.info(message) 127 | 128 | # Store import information for later use 129 | import_stmt = f"from {node.module} import {', '.join(n.name for n in node.names)}" 130 | self.imports_found.append((node.lineno, node.module, import_stmt)) 131 | 132 | self.ret = 1 133 | self.line_numbers.append(node.lineno) 134 | 135 | def _process_import(self, node: ast.Import) -> None: 136 | """Process an Import node to determine if it should be moved.""" 137 | for name in node.names: 138 | if name.name == "__main__" or name.name in EXCLUDE_LIBS or name.name.split(".")[0] in EXCLUDE_LIBS: 139 | continue 140 | 141 | try: 142 | importlib.import_module(name.name) 143 | except Exception: 144 | # If the module can't be imported, it's probably not a standard library 145 | pass 146 | else: 147 | message = f"{self.file_path}:{node.lineno}:{node.col_offset} standard library import '{name.name}' should be at the top of the file" 148 | logging.info(message) 149 | 150 | # Store import information for later use 151 | import_stmt = f"import {name.name}" 152 | self.imports_found.append((node.lineno, name.name, import_stmt)) 153 | 154 | self.ret = 1 155 | self.line_numbers.append(node.lineno) 156 | 157 | 158 | def process_file(file_path: str, dry_run: bool = False) -> int: 159 | """Process a single Python file. 
160 | 161 | Args: 162 | file_path: Path to the file to process 163 | dry_run: If True, don't modify files, just report what would change 164 | 165 | Returns: 166 | Integer return code (0 = success, 1 = issues found) 167 | 168 | """ 169 | logging.debug(f"Processing file: {file_path}") 170 | 171 | with open(file_path, encoding="utf-8") as fd: 172 | content = fd.read() 173 | 174 | tree = ast.parse(content) 175 | visitor = ImportVisitor(file_path) 176 | visitor.visit(tree) 177 | 178 | if visitor.line_numbers: 179 | logging.info(f"Found {len(visitor.line_numbers)} imports to move in {file_path}") 180 | 181 | # Sort imports by module name for better organization 182 | visitor.imports_found.sort(key=lambda x: x[1]) 183 | 184 | for _, module_name, import_stmt in visitor.imports_found: 185 | logging.info(f" Will move: {import_stmt}") 186 | 187 | if dry_run: 188 | logging.info("Dry run: No changes made to the file") 189 | return visitor.ret 190 | 191 | content_lines = content.split("\n") 192 | import_lines = [] 193 | 194 | # Make sure to iterate starting from the last element because we are removing lines by index 195 | for line_number in sorted(visitor.line_numbers, reverse=True): 196 | removed_line = content_lines.pop(line_number - 1) 197 | import_lines.append(removed_line) 198 | logging.debug(f"Removed line {line_number}: {removed_line.strip()}") 199 | 200 | # Add the imports at the top of the file 201 | for line in reversed(import_lines): 202 | content_lines.insert(0, line.strip()) 203 | logging.debug(f"Added to top: {line.strip()}") 204 | 205 | if not dry_run: 206 | with open(file_path, encoding="utf-8", mode="w") as fd: 207 | fd.write("\n".join(content_lines)) 208 | logging.info(f"Updated file: {file_path}") 209 | else: 210 | logging.debug(f"No issues found in {file_path}") 211 | 212 | return visitor.ret 213 | 214 | 215 | def main() -> int: 216 | """Main entry point. 
217 | 218 | Returns: 219 | Integer return code (0 = success, 1 = issues found) 220 | 221 | """ 222 | parser = argparse.ArgumentParser( 223 | description="Find and fix imports inside functions by moving them to the top of the file", 224 | ) 225 | parser.add_argument("folder", help="Folder to scan for Python files") 226 | parser.add_argument( 227 | "--dry-run", 228 | "-n", 229 | action="store_true", 230 | help="Don't modify files, just show what would be changed", 231 | ) 232 | parser.add_argument( 233 | "--verbose", 234 | "-v", 235 | action="store_true", 236 | help="Enable verbose logging", 237 | ) 238 | parser.add_argument( 239 | "--quiet", 240 | "-q", 241 | action="store_true", 242 | help="Suppress all output except errors", 243 | ) 244 | args = parser.parse_args() 245 | 246 | # Configure logging based on arguments 247 | if args.quiet: 248 | logging.basicConfig(level=logging.ERROR) 249 | else: 250 | setup_logging(args.verbose) 251 | 252 | logging.info(f"Scanning folder: {args.folder}") 253 | if args.dry_run: 254 | logging.info("Running in dry-run mode - no files will be modified") 255 | 256 | ret = 0 257 | files_processed = 0 258 | files_with_issues = 0 259 | 260 | for subdir, _, files in os.walk(args.folder): 261 | for file_ in files: 262 | if not file_.endswith(".py") or file_ in EXCLUDE_FILES: 263 | continue 264 | 265 | file_path = os.path.join(subdir, file_) 266 | files_processed += 1 267 | 268 | file_ret = process_file(file_path, args.dry_run) 269 | ret |= file_ret 270 | 271 | if file_ret: 272 | files_with_issues += 1 273 | 274 | # Summary at the end 275 | logging.info(f"Summary: Processed {files_processed} Python files") 276 | logging.info(f"Found issues in {files_with_issues} files") 277 | 278 | if args.dry_run and files_with_issues > 0: 279 | logging.info("Re-run without --dry-run to apply the changes") 280 | 281 | return ret 282 | 283 | 284 | if __name__ == "__main__": 285 | try: 286 | sys.exit(main()) 287 | except KeyboardInterrupt: 288 | logging.info("Process interrupted by user") 289 | sys.exit(130) 290 | except Exception as e: 291 | logging.error(f"An unexpected error occurred: {e}", exc_info=True) 292 | sys.exit(1) 293 | -------------------------------------------------------------------------------- /privacy_bluetooth.py: -------------------------------------------------------------------------------- 1 | """ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 2 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 3 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND 4 | NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE 5 | DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, 6 | WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 7 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 8 | SOFTWARE. """ 9 | 10 | # -*- coding: utf-8 -*- 11 | # pylint: disable=C0116, W0621, W1203, C0103, C0301, W1201 12 | # C0116: Missing function or method docstring 13 | # W0621: Redefining name %r from outer scope (line %s) 14 | # W1203: Use % formatting in logging functions and pass the % parameters as arguments 15 | # C0103: Constant name "%s" doesn't conform to UPPER_CASE naming style 16 | # C0301: Line too long (%s/%s) 17 | # W1201: Specify string format arguments as logging function parameters 18 | 19 | """This script scans for nearby Bluetooth devices, storing information about them in a dataframe. 
20 | It calculates the distance to each device based on its RSSI (signal strength) and updates the dataframe as new devices are discovered 21 | or existing devices are seen again. 22 | It then prints the dataframe to the screen, sorted by distance from the device running the script. 23 | """ 24 | 25 | # Description: Scan for nearby Bluetooth devices and print their information to the screen 26 | # Author: James Sawyer 27 | # Email: githubtools@jamessawyer.co.uk 28 | # Website: http://www.jamessawyer.co.uk/ 29 | 30 | import asyncio 31 | import datetime 32 | 33 | import pandas as pd 34 | from bleak import BleakScanner 35 | from tabulate import tabulate 36 | 37 | # create an empty DataFrame to store device information 38 | df = pd.DataFrame( 39 | columns=[ 40 | "name", 41 | "rssi", 42 | "address", 43 | "last_seen", 44 | "count", 45 | "distance"]) 46 | 47 | 48 | def calc_distance(rssi, rssi_0=0, n=2): 49 | # rough distance calculation based on RSSI 50 | distance = 10 ** ((rssi_0 - rssi) / (10 * n)) 51 | # return the distance in whole meters 52 | return int(distance) 53 | 54 | 55 | async def main(): 56 | 57 | global df 58 | 59 | while True: 60 | try: 61 | # scan for BLE devices and update the DataFrame 62 | devices = await BleakScanner.discover() 63 | 64 | for d in devices: 65 | # print(d.name, d.rssi, d.address) 66 | if d.address not in df.index: 67 | # add it to the DataFrame 68 | df = pd.concat([df, 69 | pd.DataFrame({"name": d.name, 70 | "rssi": d.rssi, 71 | "address": d.address, 72 | "last_seen": datetime.datetime.now()}, 73 | index=[d.address])], 74 | ignore_index=False) 75 | df.loc[d.address, "distance"] = calc_distance(d.rssi) 76 | df.loc[d.address, "count"] = 1 77 | else: 78 | df.loc[d.address, "last_seen"] = datetime.datetime.now() 79 | df.loc[d.address, "rssi"] = d.rssi 80 | df.loc[d.address, "distance"] = calc_distance(d.rssi) 81 | # increment the count 82 | df.loc[d.address, "count"] += 1 83 | 84 | # remove stale devices (i.e. those that haven't been seen in the last 85 | # 30 seconds) 86 | # df = df[df["last_seen"] > datetime.datetime.now() - 87 | # datetime.timedelta(seconds=30)] 88 | 89 | # adjust last_seen to be relative to now 90 | df["last_seen"] = datetime.datetime.now() - df["last_seen"] 91 | 92 | # sort the dataframe by distance lowest to highest 93 | 94 | df = df.sort_values(by="distance") 95 | 96 | # print the DataFrame using the tabulate library 97 | print(tabulate(df, headers="keys", tablefmt="psql")) 98 | 99 | except Exception as e: 100 | print(e) 101 | pass 102 | 103 | if __name__ == "__main__": 104 | asyncio.run(main()) 105 | -------------------------------------------------------------------------------- /proxmox_recovery_scripts_reddit.md: -------------------------------------------------------------------------------- 1 | **Disclaimer:** 2 | These scripts are provided *as-is* and carry **no warranty**. Use at your own risk—always test in a non-production environment first. They directly manipulate your Proxmox cluster database (`pmxcfs`) and overwrite configuration files. **Make backups** of `/etc/pve` and your VMs/CTs before running them. I’m not responsible for any data loss or downtime they may cause. 3 | 4 | --- 5 | 6 | **Background** 7 | I recently resurrected an old Proxmox node in my lab that had failed months ago—one of those maintenance tasks I’d been avoiding forever. 
Unfortunately, when I re-joined it to my two-node cluster, none of my running VMs or containers showed up in the GUI anymore, even though I could still SSH into them and verify they were running. The underlying disk images and LXC rootfses were all intact; it was just the cluster metadata that had vanished. 8 | 9 | --- 10 | 11 | **What Happened** 12 | - **Cluster database lost** 13 | My `/etc/pve/nodes/.../{lxc,qemu-server}` directories were empty because pmxcfs snapshots had never been enabled. 14 | - **Containers still running** 15 | LXC stores live configs under `/var/lib/lxc//config`, so the containers continued to run—but Proxmox had no metadata to show them. 16 | - **VMs still running** 17 | QEMU kept its processes alive with pidfiles in `/var/run/qemu-server`, but without the old `/etc/pve/nodes/.../qemu-server/*.conf`, the GUI had nothing to list. 18 | 19 | --- 20 | 21 | **Solution Overview** 22 | 1. **For LXC**: Scan `/var/lib/lxc//config` (and fall back to each container’s `/etc/hostname`), extract hostname, memory, arch, rootfs, net0, and write a new `.conf` into `/etc/pve/nodes//lxc/`. 23 | 2. **For QEMU/KVM**: Scan running `qemu-system-*` processes via their pidfiles, parse out each VM’s ID and name from the command-line, then write new `.conf` files into `/etc/pve/nodes//qemu-server/`. 24 | 3. **Reload**: Restart `pve-cluster` so pmxcfs picks up the rebuilt configs, and voilà—everything reappears in the web UI. 25 | 26 | --- 27 | 28 | ## Recovery Script: LXC Containers 29 | 30 | ```bash 31 | #!/usr/bin/env bash 32 | # 33 | # recover-lxc-configs.sh 34 | # Rebuild Proxmox LXC container definitions from live /var/lib/lxc data. 35 | # 36 | # DISCLAIMER: Overwrites /etc/pve cluster metadata. Backup first! 37 | # 38 | 39 | set -euo pipefail 40 | 41 | # Discover the real Proxmox node and config path 42 | PVE_LXC_DIR=$(readlink -f /etc/pve/lxc) 43 | PVE_NODE=$(basename "$(dirname "$PVE_LXC_DIR")") 44 | mkdir -p "$PVE_LXC_DIR" 45 | 46 | echo "Recovering LXC CT definitions into $PVE_LXC_DIR (node: $PVE_NODE)" 47 | 48 | for CTPATH in /var/lib/lxc/[0-9]*; do 49 | CTID=$(basename "$CTPATH") 50 | LIVE_CFG="$CTPATH/config" 51 | OUT_CFG="$PVE_LXC_DIR/${CTID}.conf" 52 | 53 | printf " - CT %-4s: " "$CTID" 54 | 55 | # Initialize 56 | hostname=""; arch=""; memory_mb=""; rootfs=""; net0="" 57 | 58 | if [[ -f "$LIVE_CFG" ]]; then 59 | # Pull settings from live config (gracefully handle missing fields) 60 | hostname=$(grep -E '^lxc\.uts\.name' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 61 | arch=$(grep -E '^lxc\.arch' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 62 | 63 | mem_bytes=$(grep -E '^lxc\.cgroup\.memory\.limit_in_bytes' "$LIVE_CFG" 2>/dev/null \ 64 | | cut -d= -f2) || true 65 | (( memory_mb = mem_bytes / 1024 / 1024 )) 2>/dev/null || true 66 | 67 | rootfs=$(grep -m1 -E '^lxc\.rootfs\.path' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 68 | 69 | link=$(grep -m1 '^lxc.net.0.link' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 70 | hw=$(grep -m1 '^lxc.net.0.hwaddr' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 71 | [[ -n $link ]] && net0="bridge=${link},hwaddr=${hw}" 72 | 73 | status="full" 74 | else 75 | # Fallback: recover only the hostname from the container’s /etc/hostname 76 | if [[ -f "$CTPATH/rootfs/etc/hostname" ]]; then 77 | hostname=$(<"$CTPATH/rootfs/etc/hostname") 78 | status="name-only" 79 | else 80 | status="skipped" 81 | fi 82 | fi 83 | 84 | # Write the new .conf if we recovered a hostname 85 | if [[ -n "$hostname" ]]; then 86 | { 87 | echo "vmid: $CTID" 88 | echo 
"hostname: $hostname" 89 | [[ -n "$arch" ]] && echo "arch: $arch" 90 | [[ -n "$memory_mb" ]] && echo "memory: $memory_mb" 91 | [[ -n "$rootfs" ]] && echo "rootfs: $rootfs" 92 | [[ -n "$net0" ]] && echo "net0: $net0" 93 | } > "$OUT_CFG" 94 | fi 95 | 96 | echo "$status" 97 | done 98 | 99 | echo; echo "Restarting pve-cluster…" 100 | systemctl restart pve-cluster 101 | 102 | echo; echo "Done. Verify with:" 103 | echo " pvesh get /nodes/$PVE_NODE/lxc" 104 | echo "Then refresh the Proxmox GUI—your CTs should now appear by name." 105 | ``` 106 | 107 | --- 108 | 109 | ## Recovery Script: QEMU/KVM VMs 110 | 111 | ```bash 112 | #!/usr/bin/env bash 113 | # 114 | # recover-qemu-configs.sh 115 | # Rebuild Proxmox QEMU VM definitions from running qemu-system processes. 116 | # 117 | # DISCLAIMER: Overwrites /etc/pve cluster metadata. Backup first! 118 | # 119 | 120 | set -euo pipefail 121 | 122 | # Discover where Proxmox wants QEMU configs 123 | PVE_QEMU_DIR=$(readlink -f /etc/pve/qemu-server) 124 | PVE_NODE=$(basename "$(dirname "$PVE_QEMU_DIR")") 125 | mkdir -p "$PVE_QEMU_DIR" 126 | 127 | echo "Scanning running QEMU processes for VMIDs & names…" 128 | declare -A vm_names 129 | 130 | for pf in /var/run/qemu-server/*.pid; do 131 | base=$(basename "$pf") 132 | if [[ $base =~ ^([0-9]+)\.pid$ ]]; then 133 | vmid=${BASH_REMATCH[1]} 134 | pid=$(<"$pf") 135 | [[ -d /proc/$pid ]] || continue 136 | cmd=$(tr '\0' ' ' "$out" 164 | done 165 | 166 | echo; echo "Restarting pve-cluster…" 167 | systemctl restart pve-cluster 168 | 169 | echo; echo "Done! Verify with:" 170 | echo " pvesh get /nodes/$PVE_NODE/qemu" 171 | echo "Then refresh the Proxmox GUI—your VMs should now appear by name." 172 | ``` 173 | 174 | --- 175 | 176 | **How to Use** 177 | 1. Copy each script to your Proxmox node (e.g. under `/root/`), then: 178 | ```bash 179 | chmod +x /root/recover-lxc-configs.sh /root/recover-qemu-configs.sh 180 | ``` 181 | 2. **Backup** `/etc/pve` (and optionally `/var/lib/pve-cluster/config.db`) before proceeding. 182 | 3. Run them: 183 | ```bash 184 | /root/recover-lxc-configs.sh 185 | /root/recover-qemu-configs.sh 186 | ``` 187 | 4. Confirm recovery via: 188 | ```bash 189 | pvesh get /nodes/$(hostname)/lxc 190 | pvesh get /nodes/$(hostname)/qemu 191 | ``` 192 | 5. Refresh your Proxmox web UI—your containers and VMs should now reappear, correctly named. 193 | 194 | --- 195 | 196 | **Final Thoughts** 197 | These scripts saved me hours of manual work when my lab cluster’s metadata vanished. If you ever face a similar “everything’s running but nothing shows up in the GUI” scenario, give them a try—just remember the **backup first** disclaimer! 198 | -------------------------------------------------------------------------------- /recover-lxc-configs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Author: James Sawyer 4 | # Disclaimer: Provided as-is. No warranty whatsoever. No liability for any damages. Use at your own risk. 
5 | # 6 | set -euo pipefail 7 | 8 | # Discover the real Proxmox node and config path 9 | PVE_LXC_DIR=$(readlink -f /etc/pve/lxc) 10 | PVE_NODE=$(basename "$(dirname "$PVE_LXC_DIR")") 11 | mkdir -p "$PVE_LXC_DIR" 12 | 13 | echo "Recovering LXC CT definitions into $PVE_LXC_DIR (node: $PVE_NODE)" 14 | for CTPATH in /var/lib/lxc/[0-9]*; do 15 | CTID=$(basename "$CTPATH") 16 | LIVE_CFG="$CTPATH/config" 17 | OUT_CFG="$PVE_LXC_DIR/${CTID}.conf" 18 | 19 | printf " - CT %-4s: " "$CTID" 20 | 21 | # Initialize 22 | hostname=""; arch=""; memory_mb=""; rootfs=""; net0="" 23 | 24 | if [[ -f "$LIVE_CFG" ]]; then 25 | hostname=$(grep -E '^lxc\.uts\.name' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 26 | arch=$(grep -E '^lxc\.arch' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 27 | 28 | mem_bytes=$(grep -E '^lxc\.cgroup\.memory\.limit_in_bytes' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 29 | (( memory_mb = mem_bytes / 1024 / 1024 )) 2>/dev/null || true 30 | 31 | rootfs=$(grep -m1 -E '^lxc\.rootfs\.path' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 32 | 33 | link=$(grep -m1 '^lxc.net.0.link' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 34 | hw=$(grep -m1 '^lxc.net.0.hwaddr' "$LIVE_CFG" 2>/dev/null | cut -d= -f2) || true 35 | [[ -n $link ]] && net0="bridge=${link},hwaddr=${hw}" 36 | 37 | status="full" 38 | else 39 | if [[ -f "$CTPATH/rootfs/etc/hostname" ]]; then 40 | hostname=$(<"$CTPATH/rootfs/etc/hostname") 41 | status="name-only" 42 | else 43 | status="skipped" 44 | fi 45 | fi 46 | 47 | if [[ -n "$hostname" ]]; then 48 | { 49 | echo "vmid: $CTID" 50 | echo "hostname: $hostname" 51 | [[ -n "$arch" ]] && echo "arch: $arch" 52 | [[ -n "$memory_mb" ]] && echo "memory: $memory_mb" 53 | [[ -n "$rootfs" ]] && echo "rootfs: $rootfs" 54 | [[ -n "$net0" ]] && echo "net0: $net0" 55 | } > "$OUT_CFG" 56 | fi 57 | 58 | echo "$status" 59 | done 60 | 61 | echo 62 | echo "Restarting pve-cluster…" 63 | systemctl restart pve-cluster 64 | 65 | echo 66 | echo "Done. Verify with:" 67 | echo " pvesh get /nodes/$PVE_NODE/lxc" 68 | echo "Then refresh the Proxmox GUI—your CTs should now appear by name." 69 | -------------------------------------------------------------------------------- /recover-qemu-configs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Author: James Sawyer 4 | # Disclaimer: Provided as-is. No warranty whatsoever. No liability for any damages. Use at your own risk. 5 | # 6 | set -euo pipefail 7 | 8 | # Discover where Proxmox wants QEMU configs 9 | PVE_QEMU_DIR=$(readlink -f /etc/pve/qemu-server) 10 | PVE_NODE=$(basename "$(dirname "$PVE_QEMU_DIR")") 11 | mkdir -p "$PVE_QEMU_DIR" 12 | 13 | echo "Scanning running QEMU processes for VMIDs & names…" 14 | declare -A vm_names 15 | 16 | for pf in /var/run/qemu-server/*.pid; do 17 | base=$(basename "$pf") 18 | if [[ $base =~ ^([0-9]+)\.pid$ ]]; then 19 | vmid=${BASH_REMATCH[1]} 20 | pid=$(<"$pf") 21 | [[ -d /proc/$pid ]] || continue 22 | cmd=$(tr '\0' ' ' "$out" 49 | done 50 | 51 | echo 52 | echo "Restarting pve-cluster…" 53 | systemctl restart pve-cluster 54 | 55 | echo 56 | echo "Done! Verify with:" 57 | echo " pvesh get /nodes/$PVE_NODE/qemu" 58 | echo "Then refresh the Proxmox GUI—your VMs should now appear by name." 
59 | -------------------------------------------------------------------------------- /reddit_shadowban_check.py: -------------------------------------------------------------------------------- 1 | """ 2 | DISCLAIMER: 3 | This script is provided for educational purposes only. 4 | It attempts to check if a given Reddit user might be shadowbanned by analyzing publicly available information on Reddit's old interface. The method used is based on common indicators and should not be considered foolproof or entirely reliable. Shadowban detection can be complex and subject to Reddit's internal mechanisms, which are not publicly disclosed. 5 | Furthermore, this script simulates web requests with randomized headers to mimic different user agents and preferences. While this approach is common in web scraping and testing, users should be mindful of Reddit's terms of service and guidelines regarding automated access and user privacy. 6 | The creators of this script cannot guarantee its accuracy, effectiveness, or compliance with Reddit's policies at all times. Users should use this tool responsibly and at their own risk. No responsibility is assumed for any consequences directly or indirectly related to any action or inaction you take based on the information, services, or other material provided. 7 | It's also important to respect privacy and not use this tool for any form of harassment or violation of Reddit's community standards. 8 | USE AT YOUR OWN RISK. 9 | """ 10 | 11 | import logging 12 | import random 13 | import sys 14 | 15 | import requests 16 | from bs4 import BeautifulSoup 17 | from fake_useragent import UserAgent 18 | 19 | # Setup logging 20 | logging.basicConfig( 21 | level=logging.INFO, 22 | format='%(asctime)s - %(levelname)s - %(message)s') 23 | 24 | # Function to generate randomized headers 25 | 26 | def generate_headers(): 27 | user_agent = UserAgent().random 28 | accept_language = random.choice( 29 | ['en-US,en;q=0.9', 'en;q=0.8', 'en-US;q=0.7,en;q=0.3']) 30 | accept_encoding = random.choice(['gzip, deflate, br', 'identity']) 31 | headers = { 32 | 'User-Agent': user_agent, 33 | 'Accept-Language': accept_language, 34 | 'Accept-Encoding': accept_encoding, 35 | 'DNT': '1', # Do Not Track requests header 36 | 'Upgrade-Insecure-Requests': '1' 37 | } 38 | return headers 39 | 40 | def check_shadowban(username): 41 | url = f"https://old.reddit.com/user/{username}" 42 | headers = generate_headers() 43 | 44 | logging.info(f"Checking user: {username}") 45 | 46 | response = requests.get(url, headers=headers) 47 | 48 | # debug, what is the status code and the text 49 | # print(response.status_code) 50 | # print(response.text) 51 | 52 | if response.status_code == 404: 53 | logging.info("Successfully retrieved the page (404 Not Found) Correct Page for shadowban check.") 54 | if "the page you requested does not exist" in response.text and f"u/{username}: page not found" in response.text: 55 | logging.info(f"Forbidden: The user '{username}' is potentially shadowbanned.") 56 | return True 57 | elif response.status_code == 200: 58 | logging.info("Successfully retrieved the page (200 OK)") 59 | logging.info(f"Success: The user '{username}' does not appear to be shadowbanned.") 60 | return False 61 | else: 62 | logging.warning(f"Failed to retrieve the page, status code: {response.status_code}") 63 | return False 64 | 65 | return False 66 | 67 | disclaimer = """ 68 | DISCLAIMER: 69 | -=-=-=-=-=- 70 | This script is provided for educational purposes only. 
71 | It attempts to check if a given Reddit user might be shadowbanned by analyzing publicly available information on Reddit's old interface. The method used is based on common indicators and should not be considered foolproof or entirely reliable. Shadowban detection can be complex and subject to Reddit's internal mechanisms, which are not publicly disclosed. 72 | Furthermore, this script simulates web requests with randomized headers to mimic different user agents and preferences. While this approach is common in web scraping and testing, users should be mindful of Reddit's terms of service and guidelines regarding automated access and user privacy. 73 | The creators of this script cannot guarantee its accuracy, effectiveness, or compliance with Reddit's policies at all times. Users should use this tool responsibly and at their own risk. No responsibility is assumed for any consequences directly or indirectly related to any action or inaction you take based on the information, services, or other material provided. 74 | It's also important to respect privacy and not use this tool for any form of harassment or violation of Reddit's community standards. 75 | -=-=-=-=-=- 76 | USE AT YOUR OWN RISK. 77 | """ 78 | 79 | # Function to display disclaimer and require user agreement 80 | def require_agreement(): 81 | print(disclaimer) 82 | agreement = input("Type 'I AGREE' to accept the terms and proceed: ").strip() 83 | if agreement.upper() != "I AGREE": 84 | logging.info("You did not agree to the disclaimer. Exiting.") 85 | sys.exit() 86 | 87 | 88 | if __name__ == "__main__": 89 | require_agreement() 90 | logging.info( 91 | "Warning: You need to be logged out from Reddit for this check to work correctly.") 92 | logged_out = input("Are you logged out? (Y/N): ").strip().upper() 93 | 94 | if logged_out == "Y": 95 | username = input("Enter the Reddit username to check: ").strip() 96 | if check_shadowban(username): 97 | logging.info(f"The user '{username}' is potentially shadowbanned.") 98 | else: 99 | logging.info( 100 | f"The user '{username}' does not appear to be shadowbanned.") 101 | else: 102 | logging.error("Please log out from Reddit and try again.") 103 | -------------------------------------------------------------------------------- /reddit_spam_detection.py: -------------------------------------------------------------------------------- 1 | # Description: Simple script to detect spam comments on Reddit 2 | # Author: James Sawyer 3 | # Email: githubtools@jamessawyer.co.uk 4 | # Website: http://www.jamessawyer.co.uk/ 5 | 6 | """ Use of this tool is at your own risk. 7 | I am not responsible for any damage or data loss that may result from using this tool. 8 | Please make sure to back up your data before using this tool. 
""" 9 | 10 | 11 | import datetime 12 | 13 | import pandas as pd 14 | import praw 15 | 16 | # Create a Reddit instance 17 | reddit = praw.Reddit( 18 | client_id="", 19 | client_secret="", 20 | password="", 21 | user_agent="", 22 | username="" 23 | ) 24 | 25 | spam_df = pd.DataFrame(columns=["author", "comment_body", "subreddit"]) 26 | 27 | # Set the maximum number of comments to check 28 | max_comments = 10000 29 | SPAM_THRESHOLD = 3 30 | 31 | # Create a dataframe to store the comments 32 | df = pd.DataFrame(columns=["comment_id", "comment_hash"]) 33 | 34 | # Start streaming comments from the API 35 | for comment in reddit.subreddit("all").stream.comments(): 36 | # Check if the maximum number of comments has been reached and start again 37 | 38 | if len(df) >= max_comments: 39 | df = pd.DataFrame(columns=["comment_id", "comment_hash"]) 40 | print("[-] Resetting Dataframe") 41 | 42 | # Hash the comment text and add it to the dataframe 43 | comment_hash = hash(comment.body) 44 | 45 | # Print a debug with the comment hash include a timestamp 46 | 47 | # print( 48 | # f"[+] Comment Hash: {comment_hash} at {datetime.datetime.now()}" 49 | # ) 50 | 51 | df = pd.concat([df, 52 | pd.DataFrame({"comment_id": comment.id, 53 | "comment_hash": comment_hash}, 54 | index=[0])], 55 | ignore_index=True) 56 | 57 | # Check if the comment hash has been seen more than x times 58 | if df["comment_hash"].value_counts()[comment_hash] > SPAM_THRESHOLD: 59 | # Print the potential spam comment 60 | # print(f"Comment by {comment.author}: {comment.body}") 61 | # print(f"Comment on {comment.subreddit}") 62 | # print("-------------------------------------------------") 63 | # add the author, comment body and and subreddit to the spam dataframe 64 | spam_df = pd.concat([spam_df, 65 | pd.DataFrame({"author": comment.author, 66 | "comment_body": comment.body, 67 | "subreddit": comment.subreddit}, 68 | index=[0])], 69 | ignore_index=True) 70 | # using tabulate to print the dataframe 71 | from tabulate import tabulate 72 | print(tabulate(spam_df, headers="keys", tablefmt="psql")) -------------------------------------------------------------------------------- /remove_metadata.py: -------------------------------------------------------------------------------- 1 | """ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 2 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 3 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND 4 | NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE 5 | DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, 6 | WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 7 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 8 | SOFTWARE. 
""" 9 | 10 | # -*- coding: utf-8 -*- 11 | # pylint: disable=C0116, W0621, W1203, C0103, C0301, W1201 12 | # C0116: Missing function or method docstring 13 | # W0621: Redefining name %r from outer scope (line %s) 14 | # W1203: Use % formatting in logging functions and pass the % parameters as arguments 15 | # C0103: Constant name "%s" doesn't conform to UPPER_CASE naming style 16 | # C0301: Line too long (%s/%s) 17 | # W1201: Specify string format arguments as logging function parameters 18 | 19 | import logging 20 | import os 21 | from pathlib import Path 22 | from typing import Union 23 | 24 | from PIL import Image 25 | from PyPDF2 import PdfFileReader, PdfFileWriter 26 | 27 | # Configure logging 28 | logging.basicConfig( 29 | level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" 30 | ) 31 | 32 | 33 | def remove_metadata(file_path: Union[str, Path]): 34 | """ 35 | Remove metadata from the given file. 36 | """ 37 | try: 38 | extension = file_path.suffix.lower() 39 | 40 | if extension == ".pdf": 41 | # Remove metadata from PDF files 42 | pdf_reader = PdfFileReader(str(file_path)) 43 | pdf_writer = PdfFileWriter() 44 | 45 | for page_num in range(pdf_reader.numPages): 46 | pdf_writer.add_page(pdf_reader.getPage(page_num)) 47 | 48 | with open(str(file_path), "wb") as output_pdf: 49 | pdf_writer.write(output_pdf) 50 | 51 | elif extension in [".jpg", ".jpeg", ".jpe", ".jfif", ".mp4"]: 52 | # Remove metadata from JPEG files 53 | image = Image.open(str(file_path)) 54 | image_info = image.info 55 | logging.info(f"Metadata in {file_path}: {image_info}") 56 | data = list(image.getdata()) 57 | image_without_exif = Image.new(image.mode, image.size) 58 | image_without_exif.putdata(data) 59 | image_without_exif.save(str(file_path)) 60 | 61 | else: 62 | logging.warning(f"Metadata removal is not supported for {extension} files.") 63 | 64 | except Exception as e: 65 | logging.error(f"Failed to remove metadata from {file_path}: {e}") 66 | 67 | 68 | def main(directory_path: Union[str, Path]): 69 | """ 70 | Iterate through files in the given directory and remove metadata. 71 | """ 72 | try: 73 | directory = Path(directory_path) 74 | 75 | if directory.is_dir(): 76 | for file in directory.iterdir(): 77 | if file.is_file(): 78 | logging.info(f"Removing metadata from {file}") 79 | remove_metadata(file) 80 | else: 81 | logging.error("Invalid directory path provided.") 82 | 83 | except Exception as e: 84 | logging.error(f"An error occurred: {e}") 85 | 86 | 87 | if __name__ == "__main__": 88 | directory_path = input("Enter the directory path: ") 89 | main(directory_path) 90 | -------------------------------------------------------------------------------- /scrub_git.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the current directory is a git repository 4 | if [ ! -d ".git" ]; then 5 | echo "Error: Current directory is not a git repository." 6 | exit 1 7 | fi 8 | 9 | # Verify that the script is being run intentionally 10 | read -p "This will DESTROY your git history and cannot be undone. Are you sure? (y/n) " -n 1 -r 11 | echo # (optional) move to a new line 12 | if [[ $REPLY =~ ^[Yy]$ ]] 13 | then 14 | # Fetch the name of the current branch 15 | current_branch=$(git rev-parse --abbrev-ref HEAD) 16 | if [ -z "$current_branch" ]; then 17 | echo "Error: Failed to determine the current branch." 
18 | exit 1 19 | fi 20 | 21 | # Create a fresh temporary branch 22 | git checkout --orphan temp_branch 23 | 24 | # Add all the files 25 | git add -A 26 | 27 | # Commit the changes 28 | git commit -am "Initial commit" 29 | 30 | # Delete the old branch 31 | git branch -D "$current_branch" 32 | 33 | # Rename the temporary branch to the original branch name 34 | git branch -m "$current_branch" 35 | 36 | # Force update the repository to overwrite the history 37 | git push -f origin "$current_branch" 38 | 39 | # Optionally, prune the reflog and run garbage collection to free up space 40 | git reflog expire --expire=now --all 41 | git gc --prune=now 42 | 43 | echo "Git history has been successfully removed." 44 | else 45 | echo "Script aborted." 46 | fi 47 | -------------------------------------------------------------------------------- /setup_docker.sh: -------------------------------------------------------------------------------- 1 | sudo apt-get update 2 | 3 | sudo apt-get install apt-transport-https ca-certificates curl software-properties-common 4 | 5 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 6 | 7 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable" 8 | 9 | sudo apt-get update 10 | 11 | sudo apt-get install docker-ce 12 | -------------------------------------------------------------------------------- /subnet_calculator.py: -------------------------------------------------------------------------------- 1 | # Description: This script is used to calculate subnet information 2 | # Author: James Sawyer 3 | # Email: githubtools@jamessawyer.co.uk 4 | # Website: http://www.jamessawyer.co.uk/ 5 | 6 | def calculate_subnet(ip_address, mask): 7 | # Split the IP address and subnet mask into their respective parts 8 | ip_parts = ip_address.split("/") 9 | ip = ip_parts[0] 10 | mask = int(ip_parts[1]) 11 | 12 | # Convert the IP address and mask to binary 13 | ip_binary = "".join([bin(int(x) + 256)[3:] for x in ip.split(".")]) 14 | mask_binary = "1" * mask + "0" * (32 - mask) 15 | 16 | # Calculate the network address and broadcast address 17 | network_address_binary = ip_binary[:mask] + "0" * (32 - mask) 18 | broadcast_address_binary = ip_binary[:mask] + "1" * (32 - mask) 19 | 20 | # Convert the binary values to decimal notation 21 | network_address = ".".join([str(int(x, 2)) for x in [network_address_binary[0:8], network_address_binary[8:16], network_address_binary[16:24], network_address_binary[24:32]]]) 22 | broadcast_address = ".".join([str(int(x, 2)) for x in [broadcast_address_binary[0:8], broadcast_address_binary[8:16], broadcast_address_binary[16:24], broadcast_address_binary[24:32]]]) 23 | 24 | # Calculate the range of host addresses 25 | host_min = network_address.split(".") 26 | host_max = broadcast_address.split(".") 27 | 28 | # Increment the fourth octet of the minimum host address by 1 29 | host_min[3] = str(int(host_min[3]) + 1) 30 | 31 | # Decrement the fourth octet of the maximum host address by 1 32 | host_max[3] = str(int(host_max[3]) - 1) 33 | 34 | # Calculate the number of valid hosts 35 | num_hosts = 2 ** (32 - mask) - 2 36 | 37 | # Return the results 38 | return (network_address, broadcast_address, ".".join(host_min), ".".join(host_max), num_hosts) 39 | 40 | # Prompt the user to enter the IP address and subnet mask 41 | ip_address = input("Enter the IP address and subnet mask (in CIDR notation): ") 42 | 43 | try: 44 | # Split the IP address and subnet mask into their respective parts 45 | ip_parts 
/subscriptions.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /text_image_processor.py: --------------------------------------------------------------------------------
1 | # Description: This script is used to extract text from images using pytesseract and PIL 2 | # Author: James Sawyer 3 | # Email: githubtools@jamessawyer.co.uk 4 | # Website: http://www.jamessawyer.co.uk/ 5 | 6 | # Import the necessary libraries 7 | from PIL import Image 8 | import pytesseract 9 | import os 10 | import pandas as pd 11 | 12 | # Set the directory containing the images 13 | image_dir = '/Users/james/Desktop' 14 | 15 | # Get a list of all the image files in the directory 16 | image_files = [ 17 | f for f in os.listdir(image_dir) if os.path.isfile( 18 | os.path.join( 19 | image_dir, 20 | f))] 21 | 22 | # Create an empty Pandas dataframe to store the results 23 | df = pd.DataFrame(columns=['filename', 'text']) 24 | 25 | # Iterate over the list of image files 26 | for file in image_files: 27 | try: 28 | # print the file name 29 | print("Processing file: " + file) 30 | # Open the image and convert it to grayscale 31 | image = Image.open(os.path.join(image_dir, file)) 32 | image = image.convert('L') 33 | 34 | # Apply a fixed global threshold to convert the image to black and 35 | # white 36 | threshold = 150 37 | image = image.point(lambda p: 255 if p > threshold else 0) 38 | 39 | # Use pytesseract to apply OCR to the image and extract the text 40 | text = pytesseract.image_to_string(image).rstrip() 41 | 42 | # split the text on newline characters and process each line separately 43 | lines = text.split('\n') 44 | 45 | # join the lines into a single string 46 | single_string = ' \n '.join(lines) 47 | 48 | 49 | # Store the text and filename in the dataframe (DataFrame.append was removed in pandas 2.x) 50 | df = pd.concat([df, pd.DataFrame([{'filename': file, 'text': single_string}])], ignore_index=True) 51 | except Exception as e: 52 | print("Error processing file: " + file) 53 | print(e) 54 | pass 55 | 56 | # Print the dataframe to the console 57 | print(df) 58 | -------------------------------------------------------------------------------- /trading_functions.py: --------------------------------------------------------------------------------
1 | """ def get_profit_loss(position): 2 | try: 3 | deal_size = float(position["position"]["dealSize"]) 4 | if position["position"]["direction"] == "SELL": 5 | return (float(position["position"]["openLevel"]) - 6 | float(position["market"]["offer"])) * deal_size 7 | else: 8 | return (float(position["market"]["bid"]) - 9 | float(position["position"]["openLevel"])) * deal_size 10 | except Exception as e:
11 | logging.error("Error calculating profit/loss: %s", e) 12 | return None 13 | 14 | 15 | def close_trade(base_url, position, delete_headers): 16 | try: 17 | deal_id = position["position"]["dealId"] 18 | deal_size = str(position["position"]["dealSize"]) 19 | close_direction = "BUY" if position["position"]["direction"] == "SELL" else "SELL" 20 | base_url = base_url + "/positions/otc" 21 | data = { 22 | "dealId": deal_id, 23 | "size": deal_size, 24 | "orderType": "MARKET", 25 | "direction": close_direction, 26 | } 27 | response = requests.post( 28 | base_url, 29 | data=json.dumps(data), 30 | headers=delete_headers) 31 | if response.status_code != 200: 32 | logging.error( 33 | "Failed to close deal %s. Reason: %s", 34 | deal_id, 35 | response.text) 36 | return None 37 | return response.json()["dealReference"] 38 | except Exception as e: 39 | logging.error("Error closing trade: %s", e) 40 | return None 41 | 42 | 43 | def close_all_trades(authenticated_headers, base_url, delete_headers): 44 | try: 45 | positions_url = "https://demo-api.ig.com/gateway/deal/positions" 46 | 47 | response = requests.get(positions_url, headers=authenticated_headers) 48 | positions = response.json()["positions"] 49 | for position in positions: 50 | profit_loss = get_profit_loss(position) 51 | # add guard close to check if trade is profitable, maybe??!! 52 | if profit_loss is None: 53 | continue 54 | logging.info( 55 | "Profit/Loss for deal %s is %s", 56 | position["position"]["dealId"], 57 | profit_loss) 58 | deal_ref = close_trade(base_url, position, delete_headers) 59 | if deal_ref is None: 60 | continue 61 | confirm_url = base_url + "/confirms/" + deal_ref 62 | confirm_response = requests.get( 63 | confirm_url, headers=authenticated_headers) 64 | confirm_status = confirm_response.json() 65 | logging.info("Closed deal %s with status: %s, reason: %s", 66 | position["position"]["dealId"], 67 | confirm_status["dealStatus"], 68 | confirm_status["reason"]) 69 | except Exception as e: 70 | logging.error("Error closing all trades: %s", e) 71 | """ 72 | 73 | # def rolling_correlation(drawdown, price, window): 74 | # """ 75 | # Calculate the rolling correlation between the natural gas drawdown trend and the price over a given window. 76 | 77 | # drawdown: a sequence of natural gas drawdown values 78 | # price: a sequence of corresponding natural gas prices 79 | # window: the size of the window to use for calculating the rolling correlation 80 | 81 | # returns: a sequence of rolling correlations with the same length as the input sequences 82 | # """ 83 | # corr = [] 84 | # for i in range(len(drawdown)): 85 | # start = max(0, i - window + 1) # start index for the window 86 | # end = i + 1 # end index for the window 87 | # d = drawdown[start:end] # drawdown values in the window 88 | # p = price[start:end] # prices in the window 89 | # # correlation coefficient between drawdown and price 90 | # r = np.corrcoef(d, p)[0, 1] 91 | # corr.append(r) 92 | # return corr 93 | 94 | # def calculate_correlation(data): 95 | # """Calculate the relationship (correlation) between drawdown of natural gas and the price of natural gas. 96 | 97 | # Args: 98 | # data (Pandas DataFrame): The data to calculate the correlation for. The DataFrame should have columns 99 | # named 'price' and 'drawdown' containing the natural gas price and drawdown data, respectively. 100 | 101 | # Returns: 102 | # float: The Pearson correlation coefficient between the natural gas price and drawdown data. 
103 | # """ 104 | # # Extract the natural gas price and drawdown data 105 | # price = data['price'] 106 | # drawdown = data['trend'] 107 | 108 | # # Calculate the Pearson correlation coefficient 109 | # corr, _ = pearsonr(price, drawdown) 110 | 111 | # return corr 112 | 113 | # def find_patterns_np(numbers): 114 | # patterns = [] 115 | # # Convert list to NumPy array 116 | # numbers = np.array(numbers) 117 | # # Check for periodic trend 118 | # fft = np.fft.fft(numbers) 119 | # if np.abs(fft[1]) > 0.5: 120 | # # Compute the average value of the input numbers 121 | # avg = np.mean(numbers) 122 | # # Check if the periodic trend is upward or downward 123 | # if np.abs(fft[1]) / avg > 1: 124 | # direction = "upward" 125 | # else: 126 | # direction = "downward" 127 | # patterns.append( 128 | # ('periodic trend', f'The numbers show a {direction} periodic trend.')) 129 | # else: 130 | # # Check for linear trend 131 | # slope, intercept, r_value, p_value, std_err = stats.linregress( 132 | # range(len(numbers)), numbers) 133 | # if abs(slope) > 0.5: 134 | # if slope > 0: 135 | # direction = "upward" 136 | # else: 137 | # direction = "downward" 138 | # patterns.append( 139 | # ('linear trend', f'The numbers show a {direction} linear trend.')) 140 | # # Check for outliers 141 | # else: 142 | # q75, q25 = np.percentile(numbers, [75, 25]) 143 | # iqr = q75 - q25 144 | # cut_off = iqr * 1.5 145 | # lower, upper = q25 - cut_off, q75 + cut_off 146 | # outliers = [x for x in numbers if x < lower or x > upper] 147 | # if len(outliers) > 0: 148 | # patterns.append(('outliers', 'The numbers contain outliers.')) 149 | # return patterns 150 | 151 | # def find_trend_patterns(prices): 152 | # # First, we will use numpy's polyfit function to fit a polynomial curve to the prices 153 | # # This will allow us to find any underlying patterns in the data 154 | # coefficients = np.polyfit(range(len(prices)), prices, deg=2) 155 | 156 | # # The coefficients returned by polyfit represent the parameters of the polynomial curve 157 | # # We can use these coefficients to define a polynomial function 158 | # def polynomial(x): 159 | # return coefficients[0] * x**2 + coefficients[1] * x + coefficients[2] 160 | 161 | # # Now we can use the polynomial function to generate a list of predicted prices 162 | # # based on the trend pattern identified by the polynomial curve 163 | # predicted_prices = [polynomial(i) for i in range(len(prices))] 164 | 165 | # # Finally, we can return the list of predicted prices as the hidden trend pattern 166 | # return predicted_prices 167 | 168 | # def hull_moving_average(data, window_size=14): 169 | # data['hull_moving_average'] = None 170 | # for i in range(len(data)): 171 | # if i < window_size: 172 | # data.loc[data.index[i], 173 | # 'hull_moving_average'] = data['price'][:i + 1].mean() 174 | # else: 175 | # data.loc[data.index[i], 'hull_moving_average'] = ( 176 | # 2 * data['price'][i - window_size + 1:i + 1].mean() - data['price'][i - window_size:i + 1].mean()) 177 | 178 | # # convert to a float and round to 2 decimal places 179 | 180 | # data['hull_moving_average'] = data['hull_moving_average'].astype( 181 | # float).round(2) 182 | 183 | # return data['hull_moving_average'] 184 | 185 | # def find_patterns(numbers): 186 | # from scipy.signal import detrend, periodogram 187 | # from scipy.stats import linregress, zscore 188 | 189 | # patterns = [] 190 | 191 | # if len(numbers) < 2: 192 | # return patterns 193 | 194 | # # Check for periodic trend 195 | # f, Pxx_den = periodogram(numbers) 196 | # 
if Pxx_den[1] > 0.5: 197 | # direction = "upward" if Pxx_den[1] / np.mean(numbers) > 1 else "downward" 198 | # patterns.append(('periodic trend', f'The numbers show a {direction} periodic trend.')) 199 | 200 | # # Check for linear trend 201 | # detrended = detrend(numbers) 202 | # if not np.allclose(detrended, 0): 203 | # slope, intercept, r_value, p_value, std_err = linregress(range(len(numbers)), numbers) 204 | # direction = "upward" if slope > 0 else "downward" 205 | # patterns.append(('linear trend', f'The numbers show a {direction} linear trend.')) 206 | 207 | # detrended = detrend(numbers, type='linear') 208 | # if not np.allclose(detrended, 0): 209 | # direction = "upward" if np.polyfit(range(len(numbers)), numbers, 2)[0] > 0 else "downward" 210 | # patterns.append(('quadratic trend', f'The numbers show a {direction} quadratic trend.')) 211 | 212 | # # Check for outliers 213 | # z_scores = zscore(numbers) 214 | # outliers = [x for x in numbers if x < np.mean(numbers) - 3*np.std(numbers) or x > np.mean(numbers) + 3*np.std(numbers)] 215 | # if len(outliers) > 0: 216 | # patterns.append(('outliers', 'The numbers contain outliers.')) 217 | 218 | # return patterns 219 | -------------------------------------------------------------------------------- /tweaks.sysctl: -------------------------------------------------------------------------------- 1 | coda.fake_statfs=0 2 | fs.dentry-state=22187 3 | fs.epoll.max_user_watches=1000000 4 | fs.file-max=2097152 5 | fs.file-nr=864 0 2097152 6 | fs.inode-nr=25711 4732 7 | fs.inode-state=25711 4732 0 0 0 0 0 8 | fs.inotify.max_queued_events=32768 9 | fs.inotify.max_user_instances=512 10 | fs.inotify.max_user_watches=524288 11 | fs.lease-break-time=30 12 | fs.leases-enable=1 13 | fs.mqueue.msg_default=10 14 | fs.mqueue.msg_max=10 15 | fs.mqueue.msgsize_default=8192 16 | fs.mqueue.msgsize_max=8192 17 | fs.mqueue.queues_max=256 18 | fs.nfs.idmap_cache_timeout=3 19 | fs.nfs.nfs_callback_tcpport=0 20 | fs.nfs.nfs_congestion_kb=65536 21 | fs.nfs.nfs_mountpoint_timeout=1000 22 | fs.nfs.nlm_grace_period=10 23 | fs.nfs.nlm_tcpport=0 24 | fs.nfs.nlm_timeout=5 25 | fs.nfs.nlm_udpport=0 26 | fs.nfs.nsm_local_state=3 27 | fs.nfs.nsm_use_hostnames=0 28 | fs.nr_open=4194304 29 | fs.overflowgid=65534 30 | fs.overflowuid=65534 31 | fs.pipe-max-size=1048576 32 | fs.pipe-user-pages-hard=0 33 | fs.pipe-user-pages-soft=32768 34 | fs.protected_hardlinks=0 35 | fs.protected_symlinks=0 36 | fs.quota.allocated_dquots=4 37 | fs.quota.cache_hits=30 38 | fs.quota.drops=26 39 | fs.quota.free_dquots=2 40 | fs.quota.lookups=34 41 | fs.quota.reads=4 42 | fs.quota.syncs=50 43 | fs.quota.warnings=0 44 | fs.quota.writes=92 45 | fs.suid_dumpable=0 46 | fs.xfs.error_level=1 47 | fs.xfs.filestream_centisecs=500 48 | fs.xfs.inherit_noatime=1 49 | fs.xfs.inherit_nodefrag=1 50 | fs.xfs.inherit_nodump=1 51 | fs.xfs.inherit_nosymlinks=0 52 | fs.xfs.inherit_sync=1 53 | fs.xfs.irix_sgid_inherit=0 54 | fs.xfs.irix_symlink_mode=0 55 | fs.xfs.panic_mask=18 56 | fs.xfs.rotorstep=1 57 | fs.xfs.speculative_prealloc_lifetime=300 58 | fs.xfs.stats_clear=0 59 | fs.xfs.xfssyncd_centisecs=3000 60 | fscache.object_max_active=4 61 | fscache.operation_max_active=2 62 | sunrpc.nfs_debug=0x0000 63 | sunrpc.nfsd_debug=0x0000 64 | vm.vfs_cache_pressure=100 65 | kernel.auto_msgmni=0 66 | kernel.cad_pid=1 67 | kernel.cap_last_cap=37 68 | kernel.compat-log=1 69 | kernel.core_pattern=/tmp/%e.%t.%p.%s.core 70 | kernel.core_pipe_limit=0 71 | kernel.core_uses_pid=1 72 | kernel.ctrl-alt-del=0 73 | 
kernel.dmesg_restrict=1 74 | kernel.domainname=(none) 75 | kernel.ftrace_dump_on_oops=0 76 | kernel.hostname=TNAS-CC26 77 | kernel.hotplug= 78 | kernel.keys.gc_delay=300 79 | kernel.keys.maxbytes=20000 80 | kernel.keys.maxkeys=200 81 | kernel.keys.persistent_keyring_expiry=259200 82 | kernel.keys.root_maxbytes=200000 83 | kernel.keys.root_maxkeys=200 84 | kernel.kptr_restrict=1 85 | kernel.max_lock_depth=1024 86 | kernel.modprobe=/sbin/modprobe 87 | kernel.modules_disabled=0 88 | kernel.msgmax=8192 89 | kernel.msgmnb=65536 90 | kernel.msgmni=16384 91 | kernel.ngroups_max=65536 92 | kernel.osrelease=4.4.18-g8bcbd8a-dirty 93 | kernel.ostype=Linux 94 | kernel.overflowgid=65534 95 | kernel.overflowuid=65534 96 | kernel.panic=3 97 | kernel.panic_on_oops=0 98 | kernel.panic_on_warn=0 99 | kernel.perf_cpu_time_max_percent=25 100 | kernel.perf_event_max_sample_rate=32768 101 | kernel.perf_event_mlock_kb=516 102 | kernel.perf_event_paranoid=2 103 | kernel.pid_max=4194304 104 | kernel.poweroff_cmd=/sbin/poweroff 105 | kernel.print-fatal-signals=0 106 | kernel.printk=7417 4 1 7 107 | kernel.printk_delay=0 108 | kernel.printk_ratelimit=5 109 | kernel.printk_ratelimit_burst=10 110 | kernel.pty.max=4096 111 | kernel.pty.nr=1 112 | kernel.pty.reserve=1024 113 | kernel.random.boot_id=927b13c6-62ed-4352-8c4a-9cdba9b9bb27 114 | kernel.random.entropy_avail=678 115 | kernel.random.poolsize=4096 116 | kernel.random.read_wakeup_threshold=64 117 | kernel.random.urandom_min_reseed_secs=60 118 | kernel.random.uuid=ea18924e-7689-4249-8b05-403fdc89257b 119 | kernel.random.write_wakeup_threshold=256 120 | kernel.randomize_va_space=2 121 | kernel.real-root-dev=0 122 | kernel.sched_child_runs_first=0 123 | kernel.sched_rr_timeslice_ms=10 124 | kernel.sched_rt_period_us=1000000 125 | kernel.sched_rt_runtime_us=950000 126 | kernel.sem=32000 1024000000 500 32000 127 | kernel.shm_rmid_forced=0 128 | kernel.shmall=4294967296 129 | kernel.shmmax=68719476736 130 | kernel.shmmni=4096 131 | kernel.sysctl_writes_strict=0 132 | kernel.sysrq=0 133 | kernel.tainted=4096 134 | kernel.threads-max=7684 135 | kernel.timer_migration=1 136 | kernel.traceoff_on_warning=0 137 | kernel.tracepoint_printk=0 138 | kernel.unprivileged_bpf_disabled=1 139 | kernel.usermodehelper.bset=4294967295 63 140 | kernel.usermodehelper.inheritable=4294967295 63 141 | kernel.version=#1328 SMP Mon Aug 31 12:00:37 CST 2020 142 | vm.admin_reserve_kbytes=8192 143 | vm.block_dump=0 144 | vm.dirty_background_bytes=0 145 | vm.dirty_background_ratio=5 146 | vm.dirty_bytes=0 147 | vm.dirty_expire_centisecs=3000 148 | vm.dirty_ratio=10 149 | vm.dirty_writeback_centisecs=500 150 | vm.dirtytime_expire_seconds=43200 151 | vm.drop_caches=3 152 | vm.extra_free_kbytes=0 153 | vm.hugepages_treat_as_movable=0 154 | vm.hugetlb_shm_group=0 155 | vm.laptop_mode=0 156 | vm.legacy_va_layout=0 157 | vm.lowmem_reserve_ratio=256 32 158 | vm.max_map_count=65530 159 | vm.min_free_kbytes=65536 160 | vm.mmap_min_addr=4096 161 | vm.mmap_rnd_bits=18 162 | vm.mmap_rnd_compat_bits=11 163 | vm.nr_hugepages=0 164 | vm.nr_overcommit_hugepages=0 165 | vm.nr_pdflush_threads=0 166 | vm.oom_dump_tasks=1 167 | vm.oom_kill_allocating_task=0 168 | vm.overcommit_kbytes=0 169 | vm.overcommit_memory=0 170 | vm.overcommit_ratio=50 171 | vm.page-cluster=3 172 | vm.panic_on_oom=0 173 | vm.percpu_pagelist_fraction=0 174 | vm.stat_interval=1 175 | vm.swappiness=10 176 | vm.user_reserve_kbytes=30420 177 | vm.vfs_cache_pressure=100 -------------------------------------------------------------------------------- 
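The tweaks file above is essentially a raw sysctl key=value dump, and not every key will exist (or be writable) on a given kernel. Before applying it, a read-only diff against the running system shows which values would actually change. The Python sketch below is an illustrative addition, not part of the repo: the show_sysctl_diff name is made up, and it assumes the usual dots-to-slashes mapping onto /proc/sys.

from pathlib import Path

def show_sysctl_diff(tweaks_path="tweaks.sysctl"):
    # Compare each desired value with what the running kernel currently reports.
    for raw in Path(tweaks_path).read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue
        parameter, _, desired = line.partition("=")
        parameter, desired = parameter.strip(), desired.strip()
        proc_path = Path("/proc/sys") / parameter.replace(".", "/")
        try:
            current = " ".join(proc_path.read_text().split())
        except OSError:
            current = "<not available on this kernel>"
        marker = "" if current == desired else "  <- differs"
        print(f"{parameter}: current={current} desired={desired}{marker}")

# Example (read-only, safe without root):
# show_sysctl_diff()

update_sysctl.sh below covers the same ground with its --dry-run flag; this version just avoids shelling out once per parameter.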
/update_sysctl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Path to the tweaks file 4 | tweaks_file="tweaks.sysctl" 5 | 6 | # Check if dry-run flag is invoked 7 | dry_run=false 8 | if [ "$1" == "--dry-run" ]; then 9 | dry_run=true 10 | fi 11 | 12 | # Read the tweaks from the file; the || [ -n "$line" ] keeps a final line that lacks a trailing newline 13 | while IFS= read -r line || [ -n "$line" ]; do 14 | # Skip empty lines and comments starting with # 15 | if [[ -n "$line" && "${line:0:1}" != "#" ]]; then 16 | # Split the line into parameter and value (keep everything after the first '=') 17 | parameter=$(echo "$line" | cut -d '=' -f 1) 18 | value=$(echo "$line" | cut -d '=' -f 2-) 19 | 20 | # Trim leading/trailing whitespaces 21 | parameter=$(echo "$parameter" | awk '{$1=$1};1') 22 | value=$(echo "$value" | awk '{$1=$1};1') 23 | 24 | # Get the current value for the parameter 25 | old_value=$(sysctl -n "$parameter") 26 | 27 | # Display the dry run information 28 | if [ "$dry_run" = true ]; then 29 | echo "Dry run: $parameter = $value (Old: $old_value)" 30 | else 31 | # Apply the tweak 32 | if sysctl -w "$parameter=$value" > /dev/null 2>&1; then 33 | echo "Applied tweak: $parameter = $value (Old: $old_value)" 34 | # Create or update the sysctl configuration file 35 | echo "$parameter = $value" | sudo tee -a /etc/sysctl.conf > /dev/null 36 | else 37 | echo "Failed to apply tweak: $parameter = $value" 38 | fi 39 | fi 40 | fi 41 | done < "$tweaks_file" 42 | 43 | if [ "$dry_run" = false ]; then 44 | # Load new settings 45 | if sudo sysctl -p; then 46 | echo "Tweaks have been applied and will be persistent across reboots." 47 | else 48 | echo "Failed to load new settings. Please check the tweaks file and try again." 49 | fi 50 | fi 51 | --------------------------------------------------------------------------------
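One caveat with update_sysctl.sh: every successful run appends each key to /etc/sysctl.conf with tee -a, so repeated runs accumulate duplicate lines. A possible alternative, sketched below in Python under stated assumptions (the write_sysctl_dropin name and the /etc/sysctl.d/99-tweaks.conf path are illustrative, not part of the repo), is to regenerate a single drop-in file on every run, which stays idempotent by construction.

from pathlib import Path

def write_sysctl_dropin(tweaks_path="tweaks.sysctl",
                        dropin_path="/etc/sysctl.d/99-tweaks.conf"):
    # Collect key/value pairs; the last occurrence of a key wins.
    entries = {}
    for raw in Path(tweaks_path).read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue
        parameter, _, value = line.partition("=")
        entries[parameter.strip()] = value.strip()
    body = "\n".join(f"{k} = {v}" for k, v in sorted(entries.items()))
    Path(dropin_path).write_text(body + "\n")
    return len(entries)

# Example (needs root to write under /etc/sysctl.d; load afterwards with: sysctl --system):
# count = write_sysctl_dropin()
# print(f"Wrote {count} settings")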