├── .gitignore ├── LICENSE.md ├── README.md ├── amazon_price_tracker ├── README.md ├── main.py └── requirements.md ├── bike └── price_calculator.py ├── bluesky ├── README.md └── main.py ├── borg ├── README.md └── main.sh ├── cabbie ├── README.md └── main.py ├── craigslist ├── README.md └── main.py ├── dailystatus ├── README.md └── main.py ├── diary └── main.py ├── finance_parser ├── README.md └── main.py ├── foodlog ├── README.md └── main.py ├── githooks ├── README.md ├── apply_pre-push.sh └── pre-push ├── github ├── README.md └── new_repo.py ├── immich ├── Immich.shortcut ├── immich-wrapper.sh └── immich.sh ├── lifelog └── main.py ├── lofi.sh ├── openai ├── README.md └── main.py ├── pihole ├── README.md ├── downtime.sh └── one_more_hour.py ├── renewCert.sh ├── shorten.py ├── spotify_analytics ├── .gitignore ├── README.md ├── main.py └── requirements.md ├── steps └── post.php ├── utils └── get_largest_files.zsh ├── weather.py ├── workout └── main.py └── youtube ├── .gitignore ├── README.md ├── main.py └── requirements.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.vscode/* 3 | __pycache__/* 4 | *.pyclisted 5 | cache.json 6 | *.out 7 | *.log 8 | .cache -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Tyler Woodfin (https://www.tyler.cloud) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tools 2 | - This repository serves as a centralized location for various tools and scripts that have been customized to streamline my personal workflow. 3 | - This is all organized into folders, each representing a specific aspect of my workflow and containing relevant tools and scripts. 4 | - Feel free to use this code as you see fit (within the bounds of the license), but don't expect it to work on your machine. 5 | 6 | ## Recommended: Cabinet 7 | - If you intend to run any of this in any meaningful way, you'll need [cabinet](https://pypi.org/project/cabinet/), my utility for storing and managing variables across projects. 8 | 9 | ## See Other READMEs 10 | - Most folders contain a README that you should check out for more specific information. Happy coding! 
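A sample crontab entry for the tracker (illustrative; adjust the schedule and path to your setup):

```bash
# check prices once a day at 9am (path is a placeholder)
0 9 * * * python3 /path/to/tools/amazon_price_tracker/main.py
```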
-------------------------------------------------------------------------------- /amazon_price_tracker/README.md: -------------------------------------------------------------------------------- 1 | # Amazon Price Tracker 2 | 3 | A simple price tracker for Amazon. 4 | 5 | ## dependencies 6 | - [cabinet](https://pypi.org/project/cabinet/) 7 | - [requests](https://pypi.org/project/requests/) 8 | 9 | ## setup 10 | - `pip install -r requirements.md` 11 | - `cabinet -e` to initialize cabinet for the first time 12 | - See [detailed instructions](https://github.com/tylerjwoodfin/cabinet?tab=readme-ov-file#mail) for configuring mail 13 | - add an amazon_tracker object with as many urls as you want to track, following the example below. 14 | - When the price drops below `price_threshold`, an email will be sent to the address specified in the `email` object. 15 | 16 | Your Cabinet settings file should look like this, at a minimum: 17 | ```json 18 | { 19 | "email": { 20 | "from": "throwaway@example.com", 21 | "from_pw": "example", 22 | "from_name": "Cabinet (or other name of your choice)", 23 | "to": "destination@protonmail.com", 24 | "smtp_server": "example.com", 25 | "imap_server": "example.com", 26 | "port": 123 27 | }, 28 | "amazon_tracker": { 29 | "items": [ 30 | { 31 | "url": "https://amazon.com/", 32 | "price_threshold": 0 33 | } 34 | ] 35 | } 36 | } 37 | ``` 38 | 39 | ## usage 40 | - add to crontab as necessary 41 | - email is sent if price is below threshold 42 | -------------------------------------------------------------------------------- /amazon_price_tracker/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | amazon price tracker 3 | 4 | see README.md for more information 5 | """ 6 | 7 | from typing import List, Optional, TypedDict 8 | import re 9 | from bs4 import BeautifulSoup 10 | import requests 11 | from cabinet import Cabinet, Mail 12 | 13 | cabinet = Cabinet() 14 | 15 | class AmazonTrackerItem(TypedDict): 16 | """ 17 | The required data type in Cabinet. See README.md for more information. 18 | """ 19 | url: str 20 | price_threshold: int 21 | 22 | class AmazonTrackerData(TypedDict): 23 | """ 24 | The structure of the amazon_tracker data in Cabinet. 
 25 |     """
 26 |     items: List[AmazonTrackerItem]
 27 | 
 28 | def get_page_content(url: str) -> Optional[str]:
 29 |     """
 30 |     retrieves the html content of a given url
 31 | 
 32 |     :param url: the url of the amazon product page
 33 |     :return: the html content if successful, None otherwise
 34 |     """
 35 |     headers = {
 36 |         "User-Agent": (
 37 |             "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
 38 |             "Chrome/120.0.0.0 Safari/537.36"
 39 |         ),
 40 |         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
 41 |         "Accept-Language": "en-US,en;q=0.5",
 42 |         "Accept-Encoding": "gzip, deflate, br",
 43 |         "Connection": "keep-alive",
 44 |         "Upgrade-Insecure-Requests": "1",
 45 |         "TE": "Trailers"
 46 |     }
 47 |     try:
 48 |         response = requests.get(url, headers=headers, timeout=10)
 49 |         response.raise_for_status()
 50 |         return response.text
 51 |     except requests.RequestException as e:
 52 |         print(f"Error fetching the page content: {e}")
 53 |         return None
 54 | 
 55 | def parse_price(html_content: str) -> Optional[float]:
 56 |     """
 57 |     parses the price from the html content using multiple selectors to handle different page layouts
 58 | 
 59 |     :param html_content: the html content of the amazon product page
 60 |     :return: the price as a float if found, None otherwise
 61 |     """
 62 |     soup = BeautifulSoup(html_content, "html.parser")
 63 |     price: Optional[float] = None  # initialize so the fallback check below never hits an unbound name
 64 |     # List of possible price selectors
 65 |     price_selectors = [
 66 |         # New price selectors
 67 |         'span.a-price span.a-offscreen',
 68 |         'span[data-a-color="price"] span.a-offscreen',
 69 |         '#corePrice_feature_div span.a-offscreen',
 70 |         '#priceblock_ourprice',
 71 |         '#priceblock_dealprice',
 72 |         '.a-price .a-offscreen',
 73 |         '#price_inside_buybox',
 74 |         '#newBuyBoxPrice',
 75 |         'span.priceToPay span.a-offscreen'
 76 |     ]
 77 | 
 78 |     for selector in price_selectors:
 79 |         try:
 80 |             element = soup.select_one(selector)
 81 |             if element:
 82 |                 # Extract price text and clean it
 83 |                 price_text = element.get_text().strip()
 84 |                 # Remove currency symbols and commas, handle ranges
 85 |                 price_text = re.sub(r'[^\d.,]', '', price_text)
 86 |                 if '-' in price_text:
 87 |                     # If price range, take the lower price
 88 |                     price_text = price_text.split('-')[0]
 89 | 
 90 |                 # Handle different decimal separators
 91 |                 if ',' in price_text and '.' in price_text:
 92 |                     # Format like 1,234.56
 93 |                     price_text = price_text.replace(',', '')
 94 |                 elif ',' in price_text:
 95 |                     # Format like 1234,56
 96 |                     price_text = price_text.replace(',', '.')
 97 | 
 98 |                 price = float(price_text)
 99 |                 if price > 0:
100 |                     return price
101 |         except (ValueError, AttributeError) as e:
102 |             cabinet.log(f"Error parsing price: {e}", level="warn")
103 |             continue
104 | 
105 |     # Try finding any price-like text in the page
106 |     if not price:
107 |         price_pattern = r'\$\s*(\d+(?:,\d{3})*(?:\.\d{2})?)'
108 |         matches = re.findall(price_pattern, html_content)
109 |         if matches:
110 |             try:
111 |                 price = float(matches[0].replace(',', ''))
112 |                 if price > 0:
113 |                     return price
114 |             except ValueError:
115 |                 pass
116 | 
117 |     cabinet.log("Could not find the price element", level="warn")
118 |     return None
119 | 
120 | def main() -> None:
121 |     """
122 |     the main function of the program
123 |     """
124 |     mail = Mail()
125 | 
126 |     amazon_data: AmazonTrackerData | None = cabinet.get("amazon_tracker",
127 |                                                         return_type=AmazonTrackerData)
128 | 
129 |     if not amazon_data or not amazon_data["items"]:
130 |         cabinet.log("No Amazon items set. Exiting.")
131 |         return
132 | 
133 |     amazon_urls = amazon_data["items"]
134 | 
135 |     if not amazon_urls:
136 |         cabinet.log("No Amazon URLs set. Exiting.")
137 |         return
138 | 
139 |     for item in amazon_urls:
140 |         url: str | None = item.get("url")
141 |         price_threshold: int | None = item.get("price_threshold")
142 | 
143 |         if not url or price_threshold is None:
144 |             cabinet.log(f"Missing price or url in amazon_tracker: {item}")
145 |             continue
146 | 
147 |         html_content = get_page_content(url)
148 |         if not html_content:
149 |             continue
150 | 
151 |         current_price = parse_price(html_content)
152 |         if current_price is None:
153 |             cabinet.log(f"Could not parse price for {url}")
154 |             continue
155 | 
156 |         if current_price <= price_threshold:
157 |             subject = "Amazon Price Alert"
158 |             body = (
159 |                 f"The price for the item at {url} has dropped to ${current_price}, "
160 |                 f"which is below your threshold of ${price_threshold}."
161 |             )
162 |             mail.send(subject, body)
163 |             cabinet.log(f"Price alert sent for {url}. Current price: ${current_price}")
164 |         else:
165 |             cabinet.log(
166 |                 f"Checked {url}. Current price (${current_price}) is above the threshold "
167 |                 f"(${price_threshold})."
168 |             )
169 | 
170 | if __name__ == "__main__":
171 |     main()
172 | 
--------------------------------------------------------------------------------
/amazon_price_tracker/requirements.md:
--------------------------------------------------------------------------------
1 | beautifulsoup4
2 | cabinet
3 | requests
--------------------------------------------------------------------------------
/bike/price_calculator.py:
--------------------------------------------------------------------------------
 1 | """
 2 | A super basic tool to show myself the price per ride on my bike.
 3 | After each ride, I call this script to see how over time, the cost per ride decreases.
 4 | """
 5 | 
 6 | from cabinet import Cabinet
 7 | 
 8 | def main():
 9 |     """
10 |     calculate the bike price per ride
11 |     """
12 |     cabinet = Cabinet()
13 |     bike_price = cabinet.get('bike', 'price', return_type=int) or 0
14 |     rides = cabinet.get('bike', 'rides', return_type=int) or 0
15 |     rides += 1
16 | 
17 |     # increment rides
18 |     cabinet.put('bike', 'rides', rides)
19 | 
20 |     print(f"Thank you for riding!\nYou've taken {rides} rides.")
21 |     print(f"Your bike price per ride is now: ${bike_price / rides:.2f}.")
22 | 
23 | if __name__ == '__main__':
24 |     main()
25 | 
--------------------------------------------------------------------------------
/bluesky/README.md:
--------------------------------------------------------------------------------
 1 | # BlueSky
 2 | 
 3 | BlueSky is a Python script for posting messages to [BlueSky](https://bsky.app) directly from the terminal.
 4 | 
 5 | ## Features
 6 | 
 7 | - Authenticate with BlueSky using saved credentials.
 8 | - Post messages directly to your BlueSky account from the command line.
 9 | - Modular design with reusable components.
10 | 
11 | ## Prerequisites
12 | 
13 | - Python 3.9 or newer
14 | - Dependencies:
15 |   - `atproto` library
16 |   - [Cabinet](https://www.github.com/tylerjwoodfin/cabinet)
17 | - Your BlueSky credentials stored in a `Cabinet` instance under the key `bluesky`.
18 | 
19 | ## Installation
20 | 
21 | 1. Clone the repository:
22 |    ```bash
23 |    git clone <repository-url>
24 |    cd <repository-folder>
25 |    ```
26 | 
27 | 2. Install dependencies:
28 |    ```bash
29 |    pip install atproto cabinet
30 |    ```
31 | 
32 | 3. Set up your credentials in `Cabinet`:
33 |    Use the `cabinet` library to securely store your BlueSky `handle` and `password` under the key `bluesky`:
34 |    ```json
35 |    {
36 |      "bluesky": {
37 |        "handle": "your_handle",
38 |        "password": "your_password"
39 |      }
40 |    }
41 |    ```
42 | 
43 | ## Usage
44 | 
45 | Run the script from the terminal:
46 | 
47 | ```bash
48 | python3 main.py
49 | ```
50 | 
51 | You will be prompted to enter a message to post.
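An example session (the message is illustrative; the script reads one line from the `> ` prompt and prints 💙 on success):

```
$ python3 main.py
> hello from the terminal
💙
```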
52 | 
53 | ## Code Overview
54 | 
55 | The core functionality is encapsulated in the `BlueSkyTool` class:
56 | - `authenticate()`: Logs in using credentials stored in `Cabinet`.
57 | - `post(message: str)`: Posts a message to your BlueSky account.
58 | - `run(message: str)`: Combines authentication and posting for seamless execution.
59 | 
60 | ## License
61 | 
62 | This project is licensed under the MIT License.
--------------------------------------------------------------------------------
/bluesky/main.py:
--------------------------------------------------------------------------------
 1 | """
 2 | BlueSky tool for easily posting through the terminal.
 3 | This script allows users to post directly to BlueSky using the official atproto library.
 4 | """
 5 | 
 6 | import sys
 7 | from atproto import Client, client_utils
 8 | from cabinet import Cabinet
 9 | 
10 | 
11 | class BlueSkyTool:
12 |     """
13 |     A tool for interacting with the BlueSky API.
14 |     Handles authentication and posting functionality.
15 |     """
16 | 
17 |     def __init__(self):
18 |         """
19 |         initializes the BlueSkyTool with a client and cabinet instance.
20 |         """
21 |         self.client = Client()
22 |         self.cabinet = Cabinet()
23 | 
24 |     def authenticate(self) -> None:
25 |         """
26 |         authenticates the client using credentials stored in cabinet.
27 | 
28 |         Raises:
29 |             ValueError: if no credentials are found in cabinet.
30 |         """
31 |         creds: dict | None = self.cabinet.get('bluesky', return_type=dict)
32 | 
33 |         if creds is None:
34 |             self.cabinet.log('No BlueSky credentials found.', level='error')
35 |             raise ValueError("BlueSky credentials not found in Cabinet.")
36 | 
37 |         self.client.login(creds['handle'], creds['password'])
38 | 
39 |     def post(self, post_message: str) -> None:
40 |         """
41 |         posts a message to BlueSky.
42 | 
43 |         Args:
44 |             post_message (str): the text to be posted.
45 | 
46 |         Returns:
47 |             None
48 |         """
49 |         text = client_utils.TextBuilder().text(post_message)
50 |         self.client.send_post(text)
51 |         print("💙")
52 | 
53 |     def run(self, post_message: str) -> None:
54 |         """
55 |         runs the BlueSkyTool by authenticating and posting a message.
56 | 
57 |         Args:
58 |             post_message (str): the text to post.
59 | 
60 |         Returns:
61 |             None
62 |         """
63 |         try:
64 |             self.authenticate()
65 |             self.post(post_message)
66 |         except ValueError as e:
67 |             print(e)
68 | 
69 | 
70 | if __name__ == '__main__':
71 |     # do not take any arguments
72 |     if len(sys.argv) > 1:
73 |         print("Usage: python main.py; no arguments are accepted.")
74 |         sys.exit(1)
75 | 
76 |     message = input("> ")
77 |     tool = BlueSkyTool()
78 |     tool.run(message)
79 | 
--------------------------------------------------------------------------------
/borg/README.md:
--------------------------------------------------------------------------------
 1 | # borg
 2 | 
 3 | - an automated backup script to back my Syncthing instance up to Hetzner
 4 | 
 5 | ## setup
 6 | 
 7 | - store the Borg repo path and passphrase where the script can read them: it calls `cabinet -g "keys" "borg" "repo"` and `cabinet -g "keys" "borg" "passphrase"`.
 8 | - make sure that there is no trailing whitespace at the end of these values.
 9 | - `BORG_REPO` could look like `ssh://u12345@u12345.your-storagebox.de:23/./syncthing-backups`
10 | - `BORG_PASSPHRASE` comes from the password entered when `borg init` was called. I store this in a password manager.
11 | - adjust the script as needed to match your backup frequency and paths
12 | - create a crontab entry to call this script on a recurring basis, as shown below
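For example, an illustrative crontab entry (the path is a placeholder):

```bash
# run the borg backup nightly at 2am
0 2 * * * /path/to/tools/borg/main.sh
```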
13 | 
14 | ## notes
15 | 
16 | - by default, this script backs up /home/tyler/syncthing. This is hardcoded as this is a script specific to my use case; adjust as necessary.
--------------------------------------------------------------------------------
/borg/main.sh:
--------------------------------------------------------------------------------
  1 | #!/bin/zsh
  2 | 
  3 | # automated backup of $HOME/syncthing to a Borg repo; see README.md for setup
  4 | 
  5 | # Define paths
  6 | CABINET="$HOME/.local/bin/cabinet"
  7 | 
  8 | # Fetch Borg repo and passphrase securely
  9 | BORG_REPO=$("$CABINET" -g "keys" "borg" "repo") || {
 10 |     echo "Error: Failed to retrieve Borg repo path." >&2
 11 |     exit 1
 12 | }
 13 | 
 14 | BORG_PASSPHRASE=$("$CABINET" -g "keys" "borg" "passphrase") || {
 15 |     echo "Error: Failed to retrieve Borg passphrase." >&2
 16 |     exit 1
 17 | }
 18 | 
 19 | # Export for Borg to use
 20 | export BORG_REPO
 21 | export BORG_PASSPHRASE
 22 | 
 23 | # Log repo
 24 | echo "Borg repository: $BORG_REPO"
 25 | 
 26 | # Logging functions
 27 | info() { "$CABINET" --log "$*"; }
 28 | error() { "$CABINET" --log "$*" --level 'error'; }
 29 | 
 30 | # Graceful exit on SIGINT/SIGTERM
 31 | trap 'echo "$(date) Backup interrupted" >&2; exit 2' INT TERM
 32 | 
 33 | info 'Starting Borg Backup...'
 34 | 
 35 | # Backup $HOME/syncthing
 36 | borg create \
 37 |     --verbose \
 38 |     --filter AME \
 39 |     --list \
 40 |     --stats \
 41 |     --show-rc \
 42 |     --compression lz4 \
 43 |     --exclude-caches \
 44 |     \
 45 |     ::'{hostname}-{now}' \
 46 |     $HOME/syncthing
 47 | 
 48 | backup_exit=$?
 49 | 
 50 | info "Pruning repository"
 51 | 
 52 | # Use the `prune` subcommand to maintain 1 daily, 2 weekly and 1 monthly
 53 | # archives of THIS machine. The '{hostname}-*' matching is very important to
 54 | # limit prune's operation to this machine's archives and not apply to
 55 | # other machines' archives also:
 56 | 
 57 | borg prune \
 58 |     --list \
 59 |     --glob-archives '{hostname}-*' \
 60 |     --show-rc \
 61 |     --keep-daily 1 \
 62 |     --keep-weekly 2 \
 63 |     --keep-monthly 1
 64 | 
 65 | prune_exit=$?
 66 | 
 67 | # Actually free repo disk space by compacting segments
 68 | info "Compacting repository"
 69 | borg compact
 70 | 
 71 | compact_exit=$?
 72 | 
 73 | # Check if backups are performing as expected
 74 | check_backups() {
 75 |     backups=$(borg list --short)
 76 |     today=$(date +%Y-%m-%d)
 77 |     yesterday=$(date -d "yesterday" +%Y-%m-%d)
 78 |     last_month=$(date -d "last month" +%Y-%m)
 79 | 
 80 |     today_count=$(echo "$backups" | grep -c "$today")
 81 |     yesterday_count=$(echo "$backups" | grep -c "$yesterday")
 82 | 
 83 |     week_count=0
 84 |     for i in {0..6}; do
 85 |         day=$(date -d "$i days ago" +%Y-%m-%d)
 86 |         week_count=$((week_count + $(echo "$backups" | grep -c "$day")))
 87 |     done
 88 | 
 89 |     month_count=$(echo "$backups" | grep -c "$last_month")
 90 | 
 91 |     if [ "$today_count" -ge 1 ] || [ "$yesterday_count" -ge 1 ]; then
 92 |         info "Backup from today or yesterday found."
 93 |     else
 94 |         error "No backup from today or yesterday found."
 95 |         return 1
 96 |     fi
 97 | 
 98 |     if [ "$week_count" -ge 2 ]; then
 99 |         info "At least 2 backups from this week found."
100 |     else
101 |         error "Less than 2 backups from this week found."
102 | return 1 103 | fi 104 | 105 | if [ "$month_count" -ge 1 ]; then 106 | info "Backup from last month found." 107 | else 108 | error "No backup from last month found." 109 | return 1 110 | fi 111 | 112 | return 0 113 | } 114 | 115 | info "Checking backups" 116 | check_backups 117 | check_exit=$? 118 | 119 | # Use highest exit code as global exit code 120 | global_exit=$(( backup_exit > prune_exit ? backup_exit : prune_exit )) 121 | global_exit=$(( compact_exit > global_exit ? compact_exit : global_exit )) 122 | global_exit=$(( check_exit > global_exit ? check_exit : global_exit )) 123 | 124 | if [ ${global_exit} -eq 0 ]; then 125 | info "Backup, Prune, Compact, and Check finished successfully" 126 | elif [ ${global_exit} -eq 1 ]; then 127 | info "Backup, Prune, Compact, and/or Check finished with warnings" 128 | else 129 | info "Backup, Prune, Compact, and/or Check finished with errors" 130 | fi 131 | 132 | exit ${global_exit} 133 | -------------------------------------------------------------------------------- /cabbie/README.md: -------------------------------------------------------------------------------- 1 | # Cabbie 2 | 3 | Ask AI to run commands on your behalf using natural language. 4 | Now you're playing with fire! 5 | 6 | ## Features 7 | 8 | - Natural language command interpretation 9 | - Automatic command execution with proper process management 10 | - 6-second timeout for command execution 11 | - Graceful process termination (SIGTERM followed by SIGKILL if needed) 12 | - Real-time output capture 13 | - AI-powered output interpretation 14 | - Debug mode for troubleshooting 15 | 16 | ## Installation 17 | 18 | 1. Ensure you have Python 3.6+ installed 19 | 2. Install the required dependencies: 20 | ```bash 21 | pip install openai cabinet 22 | ``` 23 | 3. Make the script executable: 24 | ```bash 25 | chmod +x main.py 26 | ``` 27 | 28 | ## Configuration 29 | 30 | 1. Set up your OpenAI API key in the Cabinet configuration: 31 | ```bash 32 | cabinet put keys openai YOUR_API_KEY 33 | ``` 34 | 35 | ## Usage 36 | 37 | Run the tool with a natural language description of what you want to do: 38 | 39 | ```bash 40 | ./main.py "show me all files in my documents folder" 41 | ``` 42 | 43 | The tool will: 44 | 1. Convert your request into appropriate shell commands 45 | 2. Execute the commands with proper process management 46 | 3. Capture and display the output 47 | 4. Provide a user-friendly interpretation of the results 48 | 49 | ## Examples 50 | 51 | ```bash 52 | # Count files in a directory 53 | ./main.py "how many files are in my documents folder" 54 | 55 | # Check system information 56 | ./main.py "what's my system information" 57 | 58 | # Monitor system resources 59 | ./main.py "show me my system resources" 60 | ``` 61 | 62 | ## Debug Mode 63 | 64 | The tool includes a debug mode that can be enabled by setting `DEBUG = True` in the script. 
This will show detailed information about:
65 | - Command cleaning and processing
66 | - Process management
67 | - Timeout handling
68 | - Error conditions
69 | 
70 | ## Notes
71 | 
72 | - Commands are executed with a 6-second timeout
73 | - Long-running commands will be automatically terminated
74 | - The tool uses process groups to ensure proper cleanup of child processes
75 | - Commands are executed in a shell environment
76 | - Output is captured and interpreted by AI for user-friendly presentation
--------------------------------------------------------------------------------
/cabbie/main.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | # Cabbie
 3 | 
 4 | import subprocess
 5 | import sys
 6 | import time
 7 | import os
 8 | import signal
 9 | from tyler_python_helpers import ChatGPT
10 | 
11 | # Debug mode - set to True to enable debug logging
12 | DEBUG = False
13 | chatgpt = ChatGPT()
14 | 
15 | def debug_print(message: str) -> None:
16 |     """Print debug messages if DEBUG is True."""
17 |     if DEBUG:
18 |         print(f"DEBUG: {message}")
19 | 
20 | 
21 | def clean_command(command: str) -> str:
22 |     """Clean up the command by removing markdown formatting."""
23 |     debug_print(f"Cleaning command: {command}")
24 |     # Remove markdown code block syntax
25 |     command = command.replace("```bash", "").replace("```", "")
26 |     # Remove any leading/trailing whitespace
27 |     cleaned = command.strip()
28 |     # Remove any remaining 'bash' prefix if it exists
29 |     if cleaned.startswith('bash'):
30 |         cleaned = cleaned[4:].strip()
31 |     debug_print(f"Cleaned command: {cleaned}")
32 |     return cleaned
33 | 
34 | def run_command(command: str, timeout: int = 6) -> tuple[str, str]:
35 |     """Run a command in the background and capture its output."""
36 |     debug_print(f"Running command with {timeout}s timeout: {command}")
37 | 
38 |     try:
39 |         # Use subprocess.run with a timeout
40 |         result = subprocess.run(
41 |             command,
42 |             shell=True,
43 |             stdout=subprocess.PIPE,
44 |             stderr=subprocess.PIPE,
45 |             text=True,
46 |             timeout=timeout,
47 |             preexec_fn=os.setsid  # Create new process group
48 |         )
49 | 
50 |         debug_print("Command completed successfully")
51 |         return result.stdout, result.stderr
52 | 
53 |     except subprocess.TimeoutExpired as e:
54 |         debug_print("Command timed out, attempting to terminate...")
55 |         # TimeoutExpired does not normally expose the child process; guard with hasattr
56 |         if hasattr(e, 'process'):
57 |             try:
58 |                 os.killpg(os.getpgid(e.process.pid), signal.SIGTERM)
59 |                 debug_print("Sent SIGTERM to process group")
60 |                 time.sleep(0.5)  # Give it a moment to terminate
61 |                 try:
62 |                     os.killpg(os.getpgid(e.process.pid), signal.SIGKILL)
63 |                     debug_print("Sent SIGKILL to process group")
64 |                 except OSError:
65 |                     debug_print("Failed to send SIGKILL")
66 |             except OSError:
67 |                 debug_print("Failed to kill process group")
68 |         raise TimeoutError(f"Command timed out after {timeout} seconds")
69 | 
70 |     except KeyboardInterrupt:
71 |         debug_print("Received keyboard interrupt")
72 |         raise
73 |     except Exception as e:
74 |         debug_print(f"Error in run_command: {e}")
75 |         raise
76 | 
77 | 
78 | def main():
79 |     try:
80 | 
81 |         # Get system info in a cross-platform way
82 |         debug_print("Getting device info")
83 |         try:
84 |             # Try hostnamectl first (Linux)
85 |             device_type = subprocess.check_output(["hostnamectl"]).decode("utf-8")
86 |         except (subprocess.CalledProcessError, FileNotFoundError):
87 |             # Fall back to uname (macOS and other Unix-like systems)
88 |             device_type = subprocess.check_output(["uname", "-a"]).decode("utf-8")
89 |         debug_print(f"Device info: 
{device_type}") 90 | 91 | command = " ".join(sys.argv[1:]) 92 | if not command: 93 | print("Error: No command received from user") 94 | sys.exit(1) 95 | debug_print(f"User command: {command}") 96 | 97 | prompt = f""" 98 | You are a helpful assistant that can run commands on behalf of the user. 99 | You are running on a {device_type} device. 100 | 101 | Rules for command generation: 102 | 1. Output only the command(s) to satisfy the user's request 103 | 2. Use simple, reliable commands that work on most Unix-like systems 104 | 3. Avoid complex shell features like command substitution unless necessary 105 | 4. For system info, prefer standard commands like 'uname', 'df', 'free', etc. 106 | 5. Do not include any explanations or markdown formatting 107 | 6. If multiple commands are needed, separate them with && 108 | 7. Do not start command with 'bash' or 'zsh'. 109 | 110 | The user's request is: {command} 111 | """ 112 | 113 | # Get command from OpenAI 114 | debug_print("Getting command from OpenAI") 115 | command_to_run = chatgpt.query(prompt) 116 | command_to_run = clean_command(command_to_run) 117 | 118 | if not command_to_run: 119 | print("Error: No command received from OpenAI") 120 | sys.exit(1) 121 | 122 | # Run the command and store the output 123 | try: 124 | debug_print("Executing command") 125 | stdout, stderr = run_command(command_to_run) 126 | output = stdout.strip() 127 | if stderr: 128 | print(f"Warning: Command produced stderr: {stderr.strip()}") 129 | output = stderr.strip() 130 | except TimeoutError as e: 131 | print(f"Error: {e}") 132 | sys.exit(1) 133 | except subprocess.CalledProcessError as e: 134 | print(f"Error running command: {e}") 135 | if e.stdout: 136 | print(f"Command stdout: {e.stdout}") 137 | if e.stderr: 138 | print(f"Command stderr: {e.stderr}") 139 | sys.exit(1) 140 | except KeyboardInterrupt: 141 | print("\nCommand interrupted by user") 142 | sys.exit(1) 143 | 144 | # Summarize the output 145 | debug_print("Getting summary from OpenAI") 146 | summary_prompt = f""" 147 | A user asked you how to run the following command: {command} 148 | You responded with the following command: {command_to_run} 149 | The output of the command is: {output} 150 | Interpret the output within a single sentence in a user-friendly way. 151 | Do not refer to the command or the output as 'output' or 'command'. 152 | """ 153 | 154 | # Get summary from OpenAI 155 | summary = chatgpt.query(summary_prompt) 156 | debug_print("Got summary from OpenAI") 157 | print(summary) 158 | 159 | except KeyboardInterrupt: 160 | print("\nProgram interrupted by user") 161 | sys.exit(1) 162 | except Exception as e: 163 | debug_print(f"Unexpected error in main: {e}") 164 | print(f"Unexpected error: {e}") 165 | sys.exit(1) 166 | 167 | 168 | if __name__ == "__main__": 169 | main() -------------------------------------------------------------------------------- /craigslist/README.md: -------------------------------------------------------------------------------- 1 | # Craigslist Scraper 2 | 3 | A simple tool to scrape the SF Bay Free Stuff section for certain items. 4 | 5 | ## usage 6 | 7 | ```bash 8 | python3 main.py item1 item2 item3 ... 
9 | ``` 10 | 11 | or 12 | 13 | ```bash 14 | python3 main.py 15 | ``` 16 | (uses Cabinet to load requested items from `craigslist -> items`) 17 | 18 | ## dependencies 19 | - [cabinet](https://pypi.org/project/cabinet/) 20 | - [bs4](https://pypi.org/project/beautifulsoup4/) 21 | - [requests](https://pypi.org/project/requests/) -------------------------------------------------------------------------------- /craigslist/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Craigslist Scraper 3 | """ 4 | 5 | import sys 6 | from typing import List 7 | import requests 8 | from bs4 import BeautifulSoup 9 | from cabinet import Cabinet, Mail 10 | 11 | def scrape_craigslist(cli_items: List[str] = []) -> None: # pylint: disable=dangerous-default-value 12 | """ 13 | Scrape Craigslist for requested free items 14 | """ 15 | 16 | url = "https://sfbay.craigslist.org/search/zip?postal=94108&search_distance=3" 17 | response = requests.get(url, timeout=10) 18 | soup = BeautifulSoup(response.text, 'html.parser') 19 | cab: Cabinet = Cabinet() 20 | mail: Mail = Mail() 21 | 22 | # Use command-line arguments if provided, otherwise use Cabinet 23 | if cli_items: 24 | requested_items = [item.lower() for item in cli_items] 25 | else: 26 | requested_items: List[str] = cab.get("craigslist", "items") or [] 27 | 28 | found_items: List[str] = cab.get("craigslist", "sent") or [] 29 | is_found_items: bool = False 30 | 31 | for post in soup.find_all('li', class_='cl-static-search-result', limit=8): 32 | title_div = post.find('div', class_='title') 33 | if title_div: 34 | title = title_div.text.strip().lower() 35 | 36 | # Get the post URL 37 | post_url = post.find('a')['href'] 38 | 39 | # check if any of the requested items are in the title 40 | for item in requested_items: 41 | if item in title and post_url not in found_items: 42 | mail.send(f"Found {item}", f"{title}") 43 | found_items.append(post_url) 44 | is_found_items = True 45 | 46 | # add any sent items to the list 47 | if is_found_items: 48 | cab.put("craigslist", "sent", found_items) 49 | else: 50 | print("No items found") 51 | 52 | if __name__ == "__main__": 53 | # If command-line arguments are provided, use them as requested items 54 | if len(sys.argv) > 1: 55 | scrape_craigslist(sys.argv[1:]) 56 | else: 57 | scrape_craigslist() 58 | -------------------------------------------------------------------------------- /dailystatus/README.md: -------------------------------------------------------------------------------- 1 | # dailystatus 2 | - Feel free to clone, but this is very custom to my setup and backup process 3 | - Backs up logs, crontab, etc. 4 | - Sends me an email each day with my home automation status, the weather forecast, Spotify information, and anything else I find useful 5 | 6 | 7 | ## dependencies 8 | - requests (`pip install requests`) 9 | - [cabinet](https://pypi.org/project/cabinet/) 10 | - Data comes from logs or information in `cabinet` produced by `../weather.py` -------------------------------------------------------------------------------- /dailystatus/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate a daily status email detailing key activities, back up essential files, and manage logs. 
  3 | """
  4 | 
  5 | import os
  6 | import difflib
  7 | import pwd
  8 | import datetime
  9 | import glob
 10 | import subprocess
 11 | import textwrap
 12 | import socket
 13 | import json
 14 | from pathlib import Path
 15 | import cabinet
 16 | 
 17 | # pylint: disable=invalid-name
 18 | 
 19 | # initialize cabinet for configuration and mail for notifications
 20 | cab = cabinet.Cabinet()
 21 | mail = cabinet.Mail()
 22 | 
 23 | 
 24 | def get_paths_and_config():
 25 |     """retrieve and configure paths"""
 26 |     today = datetime.date.today()
 27 |     device_name = socket.gethostname()
 28 |     user_home = pwd.getpwuid(os.getuid())[0]
 29 |     path_dot_cabinet = os.path.join(f"/home/{user_home}/.cabinet")
 30 |     path_backend = cab.get("path", "cabinet", "log-backup") or f"{path_dot_cabinet}/log-backup"
 31 |     path_zshrc = os.path.join(f"/home/{user_home}/.zshrc")
 32 |     path_notes = cab.get("path", "notes") or f"{path_dot_cabinet}/notes"
 33 |     log_path_today = os.path.join(cab.path_dir_log, str(today))
 34 |     log_path_backups = cab.get("path", "backups") or f"{path_dot_cabinet}/backups"
 35 |     log_backups_location = os.path.join(log_path_backups, "log")
 36 | 
 37 |     return {
 38 |         "today": today,
 39 |         "device_name": device_name,
 40 |         "user_home": user_home,
 41 |         "path_backend": path_backend,
 42 |         "path_zshrc": path_zshrc,
 43 |         "path_notes": path_notes,
 44 |         "log_path_today": log_path_today,
 45 |         "log_backups_location": log_backups_location
 46 |     }
 47 | 
 48 | def append_food_log(email):
 49 |     """check if food has been logged today, print total calories or an error if none found."""
 50 |     log_file = os.path.expanduser("~/syncthing/log/food.json")
 51 |     today = datetime.date.today().isoformat()
 52 | 
 53 |     if not os.path.exists(log_file):
 54 |         cab.log("Food log file does not exist.", level="error")
 55 |         return email
 56 | 
 57 |     try:
 58 |         with open(log_file, "r", encoding="utf-8") as f:
 59 |             log_data = json.load(f)
 60 | 
 61 |         if today not in log_data or not log_data[today]:
 62 |             cab.log("No food logged for today.", level="error")
 63 |         else:
 64 |             total_calories = sum(entry["calories"] for entry in log_data[today])
 65 |             return email + textwrap.dedent(f"""
 66 |                 <b>Calories Eaten Today:</b><br>
 67 |                 {total_calories} calories<br>
 68 |                 <br>
 69 |                 """)
 70 | 
 71 |     except (json.JSONDecodeError, OSError):
 72 |         cab.log("Error reading food log file.", level="error")
 73 |     return email
 74 | 
 75 | def append_syncthing_conflict_check(email):
 76 |     """
 77 |     If there are conflicts (files with `.sync-conflict` in their name) for remind.md
 78 |     (cabinet -> remindmail -> path -> file),
 79 |     return a merge conflict-style difference between the conflicting files
 80 |     with HTML formatting.
 81 |     """
 82 |     # Get the absolute path to the file from Cabinet
 83 |     target_file = cab.get("remindmail", "path", "file")
 84 | 
 85 |     if not target_file or not os.path.isfile(target_file):
 86 |         return email + "Error: Target file does not exist."
 87 | 
 88 |     # Find files with `.sync-conflict` in the same directory as the target file
 89 |     target_dir = os.path.dirname(target_file)
 90 |     base_name = Path(target_file).stem
 91 |     conflict_pattern = os.path.join(target_dir, f"{base_name}.sync-conflict*")
 92 |     conflict_files = glob.glob(conflict_pattern)
 93 | 
 94 |     if not conflict_files:
 95 |         return email
 96 | 
 97 |     # Read the contents of the original file
 98 |     try:
 99 |         with open(target_file, 'r', encoding='utf-8') as f:
100 |             original_content = f.readlines()
101 |     except (OSError, IOError) as e:
102 |         cab.log(f"Error reading original file: {str(e)}", level="error")
103 |         return email + f"Error reading original file: {str(e)}"
104 | 
105 |     # Read and compare each conflict file
106 |     html_diffs = []
107 |     for conflict_file in conflict_files:
108 |         try:
109 |             with open(conflict_file, 'r', encoding='utf-8') as f:
110 |                 conflict_content = f.readlines()
111 |         except (OSError, IOError) as e:
112 |             cab.log(f"Error reading conflict file {conflict_file}: {str(e)}", level="error")
113 |             return email + f"Error reading conflict file {conflict_file}: {str(e)}"
114 | 
115 |         # Generate a unified diff and convert to HTML
116 |         diff = difflib.unified_diff(
117 |             original_content, conflict_content,
118 |             fromfile=base_name, tofile=os.path.basename(conflict_file),
119 |             lineterm=''
120 |         )
121 |         formatted_diff = "<br>".join(
122 |             [f"+{line[1:]}" \
123 |                 if line.startswith('+') and not line.startswith('+++') else
124 |                 f"-{line[1:]}" \
125 |                 if line.startswith('-') and not line.startswith('---') else
126 |                 f"{line}" for line in diff]
127 |         )
128 |         html_diffs.append(
129 |             f"<br><b>remind.md has a conflict:</b><br>"
130 |             f"<pre>"
131 |             f"{formatted_diff}</pre>"
132 |         )
133 | 
134 |     # Combine all diffs into a single HTML string
135 |     return email + "<br>".join(html_diffs)
136 | 
137 | def backup_files(paths: dict) -> None:
138 |     """
139 |     Back up essential files.
140 | 
141 |     Args:
142 |         paths (dict): A dictionary containing paths and other related configuration values.
143 | 
144 |     Returns:
145 |         None
146 |     """
147 | 
148 |     def build_backup_path(category):
149 |         """Helper function to construct backup file paths."""
150 |         return os.path.join(paths["path_backend"], paths["device_name"],
151 |                             category, f"{category} {paths['today']}.md")
152 | 
153 |     # Construct backup file paths
154 |     path_cron_today = build_backup_path("cron")
155 |     path_bash_today = build_backup_path("zsh")
156 |     path_notes_today = os.path.join(paths["path_backend"],
157 |                                     paths["device_name"], "notes", f"notes {paths['today']}.zip")
158 |     path_log_backup = os.path.join(paths["log_backups_location"],
159 |                                    f"log folder backup {paths['today']}.zip")
160 | 
161 |     # define backup commands
162 |     backup_commands = [
163 |         f"/usr/bin/crontab -l > '{path_cron_today}'",
164 |         f"cp -r {paths['path_zshrc']} '{path_bash_today}'",
165 |         f"zip -r '{path_notes_today}' {paths['path_notes']}",
166 |         f"zip -r '{path_log_backup}' {paths['path_backend']} "
167 |         f"--exclude='{os.path.join(paths['path_backend'], 'songs', '*')}'",
168 |     ]
169 | 
170 |     # execute each backup command
171 |     try:
172 |         for command in backup_commands:
173 |             subprocess.run(command, shell=True, check=True)
174 |     except subprocess.CalledProcessError as error:
175 |         cab.log(f"Command failed: {command} with error: {str(error)}", level="error")
176 |     except OSError as error:
177 |         cab.log(f"OS error for: {command} with error: {str(error)}", level="error")
178 | 
179 | 
180 | def prune_old_backups(paths, max_backups=14):
181 |     """prune log folder backups exceeding the limit"""
182 |     cab.log(f"pruning {paths['log_backups_location']}...")
183 |     zip_files = glob.glob(f"{paths['log_backups_location']}/*.zip")
184 |     zip_files.sort(key=os.path.getmtime)
185 |     excess_count = len(zip_files) - max_backups
186 |     for i in range(excess_count):
187 |         os.remove(zip_files[i])
188 | 
189 | 
190 | def analyze_logs(paths, email):
191 |     """append daily log analysis"""
192 |     daily_log_file = cab.get_file_as_array(f"LOG_DAILY_{paths['today']}.log",
193 |                                            file_path=paths["log_path_today"]) or []
194 | 
195 |     daily_log_issues = [line for line in daily_log_file if \
196 |                         "ERROR" in line or "WARN" in line or "CRITICAL" in line]
197 |     is_warnings = any("WARN" in issue for issue in daily_log_issues)
198 |     is_errors = any("ERROR" in issue or "CRITICAL" in issue for issue in daily_log_issues)
199 | 
200 |     if daily_log_issues:
201 |         daily_log_filtered = "<br>".join(daily_log_issues)
202 |         email += textwrap.dedent(f"""
203 |             <b>Warning/Error/Critical Log:</b><br>
204 |             {daily_log_filtered}<br>
205 |             <br>
206 |             """)
207 | 
208 |     return email, is_warnings, is_errors
209 | 
210 | 
211 | def append_spotify_info(paths, email):
212 |     """append spotify issues and stats"""
213 |     spotify_log = cab.get_file_as_array("LOG_SPOTIFY.log", file_path=paths["log_path_today"]) or []
214 |     spotify_stats = cab.get("spotipy") or {}
215 | 
216 |     spotify_issues = "No Data"
217 |     if spotify_log:
218 |         issues = [log for log in spotify_log if \
219 |                   "WARNING" in log or "ERROR" in log or "CRITICAL" in log]
220 |         if issues:
221 |             spotify_issues = "<br>".join(issues)
222 |     email += f"<br><b>Spotify Issues:</b><br>{spotify_issues}<br><br>"
223 | 
224 |     total_tracks = spotify_stats.get("total_tracks", "No Data")
225 |     average_year = spotify_stats.get("average_year", "No Data")
226 | 
227 |     email += f"""
228 |     <b>Spotify Stats:</b><br>
229 |     Song Count: {total_tracks}<br>
230 |     Average Year: {average_year}<br>
231 |     <br>
232 |     """
233 | 
234 |     return email
235 | 
236 | 
237 | def append_weather_info(email):
238 |     """append weather data"""
239 |     weather_tomorrow_formatted = cab.get("weather", "data", "tomorrow_formatted") or {}
240 |     if weather_tomorrow_formatted:
241 |         email += f"""
242 |         <b>Weather Tomorrow:</b><br>
243 |         {weather_tomorrow_formatted}
244 |         """
245 |     return email
246 | 
247 | 
248 | def send_status_email(email, is_warnings, is_errors, today):
249 |     """determine and send status email"""
250 |     email_subject = f"Daily Status - {today}"
251 |     if is_errors and is_warnings:
252 |         email_subject += " - Check Errors/Warnings"
253 |     elif is_errors:
254 |         email_subject += " - Check Errors"
255 |     elif is_warnings:
256 |         email_subject += " - Check Warnings"
257 | 
258 |     mail.send(email_subject, email)
259 | 
260 | 
261 | if __name__ == "__main__":
262 |     # retrieve paths and configuration
263 |     config_data = get_paths_and_config()
264 | 
265 |     # set up email content
266 |     status_email = "Dear Tyler,<br><br>This is your daily status report.<br><br>"
267 | 
268 |     # check if food has been logged today
269 |     status_email = append_food_log(status_email)
270 | 
271 |     # back up files
272 |     backup_files(config_data)
273 | 
274 |     # prune old backups
275 |     prune_old_backups(config_data)
276 | 
277 |     # analyze logs
278 |     status_email, has_warnings, has_errors = analyze_logs(config_data, status_email)
279 | 
280 |     # add syncthing conflict check
281 |     status_email = append_syncthing_conflict_check(status_email)
282 | 
283 |     # add spotify info
284 |     status_email = append_spotify_info(config_data, status_email)
285 | 
286 |     # append weather info
287 |     status_email = append_weather_info(status_email)
288 | 
289 |     # send the email
290 |     send_status_email(status_email, has_warnings, has_errors, config_data["today"])
291 | 
--------------------------------------------------------------------------------
/diary/main.py:
--------------------------------------------------------------------------------
 1 | """
 2 | diary
 3 | 
 4 | in my terminal, `diary` is aliased to this script so I can easily create, edit,
 5 | and save a new diary entry (stored in my cloud provider as a markdown file)
 6 | """
 7 | import os
 8 | import sys
 9 | import datetime
10 | from cabinet import Cabinet
11 | 
12 | cab = Cabinet()
13 | 
14 | PATH_DIARY = cab.get("path", "diary") or ""
15 | FILENAME = f"{PATH_DIARY}/{datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S')}.md"
16 | 
17 | if not os.path.exists(PATH_DIARY):
18 |     cab.log(f"'{PATH_DIARY}' does not exist. Set cabinet -> path -> diary.", level="error")
19 |     sys.exit(1)
20 | 
21 | cab.edit_file(f"{FILENAME}")
22 | 
--------------------------------------------------------------------------------
/finance_parser/README.md:
--------------------------------------------------------------------------------
 1 | # Finance Transaction Parser
 2 | 
 3 | A simple Python script to read bank-specific transaction CSVs and parse them into categories.
 4 | 
 5 | ## Dependencies
 6 | - Python 3.6+
 7 | - pandas
 8 | - tkinter (`brew install python-tk` on macOS)
 9 | 
10 | ## Usage
11 | 
12 | - Set up a categories JSON file (modify the path as needed) like the example below:
13 | ```
14 | {
15 |     "categories": {
16 |         "Groceries": [
17 |             "groceries",
18 |             "trader joe",
19 |             "mochi",
20 |             "onion",
21 |             "grapes",
22 |             "cookies",
23 |             "milk",
24 |             "paper towels",
25 |             "apples",
26 |             "factor",
27 |             "frozen meal"
28 |         ],
29 |         "Restaurants": [
30 |             "dinner",
31 |             "restaurant",
32 |             "pho",
33 |             "korean",
34 |             "meal"
35 |         ]
36 |     }
37 | }
38 | ```
39 | - `python3 main.py` to run the script.
40 | - Select the CSV file to parse. I recommend your monthly Venmo transaction statement.
41 | 
42 | Any transaction containing any of the keywords (wildcard) in the categories will be assigned to that category.
43 | 44 | ## Example Output 45 | ``` 46 | 2024-10-29 03:51:23 Groceries -4.98 Apples 47 | 2024-10-29 03:51:40 Groceries -39.26 Delivery - 6 meals 48 | 2024-10-31 17:01:59 Groceries 3.00 Groceries 49 | 2024-10-02 20:30:53 Other -353.48 Flights for Thanksgiving 50 | 2024-10-06 06:31:23 Other -104.81 Hotel 51 | 2024-10-12 03:04:33 Other -25.00 Gas 52 | 2024-10-14 01:55:57 Other -33.16 Gas 53 | 2024-10-14 01:56:16 Other -15.19 Electricity, October 54 | 2024-10-12 01:29:12 Restaurants 18.45 Tulan restaurant 55 | 2024-10-13 01:15:31 Restaurants -21.60 🇰🇷 Korean dinner 56 | 2024-10-13 04:06:08 Restaurants 22.50 Pho Vietnam 57 | 58 | Category Totals: 59 | Category Adjusted Amount Check 60 | Groceries -102.20 True 61 | Other -973.40 True 62 | Restaurants -53.15 True 63 | ``` 64 | 65 | If the transaction is a 'charge', it is counted as a positive amount. If it is a 'payment', it is counted as a negative amount. -------------------------------------------------------------------------------- /finance_parser/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Multi-file transaction parser for Venmo and Citi CSV files. 3 | """ 4 | 5 | import json 6 | import argparse 7 | from pathlib import Path 8 | from tkinter import Tk 9 | from tkinter.filedialog import askopenfilename 10 | from typing import Dict, Optional 11 | import argcomplete # type: ignore # pylint: disable=import-error 12 | import pandas as pd # type: ignore # pylint: disable=import-error 13 | import pyperclip # type: ignore # pylint: disable=import-error 14 | import ezodf # type: ignore # pylint: disable=import-error 15 | from datetime import datetime, timedelta 16 | from tyler_python_helpers import ChatGPT 17 | 18 | chatgpt = ChatGPT() 19 | 20 | def find_latest_file_in_downloads(pattern: str) -> Optional[str]: 21 | """Finds the latest file in Downloads matching the given pattern.""" 22 | downloads_path = Path.home() / "Downloads" 23 | matching_files = list(downloads_path.glob(pattern)) 24 | if not matching_files: 25 | return None 26 | return str(max(matching_files, key=lambda x: x.stat().st_mtime)) 27 | 28 | class BaseParser: 29 | """Base class for all parsers.""" 30 | def __init__(self, file_path: str, category_file: Path): 31 | self.file_path = file_path 32 | self.category_file = category_file 33 | self.transactions_df = pd.DataFrame() 34 | self.category_mapping: Dict[str, list] = {} 35 | self.filtered_rows: list = [] 36 | 37 | def load_categories(self) -> None: 38 | """Loads categories and filtered rows from the specified JSON file.""" 39 | try: 40 | with open(self.category_file, 'r', encoding='utf-8') as file: 41 | config = json.load(file) 42 | self.category_mapping = config['categories'] 43 | self.filtered_rows = config.get('filteredRows', []) 44 | except FileNotFoundError: 45 | print("Categories JSON file not found.") 46 | exit() 47 | 48 | def should_include_transaction(self, description: str) -> bool: 49 | """ 50 | Determines if a transaction should be included based on filtered rows. 
51 | """ 52 | if pd.isnull(description) or not isinstance(description, str): 53 | return True # Include non-string values by default 54 | return not any(filtered_text.lower() in description.lower() 55 | for filtered_text in self.filtered_rows) 56 | 57 | def categorize_transaction(self, note: str) -> str: 58 | """Categorizes transactions based on keywords from the JSON file.""" 59 | if pd.isnull(note) or not isinstance(note, str): 60 | return 'Other' 61 | note_lower = note.lower() 62 | for category, keywords in self.category_mapping.items(): 63 | if any(keyword.lower() in note_lower for keyword in keywords): 64 | return category 65 | return 'Other' 66 | 67 | def clean_amount(self, value: str) -> float: 68 | """Cleans and converts the amount to a float.""" 69 | if pd.isnull(value): 70 | return 0.0 # Handle NaN as 0 71 | if not isinstance(value, str): 72 | return float(value) 73 | 74 | # Skip rows with formulas 75 | if value.startswith('='): 76 | return 0.0 77 | 78 | try: 79 | cleaned_value = value.replace('$', '').replace(',', '').replace(' ', '').strip() 80 | return float(cleaned_value) 81 | except (ValueError, AttributeError): 82 | print(f"Warning: Could not parse amount '{value}', using 0.0") 83 | return 0.0 84 | 85 | def process_transactions(self, source: str) -> pd.DataFrame: 86 | """Processes transactions to categorize and clean amounts.""" 87 | raise NotImplementedError("Subclasses must implement this method.") 88 | 89 | def print_summary(self, summary_df: pd.DataFrame) -> None: 90 | """Prints the transaction summary and category totals.""" 91 | print("\nTransaction Summary (Sorted by Category and Date):") 92 | print(summary_df.to_string(index=False)) 93 | 94 | # Calculate totals for each category 95 | totals_df = summary_df.groupby('Category', 96 | observed=True)['Adjusted Amount'].sum().reset_index() 97 | 98 | # Move 'Other' category to the end 99 | totals_df['Category'] = pd.Categorical( 100 | totals_df['Category'], 101 | categories=sorted(self.category_mapping.keys()) + ['Other'], 102 | ordered=True 103 | ) 104 | totals_df = totals_df.sort_values(by='Category') 105 | 106 | print("\nCategory Totals:") 107 | print(totals_df.to_string(index=False)) 108 | 109 | class VenmoParser(BaseParser): 110 | """ 111 | Parses Venmo transactions from a CSV file. 
112 | """ 113 | def load_transactions(self) -> None: 114 | """Loads Venmo transactions from the CSV file.""" 115 | # Automatically find the header row 116 | with open(self.file_path, 'r', encoding='utf-8') as file: 117 | for i, line in enumerate(file): 118 | if "Datetime" in line and "Note" in line: 119 | header_row = i 120 | break 121 | else: 122 | print("Error: Could not find the header row in the Venmo CSV file.") 123 | exit() 124 | 125 | # Load the CSV starting from the header row 126 | self.transactions_df = pd.read_csv(self.file_path, skiprows=header_row, dtype=str) 127 | print("Venmo CSV file successfully loaded!") 128 | 129 | def process_transactions(self, source: str = "Venmo") -> pd.DataFrame: 130 | """Processes Venmo transactions to categorize and clean amounts.""" 131 | # Filter out unwanted transactions 132 | self.transactions_df = self.transactions_df[ 133 | self.transactions_df['Note'].apply(self.should_include_transaction) 134 | ] 135 | 136 | self.transactions_df['Note'] = self.transactions_df['Note'].fillna('') 137 | self.transactions_df['Category'] = \ 138 | self.transactions_df['Note'].apply(self.categorize_transaction) 139 | 140 | self.transactions_df['Adjusted Amount'] = self.transactions_df.apply( 141 | lambda row: -self.clean_amount(row['Amount (total)']), 142 | axis=1 143 | ) 144 | 145 | self.transactions_df['Datetime'] = pd.to_datetime( 146 | self.transactions_df['Datetime'], errors='coerce') 147 | 148 | self.transactions_df['Source'] = source 149 | return self.transactions_df.loc[:, ['Datetime', 150 | 'Category', 'Adjusted Amount', 'Note', 'Source']] 151 | 152 | 153 | class CitiParser(BaseParser): 154 | """ 155 | Parses Citi transactions from a CSV file in the format: 156 | Status,Date,Description,Debit,Credit 157 | For budget tracking: debits (spending) are positive, credits (money received) are negative 158 | """ 159 | def load_transactions(self) -> None: 160 | """Loads Citi transactions from the CSV file.""" 161 | try: 162 | self.transactions_df = pd.read_csv(self.file_path, dtype=str) 163 | print("Citi CSV file successfully loaded!") 164 | except Exception as e: # pylint: disable=broad-except 165 | print(f"Error loading Citi CSV file: {e}") 166 | exit() 167 | 168 | def process_transactions(self, source: str = "Citi") -> pd.DataFrame: 169 | """Processes Citi transactions to categorize and clean amounts.""" 170 | # Filter out unwanted transactions 171 | self.transactions_df = self.transactions_df[ 172 | self.transactions_df['Description'].apply(self.should_include_transaction) 173 | ] 174 | 175 | # Convert date string to datetime 176 | self.transactions_df['Datetime'] = pd.to_datetime( 177 | self.transactions_df['Date'], format='%m/%d/%Y', errors='coerce') 178 | 179 | # Filter for previous month only 180 | today = datetime.now() 181 | first_day_of_current_month = today.replace(day=1) 182 | last_day_of_previous_month = first_day_of_current_month - timedelta(days=1) 183 | first_day_of_previous_month = last_day_of_previous_month.replace(day=1) 184 | 185 | self.transactions_df = self.transactions_df[ 186 | (self.transactions_df['Datetime'] >= first_day_of_previous_month) & 187 | (self.transactions_df['Datetime'] <= last_day_of_previous_month) 188 | ] 189 | 190 | # Categorize based on Description 191 | self.transactions_df['Category'] = \ 192 | self.transactions_df['Description'].apply(self.categorize_transaction) 193 | 194 | # Handle amount calculation from Debit and Credit columns 195 | def calculate_amount(row): 196 | debit = self.clean_amount(row['Debit']) if 
pd.notna(row['Debit']) else 0 197 | credit = self.clean_amount(row['Credit']) if pd.notna(row['Credit']) else 0 198 | # Debits should be positive (money spent) 199 | # Credits remain negative (money received) 200 | return debit + credit 201 | 202 | self.transactions_df['Adjusted Amount'] = \ 203 | self.transactions_df.apply(calculate_amount, axis=1) 204 | 205 | # Add source column 206 | self.transactions_df['Source'] = source 207 | 208 | # Return only the columns we need 209 | return self.transactions_df.loc[:, ['Datetime', 210 | 'Category', 'Adjusted Amount', 'Description', 'Source']] 211 | 212 | 213 | class AmazonParser(BaseParser): 214 | """ 215 | Parses Amazon transactions from a CSV file. 216 | """ 217 | def load_transactions(self) -> None: 218 | """Loads Amazon transactions from the CSV file.""" 219 | try: 220 | # Read all columns as strings to avoid type conversion issues 221 | self.transactions_df = pd.read_csv(self.file_path, dtype=str) 222 | print("Amazon CSV file successfully loaded!") 223 | except Exception as e: # pylint: disable=broad-except 224 | print(f"Error loading Amazon CSV file: {e}") 225 | exit() 226 | 227 | def process_transactions(self, source: str = "Amazon") -> pd.DataFrame: 228 | """Processes Amazon transactions to categorize and clean amounts.""" 229 | # Convert date string to datetime 230 | self.transactions_df['Datetime'] = pd.to_datetime( 231 | self.transactions_df['date'], format='%Y-%m-%d', errors='coerce') 232 | 233 | # Clean and convert total amount 234 | self.transactions_df['Adjusted Amount'] = self.transactions_df['total'].apply(self.clean_amount) 235 | 236 | # Fill NaN values in items column with empty string 237 | self.transactions_df['items'] = self.transactions_df['items'].fillna('') 238 | 239 | # Filter out unwanted transactions 240 | self.transactions_df = self.transactions_df[ 241 | self.transactions_df['items'].apply(self.should_include_transaction) 242 | ] 243 | 244 | # Prepare prompt for ChatGPT 245 | categories_str = json.dumps(list(self.category_mapping.keys()), indent=2) 246 | items_list = self.transactions_df['items'].tolist() 247 | prompt = f"""I have a list of Amazon purchase items and a set of categories. 248 | Please categorize each item into the most appropriate category. 249 | 250 | IMPORTANT RULES: 251 | 1. The 'Groceries' category applies to Huel, household toiletries and cleaning supplies. 252 | 2. If no category fits, use 'Other' 253 | 3. Do not use the 'Restaurants' or 'Laundry' categories. These are items ordered from Amazon. 254 | 255 | Available categories: 256 | {categories_str} 257 | 258 | Items to categorize: 259 | {json.dumps(items_list, indent=2)} 260 | 261 | Please respond with a JSON array where each element is the category name for the corresponding item in the list above. 262 | Only use the exact category names from the provided categories. 
263 | """ 264 | 265 | # Get categorizations from ChatGPT 266 | try: 267 | response = chatgpt.query(prompt) 268 | 269 | # Strip any markdown formatting from the response 270 | response = response.strip() 271 | if response.startswith('```json'): 272 | response = response[7:] 273 | if response.startswith('```'): 274 | response = response[3:] 275 | if response.endswith('```'): 276 | response = response[:-3] 277 | response = response.strip() 278 | 279 | categories = json.loads(response) 280 | self.transactions_df['Category'] = categories 281 | except Exception as e: 282 | print(f"Error getting categorizations from ChatGPT: {e}") 283 | # Fall back to local categorization if ChatGPT fails 284 | self.transactions_df['Category'] = self.transactions_df['items'].apply(self.categorize_transaction) 285 | 286 | # Add source column 287 | self.transactions_df['Source'] = source 288 | 289 | # Return only the columns we need 290 | return self.transactions_df.loc[:, ['Datetime', 291 | 'Category', 'Adjusted Amount', 'items', 'Source']] 292 | 293 | 294 | class SchwabParser(BaseParser): 295 | """ 296 | Parses Schwab transactions from a CSV file. 297 | """ 298 | def __init__(self, file_path: str, category_file: Path): 299 | super().__init__(file_path, category_file) 300 | self.transactions_df = pd.DataFrame() 301 | 302 | def load_transactions(self) -> None: 303 | """Loads Schwab transactions from the CSV file.""" 304 | try: 305 | # Read the file line by line and split by tabs 306 | with open(self.file_path, 'r', encoding='utf-8') as file: 307 | lines = file.readlines() 308 | 309 | # Process each line 310 | data = [] 311 | for line in lines: 312 | # Split by tab and clean up empty strings 313 | parts = [part.strip() for part in line.split('\t')] 314 | 315 | if len(parts) >= 6: # Ensure we have all required columns 316 | # Determine if this is a credit transaction 317 | is_credit = ( 318 | parts[1] in ['CREDIT', 'ATMREBATE', 'INTADJUST'] or # Direct credit types 319 | ' IN ' in parts[3] or # Description contains "IN" 320 | 'CASHOUT' in parts[3] # Description contains "CASHOUT" 321 | ) 322 | 323 | # For credits, use the Balance column (parts[5]) 324 | # For debits, use the Amount column (parts[4]) 325 | amount = parts[5] if is_credit else parts[4] 326 | 327 | data.append({ 328 | 'Date': parts[0], 329 | 'Type': parts[1], 330 | 'Check': parts[2], 331 | 'Description': parts[3], 332 | 'Amount': amount, 333 | 'Balance': parts[5], 334 | 'IsCredit': is_credit 335 | }) 336 | 337 | if not data: 338 | print("No valid transactions found in the file!") 339 | return 340 | 341 | self.transactions_df = pd.DataFrame(data) 342 | print("Schwab CSV file successfully loaded!") 343 | except Exception as e: # pylint: disable=broad-except 344 | print(f"Error loading Schwab CSV file: {e}") 345 | exit() 346 | 347 | def process_transactions(self, source: str = "Schwab") -> pd.DataFrame: 348 | """Processes Schwab transactions to categorize and clean amounts.""" 349 | # Convert date string to datetime 350 | self.transactions_df['Datetime'] = pd.to_datetime( 351 | self.transactions_df['Date'], format='%m/%d/%Y', errors='coerce') 352 | 353 | # Clean and convert amount, making credits negative 354 | self.transactions_df['Adjusted Amount'] = self.transactions_df.apply( 355 | lambda row: -self.clean_amount(row['Amount']) if row['IsCredit'] else self.clean_amount(row['Amount']), 356 | axis=1 357 | ) 358 | 359 | # Filter out unwanted transactions 360 | self.transactions_df = self.transactions_df[ 361 | 
self.transactions_df['Description'].apply(self.should_include_transaction)
362 |         ]
363 | 
364 |         # Categorize based on Description
365 |         self.transactions_df['Category'] = \
366 |             self.transactions_df['Description'].apply(self.categorize_transaction)
367 | 
368 |         print("Schwab categorization results:")
369 |         print(self.transactions_df[['Description', 'Category', 'Adjusted Amount', 'Type']])
370 | 
371 |         # Add source column
372 |         self.transactions_df['Source'] = source
373 | 
374 |         # Return only the columns we need
375 |         return self.transactions_df.loc[:, ['Datetime',
376 |             'Category', 'Adjusted Amount', 'Description', 'Source']]
377 | 
378 | 
379 | def ask_for_file(file_description: str) -> str:
380 |     """Prompts the user to select a file via a file dialog."""
381 |     print(f"Please select the {file_description}.")
382 |     Tk().withdraw()
383 |     file_path = askopenfilename(filetypes=[("CSV files", "*.csv"), ("ODS files", "*.ods")])
384 |     if not file_path:
385 |         print("No file selected.")
386 |         exit()
387 |     return file_path
388 | 
389 | def update_spreadsheet_with_totals(spreadsheet_path: str, totals_df: pd.DataFrame,
390 |                                    schwab_balance: float, venmo_balance: float) -> None:
391 |     """Reads an ODS spreadsheet, allows the user to select a sheet, and updates only Column C."""
392 |     # Open the spreadsheet
393 |     doc = ezodf.opendoc(spreadsheet_path)
394 |     sheet_names = [sheet.name for sheet in doc.sheets]
395 | 
396 |     # Display available sheets
397 |     print("Available sheets:")
398 |     for i, sheet in enumerate(sheet_names, start=1):
399 |         print(f"{i}. {sheet}")
400 | 
401 |     # Ask user to select a sheet
402 |     selected_index = int(input(f"Select a sheet (1-{len(sheet_names)}): ")) - 1
403 |     if selected_index < 0 or selected_index >= len(sheet_names):
404 |         print("Invalid sheet selection.")
405 |         return
406 | 
407 |     selected_sheet = doc.sheets[selected_index]
408 | 
409 |     # Map totals to their respective categories
410 |     unmatched_categories = []
411 |     for _, row in totals_df.iterrows():
412 |         category = row['Category']
413 |         total = row['Adjusted Amount']
414 |         matched = False
415 | 
416 |         # Iterate over rows in the selected sheet
417 |         for row_idx in range(1, selected_sheet.nrows()):  # Skip the header
418 |             cell_value = selected_sheet[row_idx, 0].value  # Column A
419 |             if isinstance(cell_value, str):
420 |                 # Write the total to Column C
421 |                 if cell_value.strip().lower() == category.lower():
422 |                     selected_sheet[row_idx, 2].set_value(total)
423 |                     matched = True
424 |                     break
425 |                 # Update Schwab balance
426 |                 elif cell_value.strip().lower() == "schwab checking":
427 |                     selected_sheet[row_idx, 3].set_value(schwab_balance)
428 |                     # (a balance row is not a category match; leave 'matched' untouched)
429 |                 # Update Venmo balance
430 |                 elif cell_value.strip().lower().startswith("venmo"):
431 |                     selected_sheet[row_idx, 3].set_value(venmo_balance)
432 |                     # (a balance row is not a category match; leave 'matched' untouched)
433 | 
434 |         if not matched:
435 |             unmatched_categories.append(category)
436 | 
437 |     # Save the updated document
438 |     doc.save()
439 |     print(f"Spreadsheet updated successfully: {spreadsheet_path}")
440 | 
441 |     # Print unmatched categories
442 |     if unmatched_categories:
443 |         print("\nUnmatched categories:")
444 |         print("\n".join(unmatched_categories))
445 |     else:
446 |         print("\nAll categories matched successfully.")
447 | 
448 | def get_default_spreadsheet_path() -> str:
449 |     """Returns the default spreadsheet path with the current year."""
450 |     current_year = datetime.now().year
451 |     return str(Path.home() / f"syncthing/documents/spreadsheets/budget/Budget {current_year}.ods")
452 | 
453 | def main() -> None:
| """Main function to handle argument parsing and execution.""" 455 | try: 456 | parser = argparse.ArgumentParser( 457 | description="Parse and categorize transactions from relevant CSV files.") 458 | parser.add_argument("-spreadsheet", 459 | type=str, help="Path to the spreadsheet file", required=False) 460 | args = parser.parse_args() 461 | 462 | # Enable autocompletion 463 | argcomplete.autocomplete(parser) 464 | 465 | # Set the path for the category JSON file 466 | categories_file_path = Path.home() / "syncthing/md/docs/selfhosted/transaction_categories.json" 467 | 468 | # Find files in Downloads or fall back to file browser 469 | venmo_file_path = find_latest_file_in_downloads("VenmoStatement*.csv") or ask_for_file("Venmo transactions CSV") 470 | citi_file_path = find_latest_file_in_downloads("Year to date.csv") or ask_for_file("Citi transactions CSV") 471 | amazon_file_path = find_latest_file_in_downloads("amazon_order_history.csv") or ask_for_file("Amazon transactions CSV") 472 | schwab_file_path = find_latest_file_in_downloads("schwab.csv") or ask_for_file("Schwab transactions CSV") 473 | spreadsheet_path = args.spreadsheet or get_default_spreadsheet_path() 474 | 475 | # Process Venmo transactions 476 | venmo_parser = VenmoParser(file_path=venmo_file_path, category_file=categories_file_path) 477 | venmo_parser.load_categories() 478 | venmo_parser.load_transactions() 479 | venmo_summary_df = venmo_parser.process_transactions() 480 | venmo_balance = float(venmo_summary_df['Balance'].iloc[-1]) if 'Balance' in venmo_summary_df.columns else 0.0 481 | 482 | # Process Citi transactions 483 | citi_parser = CitiParser(file_path=citi_file_path, category_file=categories_file_path) 484 | citi_parser.load_categories() 485 | citi_parser.load_transactions() 486 | citi_summary_df = citi_parser.process_transactions() 487 | 488 | # Process Amazon transactions 489 | amazon_parser = AmazonParser(file_path=amazon_file_path, category_file=categories_file_path) 490 | amazon_parser.load_categories() 491 | amazon_parser.load_transactions() 492 | amazon_summary_df = amazon_parser.process_transactions() 493 | 494 | # Process Schwab transactions 495 | schwab_parser = SchwabParser(file_path=schwab_file_path, category_file=categories_file_path) 496 | schwab_parser.load_categories() 497 | schwab_parser.load_transactions() 498 | schwab_summary_df = schwab_parser.process_transactions() 499 | schwab_balance = float(schwab_summary_df['Balance'].iloc[-1]) if 'Balance' in schwab_summary_df.columns else 0.0 500 | 501 | # Combine and sort transactions 502 | combined_df = pd.concat([ 503 | venmo_summary_df, citi_summary_df, amazon_summary_df, schwab_summary_df 504 | ]).sort_values(by=['Source', 'Category', 'Datetime']) 505 | print("\nCombined Transactions:") 506 | print(combined_df.to_string(index=False)) 507 | 508 | # Calculate totals for all transactions 509 | print("\nTotal Amounts by Category:") 510 | totals_df = combined_df.groupby('Category')['Adjusted Amount'].sum().reset_index() 511 | print(totals_df.to_string(index=False)) 512 | 513 | totals_csv = totals_df.to_csv(index=False) # Convert totals DataFrame to CSV format (no index) 514 | 515 | # Copy the CSV to the clipboard 516 | pyperclip.copy(totals_csv) 517 | print("\nThe CSV output has been copied to your clipboard!") 518 | 519 | # Update spreadsheet with totals and balances 520 | update_spreadsheet_with_totals(spreadsheet_path, totals_df, schwab_balance, venmo_balance) 521 | 522 | except KeyboardInterrupt: 523 | print("\nProgram interrupted by user. 
Exiting gracefully...") 524 | exit(0) 525 | except Exception as e: # pylint: disable=broad-except 526 | print(f"\nAn error occurred: {e}") 527 | exit(1) 528 | 529 | 530 | if __name__ == "__main__": 531 | main() 532 | -------------------------------------------------------------------------------- /foodlog/README.md: -------------------------------------------------------------------------------- 1 | # Food Log 2 | 3 | A simple command-line tool for logging food entries and tracking calories. The tool automatically classifies foods as healthy or junk and provides a visual summary of your eating habits. 4 | 5 | ## Features 6 | 7 | - Log food entries with calorie counts 8 | - Automatic food classification (healthy vs junk) 9 | - Visual summary of daily calorie intake with healthy/junk breakdown 10 | - Food lookup database to remember calorie counts 11 | - AI-powered calorie suggestions for unknown foods 12 | 13 | ## Usage 14 | 15 | ### Basic Logging 16 | ```bash 17 | foodlog "food name" calories 18 | ``` 19 | 20 | Example: 21 | ```bash 22 | foodlog "chicken salad" 540 23 | ``` 24 | 25 | ### Multiple Entries 26 | You can log multiple food entries in a single command using `//` as a separator. Each side of `//` is treated as a separate command: 27 | 28 | ```bash 29 | # Log two foods with their own calorie counts 30 | foodlog "apple" 50 // "banana" 60 31 | 32 | # Mix and match with lookup 33 | foodlog "apple" // "banana" 60 // "cookies" 34 | 35 | # Use with --yesterday (only applies to the command it's associated with) 36 | foodlog "apple" 50 // "banana" 60 --yesterday 37 | ``` 38 | 39 | ### View Today's Log 40 | ```bash 41 | foodlog 42 | ``` 43 | 44 | ### View Summary 45 | ```bash 46 | foodlog --summary 47 | ``` 48 | The summary view shows: 49 | - Food entries for the past 7 days 50 | - Total calories and percentage of healthy vs junk food 51 | - Visual bar graph where: 52 | - Bar length represents total calories (shorter = fewer calories) 53 | - Green portion represents healthy calories 54 | - Red portion represents junk calories 55 | 56 | ### Edit Food Log 57 | ```bash 58 | foodlog --edit 59 | ``` 60 | 61 | ### Log to Yesterda 62 | ```bash 63 | # in case you forgot yesterday! 64 | foodlog "chicken salad" 500 --yesterday 65 | ``` 66 | 67 | ## Data Storage 68 | 69 | - Food entries are stored in `~/.cabinet/log/food.json` 70 | - Food lookup database is stored in `~/.cabinet/log/food_lookup.json` 71 | 72 | ## Configuration 73 | 74 | The tool uses: 75 | - [Cabinet](https://www.github.com/tylerjwoodfin/cabinet). 
76 | - You can set: 77 | - `foodlog -> calorie_target`: Your daily calorie target (default: 1750) 78 | - `keys -> openai`: Your OpenAI API key for AI-powered features 79 | - [tyler-python-helpers](https://github.com/tylerjwoodfin/python-helpers) 80 | - `pipx install tyler-python-helpers` 81 | 82 | ## Notes 83 | 84 | - The tool automatically classifies foods as healthy or junk based on common nutritional knowledge 85 | - For new foods, you can use the 'ai' option to get calorie suggestions from ChatGPT 86 | - The summary view's bar graph scales based on your highest calorie day, making it easy to compare daily intake -------------------------------------------------------------------------------- /foodlog/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """A simple food logging tool that logs food entries and calorie counts.""" 4 | 5 | import json 6 | import sys 7 | import os 8 | import datetime 9 | from tyler_python_helpers import ChatGPT 10 | from prompt_toolkit import print_formatted_text, HTML 11 | from cabinet import Cabinet 12 | 13 | cabinet = Cabinet() 14 | chatgpt = ChatGPT() 15 | 16 | # define file paths 17 | LOG_DIR = cabinet.get("path", "cabinet", "log") or os.path.expanduser("~/.cabinet/log") 18 | FOOD_LOG_FILE = os.path.join(LOG_DIR, "food.json") 19 | FOOD_LOOKUP_FILE = os.path.join(LOG_DIR, "food_lookup.json") 20 | 21 | def ensure_log_directory() -> None: 22 | """create log directory if it doesn't exist.""" 23 | os.makedirs(LOG_DIR, exist_ok=True) 24 | 25 | def load_json(file_path: str) -> dict: 26 | """load json data from a file, return empty dict if file doesn't exist.""" 27 | if os.path.exists(file_path): 28 | with open(file_path, "r", encoding="utf-8") as f: 29 | return json.load(f) 30 | return {} 31 | 32 | def save_json(file_path: str, data: dict) -> None: 33 | """save data to a json file.""" 34 | with open(file_path, "w", encoding="utf-8") as f: 35 | json.dump(data, f, indent=4) 36 | 37 | def log_food(food_name: str, calories: int, is_yesterday: bool = False) -> None: 38 | """log food entry for today's date.""" 39 | ensure_log_directory() 40 | log_data = load_json(FOOD_LOG_FILE) 41 | today = datetime.date.today().isoformat() 42 | 43 | if is_yesterday: 44 | today = (datetime.date.today() - datetime.timedelta(days=1)).isoformat() 45 | 46 | if today not in log_data: 47 | log_data[today] = [] 48 | 49 | # Ensure calories is an integer 50 | if isinstance(calories, dict): 51 | calories = calories.get("calories", 0) 52 | elif isinstance(calories, str) and calories.isnumeric(): 53 | calories = int(calories) 54 | 55 | log_data[today].append({"food": food_name, "calories": calories}) 56 | save_json(FOOD_LOG_FILE, log_data) 57 | print_formatted_text(HTML( 58 | f'Logged: {food_name} ({calories} cal)')) 59 | 60 | display_today_calories() 61 | 62 | def update_food_lookup(food_name: str, calories: int) -> None: 63 | """update food lookup file with food and calorie information.""" 64 | lookup_data = load_json(FOOD_LOOKUP_FILE) 65 | 66 | if isinstance(calories, str) and calories.isnumeric(): 67 | calories = int(calories) 68 | 69 | if food_name in lookup_data: 70 | if lookup_data[food_name]["calories"] != calories: 71 | print_formatted_text(HTML( 72 | f'{food_name} has {lookup_data[food_name]["calories"]} cal.')) 73 | choice = input("Overwrite? 
(y/n): ").strip().lower() 74 | if choice != 'y': 75 | return 76 | lookup_data[food_name]["calories"] = calories 77 | else: 78 | lookup_data[food_name] = {"calories": calories, "type": "unknown"} 79 | 80 | save_json(FOOD_LOOKUP_FILE, lookup_data) 81 | 82 | def display_today_calories() -> None: 83 | """display total calorie count and entries for today.""" 84 | log_data = load_json(FOOD_LOG_FILE) 85 | today = datetime.date.today() 86 | today_str = today.isoformat() 87 | 88 | if today_str in log_data: 89 | # Format the date as "Food for Day, YYYY-MM-DD" 90 | formatted_date = f"\n{today.strftime('%a')}, {today_str}" 91 | print_formatted_text(HTML(f'{formatted_date}')) 92 | total_calories = 0 93 | 94 | # Find the maximum length of calories for alignment 95 | if log_data[today_str]: 96 | max_calories_length = max(len(str(entry["calories"])) for entry in log_data[today_str]) 97 | else: 98 | max_calories_length = 0 99 | for entry in log_data[today_str]: 100 | food = entry["food"] 101 | calories = entry["calories"] 102 | # Ensure calories is an integer 103 | if isinstance(calories, dict): 104 | calories = calories.get("calories", 0) 105 | total_calories += calories 106 | 107 | # Pad calories to maintain consistent alignment 108 | padded_calories = str(calories).ljust(4) # Use fixed width of 4 characters 109 | 110 | print_formatted_text(HTML( 111 | f'{padded_calories}cal - {food}')) 112 | 113 | calorie_target = cabinet.get("foodlog", "calorie_target") 114 | if calorie_target is None: 115 | cabinet.log("Calorie target not set, using 1750", level="warning") 116 | calorie_target = 1750 117 | 118 | # Color-code total calories. +- 150 is green, 150-300 is yellow, over 300 is red 119 | if abs(total_calories - calorie_target) <= 150: 120 | total_color = 'green' 121 | elif abs(total_calories - calorie_target) <= 300: 122 | total_color = 'yellow' 123 | else: 124 | total_color = 'red' 125 | 126 | print_formatted_text(HTML( 127 | f'\nTotal today: <{total_color}>{total_calories}')) 128 | else: 129 | print_formatted_text(HTML('No food logged for today.')) 130 | 131 | def get_calories(food_name: str, lookup_data: dict) -> int: 132 | """get calorie count for a food item, either from lookup or user input.""" 133 | if food_name in lookup_data: 134 | calories = lookup_data[food_name].get("calories") 135 | if calories is None: 136 | raise ValueError(f"No calorie data found for {food_name}") 137 | print(f"{calories} cal found for {food_name}.\n") 138 | choice = input("Use this? (y/n): ").strip().lower() 139 | if choice == 'y': 140 | return calories 141 | 142 | calories = input("Enter calorie count, or 'ai' to ask ChatGPT: ").strip() 143 | if calories == 'ai': 144 | ai_calories = query_chatgpt(food_name) 145 | print(f"\nChatGPT suggests: {ai_calories} calories") 146 | calories = input("Use this value? (y/n): ").strip().lower() 147 | if calories == 'y': 148 | return int(ai_calories) 149 | calories = input("Enter calorie count: ").strip() 150 | 151 | if not calories.isnumeric(): 152 | raise ValueError("Calorie count must be a number.") 153 | return int(calories) 154 | 155 | def query_chatgpt(food_name: str) -> str: 156 | """Query ChatGPT for the calorie count of a food item.""" 157 | query = f"What is the calorie count of {food_name}? \ 158 | Only output your best guess as a number, no other text." 
159 | return chatgpt.query(query) 160 | 161 | def classify_food(food_names: list[str]) -> dict[str, str]: 162 | """Classify multiple food items as 'junk' or 'healthy' using AI.""" 163 | 164 | # Create a prompt that lists all foods and asks for classification 165 | food_list = "\n".join([f"- {food}" for food in food_names]) 166 | prompt = f"""Classify each of these food items as either 'junk' or 'healthy'. 167 | For each item, output the food name followed by a colon and its classification. 168 | Only use the words 'junk' or 'healthy' for classification. 169 | 170 | Foods to classify: 171 | {food_list} 172 | 173 | Output format: 174 | food1: junk 175 | food2: healthy 176 | food3: junk 177 | """ 178 | 179 | response = chatgpt.query(prompt) 180 | 181 | # Parse the response into a dictionary 182 | classifications = {} 183 | for line in response.split('\n'): 184 | if ':' in line: 185 | food, classification = line.split(':', 1) 186 | classifications[food.strip()] = classification.strip().lower() 187 | 188 | return classifications 189 | 190 | def show_summary() -> None: 191 | """Display a summary of the past 7 days of food entries with AI classification.""" 192 | log_data = load_json(FOOD_LOG_FILE) 193 | lookup_data = load_json(FOOD_LOOKUP_FILE) 194 | today = datetime.date.today() 195 | 196 | print_formatted_text(HTML('Food Summary (Last 7 Days)\n')) 197 | 198 | # First, collect all unique food items from the past 7 days 199 | all_foods = set() 200 | daily_totals = {} # Store daily totals for the bar graph 201 | daily_healthy = {} # Store daily healthy calories 202 | daily_junk = {} # Store daily junk calories 203 | for i in range(7): 204 | date = today - datetime.timedelta(days=i) 205 | date_str = date.isoformat() 206 | if date_str in log_data: 207 | daily_totals[date] = sum(entry["calories"] for entry in log_data[date_str]) 208 | daily_healthy[date] = 0 209 | daily_junk[date] = 0 210 | for entry in log_data[date_str]: 211 | all_foods.add(entry["food"]) 212 | 213 | # Get classifications for all foods at once 214 | foods_to_classify = [] 215 | for food in all_foods: 216 | if food not in lookup_data or "type" not in lookup_data[food] or \ 217 | lookup_data[food]["type"] == "unknown": 218 | foods_to_classify.append(food) 219 | 220 | if foods_to_classify: 221 | classifications = classify_food(foods_to_classify) 222 | 223 | # Update the lookup file with new classifications 224 | for food, classification in classifications.items(): 225 | if food not in lookup_data: 226 | lookup_data[food] = {"calories": 0, "type": classification} 227 | else: 228 | lookup_data[food]["type"] = classification 229 | save_json(FOOD_LOOKUP_FILE, lookup_data) 230 | else: 231 | classifications = {} 232 | 233 | total_calories = 0 234 | healthy_calories = 0 235 | junk_calories = 0 236 | 237 | # Display entries in reverse chronological order 238 | for i in range(6, -1, -1): 239 | date = today - datetime.timedelta(days=i) 240 | date_str = date.isoformat() 241 | 242 | if date_str in log_data: 243 | print_formatted_text(HTML(f'\n{date.strftime("%a, %Y-%m-%d")}')) 244 | 245 | for entry in log_data[date_str]: 246 | food = entry["food"] 247 | calories = entry["calories"] 248 | total_calories += calories 249 | 250 | # Get the classification from the lookup file 251 | if food in lookup_data: 252 | classification = lookup_data[food].get("type", "unknown") 253 | else: 254 | classification = "unknown" 255 | 256 | if classification == "healthy": 257 | healthy_calories += calories 258 | daily_healthy[date] += calories 259 | food_color = 
"green" 260 | else: 261 | junk_calories += calories 262 | daily_junk[date] += calories 263 | food_color = "red" 264 | 265 | print_formatted_text(HTML( 266 | f' {calories} cal - <{food_color}>{food} ({classification})')) 267 | 268 | # Print summary statistics 269 | print_formatted_text(HTML('\nSummary Statistics:')) 270 | print_formatted_text(HTML(f' Total calories: {total_calories}')) 271 | if total_calories > 0: 272 | print_formatted_text(HTML(f' Healthy calories: {healthy_calories} ({healthy_calories/total_calories*100:.1f}%)')) 273 | print_formatted_text(HTML(f' Junk calories: {junk_calories} ({junk_calories/total_calories*100:.1f}%)')) 274 | else: 275 | print_formatted_text(HTML(' No calories logged in the past 7 days')) 276 | 277 | # Add daily totals bar graph 278 | print_formatted_text(HTML('\nDaily Calorie Totals:')) 279 | 280 | # Find the maximum calories for scaling the bar graph 281 | max_calories = max(daily_totals.values()) if daily_totals else 0 282 | bar_width = 25 # Maximum width of the bar graph in characters 283 | 284 | # Display bars in reverse chronological order 285 | for i in range(6, -1, -1): 286 | date = today - datetime.timedelta(days=i) 287 | if date in daily_totals: 288 | total = daily_totals[date] 289 | healthy = daily_healthy[date] 290 | junk = daily_junk[date] 291 | 292 | # Calculate the scaled bar length based on total calories 293 | scaled_length = int((total / max_calories) * bar_width) if max_calories > 0 else 0 294 | 295 | # Calculate the healthy/junk ratio within the scaled length 296 | healthy_length = int((healthy / total) * scaled_length) if total > 0 else 0 297 | junk_length = scaled_length - healthy_length 298 | 299 | # Create the bar with both colors 300 | bar = f'{"█" * healthy_length}{"█" * junk_length}' 301 | 302 | print_formatted_text(HTML( 303 | f' {date.strftime("%a")}: {bar} {total} cal')) 304 | 305 | def main() -> None: 306 | """parse command-line arguments and log food entry.""" 307 | is_yesterday = False 308 | 309 | if len(sys.argv) < 2: 310 | display_today_calories() 311 | sys.exit(0) 312 | 313 | if sys.argv[1] == "--edit": 314 | edit_food_json() 315 | sys.exit(0) 316 | 317 | if sys.argv[1] == "--summary": 318 | show_summary() 319 | sys.exit(0) 320 | 321 | try: 322 | # Join all arguments and split by // 323 | full_command = " ".join(sys.argv[1:]) 324 | if "//" in full_command: 325 | # Split into separate commands and process each one 326 | commands = [cmd.strip() for cmd in full_command.split("//")] 327 | for cmd in commands: 328 | # Create new sys.argv for each command 329 | cmd_args = cmd.split() 330 | # Save original sys.argv and restore it after each command 331 | original_argv = sys.argv.copy() 332 | sys.argv = [sys.argv[0]] + cmd_args 333 | try: 334 | main() 335 | finally: 336 | sys.argv = original_argv 337 | return 338 | 339 | # Check if --yesterday is present and remove it 340 | if "--yesterday" in sys.argv: 341 | is_yesterday = True 342 | sys.argv.remove("--yesterday") 343 | 344 | # Regular single command processing 345 | food_name = " ".join(sys.argv[1:-1]) 346 | calories = sys.argv[-1] 347 | 348 | # last arg is a string -> calories not set; get from lookup 349 | if isinstance(calories, str) and not calories.isnumeric(): 350 | food_name = " ".join(sys.argv[1:]) 351 | lookup_data = load_json(FOOD_LOOKUP_FILE) 352 | calories = get_calories(food_name, lookup_data) 353 | else: 354 | calories = int(calories) 355 | 356 | log_food(food_name, calories, is_yesterday) 357 | update_food_lookup(food_name, calories) 358 | 359 | except 
360 |         print_formatted_text(HTML(f'Error: {e}'))
361 |         sys.exit(1)
362 | 
363 | def edit_food_json() -> None:
364 |     """Edit the food.json file."""
365 |     os.system(f"{cabinet.editor} {FOOD_LOG_FILE}")
366 | 
367 | if __name__ == "__main__":
368 |     try:
369 |         main()
370 |     except KeyboardInterrupt:
371 |         sys.exit(0)
--------------------------------------------------------------------------------
/githooks/README.md:
--------------------------------------------------------------------------------
1 | # Githooks
2 | 
3 | - hooks run before certain Git actions are executed for my repositories
4 | 
5 | ## adding hooks to your repos with symlinks
6 | 
7 | - `ln -s /path/to/tools/githooks/pre-push .git/hooks/pre-push`
8 | - ensures Pylint is run before pushing; blocks push if lint has errors
9 | 
10 | ## apply hooks to all repos in ~/git
11 | 
12 | - run `apply_pre-push.sh`
--------------------------------------------------------------------------------
/githooks/apply_pre-push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | 
3 | # Base directory to search for Git repositories
4 | base_dir="${1:-$HOME/git}"
5 | 
6 | # Path to the pre-push hook script
7 | pre_push_script="$base_dir/tools/githooks/pre-push"
8 | 
9 | # Ensure the pre-push script exists
10 | if [ ! -f "$pre_push_script" ]; then
11 |     echo "Pre-push script not found at $pre_push_script"
12 |     exit 1
13 | fi
14 | 
15 | # Iterate over folders in the base directory
16 | find "$base_dir" -type d -name ".git" | while read -r git_dir; do
17 |     repo_dir=$(dirname "$git_dir")
18 | 
19 |     # Create the symbolic link; capture ln's status before chmod overwrites $?
20 |     ln_output=$(ln -s -f "$pre_push_script" "$git_dir/hooks/pre-push" 2>&1)
21 |     ln_status=$?
22 |     chmod +x "$git_dir/hooks/pre-push"
23 |     if [ $ln_status -eq 0 ]; then
24 |         echo "Created pre-push hook in $repo_dir"
25 |     else
26 |         echo "Failed to create pre-push hook in $repo_dir. Error: $ln_output"
27 |     fi
28 | done
--------------------------------------------------------------------------------
/githooks/pre-push:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | 
3 | # Function to run Pylint on a single Python file
4 | run_pylint() {
5 |     pylint "$1" --output-format=text --disable=R --score=False | xargs
6 | }
7 | 
8 | # Retrieve the root directory of the repository
9 | repo_root=$(git rev-parse --show-toplevel)
10 | 
11 | # Iterate over all Python files in the repository
12 | files=$(git ls-tree --full-tree -r --name-only HEAD | grep '\.py$')
13 | non_refactor_found=false
14 | 
15 | echo -e "Executing pre-push hook...\n"
16 | total_files=$(echo "$files" | wc -l)
17 | progress=0
18 | 
19 | for file in $files; do
20 |     # Run Pylint and store the output
21 |     pylint_output=$(run_pylint "$repo_root/$file")
22 | 
23 |     # Check if any non-refactor messages are present or pylint command failed
24 |     if [[ -n "$pylint_output" ]]; then
25 |         printf "Pylint found issues in $file:\n"
26 |         echo "$pylint_output\n"
27 |         non_refactor_found=true
28 |     fi
29 | 
30 |     # Update progress bar
31 |     progress=$((progress + 1))
32 |     percentage=$((progress * 100 / total_files))
33 |     printf "Progress: [%-50s] %d%%\r" "$(printf '#%.0s' $(seq 1 $((progress * 50 / total_files))))" "$percentage"
34 |     sleep 0.1  # Delay for demonstration purposes, adjust as needed
35 | done
36 | 
37 | echo  # Print a newline after the progress bar
38 | 
39 | # If non-refactor issues found, block the push
40 | if [ "$non_refactor_found" = true ]; then
41 |     echo "Please fix all issues before pushing."
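    # exit non-zero so git aborts the push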
42 |     exit 1
43 | fi
44 | 
--------------------------------------------------------------------------------
/github/README.md:
--------------------------------------------------------------------------------
1 | # GitHub
2 | 
3 | Provides recurring tasks for GitHub repositories.
4 | 
5 | ## Setup
6 | ```
7 | gh auth login
8 | ```
9 | 
10 | ## Usage
11 | ```
12 | python3 new_repo.py REPO_NAME
13 | ```
--------------------------------------------------------------------------------
/github/new_repo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | """
4 | Run for new repository creation and branch protection
5 | """
6 | 
7 | import subprocess
8 | import sys
9 | from typing import Tuple, Optional
10 | 
11 | def run_command(command: str) -> Tuple[bool, str]:
12 |     """
13 |     Execute a shell command and return its success status and output.
14 | 
15 |     Args:
16 |         command: The shell command to execute
17 | 
18 |     Returns:
19 |         Tuple containing:
20 |         - Boolean indicating if command succeeded
21 |         - String containing command output or error message
22 |     """
23 |     try:
24 |         result = subprocess.run(
25 |             command,
26 |             shell=True,
27 |             check=True,
28 |             capture_output=True,
29 |             text=True
30 |         )
31 |         return True, result.stdout.strip()
32 |     except subprocess.CalledProcessError as e:
33 |         return False, e.stderr.strip()
34 | 
35 | def repo_exists(repo_name: str) -> bool:
36 |     """
37 |     Check if a GitHub repository exists.
38 | 
39 |     Args:
40 |         repo_name: Name of the repository to check
41 | 
42 |     Returns:
43 |         Boolean indicating if repository exists
44 |     """
45 |     command = f"gh repo view tylerjwoodfin/{repo_name} --json name"
46 |     success, _ = run_command(command)
47 |     return success
48 | 
49 | def create_repo(repo_name: str) -> Tuple[bool, str]:
50 |     """
51 |     Create a new GitHub repository if it doesn't exist.
52 | 
53 |     Args:
54 |         repo_name: Name of the repository to create
55 | 
56 |     Returns:
57 |         Tuple containing:
58 |         - Boolean indicating if creation succeeded
59 |         - String containing success or error message
60 |     """
61 |     if repo_exists(repo_name):
62 |         return True, f"Repository {repo_name} already exists"
63 | 
64 |     command = f"gh repo create tylerjwoodfin/{repo_name} --confirm"
65 |     return run_command(command)
66 | 
67 | def protect_branch(repo_name: str) -> Tuple[bool, str]:
68 |     """
69 |     Apply branch protection rules to prevent direct pushes to main.
70 | 
71 |     Args:
72 |         repo_name: Name of the repository to protect
73 | 
74 |     Returns:
75 |         Tuple containing:
76 |         - Boolean indicating if protection succeeded
77 |         - String containing success or error message
78 |     """
79 |     command = f"""
80 |     gh api \
81 |         --method PUT \
82 |         --header "Accept: application/vnd.github+json" \
83 |         "/repos/tylerjwoodfin/{repo_name}/branches/main/protection" \
84 |         --field required_status_checks=null \
85 |         --field required_pull_request_reviews=null \
86 |         --field enforce_admins=true \
87 |         --field restrictions=null \
88 |         --field allow_force_pushes=false \
89 |         --field block_creations=false
90 |     """
91 |     return run_command(command)
92 | 
93 | def initialize_main_branch(repo_name: str) -> Tuple[bool, str]:
94 |     """
95 |     Initialize the main branch with a README file.
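    Works in a throwaway /tmp clone: git init, commit a README, push to main, then clean up.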
96 | 
97 |     Args:
98 |         repo_name: Name of the repository to initialize
99 | 
100 |     Returns:
101 |         Tuple containing:
102 |         - Boolean indicating if initialization succeeded
103 |         - String containing success or error message
104 |     """
105 |     # Create a temporary directory for the repo
106 |     commands = [
107 |         f"rm -rf /tmp/{repo_name}",  # Clean up any existing directory
108 |         f"mkdir -p /tmp/{repo_name}",
109 |         f"cd /tmp/{repo_name}",
110 |         "git init",
111 |         "git branch -M main",  # ensure the branch is 'main' regardless of init.defaultBranch
112 |         f"echo '# {repo_name}' > README.md",
113 |         "git add README.md",
114 |         'git commit -m "Initial commit"',
115 |         f"git remote add origin https://github.com/tylerjwoodfin/{repo_name}.git",
116 |         "git push -u origin main",
117 |         f"rm -rf /tmp/{repo_name}"  # Clean up
118 |     ]
119 | 
120 |     command = " && ".join(commands)
121 |     return run_command(command)
122 | 
123 | def main(repo_name: Optional[str] = None) -> int:
124 |     """
125 |     Main function to create and protect a GitHub repository.
126 | 
127 |     Args:
128 |         repo_name: Optional name of repository (will use sys.argv[1] if not provided)
129 | 
130 |     Returns:
131 |         Exit code (0 for success, 1 for failure)
132 |     """
133 |     # get repo name from argument if not provided
134 |     if not repo_name:
135 |         if len(sys.argv) != 2:
136 |             print("Usage: python script.py REPO_NAME")
137 |             return 1
138 |         repo_name = sys.argv[1]
139 | 
140 |     # create repo if it doesn't exist
141 |     success, message = create_repo(repo_name)
142 |     print(message)
143 |     if not success:
144 |         return 1
145 | 
146 |     # initialize main branch if repo was just created
147 |     if "already exists" not in message:
148 |         success, init_message = initialize_main_branch(repo_name)
149 |         if not success:
150 |             print(f"Failed to initialize main branch: {init_message}")
151 |             return 1
152 |         print("Successfully initialized main branch")
153 | 
154 |     # apply branch protection
155 |     success, message = protect_branch(repo_name)
156 |     if success:
157 |         print(f"Successfully protected main branch for {repo_name}")
158 |     else:
159 |         print(f"Failed to protect main branch for {repo_name}")
160 |         print(f"Error: {message}")
161 |         return 1
162 | 
163 |     return 0
164 | 
165 | if __name__ == "__main__":
166 |     sys.exit(main())
--------------------------------------------------------------------------------
/immich/Immich.shortcut:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tylerjwoodfin/tools/0dba22dd0a68db30d0eb16e7b53474f9b121ae4c/immich/Immich.shortcut
--------------------------------------------------------------------------------
/immich/immich-wrapper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | 
3 | # load zsh-specific environment
4 | if [[ -f $HOME/.zshrc ]]; then
5 |     source $HOME/.zshrc
6 | fi
7 | 
8 | # load nvm
9 | export NVM_DIR="$HOME/.nvm"
10 | [[ -s "$NVM_DIR/nvm.sh" ]] && source "$NVM_DIR/nvm.sh"
11 | 
12 | # set PATH explicitly
13 | export PATH="/usr/local/bin:/usr/bin:/bin:$HOME/.nvm/versions/node/v20.18.0/bin:$PATH"
14 | 
15 | # use nvm to set Node version
16 | nvm use 20
17 | 
18 | # call immich
19 | /usr/local/bin/immich "$@"
--------------------------------------------------------------------------------
/immich/immich.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | 
3 | # This script watches a directory for new files and uploads them to Immich
4 | # using the immich CLI tool.
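#
# Rough flow (using the paths configured below): inotifywait blocks until a new
# file lands in the upload queue, then the wrapper effectively runs:
#   immich upload "$HOME/syncthing/photos/upload-queue" --delete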
5 | 6 | # Requirements: 7 | # - immich CLI tool 8 | # - inotify-tools 9 | # - nvm 10 | # - Node.js v20.18.0 11 | # - zsh 12 | # - cabinet - https://github.com/tylerjwoodfin/cabinet 13 | 14 | # upload new media to immich 15 | WATCH_DIR="$HOME/syncthing/photos/upload-queue" 16 | LOG_DIR="$HOME/git/log" 17 | LOG_FILE="$LOG_DIR/immich.log" 18 | IMMICH_PATH="$HOME/git/tools/immich/immich-wrapper.sh" 19 | 20 | # ensure log directory exists 21 | mkdir -p "$LOG_DIR" 22 | 23 | # load zsh-specific environment 24 | [[ -f "$HOME/.zshrc" ]] && source "$HOME/.zshrc" 25 | 26 | # load nvm and set the correct node.js version 27 | export NVM_DIR="$HOME/.nvm" 28 | [[ -s "$NVM_DIR/nvm.sh" ]] && source "$NVM_DIR/nvm.sh" 29 | export PATH="/usr/local/bin:/usr/bin:/bin:$HOME/.nvm/versions/node/v20.18.0/bin:$PATH" 30 | nvm use 20 31 | 32 | # check if immich executable is available 33 | if [[ ! -f "$IMMICH_PATH" ]]; then 34 | $HOME/.local/bin/cabinet --log "Immich not found at $IMMICH_PATH" --level "error" 35 | exit 1 36 | fi 37 | 38 | # monitor the directory for new files and run immich upload on changes 39 | inotifywait -m -e create "$WATCH_DIR" | while read path action file; do 40 | $HOME/.local/bin/cabinet --log "Immich File Detected: $file" 41 | "$IMMICH_PATH" upload "$WATCH_DIR" --delete >> "$LOG_FILE" 2>&1 42 | done 43 | -------------------------------------------------------------------------------- /lifelog/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | LifeLog - A simple tool to log certain life events. 3 | """ 4 | 5 | from datetime import datetime 6 | from cabinet import Cabinet 7 | 8 | class LifeLog: 9 | """ 10 | LifeLog class to log certain life events. 11 | """ 12 | 13 | def __init__(self): 14 | self.cabinet = Cabinet() 15 | self.path_csv: str | None = self.cabinet.get("lifelog", "file", return_type=str) 16 | self.options: list[str] | None = self.cabinet.get("lifelog", "options", return_type=list) 17 | 18 | def present_options(self) -> int: 19 | """ 20 | Present the options to the user. 21 | """ 22 | if not self.options: 23 | raise ValueError("Options are not set.") 24 | 25 | print("Choose an option:") 26 | for i, option in enumerate(self.options): 27 | print(f"{i + 1}. {option}") 28 | 29 | selected_option: int = int(input("\n")) 30 | if selected_option not in range(1, len(self.options) + 1): 31 | raise ValueError("Invalid option selected.") 32 | 33 | return selected_option 34 | 35 | def update_log(self, event: str) -> None: 36 | """ 37 | Write the event to the log file. 38 | 39 | Args: 40 | event (str): event (one of self.options) to log 41 | """ 42 | if self.options and event not in self.options: 43 | raise ValueError(f"Event '{event}' is not in the options.") 44 | 45 | # create the file if it doesn't exist 46 | if not self.path_csv: 47 | raise FileNotFoundError("Path to the log file is not set.") 48 | 49 | with open(self.path_csv, "a", encoding="utf-8") as file: 50 | current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 51 | file.write(f"{current_time},{event}\n") 52 | 53 | print(f"Event '{event}' logged to '{self.path_csv}'.") 54 | 55 | def main() -> None: 56 | """ 57 | Main function to run the LifeLog tool. 
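    Appends a timestamped CSV row such as "2025-06-01 08:30:00,<event>".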
58 | """ 59 | lifelog = LifeLog() 60 | event_index: int = lifelog.present_options() 61 | 62 | if not lifelog.options: 63 | raise ValueError("Options are not set.") 64 | 65 | lifelog.update_log(lifelog.options[event_index - 1]) 66 | 67 | if __name__ == "__main__": 68 | main() 69 | -------------------------------------------------------------------------------- /lofi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | command -v mpv &>/dev/null || { echo "Error: mpv is not installed." && exit 1; } 3 | 4 | lofi_json=$($HOME/.local/bin/cabinet -g lofi) 5 | 6 | declare -A urls 7 | while IFS="=" read -r key value; do 8 | urls[$key]=$value 9 | done < <(echo "$lofi_json" | jq -r 'to_entries | .[] | "\(.key)=\(.value)"') 10 | 11 | stations=(study chill game) 12 | current_station_index=1 13 | 14 | set_station() { 15 | station_name=${stations[$current_station_index]} 16 | url=${urls[$station_name]} 17 | } 18 | 19 | open_browser() { 20 | local url=$1 21 | if [[ "$OSTYPE" == "darwin"* ]]; then 22 | open "$url" 23 | elif [[ "$OSTYPE" == "linux-gnu"* ]]; then 24 | xdg-open "$url" 25 | else 26 | echo "Unsupported OS. Please open a browser and go to: $url" 27 | fi 28 | } 29 | 30 | create_input_conf() { 31 | cat < /tmp/mpv_input.conf 32 | n quit 42 33 | b quit 43 34 | q quit 35 | EOF 36 | } 37 | 38 | play_stream() { 39 | local url=$2 40 | echo "Playing ${station_name} - $url" 41 | echo "'n' - next'" 42 | echo "'b' - open in browser" 43 | echo "'q' - quit" 44 | 45 | create_input_conf 46 | 47 | mpv --no-video --really-quiet --input-conf=/tmp/mpv_input.conf "$url" 48 | } 49 | 50 | cleanup() { 51 | rm -f /tmp/mpv_input.conf 52 | echo -e "\nExiting..." 53 | exit 0 54 | } 55 | 56 | main() { 57 | trap cleanup SIGINT SIGTERM EXIT 58 | 59 | local video=false 60 | set_station "study" 61 | 62 | # Play streams in a loop 63 | while true; do 64 | station_name=${stations[$current_station_index]} 65 | url=${urls[$station_name]} 66 | play_stream "$video" "$url" 67 | exit_status=$? 68 | 69 | if [ $exit_status -eq 42 ]; then 70 | # User pressed 'n', move to next station 71 | echo "\n" 72 | current_station_index=$(( (current_station_index + 1) % ${#stations[@]} + 1)) 73 | elif [ $exit_status -eq 43 ]; then 74 | # User pressed 'b', open in browser 75 | open_browser "$url" 76 | # Don't change the station, replay the same one 77 | elif [ $exit_status -eq 0 ]; then 78 | # Normal exit (user pressed 'q') 79 | break 80 | else 81 | # Stream failed, try next 82 | echo "Failed to play $station_name stream. Trying next..." 83 | current_station_index=$(( (current_station_index + 1) % ${#stations[@]} )) 84 | fi 85 | done 86 | } 87 | 88 | main "$@" -------------------------------------------------------------------------------- /openai/README.md: -------------------------------------------------------------------------------- 1 | # OpenAI (GPT3) 2 | 3 | ## dependencies 4 | 5 | - [cabinet](https://pypi.org/project/cabinet/) 6 | - `cabinet --configure` for setup 7 | 8 | ## setup 9 | 10 | - using `cabinet`, store the API key you generated from the reference website in `keys -> openai`. 
11 | 
12 | - for example, if you have no properties in your settings.json file, it should look like:
13 | 
14 | ```
15 | {
16 |     "keys": {
17 |         "openai": "yourkeyhere"
18 |     }
19 | }
20 | ```
21 | 
22 | ## usage
23 | 
24 | - `../path/to/main.py` for an interactive cli
25 | - `../path/to/main.py do you think we're in a simulation` to send "do you think we're in a simulation" to openai using your key
26 | 
27 | ## reference
28 | 
29 | - https://beta.openai.com/docs/quickstart/build-your-application
30 | 
--------------------------------------------------------------------------------
/openai/main.py:
--------------------------------------------------------------------------------
1 | """
2 | openai
3 | 
4 | see README.md for instructions
5 | """
6 | 
7 | import os
8 | import sys
9 | import openai
10 | #pylint: disable=wrong-import-order
11 | from cabinet import Cabinet
12 | 
13 | cab = Cabinet()
14 | 
15 | openai.api_key = cab.get("keys", "openai")
16 | 
17 | 
18 | def submit(query, log="", debug=False):
19 |     """
20 |     submits `query` to openai
21 |     """
22 |     response = openai.Completion.create(
23 |         model="text-davinci-002",
24 |         prompt=f"""{log}\n{query}""",
25 |         temperature=0.6,
26 |         max_tokens=1024
27 |     )
28 | 
29 |     # debugging
30 |     if debug:
31 |         print(".......")
32 |         print(query)
33 |         print(".......")
34 |         print(response)
35 |         print(".......")
36 | 
37 |     to_return = response["choices"][0]["text"]
38 |     if "\n\n" in to_return:
39 |         to_return = to_return.split("\n\n")[1]
40 |     return to_return
41 | 
42 | 
43 | def cli():
44 |     """
45 |     a back-and-forth interaction with GPT3
46 |     """
47 | 
48 |     log = ""
49 |     print(f"""{submit("Please greet me.", "")}\n\n""")
50 | 
51 |     while True:
52 |         try:
53 |             user_input = input("> ")
54 | 
55 |             if user_input == 'clear':
56 |                 os.system('clear')
57 |                 continue  # don't send 'clear' itself to the API
58 | 
59 |             output = submit(user_input, log)
60 |             if not output:
61 |                 print("I don't have an answer for that.")
62 | 
63 |             print(f"""{output}\n\n""")
64 |             log = f"{log}\n{output}"
65 |         except KeyboardInterrupt:
66 |             try:
67 |                 sys.exit(0)
68 |             except SystemExit:
69 |                 sys.exit(0)
70 | 
71 | 
72 | if __name__ == "__main__":
73 |     if len(sys.argv) > 1:
74 |         response_simple = submit(' '.join(sys.argv[1:]), '')
75 | 
76 |         if '\n\n' in response_simple:
77 |             response_simple = "\n\n".join(response_simple.split("\n\n")[1:])
78 | 
79 |         print(response_simple)
80 |     else:
81 |         cli()
82 | 
--------------------------------------------------------------------------------
/pihole/README.md:
--------------------------------------------------------------------------------
1 | # pihole block/allow
2 | 
3 | ## motivation
4 | This script is used to enable/disable websites at predetermined times (by my crontab) to
5 | maintain a healthy relationship with screen time and avoid distracting websites at undesirable
6 | hours.
7 | 
8 | ## usage
9 | For my use case:
10 | - `zsh downtime.sh block overnight` is run at a predetermined time in the evening
11 | - `zsh downtime.sh allow overnight` is run at a predetermined time overnight such that I
12 | can wake up without restrictions.
13 | 
14 | ## explanation
15 | - `overnight` is an alias that I've set using [cabinet](https://pypi.org/project/cabinet/).
16 | 
17 | In my Cabinet file, I have something like:
18 | ```
19 | {
20 |     "path": {
21 |         "blocklist": {
22 |             "afternoon": "/path/to/syncthing/md/docs/network/pihole_blocklist_afternoon.md",
23 |             "overnight": "/path/to/syncthing/md/docs/network/pihole_blocklist_overnight.md"
24 |         }
25 |         ...
26 |     }
27 |     ...
28 | }
29 | ```
30 | - `cabinet -g path blocklist overnight` returns "/path/to/syncthing/md/docs/network/pihole_blocklist_overnight.md"
31 | - Each domain is looped through and allowed/disallowed, depending on $1.
32 | - I can expand my blocklist schedule without modifying the code by simply adding new items to Cabinet and changing the crontab.
--------------------------------------------------------------------------------
/pihole/downtime.sh:
--------------------------------------------------------------------------------
1 | #!/bin/zsh
2 | 
3 | # Function to check if the script is being run from Python, crontab, or atd
4 | check_parent_process() {
5 |     # intentionally global (not 'local'): $parent_process is reused by the allow-mode cleanup below
6 | 
7 |     # Get the parent and grandparent process names
8 |     parent_process=$(ps -o comm= -p $PPID 2>/dev/null | tr -d ' ')
9 |     grandparent_process=$(ps -o comm= -p $(ps -o ppid= -p $PPID 2>/dev/null) 2>/dev/null | tr -d ' ')
10 | 
11 |     # Check if the parent or grandparent process matches the allowed processes
12 |     case "$parent_process" in
13 |         cron|python*|atd)
14 |             return 0 # Allowed process
15 |             ;;
16 |     esac
17 | 
18 |     case "$grandparent_process" in
19 |         cron|python*|atd)
20 |             return 0 # Allowed process
21 |             ;;
22 |     esac
23 | 
24 |     return 1 # Not allowed
25 | }
26 | 
27 | # Function to clean up scheduled at jobs
28 | cleanup_scheduled_jobs() {
29 |     local script_path=$0
30 |     local mode=$2
31 | 
32 |     # List all pending at jobs
33 |     atq | while read job_number rest; do
34 |         # Check if this job contains our script and the reblock command
35 |         at -c "$job_number" | grep -q "$script_path.*block.*$mode" && {
36 |             atrm "$job_number"
37 |             /home/tyler/.local/bin/cabinet --log "Removed scheduled job #$job_number for $mode"
38 |         }
39 |     done
40 | }
41 | 
42 | # Check if the script is being run from Python, crontab, or atd
43 | check_parent_process
44 | if [ $? -ne 0 ] && [ "$1" != "block" ]; then
45 |     echo "This script can only be run from a Python script, crontab, or atd."
46 |     exit 1
47 | fi
48 | 
49 | # Ensure the second argument is provided
50 | if [[ -z "$2" ]]; then
51 |     echo "Error: Missing argument. Please provide the second argument."
52 |     exit 1
53 | fi
54 | 
55 | echo "starting"
56 | 
57 | # If running in allow mode from crontab, clean up scheduled jobs
58 | if [[ "$1" == "allow" && "$parent_process" == "cron" ]]; then
59 |     echo "Cleaning up scheduled jobs..."
60 |     cleanup_scheduled_jobs "$@"
61 | fi
62 | 
63 | home_directory=$(eval echo ~$USER)
64 | 
65 | # $2 can be 'afternoon' or 'overnight', etc.
to read the corresponding property 66 | blocklist_file=$(/home/tyler/.local/bin/cabinet -g path blocklist "$2") 67 | 68 | # Replace $HOME and ~ with the actual home directory path 69 | blocklist_file=$(echo "${blocklist_file}" | sed "s|~|$home_directory|g" | sed "s|\$HOME|$home_directory|g") 70 | 71 | echo "blocklist_file = '${blocklist_file}'" 72 | 73 | if [[ -z "${blocklist_file}" ]]; then 74 | echo "Error: blocklist_file (cabinet -g path blocklist $2) is empty" 75 | exit 1 76 | fi 77 | 78 | # Properly read lines into an array 79 | blocklist_domains=("${(@f)$(cat "${blocklist_file}")}") 80 | 81 | verify_command=(docker exec pihole pihole -q) 82 | 83 | if [[ "$1" == "allow" ]]; then 84 | for domain in $blocklist_domains; do 85 | echo "Unblocking: $domain" 86 | docker exec pihole pihole --regex -d "$domain" 87 | docker exec pihole pihole --wild -d "$domain" 88 | done 89 | elif [[ "$1" == "block" ]]; then 90 | for domain in $blocklist_domains; do 91 | echo "Blocking: $domain" 92 | docker exec pihole pihole --wild "$domain" 93 | done 94 | else 95 | echo "Invalid argument: $1. Use 'allow' or 'block'." 96 | exit 1 97 | fi 98 | 99 | echo "done" 100 | -------------------------------------------------------------------------------- /pihole/one_more_hour.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | This script allows me one hour of unblocking my "distraction list" 4 | in Pihole. After 1 hour, it reintroduces the block. 5 | """ 6 | 7 | import re 8 | import argparse 9 | import subprocess 10 | from datetime import datetime, timedelta 11 | from cabinet import Cabinet 12 | 13 | # Define common variables 14 | SCRIPT_PATH = "/home/tyler/git/tools/pihole/downtime.sh" 15 | CMD_UNBLOCK = f"zsh {SCRIPT_PATH} allow afternoon" 16 | CMD_REBLOCK = f"zsh {SCRIPT_PATH} block afternoon" 17 | 18 | cabinet = Cabinet() 19 | times_used = cabinet.get("pihole", "times_unblocked") or 0 20 | 21 | def execute_command(command): 22 | """ 23 | Execute a specified shell command. 24 | 25 | Args: 26 | command (str): A command string to be executed in the shell environment. 27 | """ 28 | subprocess.run(command, shell=True, check=True) 29 | 30 | def schedule_commands(): 31 | """ 32 | Schedule commands using the `at` command. This sets up two commands: 33 | one to execute immediately and another to execute one hour later. 
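    The at job ID is parsed from the command's output and stored in Cabinet so
    that `one-more-hour end` (see reblock below) can cancel it early.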
34 | """ 35 | # Unblock immediately 36 | execute_command(CMD_UNBLOCK) 37 | 38 | # Schedule re-block in 1 hour 39 | reblock_time = datetime.now() + timedelta(hours=1) 40 | at_command = f"echo '{CMD_REBLOCK}' | at {reblock_time.strftime('%H:%M')}" 41 | 42 | # Log Times Used 43 | cabinet.put("pihole", "times_unblocked", times_used + 1) 44 | 45 | # Capture the at job ID 46 | result = subprocess.run(at_command, shell=True, check=True, capture_output=True, text=True) 47 | 48 | # Combine stdout and stderr 49 | job_str = result.stdout.strip() or result.stderr.strip() 50 | 51 | # Use regex to extract the job ID (first number in the output) 52 | match = re.search(r'job (\d+) ', job_str) 53 | if match: 54 | job_id = match.group(1) # Extract just the job number 55 | cabinet.put("pihole", "scheduled_reblock_job", job_id) 56 | cabinet.update_cache() 57 | print(f"Scheduled re-block with job ID: {job_id}") 58 | else: 59 | print("Failed to extract job ID.") 60 | 61 | # Store the job ID 62 | cabinet.put("pihole", "scheduled_reblock_job", job_id) 63 | cabinet.update_cache() 64 | 65 | print(f"Fine, unblocking, but you've used this {times_used} times before.") 66 | print("Sleep is important!") 67 | print("\n\nRun 'one-more-hour end' to end the unblock early.") 68 | 69 | 70 | def reblock(): 71 | """ 72 | Immediately re-block and cancel the scheduled re-block. 73 | """ 74 | # Retrieve and cancel the pending at job if it exists 75 | job_id = cabinet.get("pihole", "scheduled_reblock_job") 76 | if job_id: 77 | execute_command(f"atrm {job_id}") 78 | cabinet.remove("pihole", "scheduled_reblock_job") 79 | cabinet.update_cache() 80 | 81 | # Execute the re-block command immediately 82 | execute_command(CMD_REBLOCK) 83 | 84 | print("\nThank you. Get some rest, please.") 85 | 86 | # syntax in crontab to reblock: 87 | # 00 13 * * 1-5 zsh -c "atrm $(atq | awk '{print $1}') 2>/dev/null; \ 88 | # zsh $HOME/git/tools/pihole/downtime.sh allow afternoon" 89 | 90 | if __name__ == '__main__': 91 | parser = argparse.ArgumentParser(description="Manage Pihole unblock scheduling.") 92 | parser.add_argument('action', nargs='?', choices=['end'], help="Action to perform") 93 | args = parser.parse_args() 94 | 95 | if args.action == 'end': 96 | reblock() 97 | else: 98 | schedule_commands() 99 | -------------------------------------------------------------------------------- /renewCert.sh: -------------------------------------------------------------------------------- 1 | echo "1" | sudo certbot certonly --standalone -d tyler.cloud -d www.tyler.cloud 2 | -------------------------------------------------------------------------------- /shorten.py: -------------------------------------------------------------------------------- 1 | """ 2 | Used by tyler.cloud to provide a faster URL shortener 3 | 4 | If this is stored in a web server, one could store this in cabinet -> shorten_ssh: 5 | "ssh -oHostKeyAlgorithms=+ssh-dss -p {portNumber} {username}@{server} " 6 | """ 7 | 8 | import sys 9 | import random 10 | import string 11 | from os import system 12 | from cabinet import Cabinet 13 | 14 | cab = Cabinet() 15 | 16 | def get_url(): 17 | """ 18 | generates a random string of 5 characters 19 | """ 20 | return ''.join(random.choices(string.ascii_lowercase + 21 | string.ascii_uppercase + string.digits, k=5)) 22 | 23 | 24 | if len(sys.argv) < 2: 25 | print("Error- missing url; usage: `shorten url`") 26 | sys.exit(-1) 27 | 28 | if not sys.argv[1].startswith('http'): 29 | print("Error- make sure to provide the complete URL.") 30 | sys.exit(-1) 31 | 32 
| DIRECTORY = get_url() 33 | system((f"""{cab.get('shorten_ssh')} "echo '\nRewriteCond""" 34 | f""" %{{REQUEST_URI}} ^/u/{DIRECTORY}.*\nRewriteRule (.*)""" 35 | f""" {sys.argv[1]}' >> www/.htaccess" """)) 36 | print(f"https://tyler.cloud/u/{DIRECTORY}") 37 | -------------------------------------------------------------------------------- /spotify_analytics/.gitignore: -------------------------------------------------------------------------------- 1 | .cache -------------------------------------------------------------------------------- /spotify_analytics/README.md: -------------------------------------------------------------------------------- 1 | # spotify-analytics 2 | Checks for duplicate songs, unplayable songs, and songs missing from playlists. 3 | 4 | ## dependencies 5 | - [Spotify API access](https://stevesie.com/docs/pages/spotify-client-id-secret-developer-api) 6 | - [Cabinet](https://github.com/tylerjwoodfin/cabinet) 7 | - [Spotipy](https://spotipy.readthedocs.io) 8 | 9 | ## setup 10 | 1. `pip3 install -r requirements.md` 11 | 2. Obtain [Spotify API access](https://stevesie.com/docs/pages/spotify-client-id-secret-developer-api). Make note of the client ID and secret. 12 | 3. Install [Cabinet](https://github.com/tylerjwoodfin/cabinet) 13 | 4. Setup `Spotipy` 14 | - Configure `spotipy` in Cabinet using the `Example` below as a template. 15 | - Find your `playlist IDs` by going to Spotify, right-clicking on a playlist, then clicking "Share". 16 | - The ID is the last part of the URL, for instance: https://open.spotify.com/playlist/6hywO4jlkShcGKdTrez9yr 17 | - The first column in `playlists` is the `playlist ID`. The second column is just a label- make it anything you want. Mine are named after my playlists. 18 | 5. Adjust the code as you see fit. Your musical tastes are your own. My code is specific to my own music setup, which is: 19 | - Each new song is added to `Tyler Radio`, `Last 25 Added`, and the appropriate `genre playlist` 20 | - No song should be in multiple `genre playlists` 21 | - No song should exist in `Tyler Radio` but not in a `genre playlist` 22 | - No song should exist in a `genre playlist` but not in `Tyler Radio` 23 | - No song should exist in `Last 25 Added` but not in `Tyler Radio` 24 | - No song should exist in `Removed` and `Tyler Radio` simultaneously. 25 | 26 | ## usage 27 | ```python3 main.py``` 28 | (Note: this will take a minute. Spotipy limits you to 100 songs at a time.) 29 | 30 | ## example 31 | ```bash 32 | { 33 | "spotipy": { 34 | "playlists": [ 35 | "6oqyTmCc2uf3aTDvZRk1T2,Tyler Radio", 36 | "3ZDXHUzUcW6rLqOFpfK7QO,Last 25 Added", 37 | "09jNP5fuQesZoC7xiIj5I4,Chill", 38 | "2OFLLecfoHrlwvJtfCJQoP,Hip Hop and Rap", 39 | "3e691JWNU3anPtZgNfmFss,Party and EDM", 40 | "2Aop8CO3DC7K9qyM1WgloX,Pop", 41 | "4E8EyyhmbBUCAh9tNIYMv0,R&B", 42 | "6hywO4jlkShcGKdTrez9yr,Rock", 43 | "3zr0wmZocFR6nD6teH0dlm,Removed" 44 | ], 45 | "client_secret": "your_secret_here", 46 | "client_id": "your_id_here", 47 | "username": "your_spotify_username" 48 | } 49 | } 50 | ``` 51 | -------------------------------------------------------------------------------- /spotify_analytics/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | spotipy-analytics 3 | A tool to backup and analyze Spotify library data including title, artist, and year information. 4 | Requires spotipy library and appropriate Spotify API credentials. 
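Playlist IDs and API credentials are read from Cabinet under "spotipy"; see the README for setup.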
5 | """ 6 | 7 | import os 8 | import datetime 9 | import json 10 | from dataclasses import dataclass, asdict 11 | from typing import List, Dict, Optional 12 | from statistics import mean 13 | import logging 14 | from pathlib import Path 15 | from collections import Counter 16 | 17 | import spotipy 18 | from spotipy.oauth2 import SpotifyClientCredentials 19 | from cabinet import Cabinet 20 | 21 | @dataclass 22 | class Track: 23 | """Represents a Spotify track with essential metadata.""" 24 | index: int 25 | artist: str 26 | name: str 27 | release_date: str 28 | spotify_url: str 29 | 30 | @classmethod 31 | def from_spotify_track(cls, index: int, track: Dict) -> 'Track': 32 | """Create a Track instance from Spotify API track data.""" 33 | return cls( 34 | index=index, 35 | artist=track['artists'][0]['name'], 36 | name=track['name'], 37 | release_date=str(track['album']['release_date']), 38 | spotify_url=track['external_urls']['spotify'] if not track['is_local'] else '' 39 | ) 40 | 41 | @dataclass 42 | class PlaylistData: 43 | """Represents a Spotify playlist with its tracks.""" 44 | name: str 45 | tracks: List[str] # List of Spotify URLs 46 | 47 | class SpotifyAnalyzer: 48 | """Handles Spotify playlist analysis and backup.""" 49 | 50 | def __init__(self, cabinet: Cabinet): 51 | self.cab = cabinet 52 | self.logger = self._setup_logging() 53 | self.spotify_client = self._initialize_spotify_client() 54 | self.main_tracks: List[Track] = [] 55 | self.playlist_data: List[PlaylistData] = [] 56 | self.song_years: List[int] = [] 57 | 58 | def _setup_logging(self) -> logging.Logger: 59 | """Configure logging for the application.""" 60 | logger = logging.getLogger('spotify_analyzer') 61 | logger.setLevel(logging.INFO) 62 | return logger 63 | 64 | def _initialize_spotify_client(self) -> spotipy.Spotify: 65 | """Initialize and return Spotify client with proper credentials.""" 66 | try: 67 | client_id = self.cab.get("spotipy", "client_id") 68 | client_secret = self.cab.get("spotipy", "client_secret") 69 | if client_id is None: 70 | raise ValueError("Spotify client ID is not set in cabinet") 71 | if client_secret is None: 72 | raise ValueError("Spotify client secret is not set in cabinet") 73 | os.environ['SPOTIPY_CLIENT_ID'] = client_id 74 | os.environ['SPOTIPY_CLIENT_SECRET'] = client_secret 75 | os.environ['SPOTIPY_REDIRECT_URI'] = 'http://localhost:8888' 76 | 77 | credentials_manager = SpotifyClientCredentials() 78 | return spotipy.Spotify(client_credentials_manager=credentials_manager) 79 | 80 | except Exception as e: 81 | self.cab.log(f"Failed to initialize Spotify client: {str(e)}", level="error") 82 | raise 83 | 84 | def _get_playlist(self, playlist_id: str) -> Optional[Dict]: 85 | """Fetch playlist data from Spotify.""" 86 | max_retries = 3 87 | for attempt in range(max_retries): 88 | try: 89 | return self.spotify_client.playlist(playlist_id) 90 | except Exception as e: # pylint: disable=broad-except 91 | self.cab.log(f"Attempt {attempt + 1} failed: {str(e)}", level="warning") 92 | if attempt == max_retries - 1: 93 | self.cab.log(f"Failed to fetch playlist {playlist_id} after 3 attempts", 94 | level="error") 95 | raise 96 | 97 | def _check_duplicates(self, tracks: List[str], playlist_name: str): 98 | """Check for duplicate tracks within a playlist.""" 99 | track_counts = Counter(tracks) 100 | duplicates = {track: count for track, count in track_counts.items() if count > 1} 101 | 102 | if duplicates: 103 | for track, count in duplicates.items(): 104 | self.cab.log( 105 | f"Duplicate found in 
{playlist_name}: {track} appears {count} times", 106 | level="warning" 107 | ) 108 | 109 | def _process_tracks(self, tracks: Dict, playlist_name: str, 110 | playlist_index: int, total_tracks: int) -> List[str]: 111 | """Process tracks from a playlist and return track URLs.""" 112 | track_urls = [] 113 | 114 | for _, item in enumerate(tracks['items']): 115 | track = item['track'] 116 | if not track: 117 | continue 118 | 119 | if not track['is_local']: 120 | track_urls.append(track['external_urls']['spotify']) 121 | 122 | if playlist_index == 0: # Main playlist 123 | track_obj = Track.from_spotify_track(len(self.main_tracks) + 1, track) 124 | self.main_tracks.append(track_obj) 125 | 126 | if track['album']['release_date']: 127 | try: 128 | year = int(track['album']['release_date'].split("-")[0]) 129 | self.song_years.append(year) 130 | except ValueError: 131 | self.cab.log(f"Invalid release date format for track: {track['name']}", 132 | level="debug", is_quiet=True) 133 | 134 | print(f"Processed {len(self.main_tracks)} of {total_tracks} in {playlist_name}") 135 | 136 | return track_urls 137 | 138 | def analyze_playlists(self): 139 | """Main method to analyze all configured playlists.""" 140 | playlists = self.cab.get("spotipy", "playlists") 141 | if not playlists or len(playlists) < 2: 142 | self.cab.log("Insufficient playlist configuration", level="error") 143 | raise ValueError("At least two playlists must be configured") 144 | 145 | for index, item in enumerate(playlists): 146 | if ',' not in item: 147 | continue 148 | 149 | playlist_id, playlist_name = item.split(',') 150 | self.cab.log(f"Processing playlist: {playlist_name}") 151 | 152 | playlist_data = self._get_playlist(playlist_id) 153 | if not playlist_data: 154 | continue 155 | 156 | tracks = playlist_data['tracks'] 157 | total_tracks = tracks['total'] 158 | 159 | if index == 0: 160 | self.cab.put("spotipy", "total_tracks", total_tracks) 161 | 162 | playlist_tracks = [] 163 | while True: 164 | if not tracks: 165 | self.cab.log("No tracks found in playlist", level="warning") 166 | break 167 | playlist_tracks.extend(self._process_tracks(tracks, 168 | playlist_name, 169 | index, 170 | total_tracks)) 171 | if not tracks['next']: 172 | break 173 | tracks = self.spotify_client.next(tracks) 174 | 175 | # Check for duplicates in the playlist 176 | self._check_duplicates(playlist_tracks, playlist_name) 177 | 178 | self.playlist_data.append(PlaylistData(name=playlist_name, tracks=playlist_tracks)) 179 | 180 | self._save_data() 181 | self._update_statistics() 182 | 183 | def _save_data(self): 184 | """Save processed track data to JSON file.""" 185 | log_backup_path: str = self.cab.get('path', 'cabinet', 'log-backup') or str(Path.home()) 186 | output_path = Path(log_backup_path) / "songs" 187 | output_path.mkdir(parents=True, exist_ok=True) 188 | 189 | output_file = output_path / f"{datetime.date.today()}.json" 190 | track_data = [asdict(track) for track in self.main_tracks] 191 | 192 | with open(output_file, 'w', encoding='utf-8') as f: 193 | json.dump(track_data, f, indent=2, ensure_ascii=False) 194 | 195 | self.cab.log(f"Saved track data to {output_file}") 196 | 197 | def _update_statistics(self): 198 | """Update and log statistics about the analyzed tracks.""" 199 | if self.song_years: 200 | avg_year = mean(self.song_years) 201 | self.cab.put("spotipy", "average_year", avg_year) 202 | 203 | log_path = Path(self.cab.get('path', 'log') or str(Path.home())) 204 | log_entry = f"{datetime.datetime.now().strftime('%Y-%m-%d')},{avg_year}" 205 | 
206 | self.cab.log(log_entry, log_name="SPOTIPY_AVERAGE_YEAR_LOG", 207 | log_folder_path=str(log_path)) 208 | 209 | # Get the last 3 days of data 210 | log_backup_path: str = self.cab.get('path', 'cabinet', 'log-backup') or str(Path.home()) 211 | songs_path = Path(log_backup_path) / "songs" 212 | 213 | if songs_path.exists(): 214 | # Get today and previous 2 days 215 | today = datetime.date.today() 216 | dates = [today - datetime.timedelta(days=i) for i in range(3)] 217 | avg_years = [] 218 | total_tracks = [] 219 | 220 | for date in dates: 221 | json_file = songs_path / f"{date}.json" 222 | if json_file.exists(): 223 | try: 224 | with open(json_file, 'r', encoding='utf-8') as f: 225 | data = json.load(f) 226 | if data: 227 | years = [] 228 | for track in data: 229 | release_date = track.get('release_date') 230 | if release_date and release_date != 'None': 231 | try: 232 | year = int(release_date.split('-')[0]) 233 | years.append(year) 234 | except (ValueError, AttributeError): 235 | self.cab.log(f"Invalid release date format for track in {json_file}: {release_date}", 236 | level="debug", is_quiet=True) 237 | if years: 238 | avg_years.append(mean(years)) 239 | total_tracks.append(len(data)) 240 | except (json.JSONDecodeError, KeyError, ValueError) as e: 241 | self.cab.log(f"Error reading {json_file}: {str(e)}", level="error") 242 | continue 243 | 244 | # If we have all 3 days of data and the average years are equal 245 | if len(avg_years) == 3 and len(set(avg_years)) == 1: 246 | # Check if track counts have changed 247 | if len(set(total_tracks)) > 1: 248 | self.cab.log( 249 | f"Average year ({avg_years[0]:.1f}) has remained the same for 3 days " 250 | f"while track count changed from {total_tracks[2]} to {total_tracks[0]}", 251 | level="warning" 252 | ) 253 | 254 | def validate_playlists(self): 255 | """Validate playlist contents according to business rules.""" 256 | self._validate_playlist_inclusion() 257 | self._validate_removed_tracks() 258 | self._validate_genre_assignments() 259 | 260 | def _validate_playlist_inclusion(self): 261 | """Verify that tracks from each genre playlist are in the main playlist.""" 262 | main_playlist = self.playlist_data[0] 263 | for playlist in self.playlist_data[1:8]: # Genre playlists 264 | self._check_playlist_subset(playlist, main_playlist) 265 | 266 | def _validate_removed_tracks(self): 267 | """Verify that removed tracks are not in the main playlist.""" 268 | if len(self.playlist_data) > 8: 269 | self._check_playlist_exclusion(self.playlist_data[8], self.playlist_data[0]) 270 | 271 | def _validate_genre_assignments(self): 272 | """Verify that each track appears in exactly one genre playlist.""" 273 | main_tracks = set(self.playlist_data[0].tracks) 274 | genre_assignments = {} 275 | 276 | for playlist in self.playlist_data[2:8]: # Genre playlists 277 | for track in playlist.tracks: 278 | if track in genre_assignments: 279 | genres = f"{playlist.name} and {genre_assignments[track]}" 280 | self.cab.log(f"Track {track} found in multiple genres: {genres}", 281 | level="warning") 282 | genre_assignments[track] = playlist.name 283 | 284 | for track in main_tracks: 285 | if track not in genre_assignments: 286 | self.cab.log(f"Track {track} missing genre assignment", level="warning") 287 | 288 | def _check_playlist_subset(self, subset: PlaylistData, superset: PlaylistData): 289 | """Verify that all tracks in subset appear in superset.""" 290 | missing = set(subset.tracks) - set(superset.tracks) 291 | if missing: 292 | self.cab.log(f"Tracks from {subset.name} 
missing from {superset.name}: {missing}", level="warning") 293 | 294 | def _check_playlist_exclusion(self, excluded: PlaylistData, main_playlist: PlaylistData): 295 | """Verify that no tracks from excluded appear in main.""" 296 | present = set(excluded.tracks) & set(main_playlist.tracks) 297 | if present: 298 | self.cab.log(f"Removed tracks still present in {main_playlist.name}: {present}", level="warning") 299 | 300 | def main(): 301 | """Main entry point for the script.""" 302 | cab = Cabinet() 303 | analyzer = SpotifyAnalyzer(cab) 304 | 305 | try: 306 | analyzer.analyze_playlists() 307 | analyzer.validate_playlists() 308 | except Exception as e: 309 | logging.error("Analysis failed: %s", str(e)) 310 | raise 311 | 312 | if __name__ == '__main__': 313 | main() 314 | -------------------------------------------------------------------------------- /spotify_analytics/requirements.md: -------------------------------------------------------------------------------- 1 | cabinet 2 | spotipy -------------------------------------------------------------------------------- /steps/post.php: -------------------------------------------------------------------------------- 1 | <?php 2 | // NOTE: this header is a best-guess reconstruction (the original opening lines 3 | // were garbled when the repo was archived); both file paths are placeholders. 4 | $log_file = 'steps.csv'; 5 | $syncthing_file = '/path/to/syncthing/steps.csv'; 6 | 7 | if($_SERVER['REQUEST_METHOD'] == 'POST' && isset($_POST['steps'])) { 8 | 9 | $steps = $_POST['steps']; 10 | $date = new DateTime(); 11 | $date_str = $date->format('Y-m-d'); 12 | 13 | // Check if log file exists, if not create it 14 | if(!file_exists($log_file)){ 15 | $file = fopen($log_file, 'w'); 16 | if(!$file) { 17 | echo 'Error: Unable to open log file for writing.'; 18 | exit; 19 | } 20 | fputcsv($file, ['date', 'steps']); 21 | fclose($file); 22 | } 23 | 24 | // Read the file content and check if steps are already logged for today 25 | $file = fopen($log_file, 'r'); 26 | if(!$file) { 27 | echo 'Error: Unable to open log file for reading.'; 28 | exit; 29 | } 30 | $data = []; 31 | $steps_exists_today = false; 32 | while (($line = fgetcsv($file)) !== FALSE) { 33 | if($line[0] == $date_str) { 34 | $line[1] = $steps; // update the steps 35 | $steps_exists_today = true; 36 | } 37 | $data[] = $line; 38 | } 39 | fclose($file); 40 | 41 | // Append a new row if steps are not yet logged for today 42 | if(!$steps_exists_today) { 43 | $data[] = [$date_str, $steps]; 44 | } 45 | 46 | // Rewrite the csv file 47 | $file = fopen($log_file, 'w'); 48 | if(!$file) { 49 | echo 'Error: Unable to open log file for writing.'; 50 | exit; 51 | } 52 | foreach($data as $line) { 53 | fputcsv($file, $line); 54 | } 55 | fclose($file); 56 | 57 | // After updating the $log_file, copy it to $syncthing_file 58 | if(!copy($log_file, $syncthing_file)) { 59 | echo 'Error: Unable to copy log file to Syncthing directory.'; 60 | exit; 61 | } 62 | 63 | echo 'Steps updated'; 64 | } else { 65 | echo 'Invalid request'; 66 | } 67 | ?> 68 | 69 | -------------------------------------------------------------------------------- /utils/get_largest_files.zsh: -------------------------------------------------------------------------------- 1 | #!/bin/zsh 2 | 3 | # Usage: 4 | #   get_largest_files.zsh <directory> [count] 5 | # 6 | # Description: 7 | #   Lists the largest files under the specified directory. 8 | #   <directory>  - Directory to search in (required) 9 | #   [count]      - Number of results to display (optional, default: 20) 10 | 11 | if [[ -z "$1" ]]; then 12 | echo "Usage: $0 <directory> [count]" 13 | exit 1 14 | fi 15 | 16 | DIR="$1" 17 | COUNT="${2:-20}" 18 | 19 | echo "Scanning $DIR for largest files..."
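20 | # how this works: find emits "<size-in-bytes> <path>" for every regular file, 21 | # sort -nr ranks them largest-first, head keeps the top $COUNT, and awk formats the size in MB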
22 | sudo find "$DIR" -type f -printf '%s %p\n' 2>/dev/null | \ 23 | sort -nr | \ 24 | head -n "$COUNT" | \ 25 | awk '{ mb = $1 / 1024 / 1024; sub(/^[0-9]+ /, ""); printf("%10d MB %s\n", mb, $0) }' 26 | 27 | -------------------------------------------------------------------------------- /weather.py: -------------------------------------------------------------------------------- 1 | """ 2 | Uses open-source APIs to check the weather and write to Cabinet. 3 | 4 | Learn more about Cabinet: https://github.com/tylerjwoodfin/cabinet 5 | """ 6 | 7 | import sys 8 | 9 | from datetime import datetime 10 | from typing import Tuple, Optional 11 | from pytz import timezone 12 | import requests 13 | from cabinet import Cabinet 14 | 15 | def get_sunrise_sunset(lat: float, lon: float) -> Tuple[Optional[str], Optional[str]]: 16 | """Fetch sunrise and sunset times in UTC for the given latitude and longitude.""" 17 | url_sunrise_sunset = f"https://api.sunrise-sunset.org/json?lat={lat}&lng={lon}&formatted=0" 18 | response_sunrise_sunset = requests.get(url_sunrise_sunset, timeout=10) 19 | if response_sunrise_sunset.status_code != 200: 20 | return None, None 21 | data = response_sunrise_sunset.json()['results'] 22 | return data['sunrise'], data['sunset'] 23 | 24 | def convert_to_local_time(utc_time_str: str, local_tz_str: str) -> str: 25 | """Convert a UTC time string to a local time string based on the given timezone.""" 26 | utc_time = datetime.fromisoformat(utc_time_str) 27 | local_tz = timezone(local_tz_str) 28 | local_time = utc_time.astimezone(local_tz) 29 | return local_time.strftime('%Y-%m-%d %I:%M %p') 30 | 31 | def update_weather_data(): 32 | """Fetch weather data and update the Cabinet properties with the results.""" 33 | cab = Cabinet() 34 | cab.log("Checking weather") 35 | 36 | # fetch latitude and longitude from the cabinet 37 | lat: float = cab.get("weather", "latitude", return_type=float) or -1 38 | lon: float = cab.get("weather", "longitude", return_type=float) or -1 39 | 40 | if lat == -1 or lon == -1: 41 | cab.log("Could not fetch lat/lon from Cabinet", level="error") 42 | sys.exit() 43 | 44 | # get grid points and local timezone 45 | url_request_points = f"https://api.weather.gov/points/{lat},{lon}" 46 | response_points = requests.get(url_request_points, timeout=10) 47 | if response_points.status_code != 200: 48 | print(f"Error: {response_points.json().get('detail', 'Unknown error')}") 49 | return 50 | 51 | response_points = response_points.json() 52 | grid_id: str = response_points['properties']['gridId'] 53 | grid_x: int = response_points['properties']['gridX'] 54 | grid_y: int = response_points['properties']['gridY'] 55 | local_tz: str = response_points['properties']['timeZone'] 56 | 57 | # fetch weather forecast 58 | url_forecast = f"https://api.weather.gov/gridpoints/{grid_id}/{grid_x},{grid_y}/forecast" 59 | response_forecast = requests.get(url_forecast, timeout=10) 60 | if response_forecast.status_code != 200: 61 | cab.log(f"Could not get weather: {response_forecast.json().get('detail', 'Unknown error')}", 62 | level="info") 63 | return 64 | 65 | response_forecast = response_forecast.json() 66 |
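67 | # note: api.weather.gov returns half-day forecast "periods"; this script 68 | # treats periods[0] as current conditions and periods[1] as tomorrow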
69 | # extract current weather conditions 70 | current_conditions = response_forecast['properties']['periods'][0] 71 | current_temp: int = current_conditions['temperature'] 72 | current_condition: str = current_conditions['shortForecast'] 73 | current_icon: str = current_conditions['icon'] 74 | current_humidity: Optional[int] = current_conditions.get('relativeHumidity', 75 | {}).get('value', None) 76 | 77 | # extract tomorrow's forecast (assume it is the second period in the list) 78 | forecast_tomorrow = response_forecast['properties']['periods'][1] 79 | high_temp: int = forecast_tomorrow['temperature'] 80 | short_forecast: str = forecast_tomorrow['shortForecast'] 81 | 82 | # fetch and convert sunrise and sunset times 83 | sunrise_utc, sunset_utc = get_sunrise_sunset(lat, lon) 84 | if sunrise_utc and sunset_utc: 85 | sunrise_local: str = convert_to_local_time(sunrise_utc, local_tz) 86 | sunset_local: str = convert_to_local_time(sunset_utc, local_tz) 87 | else: 88 | sunrise_local = "Unavailable" 89 | sunset_local = "Unavailable" 90 | cab.log("Unable to get sunrise/sunset data", level="error") 91 | 92 | # update cabinet properties with the results 93 | cab.put("weather", "data", "current_temperature", current_temp) 94 | cab.put("weather", "data", "current_conditions", current_condition) 95 | cab.put("weather", "data", "current_conditions_icon", current_icon) 96 | if current_humidity is not None: 97 | cab.put("weather", "data", "current_humidity", current_humidity) 98 | cab.put("weather", "data", "tomorrow_high", high_temp) 99 | cab.put("weather", "data", "tomorrow_conditions", short_forecast) 100 | cab.put("weather", "data", "tomorrow_sunrise", sunrise_local) 101 | cab.put("weather", "data", "tomorrow_sunset", sunset_local) 102 | 103 | # format output as HTML 104 | formatted_output: str = f""" 105 | <pre> 106 | High:    {high_temp}° and {short_forecast}<br> 107 | Sunrise: {sunrise_local}<br> 108 | Sunset:  {sunset_local}<br> 109 | </pre>
108 | """ 109 | 110 | # update cabinet with formatted HTML output 111 | cab.put("weather", "data", "tomorrow_formatted", formatted_output) 112 | 113 | # update the last_checked property with the current datetime 114 | last_checked: str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') 115 | cab.put("weather", "data", "last_checked", last_checked) 116 | cab.log("Checked weather successfully") 117 | 118 | update_weather_data() 119 | -------------------------------------------------------------------------------- /workout/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sends a workout description based on a workout markdown file; scheduled in crontab 3 | 4 | Format: 5 | 6 | ## DAYW 7 | ### Type 8 | 9 | - instruction 1 10 | - instruction 2 11 | 12 | ## DAYW + 1 13 | ... 14 | """ 15 | 16 | import sys 17 | import time 18 | import datetime 19 | from cabinet import Cabinet, Mail 20 | 21 | cab = Cabinet() 22 | mail = Mail() 23 | 24 | TODAY = datetime.date.today() 25 | DAY_EPOCH = int(int(time.time())/60/60/24) 26 | WORKOUT_FILE = cab.get_file_as_array( 27 | "workout.md", "notes") or "" 28 | WORKOUT_FILE_FORMATTED = '
'.join(WORKOUT_FILE) 29 | WORKOUT_TODAY = list(filter(None, WORKOUT_FILE_FORMATTED.split( 30 | "
## ")[(TODAY.weekday())+2].split("
"))) 31 | 32 | WORKOUT_MSG = '
'.join(WORKOUT_TODAY[2:]) 33 | WORKOUT_TYPE = WORKOUT_TODAY[1].replace("### ", "") 34 | 35 | cab.log("Checking workout") 36 | 37 | if TODAY.weekday() == 5: 38 | # ignore no-obligation Saturdays 39 | cab.log("Saturday - no workout to be sent") 40 | sys.exit(0) 41 | 42 | message = f"Hi Tyler,

Here's your {WORKOUT_TYPE} workout for today:

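42 | # note: the message is delivered as an HTML email, so <br> (not \n) creates line breaks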
{WORKOUT_MSG}" 43 | 44 | mail.send(f"{WORKOUT_TYPE} for {TODAY}", message) 45 | -------------------------------------------------------------------------------- /youtube/.gitignore: -------------------------------------------------------------------------------- 1 | *.mp3 2 | *.webm -------------------------------------------------------------------------------- /youtube/README.md: -------------------------------------------------------------------------------- 1 | # YouTube Downloader 2 | 3 | A simple Python script to download YouTube videos and audio using the `yt_dlp` library. 4 | 5 | Install the required packages from `requirements.md`: 6 | ```bash 7 | pip3 install -r requirements.md 8 | ``` 9 | 10 | # Usage 11 | 12 | ## Download a specific video 13 | 14 | To download a specific video or audio, specify the `audio` or `video` type and the YouTube URL: 15 | 16 | ```bash 17 | python3 main.py {audio | video} {url} [-d ] 18 | ``` 19 | 20 | Examples: 21 | ```bash 22 | python3 main.py audio https://www.youtube.com/watch?v=dQw4w9WgXcQ 23 | python3 main.py video https://www.youtube.com/watch?v=dQw4w9WgXcQ -d ~/Downloads 24 | ``` -------------------------------------------------------------------------------- /youtube/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | youtube downloader - see README.md 5 | """ 6 | import sys 7 | import os 8 | import argparse 9 | import time 10 | import yt_dlp 11 | 12 | 13 | def parse_arguments(): 14 | """parse command line arguments""" 15 | parser = argparse.ArgumentParser(description="YouTube Downloader") 16 | parser.add_argument("media_type", choices=["audio", "video"], help="Type of media to download") 17 | parser.add_argument("url", help="YouTube URL to download from") 18 | parser.add_argument("-d", "--destination", default=".", 19 | help="Destination directory for downloaded files") 20 | return parser.parse_args() 21 | 22 | 23 | def download_media(url, is_video, destination): 24 | """download media from youtube""" 25 | # set options for audio download 26 | ydl_opts = { 27 | 'format': 'mp3/bestaudio/best', 28 | 'postprocessors': [{ 29 | 'key': 'FFmpegExtractAudio', 30 | 'preferredcodec': 'mp3', 31 | }], 32 | 'outtmpl': os.path.join(destination, '%(title)s.%(ext)s'), 33 | } 34 | 35 | # use default options for video download 36 | options = ydl_opts if not is_video else \ 37 | {'outtmpl': os.path.join(destination, '%(title)s.%(ext)s')} 38 | 39 | # perform the download 40 | with yt_dlp.YoutubeDL(options) as ydl: 41 | return ydl.download([url]) 42 | 43 | 44 | def main(): 45 | """main function to run the script""" 46 | print("Hint: use `plex` instead to save as a video to Plex library") 47 | 48 | time.sleep(2) 49 | 50 | args = parse_arguments() 51 | 52 | # create destination directory if it doesn't exist 53 | os.makedirs(args.destination, exist_ok=True) 54 | 55 | # download the media 56 | error_code = download_media(args.url, args.media_type == "video", args.destination) 57 | 58 | # check for errors 59 | if error_code: 60 | print(f"An error occurred. Error code: {error_code}") 61 | sys.exit(1) 62 | 63 | print(f"Download completed successfully. 
Files saved to: {args.destination}") 64 | 65 | if __name__ == "__main__": 66 | main() 67 | -------------------------------------------------------------------------------- /youtube/requirements.md: -------------------------------------------------------------------------------- 1 | yt-dlp --------------------------------------------------------------------------------