├── src └── fixxer │ ├── themes │ ├── __init__.py │ ├── warez.css │ └── pro.css │ ├── __main__.py │ ├── __init__.py │ ├── config.py │ ├── security.py │ ├── phrases.py │ ├── vision.py │ └── engine.py ├── requirements.txt ├── CHANGELOG.md ├── .gitignore ├── pyproject.toml ├── BEGINNERS_GUIDE.md ├── README_TUI.md └── README.md /src/fixxer/themes/__init__.py: -------------------------------------------------------------------------------- 1 | """Theme assets for FIXXER TUI.""" 2 | # Empty file - exists to make themes a package for importlib.resources 3 | -------------------------------------------------------------------------------- /src/fixxer/__main__.py: -------------------------------------------------------------------------------- 1 | """Allow running as: python -m fixxer""" 2 | 3 | from .app import main 4 | 5 | if __name__ == "__main__": 6 | main() 7 | -------------------------------------------------------------------------------- /src/fixxer/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | FIXXER  - Professional-Grade Photography Workflow Automation. 
3 | "CHAOS PATCHED // LOGIC INJECTED" 4 | """ 5 | 6 | from .app import FixxerTUI, main 7 | 8 | __version__ = "1.1.0" 9 | __all__ = ["FixxerTUI", "main", "__version__"] 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | textual>=0.47.0 2 | rich>=13.7.0 3 | Pillow>=10.0.0 4 | opencv-python-headless>=4.8.0 5 | numpy>=1.24.0 6 | image-quality>=1.2.7 7 | sentence-transformers>=2.2.0 8 | scikit-learn>=1.3.0 9 | rawpy>=0.19.0 10 | ImageHash>=4.3.1 11 | ExifRead>=3.0.0 12 | requests>=2.31.0 13 | psutil>=5.9.0 14 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | All notable changes to FIXXER PRO will be documented in this file. 4 | 5 | ## [v1.1.0] - Modular Architecture Refactor 6 | 7 | ### Added 8 | - **Modular architecture** - Split monolithic engine.py into focused modules: 9 | - `config.py` - Centralized configuration and settings management 10 | - `security.py` - SHA256 hash verification and sidecar file operations 11 | - `vision.py` - AI/Ollama integration and RAW file processing 12 | - `engine.py` - Workflow orchestration (now ~800 lines shorter) 13 | - **Case-insensitive file extension matching** - Finds both `.jpg` and `.JPG` files 14 | - **Improved maintainability** - Clean separation of concerns and dependencies 15 | 16 | ### Fixed 17 | - Removed deprecated `check_dcraw()` function (replaced by rawpy) 18 | - Removed dead code (MockSessionTracker class) 19 | - Fixed config file logging to show `.fixxer.conf` instead of `.photosort.conf` 20 | - Fixed BytesIO import for burst stacking with RAW files 21 | - Fixed Easy Button (Simple Sort) workflow execution 22 | 23 | ### Changed 24 | - Version bumped to v1.1.0 across all components 25 | - Cleaned up version history from source 
files (moved to CHANGELOG.md) 26 | 27 | --- 28 | 29 | ## [v1.0] - Professional Grade Release 30 | 31 | ### Added 32 | - **SHA256 hash verification** for ALL file move operations 33 | - **JSON sidecar files** for audit trail (`.fixxer.json`) 34 | - **Halt-on-mismatch integrity protection** 35 | - **"CHAOS PATCHED // LOGIC INJECTED"** - Professional-grade reliability 36 | 37 | ### Security 38 | - Cryptographic verification prevents silent data corruption 39 | - Immutable audit trail for every file operation 40 | - Integrity checks halt workflow on any hash mismatch 41 | 42 | --- 43 | 44 | ## [v10.8] - Cross-Platform Migration 45 | 46 | ### Changed 47 | - **REMOVED macOS-only `sips` dependency completely** 48 | - `convert_raw_to_jpeg()` now uses Pillow for PPM→JPEG conversion 49 | - **100% cross-platform** (Linux, macOS, Windows with rawpy) 50 | - **Zero temp files created** (pure in-memory operation via BytesIO) 51 | - **5x smaller output files** (689KB vs 3.7MB from sips) 52 | 53 | ### Improved 54 | - RAW conversion performance 55 | - Memory efficiency with in-memory processing 56 | - Cross-platform compatibility 57 | 58 | --- 59 | 60 | ## [v10.7] - RAW Processing Fixes 61 | 62 | ### Fixed 63 | - `convert_raw_to_jpeg()` tries embedded thumbnail first (`-e` flag) 64 | - Falls back to full demosaic only if embedded thumbnail fails 65 | - Added timeouts to prevent hanging on problematic RAW files 66 | 67 | ### Improved 68 | - RAW file processing speed (embedded thumbnails are instant) 69 | - Reliability with corrupted or unusual RAW formats 70 | 71 | --- 72 | 73 | ## [v10.0] - HUD Support & Stats Tracking 74 | 75 | ### Added 76 | - `StatsTracker` class for real-time workflow progress 77 | - Thread-safe callback communication between engine and TUI 78 | - Live statistics updates (bursts, tiers, heroes, archived counts) 79 | 80 | --- 81 | 82 | ## [v7.1] - AI Vision & Phrase Library 83 | 84 | ### Added 85 | - Phrase library with 200+ motivational/humorous progress 
messages 86 | - Warez/demoscene aesthetic messaging 87 | - Anti-repetition logic for phrase rotation 88 | 89 | --- 90 | 91 | ## Earlier Versions 92 | 93 | Pre-v7.1 history available in git commit logs. 94 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # PhotoSort .gitignore 2 | # Keep your repo clean and focused on code 3 | 4 | # ============================================ 5 | # Python 6 | # ============================================ 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | *.so 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # Virtual environments 29 | venv/ 30 | env/ 31 | ENV/ 32 | .venv 33 | 34 | # ============================================ 35 | # Image Files (CRITICAL - Don't commit photos!) 
36 | # ============================================ 37 | *.jpg 38 | *.jpeg 39 | *.JPG 40 | *.JPEG 41 | *.png 42 | *.PNG 43 | *.gif 44 | *.GIF 45 | *.bmp 46 | *.BMP 47 | *.tiff 48 | *.tif 49 | *.TIFF 50 | *.TIF 51 | *.webp 52 | *.WEBP 53 | 54 | # RAW formats 55 | *.raw 56 | *.RAW 57 | *.rw2 58 | *.RW2 59 | *.cr2 60 | *.CR2 61 | *.cr3 62 | *.CR3 63 | *.nef 64 | *.NEF 65 | *.arw 66 | *.ARW 67 | *.dng 68 | *.DNG 69 | *.orf 70 | *.ORF 71 | *.raf 72 | *.RAF 73 | *.pef 74 | *.PEF 75 | *.srw 76 | *.SRW 77 | 78 | # Video files 79 | *.mp4 80 | *.MP4 81 | *.mov 82 | *.MOV 83 | *.avi 84 | *.AVI 85 | *.mkv 86 | *.MKV 87 | 88 | # ============================================ 89 | # PhotoSort Specific 90 | # ============================================ 91 | # Test directories 92 | Testing/ 93 | test_images/ 94 | sample_photos/ 95 | staging/ 96 | 97 | # Output directories (generated during workflow) 98 | _Keepers/ 99 | _Review_Maybe/ 100 | _Review_Duds/ 101 | _Bursts/ 102 | old-desktop-archive/ 103 | 104 | # Session files 105 | *_exif_summary_*.json 106 | *_cull_log_*.json 107 | *.sidecar.json 108 | 109 | # Logs 110 | *.log 111 | photosort.log 112 | 113 | # ============================================ 114 | # AI/ML Model Caches 115 | # ============================================ 116 | .cache/ 117 | *.pt 118 | *.pth 119 | *.onnx 120 | sentence_transformers/ 121 | models/ 122 | 123 | # Ollama (if you ever store local models) 124 | ollama_models/ 125 | 126 | # ============================================ 127 | # MacOS 128 | # ============================================ 129 | .DS_Store 130 | .AppleDouble 131 | .LSOverride 132 | ._* 133 | .DocumentRevisions-V100 134 | .fseventsd 135 | .Spotlight-V100 136 | .TemporaryItems 137 | .Trashes 138 | .VolumeIcon.icns 139 | .com.apple.timemachine.donotpresent 140 | 141 | # ============================================ 142 | # IDEs and Editors 143 | # ============================================ 144 | .vscode/ 145 | .idea/ 146 | *.swp 147 | 
*.swo 148 | *~ 149 | .project 150 | .pydevproject 151 | .settings/ 152 | 153 | # ============================================ 154 | # OS 155 | # ============================================ 156 | Thumbs.db 157 | ehthumbs.db 158 | Desktop.ini 159 | $RECYCLE.BIN/ 160 | 161 | # ============================================ 162 | # Backups 163 | # ============================================ 164 | *.bak 165 | *.backup 166 | *_backup.py 167 | *_old.py 168 | photosort_v7*.py 169 | photosort_v8*.py 170 | 171 | # ============================================ 172 | # Documentation (if you want to keep docs private) 173 | # ============================================ 174 | # Uncomment if you don't want to share internal docs 175 | # private_notes.md 176 | # TODO_internal.md 177 | 178 | # ============================================ 179 | # Configuration (IMPORTANT!) 180 | # ============================================ 181 | # Only ignore your personal config, not the template 182 | .photosort.conf 183 | config_personal.ini 184 | 185 | # Keep the template in the repo 186 | !photosort.conf.template 187 | 188 | # ============================================ 189 | # Keep These! 190 | # ============================================ 191 | # Use ! 
to explicitly track files that might be ignored 192 | !README.md 193 | !LICENSE 194 | !requirements.txt 195 | !setup.py 196 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "fixxer" 7 | version = "1.1.0" 8 | description = "Professional-grade photography workflow automation with AI vision and cryptographic integrity verification" 9 | readme = "README.md" 10 | requires-python = ">=3.10" 11 | license = {text = "MIT"} 12 | keywords = ["photography", "workflow", "automation", "ai", "vision", "raw", "photo-management"] 13 | authors = [ 14 | {name = "BandwagonVibes"} 15 | ] 16 | classifiers = [ 17 | "Development Status :: 5 - Production/Stable", 18 | "Intended Audience :: End Users/Desktop", 19 | "Topic :: Multimedia :: Graphics", 20 | "Topic :: Multimedia :: Graphics :: Graphics Conversion", 21 | "License :: OSI Approved :: MIT License", 22 | "Programming Language :: Python :: 3", 23 | "Programming Language :: Python :: 3.10", 24 | "Programming Language :: Python :: 3.11", 25 | "Programming Language :: Python :: 3.12", 26 | "Operating System :: OS Independent", 27 | "Environment :: Console", 28 | ] 29 | 30 | dependencies = [ 31 | # Core TUI Framework 32 | "textual>=0.47.0", 33 | "rich>=13.7.0", 34 | 35 | # Image Processing (Core) 36 | "Pillow>=10.0.0", 37 | "opencv-python-headless>=4.8.0", 38 | "numpy>=1.24.0", 39 | 40 | # RAW File Support 41 | "rawpy>=0.19.0", 42 | 43 | # Perceptual Hashing (Burst Detection) 44 | "ImageHash>=4.3.1", 45 | 46 | # EXIF Data 47 | "ExifRead>=3.0.0", 48 | 49 | # HTTP Requests (Ollama API) 50 | "requests>=2.31.0", 51 | 52 | # System Monitoring 53 | "psutil>=5.9.0", 54 | 55 | # MOVED FROM OPTIONAL TO MAIN: 56 | # Advanced Quality Assessment (BRISQUE) 57 | "image-quality>=1.2.7", 
58 | # Advanced Burst Detection (CLIP) 59 | "sentence-transformers>=2.2.0", 60 | "scikit-learn>=1.3.0", 61 | ] 62 | 63 | [project.optional-dependencies] 64 | 65 | # Development tools 66 | dev = [ 67 | "pytest>=7.4.0", 68 | "black>=23.0.0", 69 | "flake8>=6.0.0", 70 | "mypy>=1.5.0", 71 | ] 72 | 73 | [project.urls] 74 | Homepage = "https://github.com/BandwagonVibes/fixxer" 75 | Documentation = "https://github.com/BandwagonVibes/fixxer#readme" 76 | Repository = "https://github.com/BandwagonVibes/fixxer.git" 77 | "Bug Reports" = "https://github.com/BandwagonVibes/fixxer/issues" 78 | Changelog = "https://github.com/BandwagonVibes/fixxer/releases" 79 | 80 | [project.scripts] 81 | # This creates the magic 'fixxer' command in the user's bin folder 82 | fixxer = "fixxer.app:main" 83 | 84 | [tool.setuptools] 85 | package-dir = {"" = "src"} 86 | packages = ["fixxer", "fixxer.themes"] 87 | 88 | [tool.setuptools.package-data] 89 | fixxer = ["themes/*.css"] 90 | 91 | # Black code formatter configuration 92 | [tool.black] 93 | line-length = 100 94 | target-version = ['py310', 'py311', 'py312'] 95 | include = '\.pyi?$' 96 | extend-exclude = ''' 97 | /( 98 | # directories 99 | \.eggs 100 | | \.git 101 | | \.hg 102 | | \.mypy_cache 103 | | \.tox 104 | | \.venv 105 | | build 106 | | dist 107 | )/ 108 | ''' 109 | 110 | # MyPy type checker configuration 111 | [tool.mypy] 112 | python_version = "3.10" 113 | warn_return_any = true 114 | warn_unused_configs = true 115 | disallow_untyped_defs = false 116 | ignore_missing_imports = true 117 | 118 | # Pytest configuration 119 | [tool.pytest.ini_options] 120 | minversion = "7.0" 121 | addopts = "-ra -q" 122 | testpaths = [ 123 | "tests", 124 | ] 125 | 126 | # Coverage configuration 127 | [tool.coverage.run] 128 | source = ["src/fixxer"] 129 | omit = [ 130 | "*/tests/*", 131 | "*/venv/*", 132 | ] 133 | 134 | [tool.coverage.report] 135 | exclude_lines = [ 136 | "pragma: no cover", 137 | "def __repr__", 138 | "raise AssertionError", 139 | "raise 
NotImplementedError", 140 | "if __name__ == .__main__.:", 141 | "if TYPE_CHECKING:", 142 | ] 143 | 144 | # Ruff linter configuration (modern alternative to flake8) 145 | [tool.ruff] 146 | line-length = 100 147 | target-version = "py310" 148 | select = [ 149 | "E", # pycodestyle errors 150 | "W", # pycodestyle warnings 151 | "F", # pyflakes 152 | "I", # isort 153 | "B", # flake8-bugbear 154 | "C4", # flake8-comprehensions 155 | "UP", # pyupgrade 156 | ] 157 | ignore = [ 158 | "E501", # line too long (handled by black) 159 | "B008", # do not perform function calls in argument defaults 160 | "C901", # too complex 161 | ] 162 | 163 | [tool.ruff.per-file-ignores] 164 | "__init__.py" = ["F401"] 165 | 166 | # Future: Homebrew formula metadata hints 167 | # When creating Homebrew formula, use: 168 | # - homepage: project.urls.Homepage 169 | # - url: GitHub release tarball 170 | # - dependencies: python@3.10, ollama 171 | # - install: pip3 install (extracted formula) 172 | -------------------------------------------------------------------------------- /BEGINNERS_GUIDE.md: -------------------------------------------------------------------------------- 1 | # 🔰 The "Zero-to-Hero" Guide 2 | 3 | **Never opened a terminal in your life? You're in the right place.** 4 | 5 | If you're used to clicking icons and dragging folders, the terminal (that black screen with text) can look intimidating. But it's actually just a conversation. You type a request, and the computer does exactly what you ask. 6 | 7 | This guide will teach you the basics and get FIXXER installed in under 5 minutes. 8 | 9 | --- 10 | 11 | ## 1. Open The Terminal 12 | 13 | * **🍎 Mac:** Press `Command + Space`, type **Terminal**, and hit Enter. 14 | * **⊞ Windows:** Press the Windows Key, type **PowerShell**, and hit Enter. 15 | * **🐧 Linux:** You probably already know this, but usually `Ctrl + Alt + T`. 16 | 17 | --- 18 | 19 | ## 2. 
Anatomy of the Screen 20 | 21 | When it opens, you'll see a line of text followed by a blinking cursor. It usually looks like `name@computer ~ %`. 22 | 23 | Here's a breakdown of what you're looking at: 24 | 25 | ```text 26 | jane@macbook: ~ $ █ 27 | └─1─┘ └─2──┘ 3 4 5 28 | ``` 29 | 30 | 1. **Who you are:** Your username (e.g., jane). 31 | 2. **Where you are:** The computer's name. 32 | 3. **The Folder:** The tilde (`~`) is shorthand for your Home folder. 33 | 4. **The Prompt:** The `$` or `%` symbol means "I'm listening." 34 | 5. **The Cursor:** This is where you type. 35 | 36 | --- 37 | 38 | ## 3. How to Speak "Computer" 39 | 40 | Terminal commands are just sentences. They usually follow a simple **Verb → Adverb → Noun** structure. 41 | 42 | Let's look at the command you'll use to install dependencies: 43 | 44 | ```text 45 | pip install -r requirements.txt 46 | └1┘ └──2──┘ └3┘ └──────4───────┘ 47 | ``` 48 | 49 | 1. **The Tool (The Chef):** `pip` is the name of the program we want to use. 50 | 2. **The Action (The Verb):** `install` tells the program what to do. 51 | 3. **The Option (The Adverb):** `-r` stands for "requirements" — it tells `pip` to read the list of packages from a file. It modifies the action. 52 | 4. **The Target (The Noun):** `requirements.txt` is the file we want to act on. 53 | 54 | **Rule of Thumb:** 55 | 56 | - Spaces matter. Think of them as the spaces between words in a sentence. 57 | - Capitalization matters. `Desktop` is different from `desktop`. 58 | 59 | --- 60 | 61 | ## 4. Let's Install FIXXER 62 | 63 | Now that you know the language, let's execute the installation. You'll type (or copy-paste) these lines one by one, pressing **Enter** after each line. 64 | 65 | ### Option A: Easy Install with Homebrew (Mac Only - Recommended) 66 | 67 | If you're on a Mac, this is the simplest way. Homebrew is like an "app store" for terminal programs. 
68 | 69 | **First, install Homebrew if you don't have it:** 70 | ```bash 71 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 72 | ``` 73 | 74 | **Then install FIXXER:** 75 | ```bash 76 | # Add the FIXXER tap (this tells Homebrew where to find FIXXER) 77 | brew tap bandwagonvibes/fixxer 78 | 79 | # Install FIXXER (this downloads and installs everything automatically) 80 | brew install fixxer 81 | ``` 82 | 83 | That's it! Skip to Step 5 to launch it. 84 | 85 | --- 86 | 87 | ### Option B: Install From Source (All Platforms) 88 | 89 | If you're on Windows/Linux or prefer to install from source: 90 | 91 | **Step 1: Download the Code** 92 | 93 | This uses `git` to download the FIXXER folder from the internet to your computer. 94 | 95 | ```bash 96 | git clone https://github.com/BandwagonVibes/fixxer.git 97 | ``` 98 | 99 | **Step 2: Enter the Folder** 100 | 101 | This command `cd` (Change Directory) moves you inside the folder you just downloaded. 102 | 103 | ```bash 104 | cd fixxer 105 | ``` 106 | 107 | **Step 3: Create a Safe Space (Virtual Environment)** 108 | 109 | This creates a "virtual environment." Think of this like a **sandbox**. Anything we install here stays inside this folder and won't mess up your other computer settings. 110 | 111 | ```bash 112 | python3 -m venv venv 113 | ``` 114 | 115 | Now **activate** the sandbox: 116 | 117 | **On Mac/Linux:** 118 | ```bash 119 | source venv/bin/activate 120 | ``` 121 | 122 | **On Windows (PowerShell):** 123 | ```powershell 124 | venv\Scripts\activate 125 | ``` 126 | 127 | You'll know it worked when your terminal line starts with `(venv)`. 128 | 129 | **Step 4: The Final Install** 130 | 131 | This installs FIXXER and all the fancy AI brains (CLIP, BRISQUE) into your sandbox. 132 | 133 | ```bash 134 | pip install -e . 135 | ``` 136 | 137 | **Note:** On first launch, FIXXER will auto-download the CLIP vision model (~300MB). This happens only once. 
138 | 139 | --- 140 | 141 | ## 5. Launch It! 142 | 143 | If the previous steps finished without red text, you're done. Congratulations, you're now a terminal user! 🎉 144 | 145 | To launch the app, just type: 146 | 147 | ```bash 148 | fixxer 149 | ``` 150 | 151 | (You can use your mouse to click buttons inside the app, or use your keyboard!) 152 | 153 | --- 154 | 155 | ## 🆘 Troubleshooting 156 | 157 | ### "Command not found: brew" (Mac users doing Homebrew install) 158 | - Install Homebrew first: `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"` 159 | - Visit [brew.sh](https://brew.sh) for more info 160 | 161 | ### "Command not found: git" (Source install only) 162 | - **Mac:** Install with `xcode-select --install` 163 | - **Windows:** Download from [git-scm.com](https://git-scm.com) 164 | - **Linux:** `sudo apt install git` 165 | 166 | ### "Command not found: python3" (Source install only) 167 | - **Mac:** Install with `brew install python@3.11` (install Homebrew first: [brew.sh](https://brew.sh)) 168 | - **Windows:** Download from [python.org](https://python.org) 169 | - **Linux:** `sudo apt install python3 python3-venv` 170 | 171 | ### Red Text During Install 172 | - Read the error message carefully 173 | - Common fix: Close terminal, reopen, try again 174 | - Google the exact error message (seriously, this is what pros do!) 175 | 176 | --- 177 | 178 | ## 💡 Pro Tips 179 | 180 | ### Copying Text from Terminal 181 | - **Mac:** `Command + C` (just like normal) 182 | - **Windows PowerShell:** Highlight text, right-click 183 | - **Linux:** `Ctrl + Shift + C` 184 | 185 | ### Pasting Text into Terminal 186 | - **Mac:** `Command + V` 187 | - **Windows PowerShell:** Right-click 188 | - **Linux:** `Ctrl + Shift + V` 189 | 190 | ### Clearing the Screen 191 | Type `clear` and hit Enter. Everything disappears, but nothing is deleted. 192 | 193 | ### Stopping a Running Program 194 | Press `Ctrl + C`. 
This tells the program "Stop what you're doing." 195 | 196 | --- 197 | 198 | **Built with precision. Secured with cryptography. Powered by AI.** 199 | 200 | ✞ **FIXXER PRO** - "CHAOS PATCHED // LOGIC INJECTED" 201 | -------------------------------------------------------------------------------- /README_TUI.md: -------------------------------------------------------------------------------- 1 | # FIXXER ✞ TUI (v1.1) - Refactored & Optimized 2 | 3 | ## Terminal User Interface with Hash Verification 4 | 5 | FIXXER now includes a full-featured TUI (Terminal User Interface) with SHA256 integrity verification for every file operation. 6 | 7 | **"CHAOS PATCHED // LOGIC INJECTED"** 8 | 9 | ### Quick Start 10 | 11 | ```bash 12 | # After installing with pip install -e . 13 | fixxer 14 | 15 | # Or run as a module 16 | python -m fixxer 17 | ``` 18 | 19 | ### Features 20 | 21 | - **SHA256 hash verification** - Every file move cryptographically verified 22 | - **JSON audit trail** - .fixxer.json sidecar files for integrity tracking 23 | - **Halt-on-corruption** - Workflow stops immediately if hash mismatch detected 24 | - **Visual file browser** - Navigate directories with mouse or keyboard 25 | - **Real-time log panel** - Watch hash calculations and operations in real-time 26 | - **Animated progress indicator** - Block spinner with rotating motivational phrases 27 | - **Engine status checks** - BRISQUE and CLIP detection at startup 28 | - **Persistent config** - Settings auto-save to `~/.fixxer.conf` 29 | - **Dual UI modes** - Toggle between Warez and Pro (Phantom Redline) with F12 30 | - **Milestone HUD** - Real-time stats dashboard in Pro Mode (BURSTS, TIER A/B/C, TIME) 31 | - **Tooltip hints** - Hover over buttons to see keyboard shortcuts 32 | 33 | ### Files 34 | 35 | - `src/fixxer/app.py` - Main TUI application (v1.1) 36 | - `src/fixxer/config.py` - Configuration management (NEW in v1.1) 37 | - `src/fixxer/security.py` - Hash verification & sidecar files (NEW in v1.1) 38 
| - `src/fixxer/vision.py` - AI/Ollama & RAW processing (NEW in v1.1) 39 | - `src/fixxer/engine.py` - Workflow orchestration (v1.1, refactored) 40 | - `src/fixxer/themes/warez.css` - Standard Mode theme 41 | - `src/fixxer/themes/pro.css` - Pro Mode theme 42 | - `src/fixxer/phrases.py` - Rotating progress messages 43 | 44 | ### Dependencies 45 | 46 | All dependencies are now included by default: 47 | 48 | ```bash 49 | # One command installs everything (CLIP, BRISQUE, TUI, Engine) 50 | pip install -e . 51 | ``` 52 | 53 | This installs the complete professional suite with no optional add-ons needed. 54 | 55 | ### System Requirements 56 | 57 | - Python 3.8+ 58 | - Terminal with mouse support (iTerm2, Ghostty, or similar) 59 | - Ollama running locally (for AI features) 60 | - rawpy (installed via pip) 61 | - exifread (installed via pip) 62 | 63 | ### Interface Overview 64 | 65 | ``` 66 | ┌─────────────────────────────────────────────────────────────┐ 67 | │ VISIONCREW LOGO │ 68 | ├─────────────────────────────────────────────────────────────┤ 69 | │ Source Browser │ Status & Logs │ 70 | │ [File Tree] │ [Status Bar] │ 71 | │ │ [Spinner: ■ □ □] │ 72 | │ │ [Log Output] │ 73 | ├─────────────────────────────────────────────────────────────┤ 74 | │ [Auto] [Bursts] [Cull] [Stats] [Critique] [Source] [Dest] │ 75 | └─────────────────────────────────────────────────────────────┘ 76 | ``` 77 | 78 | ### Button Functions 79 | 80 | - **Auto** - Full AI-powered workflow (burst → cull → rename → organize) 81 | - **Bursts** - Group similar images into burst folders 82 | - **Cull** - Sort images by quality (Tier A/B/C) 83 | - **Stats** - Show EXIF insights and session statistics 84 | - **Critique** - AI critique of individual images 85 | - **Set Source (1)** - Set source directory from browser selection 86 | - **Dest (2)** - Open destination directory selector 87 | - **Model** - Choose Ollama model 88 | 89 | ### Keyboard Shortcuts 90 | 91 | **Navigation & Setup:** 92 | - `1` - Set Source 
directory (from browser selection) 93 | - `2` - Set Destination directory (opens selector) 94 | - `M` - Select Ollama Model 95 | - `F12` - Toggle Pro Mode (Warez ↔ Phantom Redline aesthetic) 96 | 97 | **Workflows:** 98 | - `A` - Auto Workflow (complete pipeline) 99 | - `B` - Bursts (group similar shots) 100 | - `C` - Cull (quality analysis into Tier A/B/C) 101 | - `S` - Stats (EXIF insights and session analytics) 102 | - `K` - Critique (AI creative feedback on selected image) 103 | 104 | **System:** 105 | - `Q` - Quit application 106 | - `R` - Refresh config (reload ~/.fixxer.conf) 107 | - `Esc` - Stop current workflow 108 | - `Ctrl+C` - Force quit 109 | 110 | **Mouse:** 111 | - Click buttons or use keyboard shortcuts 112 | - Navigate file browser with arrow keys or mouse 113 | - Hover over buttons to see tooltips with keyboard hints 114 | 115 | ### Engine Detection 116 | 117 | At startup, the TUI checks for: 118 | - ✓ rawpy - RAW file conversion (Python-based, cross-platform) 119 | - ✓ BRISQUE engine - Image quality assessment 120 | - ✓ CLIP engine - Semantic burst detection 121 | 122 | Missing engines will show fallback warnings but won't prevent operation. 123 | 124 | ### Config File 125 | 126 | Settings are saved to `~/.fixxer.conf`: 127 | 128 | ```ini 129 | [behavior] 130 | last_source_path = /path/to/source 131 | last_destination_path = /path/to/destination 132 | 133 | [ingest] 134 | default_model = qwen2.5vl:3b 135 | ``` 136 | 137 | The TUI automatically saves changes when you modify source, destination, or model settings. 
138 | 139 | --- 140 | 141 | ## Version History 142 | 143 | ### FIXXER v1.1.0 (2025-01-21) 144 | **"Modular Architecture Refactor"** 145 | - Split monolithic engine.py into focused modules (config, security, vision, engine) 146 | - Case-insensitive file extension matching (finds both .jpg and .JPG) 147 | - Removed deprecated dcraw, replaced with rawpy for RAW conversion 148 | - Fixed Easy Button (Simple Sort) workflow 149 | - Cleaned up dead code (MockSessionTracker, check_dcraw) 150 | - Version bumped across all components 151 | 152 | ### FIXXER v1.0 (2025-01-20) 153 | **"CHAOS PATCHED // LOGIC INJECTED"** 154 | - SHA256 hash verification for all file operations 155 | - JSON sidecar audit trail (.fixxer.json files) 156 | - Halt-on-mismatch corruption detection 157 | - Complete rebrand to FIXXER ✞ 158 | - Real-time hash verification logging in TUI 159 | - Professional-grade integrity protection 160 | 161 | ### TUI v12.1 (2025-11-16) 162 | - Fixed mouse event leakage (imports at module load time) 163 | - Added BRISQUE and CLIP engine detection at startup 164 | - Added animated block spinner during workflows 165 | - Config auto-saves when changing settings 166 | - Left-aligned logo, red scrollbars 167 | 168 | ### Engine v10.7 (2025-11-16) 169 | - Fixed session naming (uses destination paths after move) 170 | - Pre-tests image encoding before selecting samples 171 | - Added `save_app_config()` for persistent settings 172 | - RAW conversion tries embedded thumbnail first 173 | -------------------------------------------------------------------------------- /src/fixxer/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | FIXXER Configuration 5 | Application settings, defaults, and configuration file management. 
6 | """ 7 | 8 | from __future__ import annotations 9 | 10 | import configparser 11 | from pathlib import Path 12 | from datetime import datetime 13 | from typing import Dict, Any 14 | 15 | # ============================================================================== 16 | # CONFIGURATION CONSTANTS 17 | # ============================================================================== 18 | 19 | # --- Ollama / AI Settings --- 20 | OLLAMA_URL = "http://localhost:11434/api/chat" 21 | DEFAULT_MODEL_NAME = "qwen2.5vl:3b" 22 | DEFAULT_CRITIQUE_MODEL = "qwen2.5vl:3b" 23 | 24 | # --- Path Settings --- 25 | DEFAULT_DESTINATION_BASE = Path.home() / "Pictures" / "FIXXER_Output" 26 | CONFIG_FILE_PATH = Path.home() / ".fixxer.conf" 27 | 28 | # --- File Processing Settings --- 29 | SUPPORTED_EXTENSIONS = {'.jpg', '.jpeg', '.png'} 30 | RAW_SUPPORT = False # Updated dynamically by check_rawpy() 31 | 32 | # --- Workflow Settings --- 33 | MAX_WORKERS = 5 34 | INGEST_TIMEOUT = 120 35 | CRITIQUE_TIMEOUT = 120 36 | 37 | # --- Algorithm Defaults --- 38 | DEFAULT_CULL_ALGORITHM = 'legacy' 39 | DEFAULT_BURST_ALGORITHM = 'legacy' 40 | 41 | DEFAULT_CULL_THRESHOLDS = { 42 | 'sharpness_good': 40.0, 43 | 'sharpness_dud': 15.0, 44 | 'exposure_dud_pct': 0.20, 45 | 'exposure_good_pct': 0.05 46 | } 47 | DEFAULT_BURST_THRESHOLD = 8 48 | 49 | # --- Session Metadata --- 50 | SESSION_DATE = datetime.now().strftime("%Y-%m-%d") 51 | SESSION_TIMESTAMP = datetime.now().strftime("%Y-%m-%d_%H%M") 52 | 53 | # --- Folder Names --- 54 | BEST_PICK_PREFIX = "_PICK_" 55 | PREP_FOLDER_NAME = "_ReadyForLightroom" 56 | TIER_A_FOLDER = "_Tier_A" 57 | TIER_B_FOLDER = "_Tier_B" 58 | TIER_C_FOLDER = "_Tier_C" 59 | 60 | # --- AI Classification Keywords --- 61 | GROUP_KEYWORDS = { 62 | "Architecture": ["building", "architecture"], 63 | "Street-Scenes": ["street", "road", "city"], 64 | "People": ["people", "person", "man", "woman"], 65 | "Nature": ["tree", "forest", "mountain", "lake"], 66 | "Transportation": ["car", "bus", 
"train", "vehicle"], 67 | "Signs-Text": ["sign", "text", "billboard"], 68 | "Food-Dining": ["food", "restaurant", "cafe"], 69 | "Animals": ["dog", "cat", "bird", "animal"], 70 | "Interior": ["interior", "room", "inside"], 71 | } 72 | 73 | 74 | # ============================================================================== 75 | # CONFIGURATION FILE MANAGEMENT 76 | # ============================================================================== 77 | 78 | def load_app_config() -> Dict[str, Any]: 79 | """ 80 | Load settings from ~/.fixxer.conf with fallback defaults. 81 | 82 | Returns: 83 | Dictionary containing all application settings 84 | """ 85 | parser = configparser.ConfigParser() 86 | config_loaded = False 87 | 88 | if CONFIG_FILE_PATH.exists(): 89 | try: 90 | parser.read(CONFIG_FILE_PATH) 91 | config_loaded = True 92 | except configparser.Error: 93 | pass # Will use fallbacks 94 | 95 | config = {} 96 | 97 | config['config_file_found'] = config_loaded 98 | config['config_file_path'] = str(CONFIG_FILE_PATH) 99 | 100 | config['default_destination'] = Path(parser.get( 101 | 'ingest', 'default_destination', 102 | fallback=str(DEFAULT_DESTINATION_BASE) 103 | )).expanduser() 104 | 105 | config['default_model'] = parser.get( 106 | 'ingest', 'default_model', 107 | fallback=DEFAULT_MODEL_NAME 108 | ) 109 | 110 | config['cull_thresholds'] = { 111 | 'sharpness_good': parser.getfloat('cull', 'sharpness_good', fallback=DEFAULT_CULL_THRESHOLDS['sharpness_good']), 112 | 'sharpness_dud': parser.getfloat('cull', 'sharpness_dud', fallback=DEFAULT_CULL_THRESHOLDS['sharpness_dud']), 113 | 'exposure_dud_pct': parser.getfloat('cull', 'exposure_dud_pct', fallback=DEFAULT_CULL_THRESHOLDS['exposure_dud_pct']), 114 | 'exposure_good_pct': parser.getfloat('cull', 'exposure_good_pct', fallback=DEFAULT_CULL_THRESHOLDS['exposure_good_pct']), 115 | } 116 | 117 | config['cull_algorithm'] = parser.get( 118 | 'cull', 'cull_algorithm', 119 | fallback=DEFAULT_CULL_ALGORITHM 120 | ) 121 | 122 | 
config['burst_threshold'] = parser.getint( 123 | 'burst', 'similarity_threshold', 124 | fallback=DEFAULT_BURST_THRESHOLD 125 | ) 126 | 127 | config['burst_algorithm'] = parser.get( 128 | 'burst', 'burst_algorithm', 129 | fallback=DEFAULT_BURST_ALGORITHM 130 | ) 131 | 132 | config['burst_auto_name'] = parser.getboolean( 133 | 'burst', 'burst_auto_name', 134 | fallback=False 135 | ) 136 | 137 | config['critique_model'] = parser.get( 138 | 'critique', 'default_model', 139 | fallback=DEFAULT_CRITIQUE_MODEL 140 | ) 141 | 142 | config['last_source_path'] = parser.get( 143 | 'behavior', 'last_source_path', fallback=None 144 | ) 145 | 146 | config['last_destination_path'] = parser.get( 147 | 'behavior', 'last_destination_path', fallback=None 148 | ) 149 | 150 | config['burst_parent_folder'] = parser.getboolean( 151 | 'folders', 'burst_parent_folder', fallback=True 152 | ) 153 | 154 | config['pro_mode'] = parser.getboolean( 155 | 'behavior', 'pro_mode', fallback=False 156 | ) 157 | 158 | return config 159 | 160 | 161 | def save_app_config(config: Dict[str, Any]) -> bool: 162 | """ 163 | Save settings back to ~/.fixxer.conf. 164 | 165 | Only saves paths and model settings that are commonly changed during TUI use. 
166 | 167 | Args: 168 | config: Dictionary with settings to save 169 | 170 | Returns: 171 | True on success, False on error 172 | """ 173 | parser = configparser.ConfigParser() 174 | 175 | # Load existing config first to preserve other settings 176 | if CONFIG_FILE_PATH.exists(): 177 | try: 178 | parser.read(CONFIG_FILE_PATH) 179 | except configparser.Error: 180 | pass 181 | 182 | # Ensure sections exist 183 | if not parser.has_section('behavior'): 184 | parser.add_section('behavior') 185 | if not parser.has_section('ingest'): 186 | parser.add_section('ingest') 187 | 188 | # Save the key settings 189 | if 'last_source_path' in config and config['last_source_path']: 190 | parser.set('behavior', 'last_source_path', str(config['last_source_path'])) 191 | 192 | if 'last_destination_path' in config and config['last_destination_path']: 193 | parser.set('behavior', 'last_destination_path', str(config['last_destination_path'])) 194 | 195 | if 'default_model' in config and config['default_model']: 196 | parser.set('ingest', 'default_model', str(config['default_model'])) 197 | 198 | if 'pro_mode' in config: 199 | parser.set('behavior', 'pro_mode', 'true' if config['pro_mode'] else 'false') 200 | 201 | try: 202 | with open(CONFIG_FILE_PATH, 'w') as f: 203 | parser.write(f) 204 | return True 205 | except Exception: 206 | return False 207 | -------------------------------------------------------------------------------- /src/fixxer/security.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | FIXXER Security 5 | Cryptographic integrity verification and audit trail management. 6 | 7 | Professional-grade file operations with SHA256 hash verification. 
8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | import hashlib 13 | import json 14 | import shutil 15 | from pathlib import Path 16 | from datetime import datetime 17 | from typing import Optional, Tuple, List, Dict, Any, Callable 18 | 19 | 20 | # ============================================================================== 21 | # HASH CALCULATION & VERIFICATION 22 | # ============================================================================== 23 | 24 | def calculate_sha256(file_path: Path, log_callback: Optional[Callable[[str], None]] = None) -> Optional[str]: 25 | """ 26 | Calculate SHA256 hash of a file. 27 | 28 | Args: 29 | file_path: Path to file to hash 30 | log_callback: Optional logging function 31 | 32 | Returns: 33 | SHA256 hash as hex string, or None on error 34 | """ 35 | try: 36 | sha256_hash = hashlib.sha256() 37 | 38 | with open(file_path, "rb") as f: 39 | # Read file in chunks to handle large files efficiently 40 | for byte_block in iter(lambda: f.read(65536), b""): 41 | sha256_hash.update(byte_block) 42 | 43 | return sha256_hash.hexdigest() 44 | 45 | except Exception as e: 46 | if log_callback: 47 | log_callback(f" [red]✗[/red] Hash calculation failed: {e}") 48 | return None 49 | 50 | 51 | # ============================================================================== 52 | # SIDECAR FILE MANAGEMENT 53 | # ============================================================================== 54 | 55 | def read_existing_sidecar(file_path: Path, log_callback: Optional[Callable[[str], None]] = None) -> Optional[Dict[str, Any]]: 56 | """ 57 | Read existing sidecar file if it exists at the source location. 
58 | 59 | Args: 60 | file_path: Path to the image file (sidecar will be .fixxer.json) 61 | log_callback: Optional logging function 62 | 63 | Returns: 64 | Dictionary with sidecar data if found, None otherwise 65 | """ 66 | try: 67 | sidecar_path = file_path.parent / f"{file_path.name}.fixxer.json" 68 | 69 | if not sidecar_path.exists(): 70 | return None 71 | 72 | with open(sidecar_path, 'r', encoding='utf-8') as f: 73 | data = json.load(f) 74 | return data 75 | 76 | except Exception as e: 77 | if log_callback: 78 | log_callback(f" [yellow]⚠️[/yellow] Could not read existing sidecar: {e}") 79 | return None 80 | 81 | 82 | def write_sidecar_file( 83 | destination_path: Path, 84 | original_path: Path, 85 | source_hash: str, 86 | verified: bool, 87 | dest_hash: Optional[str] = None, 88 | existing_history: Optional[List[Dict[str, str]]] = None, 89 | log_callback: Optional[Callable[[str], None]] = None 90 | ) -> bool: 91 | """ 92 | Write a JSON sidecar file with hash verification metadata and move history. 
93 | 94 | Sidecar filename: .fixxer.json 95 | Example: photo.jpg -> photo.jpg.fixxer.json 96 | 97 | Args: 98 | destination_path: Where the file ended up 99 | original_path: Where the file came from 100 | source_hash: SHA256 of source 101 | verified: True if hashes matched 102 | dest_hash: Optional SHA256 of destination (for mismatch debugging) 103 | existing_history: List of previous moves (from old sidecar) 104 | log_callback: Optional logging function 105 | 106 | Returns: 107 | True on success, False on error 108 | """ 109 | try: 110 | sidecar_path = destination_path.parent / f"{destination_path.name}.fixxer.json" 111 | 112 | # Build move history by appending current move to existing history 113 | move_history = existing_history if existing_history else [] 114 | 115 | current_move = { 116 | "timestamp": datetime.now().isoformat(), 117 | "from": str(original_path), 118 | "to": str(destination_path), 119 | "operation": "file_move" 120 | } 121 | move_history.append(current_move) 122 | 123 | metadata = { 124 | "fixxer_version": "1.1.0", 125 | "filename": destination_path.name, 126 | "sha256_source": source_hash, 127 | "verified": verified, 128 | "move_history": move_history 129 | } 130 | 131 | # Add destination hash if provided (for mismatch cases) 132 | if dest_hash and dest_hash != source_hash: 133 | metadata["sha256_destination"] = dest_hash 134 | metadata["corruption_detected"] = True 135 | 136 | with open(sidecar_path, 'w', encoding='utf-8') as f: 137 | json.dump(metadata, f, indent=2) 138 | 139 | return True 140 | 141 | except Exception as e: 142 | if log_callback: 143 | log_callback(f" [yellow]⚠️[/yellow] Sidecar write error: {e}") 144 | return False 145 | 146 | 147 | # ============================================================================== 148 | # VERIFIED FILE OPERATIONS 149 | # ============================================================================== 150 | 151 | def verify_file_move_with_hash( 152 | source_path: Path, 153 | destination_path: 
Path, 154 | log_callback: Optional[Callable[[str], None]] = None, 155 | generate_sidecar: bool = True 156 | ) -> Tuple[bool, Optional[str], Optional[str]]: 157 | """ 158 | Move file with SHA256 integrity verification. 159 | 160 | Workflow: 161 | 1. Calculate hash of source file 162 | 2. Move file to destination 163 | 3. Calculate hash of destination file 164 | 4. Compare hashes 165 | 5. Optionally generate JSON sidecar 166 | 6. Return success status 167 | 168 | Args: 169 | source_path: Source file path 170 | destination_path: Destination file path 171 | log_callback: Optional logging function 172 | generate_sidecar: If True, create .fixxer.json sidecar file 173 | 174 | Returns: 175 | Tuple of (success: bool, source_hash: str, dest_hash: str) 176 | On failure, raises RuntimeError to halt workflow 177 | """ 178 | def log(msg: str): 179 | if log_callback: 180 | log_callback(msg) 181 | 182 | try: 183 | # Step 0: Read existing sidecar (if any) before moving 184 | existing_sidecar = read_existing_sidecar(source_path, log_callback) 185 | existing_history = [] 186 | if existing_sidecar and 'move_history' in existing_sidecar: 187 | existing_history = existing_sidecar['move_history'] 188 | 189 | # Step 1: Calculate source hash 190 | log(f" → Computing integrity hash...") 191 | source_hash = calculate_sha256(source_path, log_callback) 192 | 193 | if not source_hash: 194 | error_msg = f"Failed to calculate source hash for {source_path.name}" 195 | log(f" [red]✗[/red] {error_msg}") 196 | raise RuntimeError(error_msg) 197 | 198 | # Show shortened hash in logs 199 | short_hash = f"{source_hash[:16]}..." 
200 | log(f" → SHA256: [cyan]{short_hash}[/cyan]") 201 | 202 | # Step 2: Move the file 203 | log(f" → Moving to {destination_path.parent.name}/") 204 | shutil.move(str(source_path), str(destination_path)) 205 | 206 | # Step 3: Calculate destination hash 207 | log(f" → Verifying integrity...") 208 | dest_hash = calculate_sha256(destination_path, log_callback) 209 | 210 | if not dest_hash: 211 | error_msg = f"Failed to calculate destination hash for {destination_path.name}" 212 | log(f" [red]✗[/red] {error_msg}") 213 | raise RuntimeError(error_msg) 214 | 215 | # Step 4: Compare hashes 216 | if source_hash == dest_hash: 217 | log(f" [green]✓[/green] Hash verified: MATCH") 218 | 219 | # Step 5: Generate sidecar if requested 220 | if generate_sidecar: 221 | sidecar_success = write_sidecar_file( 222 | destination_path, 223 | source_path, 224 | source_hash, 225 | verified=True, 226 | existing_history=existing_history, 227 | log_callback=log_callback 228 | ) 229 | if not sidecar_success: 230 | log(f" [yellow]⚠️[/yellow] Sidecar write failed (non-critical)") 231 | 232 | # Step 6: Clean up old sidecar at source location (if it existed) 233 | if existing_sidecar: 234 | old_sidecar_path = source_path.parent / f"{source_path.name}.fixxer.json" 235 | try: 236 | if old_sidecar_path.exists(): 237 | old_sidecar_path.unlink() 238 | except Exception as e: 239 | # Non-critical - just log the warning 240 | log(f" [yellow]⚠️[/yellow] Could not remove old sidecar (non-critical): {e}") 241 | 242 | return True, source_hash, dest_hash 243 | 244 | else: 245 | # CRITICAL: Hash mismatch detected - HALT WORKFLOW 246 | log(f" [red]✗ CRITICAL: Hash verified: MISMATCH[/red]") 247 | log(f" [red]✗ Source: {source_hash[:32]}...[/red]") 248 | log(f" [red]✗ Destination: {dest_hash[:32]}...[/red]") 249 | log(f" [red]✗ FILE CORRUPTION DETECTED[/red]") 250 | 251 | # Write sidecar with corruption flag 252 | if generate_sidecar: 253 | write_sidecar_file( 254 | destination_path, 255 | source_path, 256 | 
source_hash, 257 | verified=False, 258 | dest_hash=dest_hash, 259 | existing_history=existing_history, 260 | log_callback=log_callback 261 | ) 262 | 263 | # HALT: Raise exception to stop workflow 264 | error_msg = f"Hash mismatch detected for {destination_path.name} - workflow halted for safety" 265 | raise RuntimeError(error_msg) 266 | 267 | except RuntimeError: 268 | # Re-raise RuntimeError (our controlled halt) 269 | raise 270 | except Exception as e: 271 | error_msg = f"File move failed: {e}" 272 | log(f" [red]✗[/red] {error_msg}") 273 | raise RuntimeError(error_msg) 274 | -------------------------------------------------------------------------------- /src/fixxer/themes/warez.css: -------------------------------------------------------------------------------- 1 | /* ============================================================================== 2 | * FIXXER TUI v12.1 - VisionCrew Edition CSS 3 | * ============================================================================== */ 4 | 5 | /* Base colors (variables) must be defined at the root */ 6 | $background: rgb(10, 10, 10); 7 | /* Very dark grey, almost black */ 8 | $surface: rgb(20, 20, 20); 9 | /* Panel background */ 10 | $primary: #C00000; 11 | /* Bright Red */ 12 | $secondary: #FFFFFF; 13 | /* White */ 14 | $text: #CCCCCC; 15 | /* Off-white text */ 16 | $status-ok: #00FFFF; 17 | /* Cyan for "Ready" status */ 18 | 19 | Screen { 20 | layout: vertical; 21 | background: $background; 22 | color: $text; 23 | border: solid $primary; 24 | /* Simple border, no width percentage */ 25 | } 26 | 27 | /* ============================================================================== 28 | * Layout & Containers 29 | * ============================================================================== */ 30 | 31 | #header-row { 32 | height: auto; 33 | width: 100%; 34 | align: left middle; 35 | } 36 | 37 | #logo { 38 | width: 1fr; 39 | height: auto; 40 | text-align: left; 41 | /* Left-aligned logo */ 42 | padding: 1 2; 43 | } 

/* The "Easy Button" - one-shot simple sort */
.easy-btn {
    width: auto;
    height: auto;
    min-width: 14;
    margin: 1 2 1 0;
    background: $surface;
    color: $primary;
    border: solid $primary 50%;
}

.easy-btn:hover {
    background: $primary;
    color: $background;
    border: solid $primary;
    text-style: bold;
}

.panel-title {
    height: auto;
    padding-bottom: 1;
    color: $primary;
    text-style: bold;
}

/* Two-column main layout: browser on the left, status/logs on the right */
#left-panel {
    padding-right: 1;
    width: 1fr;
    height: 100%;
}

#right-panel {
    padding-left: 1;
    width: 1fr;
    height: 100%;
}

#main-layout {
    height: 1fr;
}

/* ==============================================================================
 * Widgets
 * ============================================================================== */

Button {
    outline: none;
}

Button:focus {
    border: solid $primary;
}

/* --- Scrollbar styling (red to match the theme) --- */
Scrollbar {
    background: $surface;
    color: $primary;
}

Scrollbar:hover {
    color: #FF0000;
}

Scrollbar.-active {
    color: #FF4444;
}

/* --- File browser --- */
#file-browser-container {
    height: 1fr;
    background: $surface;
    border: solid $primary;
    padding: 1;
}

DirectoryTree {
    background: $surface;
}

.directory-tree--guides {
    color: $primary 50%;
}

.directory-tree--selected {
    background: $primary !important;
    color: $background !important;
    text-style: bold;
}

/* --- Status & logs --- */
#status-bar {
    height: auto;
    margin-bottom: 1;
    padding: 0 1;
    background: $surface;
    border: solid $primary;
}

/* --- System monitor (RAM/CPU sparklines above the log panel) --- */
#system-monitor {
    height: auto;
    max-height: 8;
    /* Reduced from 9 for tighter fit */
    width: 100%;
    layout: vertical;
    margin-bottom: 1;
    padding: 1 2 0 2;
    /* Top padding for RAM peaks, no bottom padding */
    background: $background;
    border: solid $surface;
}

.sysmon-row {
    height: auto;
    width: 100%;
    align: left middle;
    padding: 0;
    margin: 0;
}

.sysmon-label {
    width: 6;
    color: $text;
    /* Changed from $primary to muted text */
    text-style: bold;
}

.sysmon-sparkline {
    width: 1fr;
    height: 3;
    margin: 0 1;
}

.sysmon-value {
    width: 5;
    text-align: right;
    color: $primary;
    /* Keep values in red */
    text-style: bold;
}

#sysmon-error {
    color: $text 50%;
    text-align: center;
}

/* Sparklines forced red for the VisionCrew tactical look */
#ram-sparkline {
    color: $primary !important;
}

#cpu-sparkline {
    color: $primary !important;
}

#log-panel {
    height: 1fr;
    min-height: 35;
    /* Significantly increased for better log visibility */
    max-height: 1fr;
}

#log-container {
    height: 1fr;
    background: $surface;
    padding: 0 1;
    border: solid $primary;
}

/* --- Progress spinner container --- */
#progress-container {
    height: auto;
    width: 100%;
    padding: 0 1;
    background: $surface;
    margin-bottom: 1;
    border: solid $primary;
}

#progress-container Horizontal {
    height: auto;
    width: 100%;
}

#spinner-display {
    height: auto;
    padding-bottom: 0;
    text-align: center;
    color: $primary;
}

#progress-phrase {
    height: auto;
    padding-top: 0;
    color: #00FFFF;
    width: 1fr;
}

#progress-timer {
    height: auto;
    color: #00FFFF;
    text-align: right;
    width: auto;
}

/* --- Button bar --- */
#button-bar {
    height: auto;
    width: 100%;
    align: center middle;
    padding: 1 0;
    layout: horizontal;
    background: $background;
}

/* RAM and CPU monitors hidden in the footer for standard mode */
/* (They appear in the right panel above Status & Logs instead) */
#ram-monitor,
#cpu-monitor {
    display: none;
}

#controls-container {
    width: 100%;
    height: auto;
    align: center middle;
}

#header-right {
    width: auto;
    height: auto;
    align: right middle;
}

/* View mode label hidden in standard mode */
#view-mode-label {
    display: none;
}

.btn-group {
    height: auto;
    width: auto;
    align: center middle;
    margin: 0 2;
}

.workflow-btn,
.path-btn {
    margin: 0 1;
    width: auto;
    min-width: 10;
    background: $surface;
    color: #ffffff;
    border: solid $primary 50%;
}

.workflow-btn:hover,
.path-btn:hover,
.control-btn:hover {
    background: $primary;
    color: $background;
    text-style: bold;
    border: solid $primary;
}

.control-btn {
    margin: 0 1;
    width: auto;
    min-width: 14;
    height: 3;
    background: $surface;
    color: #ffffff;
    border: solid $primary 50%;
    text-align: center;
}

/* NOTE(review): $error is not defined in this file - presumably it is a
 * framework-provided design token; confirm the TUI framework supplies it. */
#btn-quit,
#btn-stop {
    color: $error;
    border: solid $error;
}

#btn-quit:hover,
#btn-stop:hover {
    background: $error;
    color: $surface;
}

#btn-stop:disabled {
    background: $surface;
    color: #444444;
    border: solid #444444;
    opacity: 0.5;
}



/* NOTE(review): this duplicates the #btn-quit:hover rule above with
 * conflicting colors; being later in the sheet, THIS one wins for
 * #btn-quit (quit gets the subtle hover, stop keeps the aggressive one).
 * Confirm the override is intentional. */
#btn-quit:hover {
    background: $surface;
    color: $primary;
}

/* ==============================================================================
 * MODAL & DRY RUN STYLING (Warez Edition)
 * ============================================================================== */

/* Dark overlay behind the modal */
DryRunSelectScreen {
    background: rgba(0, 0, 0, 0.85);
    align: center middle;
}

/* The modal box itself */
#dryrun-dialog {
    background: #000000;
    border: solid #ffffff !important; /* Stark white border for Warez look */
    width: 50;
    height: auto;
    padding: 1 2;
}

/* Title text inside the modal */
#dryrun-dialog Label {
    width: 100%;
    text-align: center;
    color: #ffffff;
    text-style: bold;
    margin-bottom: 1;
    border-bottom: solid #333333;
    padding-bottom: 1;
}

/* --- WAREZ BUTTONS --- */

#dryrun-dialog Button {
    width: 100%;
    height: 3;
    margin-bottom: 1;
    background: #000000;
    border: solid #333333 !important; /* Dark grey default */
    color: #888888;
    text-style: bold;
}

/* Hover state: aggressive white/red high contrast */
#dryrun-dialog Button:hover {
    background: #ffffff;
    border: solid #ffffff !important;
    color: #000000;
}

/* Primary action highlight */
#dry-auto {
    border-left: solid #ffffff !important;
    color: #ffffff;
}

/* Cancel button */
#dry-cancel {
    margin-top: 1;
    border: none !important;
    color: #ff0000; /* Warez Red */
}

#dry-cancel:hover {
    background: #ff0000;
    color: #ffffff;
    border: none !important;
}

/* ==============================================================================
 * PREVIEW STATUS WIDGET
 * ============================================================================== */

#preview-status {
    background: #111111;
    border-left: solid #ffffff;
    padding: 0 1;
    margin-bottom: 1;
    height: auto;
}

#preview-status-bar {
    width: 100%;
    height: auto;
    align: left middle;
}

#preview-status-text {
    color: #ffffff;
    text-style: bold;
    width: 1fr;
}

#btn-execute-cached {
    background: #000000;
    border: solid #00ff00 !important; /* Matrix Green for execute */
    color: #00ff00;
    margin: 0 1;
    height: 3;
    min-width: 12;
}

#btn-execute-cached:hover {
    background: #00ff00;
    color: #000000;
}

#btn-forget-preview {
    background: #000000;
    border: solid #ff0000 !important;
    color: #ff0000;
    margin: 0 1;
    height: 3;
    min-width: 12;
}

#btn-forget-preview:hover {
    background: #ff0000;
    color: #ffffff;
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# FIXXER ✞ PRO
### Professional-Grade Photography Workflow Automation

![Version](https://img.shields.io/badge/version-1.1.0-red.svg)
![Python](https://img.shields.io/badge/python-3.10--3.12-blue.svg)
![License](https://img.shields.io/badge/license-MIT-green.svg)

**"CHAOS PATCHED // LOGIC INJECTED"**
![warez_dry_run_sped_up_original30](https://github.com/user-attachments/assets/daba32a1-7797-4b2c-9f4b-99866e9b6007)

---

## 🎯 What is FIXXER?

FIXXER is a professional photography workflow automation tool that combines **AI vision models**, **cryptographic integrity verification**, and **intelligent quality analysis** to streamline your post-processing pipeline.
16 | 17 | Built for photographers who demand both **speed** and **safety** in their digital asset management. 18 | 19 | --- 20 | 21 | ## ✨ Key Features 22 | 23 | ### 🔐 **Hash-Verified File Operations** 24 | - **SHA256 integrity checking** on every file move 25 | - **Halt-on-mismatch** protection prevents corruption 26 | - **JSON sidecar files** (.fixxer.json) create audit trails 27 | - Production-tested: 120+ files, zero corruption 28 | 29 | ### 🤖 **AI-Powered Workflows** 30 | - **Vision-based naming** using Ollama models (qwen2.5vl, llava, etc.) 31 | - **Semantic burst detection** with CLIP embeddings 32 | - **AI session naming** from visual analysis 33 | - **Creative critique mode** for artistic feedback 34 | - **Dry-run preview mode** with intelligent AI caching (50%+ speed boost on execution) 35 | 36 | ### 📊 **Quality Analysis Pipeline** 37 | - **BRISQUE quality scoring** for sharpness assessment 38 | - **Exposure analysis** (crushed blacks, blown highlights) 39 | - **CLIP-based burst grouping** (semantic similarity, not just timestamps) 40 | - **Automated culling** into Tier A/B/C folders 41 | 42 | ### 🎨 **Two UI Modes** 43 | - **Standard Mode**: Warez-inspired red/white/black aesthetic 44 | - **Pro Mode**: Phantom Redline - tactical precision dashboard 45 | - **Toggle with F12** - Switch between modes on-the-fly 46 | - Real-time **system monitoring** (RAM, CPU sparklines) 47 | - **Milestone HUD** for workflow progress tracking (Pro Mode) 48 | 49 | ### 📷 **RAW File Support** 50 | - **120+ RAW formats** via rawpy/libraw 51 | - Cross-platform: Linux, macOS, Windows 52 | - Supports: RW2, CR2, CR3, NEF, ARW, DNG, RAF, ORF, PEF, SRW, and more 53 | - Zero temp files - pure in-memory processing 54 | 55 | --- 56 | 57 | ## 🚀 Quick Start 58 | 59 | **Zero friction. 
Full power.** 60 | 61 | ### Prerequisites 62 | - **macOS** (Homebrew installation) or **Python 3.10-3.12** for manual install 63 | - **Ollama** (for AI vision features) - [https://ollama.ai](https://ollama.ai) 64 | - Supported OS: macOS, Linux, Windows (WSL recommended) 65 | 66 | ### 1. Install via Homebrew (Recommended - macOS) 67 | 68 | ```bash 69 | # Add the FIXXER tap 70 | brew tap bandwagonvibes/fixxer 71 | 72 | # Install FIXXER 73 | brew install fixxer 74 | ``` 75 | 76 | That's it! Homebrew handles all dependencies automatically. 77 | 78 | ### 2. Launch 79 | 80 | ```bash 81 | fixxer 82 | ``` 83 | 84 | **Note:** On first launch, FIXXER will auto-download the CLIP vision model (~300MB). This happens only once. 85 | 86 | ### 3. Optional: Install Ollama Vision Model 87 | 88 | For AI naming and creative critique features: 89 | 90 | ```bash 91 | # Install Ollama from https://ollama.ai 92 | # Then pull a vision model: 93 | ollama pull qwen2.5vl:3b 94 | ``` 95 | 96 | **Recommended model:** `qwen2.5vl:3b` (fast, accurate, 2.2GB) 97 | 98 | --- 99 | 100 | ### 🔰 New to Terminals? 101 | 102 | If you've never opened a terminal before, check out our [**Beginner's Guide**](BEGINNERS_GUIDE.md) - a gentle introduction that teaches you everything you need to know. 103 | 104 | --- 105 | 106 | ### 🔧 Development Installation (From Source) 107 | 108 | For development or if you're on Linux/Windows, you can install from source: 109 | 110 | ```bash 111 | git clone https://github.com/BandwagonVibes/fixxer.git 112 | cd fixxer 113 | 114 | # Create a virtual environment 115 | python3.12 -m venv venv 116 | source venv/bin/activate # Windows: venv\Scripts\activate 117 | 118 | # Install FIXXER + All Dependencies (CLIP, BRISQUE, Engine) 119 | # This also installs the 'fixxer' command globally in this venv 120 | pip install -e . 121 | ``` 122 | 123 | **Launch:** Since you installed in editable mode (`-e .`), just type `fixxer` after activating your venv. 
124 | 125 | **Note:** Python 3.13+ is not currently supported due to dependency constraints. 126 | 127 | --- 128 | 129 | ### 🔧 Advanced: Manual Alias Setup 130 | 131 | If you prefer running from source without installing the package, you can set up a shell alias: 132 | 133 | **macOS / Linux (zsh or bash)** 134 | 135 | Add this to your `~/.zshrc` or `~/.bashrc`: 136 | 137 | ```bash 138 | # FIXXER ✞ PRO Launcher 139 | # Adjust path to match where you cloned the repo 140 | alias fixxer='source ~/fixxer/venv/bin/activate && python3 -m fixxer' 141 | ``` 142 | 143 | Reload shell: `source ~/.zshrc` 144 | 145 | **Windows (PowerShell)** 146 | 147 | Open your profile: `notepad $PROFILE` 148 | 149 | Paste this function at the bottom: 150 | 151 | ```powershell 152 | function fixxer { 153 | # Adjust path to match where you cloned the repo 154 | $FixxerPath = "$HOME\fixxer" 155 | 156 | # Activate Venv and Run 157 | & "$FixxerPath\venv\Scripts\Activate.ps1" 158 | python -m fixxer 159 | } 160 | ``` 161 | 162 | Reload profile: `. $PROFILE` 163 | 164 | --- 165 | 166 | ## 🤖 Why Qwen2.5-vl:3b? 167 | 168 | FIXXER is designed to work like an **appliance** - reliable, consistent, and predictable. 
After extensive research and testing, we chose **Qwen2.5-vl:3b** as the recommended model for specific reasons: 169 | 170 | ### Proven Reliability 171 | - **Tested on 150+ photos** across dozens of sessions 172 | - **Consistent naming** - produces the same results run after run 173 | - **Rock-solid JSON output** - only one parsing issue in all testing (99.9%+ reliability) 174 | - **Production-ready** - not experimental, it just works 175 | 176 | ### Technical Advantages 177 | - **Spatial awareness** - Critical for composition critique and understanding scene layout 178 | - **Structured output** - Reliably generates valid JSON for deterministic parsing 179 | - **Speed/quality balance** - Fast enough for real-time workflows, accurate enough for professional use 180 | - **Efficient footprint** - 2.2GB model runs smoothly on MacBook Air hardware 181 | 182 | ### Model Compatibility Note 183 | While you **can** swap models using the Model selector (press `M`), results may vary: 184 | - **Larger models** (7B+) - Potentially better quality, but slower and more memory-intensive 185 | - **Smaller models** (1B-2B) - Faster, but may lose spatial understanding and JSON reliability 186 | - **Alternative vision models** - Not all are suitable for photography workflows (some lack spatial reasoning, others produce inconsistent output) 187 | 188 | **Our recommendation:** Start with `qwen2.5vl:3b`. It's been battle-tested for this exact use case. 189 | 190 | --- 191 | 192 | ## ⌨️ Keyboard Shortcuts 193 | 194 | FIXXER is fully keyboard-driven. 
All buttons have hotkeys: 195 | 196 | ### Navigation & Setup 197 | - **1** - Set Source directory (from file browser selection) 198 | - **2** - Set Destination directory (opens selector) 199 | - **D** - Dry Run (preview workflow without moving files) 200 | - **M** - Select Ollama Model 201 | - **F12** - Toggle Pro Mode (Warez ↔ Phantom Redline) 202 | 203 | ### Workflows 204 | - **A** - Auto Workflow (complete pipeline) 205 | - **B** - Bursts (group similar shots) 206 | - **C** - Cull (quality analysis) 207 | - **S** - Stats (EXIF insights) 208 | - **K** - Critique (AI creative feedback) 209 | 210 | ### System 211 | - **Q** - Quit 212 | - **R** - Refresh Config 213 | - **Esc** - Stop current workflow 214 | - **Ctrl+C** - Force quit 215 | 216 | **Tip:** Hover over any button to see its keyboard shortcut in the tooltip! 217 | 218 | --- 219 | 220 | ## 🔍 Dry Run Preview Mode 221 | 222 | **Try before you commit.** The dry-run feature lets you preview any workflow without moving a single file. 223 | 224 | ### How It Works 225 | 226 | 1. Press **D** to open the dry-run modal 227 | 2. Select which workflow to preview: **Auto**, **Burst**, or **Cull** 228 | 3. FIXXER simulates the entire workflow: 229 | - Calculates all file operations 230 | - Generates AI names (cached for later) 231 | - Shows collision-aware filename resolution 232 | - Displays full preview log with "WOULD MOVE:" entries 233 | 4. Review the preview log - **zero files moved** 234 | 5. 
Choose what to do next: 235 | - **Execute Now** - Run the real workflow using cached AI results (50%+ faster) 236 | - **Forget & Redo** - Clear cache and start fresh 237 | 238 | ### Key Features 239 | 240 | - **Zero footprint** - No folders created, no files moved, no logs written 241 | - **Intelligent AI caching** - AI results cached with model-aware keys and mtime validation 242 | - **Thread-safe** - Cache safely shared across concurrent operations 243 | - **10-minute TTL** - Cache expires after 10 minutes for freshness 244 | - **Collision-aware** - Preview shows exact final filenames including duplicates 245 | - **Full workflow simulation** - Burst grouping, quality culling, AI naming - everything runs in preview mode 246 | 247 | ### Why Use It? 248 | 249 | - **Verify AI names** before committing to file operations 250 | - **Check collision handling** when duplicate names would occur 251 | - **Test workflow settings** without risk 252 | - **Speed up execution** with cached AI results (no re-inference needed) 253 | 254 | --- 255 | 256 | ## 📖 Workflow Overview 257 | 258 | ### **Auto Workflow** (Recommended) 259 | Complete end-to-end processing: 260 | 261 | 1. **Analyze Session** - EXIF statistics and insights 262 | 2. **Stack Bursts** - CLIP-based semantic grouping + AI naming 263 | 3. **Cull Singles** - Quality analysis → Tier A/B/C 264 | 4. **Archive Heroes** - Move best shots to organized folders 265 | 5. 
**Verify Integrity** - SHA256 hash checking throughout 266 | 267 | ### **Individual Workflows** 268 | 269 | - **Bursts**: Group similar shots, pick the best frame (fast, no AI naming by default - see config) 270 | - **Cull**: Analyze sharpness/exposure, sort by quality (Tier A/B/C) 271 | - **Stats**: EXIF insights (cameras, focal lengths, lighting conditions) 272 | - **Critique**: Select a photo in the browser → Press `K` - Get AI creative feedback (composition, mood, technical quality, suggestions) 273 | - **Saves critique JSON** alongside the photo for future reference 274 | - Great for learning what makes your best shots work 275 | - Use it to understand technical issues or get creative direction 276 | - **Easy Archive**: Simple AI naming + keyword folder organization (no culling, just organize) 277 | 278 | --- 279 | 280 | ## 🎛️ Configuration 281 | 282 | Configuration is stored in `~/.fixxer.conf`: 283 | 284 | ### Understanding `burst_auto_name` 285 | 286 | The **Burst** workflow can operate in two modes: 287 | 288 | - **Fast Mode (default)**: `burst_auto_name = false` 289 | - Groups bursts, picks the best frame, uses numeric naming (`burst-001`, `burst-002`) 290 | - **Much faster** - no AI naming overhead 291 | - Great for quick organization - you can run Easy Archive later for AI naming 292 | 293 | - **AI Naming Mode**: `burst_auto_name = true` 294 | - Groups bursts, picks the best frame, **AND** generates descriptive AI names 295 | - Slower (depends on Ollama model speed) 296 | - Useful if you want burst folders named immediately 297 | 298 | **Note:** The **Auto Workflow** always uses AI naming regardless of this setting (it's designed for complete end-to-end processing). 
299 | 300 | ```ini 301 | [ingest] 302 | default_model = qwen2.5vl:3b 303 | default_destination = ~/Pictures/Sorted 304 | 305 | [cull] 306 | cull_algorithm = legacy 307 | sharpness_good = 40.0 308 | sharpness_dud = 15.0 309 | exposure_dud_pct = 0.20 310 | exposure_good_pct = 0.05 311 | 312 | [burst] 313 | burst_algorithm = legacy 314 | similarity_threshold = 8 315 | burst_auto_name = false # Set to 'true' to enable AI naming in Burst workflow (slower) 316 | 317 | [folders] 318 | burst_parent_folder = true 319 | ai_session_naming = true 320 | 321 | [behavior] 322 | pro_mode = false 323 | last_source_path = 324 | last_destination_path = 325 | ``` 326 | 327 | --- 328 | 329 | ## 🔧 Technical Architecture 330 | 331 | ### **Hash Verification Pipeline** 332 | ``` 333 | Source File 334 | ↓ Calculate SHA256 335 | ↓ Move to Destination 336 | ↓ Recalculate SHA256 337 | ↓ Compare Hashes 338 | ├─ MATCH → Generate .fixxer.json sidecar 339 | └─ MISMATCH → HALT WORKFLOW (RuntimeError) 340 | ``` 341 | 342 | ### **AI Vision Integration** 343 | - **Ollama API**: Local LLM inference (no cloud, no privacy concerns) 344 | - **JSON-structured responses**: Deterministic parsing 345 | - **Base64 image encoding**: Direct vision model analysis 346 | - **Fallback chains**: Graceful degradation on timeouts 347 | 348 | ### **Quality Scoring** 349 | - **BRISQUE** (Blind/Referenceless Image Spatial Quality Evaluator) 350 | - **OpenCV Laplacian variance** (sharpness fallback) 351 | - **Histogram analysis** (exposure distribution) 352 | - **CLIP embeddings** (semantic similarity for bursts) 353 | 354 | --- 355 | 356 | ## 📂 Project Structure 357 | 358 | ``` 359 | fixxer/ 360 | ├── src/ 361 | │ └── fixxer/ 362 | │ ├── __init__.py # Package initialization 363 | │ ├── __main__.py # Module entry point (python -m fixxer) 364 | │ ├── app.py # TUI application (Textual) 365 | │ ├── config.py # Configuration management 366 | │ ├── engine.py # Workflow orchestration 367 | │ ├── phrases.py # Motivational 
progress phrases 368 | │ ├── security.py # SHA256 hash verification & sidecar files 369 | │ ├── vision.py # AI/Ollama integration & RAW processing 370 | │ └── themes/ 371 | │ ├── __init__.py # Theme package marker 372 | │ ├── pro.css # Pro Mode styling (Phantom Redline) 373 | │ └── warez.css # Standard Mode styling (Warez) 374 | ├── requirements.txt # Python dependencies 375 | ├── pyproject.toml # Packaging metadata (PyPI ready) 376 | ├── README.md # Main documentation 377 | ├── BEGINNERS_GUIDE.md # Terminal beginner's guide 378 | ├── README_TUI.md # TUI-specific documentation 379 | ├── CHANGELOG.md # Version history 380 | └── .gitignore # Git exclusions 381 | ``` 382 | 383 | --- 384 | 385 | ## 🔐 Sidecar File Format 386 | 387 | Example `.fixxer.json`: 388 | 389 | ```json 390 | { 391 | "fixxer_version": "1.1.0", 392 | "filename": "golden-hour-cityscape.jpg", 393 | "original_path": "/source/IMG_1234.jpg", 394 | "final_path": "/archive/2024-11-20_Urban/Architecture/golden-hour-cityscape.jpg", 395 | "sha256_source": "a1b2c3d4...", 396 | "verified": true, 397 | "timestamp": "2024-11-20T14:35:22.123456" 398 | } 399 | ``` 400 | 401 | If corruption is detected: 402 | ```json 403 | { 404 | "sha256_source": "a1b2c3d4...", 405 | "sha256_destination": "e5f6g7h8...", 406 | "verified": false, 407 | "corruption_detected": true 408 | } 409 | ``` 410 | 411 | --- 412 | 413 | ## 🎨 UI Modes Comparison 414 | 415 | Press **F12** to toggle between modes at any time (except during active workflows): 416 | 417 | | Feature | Standard Mode | Pro Mode | 418 | |---------|---------------|----------| 419 | | **Aesthetic** | Warez (red/white/black) | Phantom Redline (tactical black) | 420 | | **Logo** | ASCII art + tagline | Clean typography + F12 indicator | 421 | | **System Monitor** | Cyan sparklines | Red "redline" sparklines | 422 | | **Progress Phrases** | "Applying physics hacks..." | "Processing active... 
[2m 34s]" | 423 | | **Milestone HUD** | ❌ Hidden | ✅ Real-time stats (BURSTS, TIER A/B/C, HEROES, ARCHIVED, TIME) | 424 | | **Button Styles** | High contrast | Minimal, subtle borders | 425 | | **Toggle Key** | **F12** | **F12** | 426 | 427 | --- 428 | 429 | ## 🧪 Testing 430 | 431 | Hash verification stress test (included): 432 | 433 | ```bash 434 | # Test with 120+ mixed RAW/JPEG files 435 | python3 test_hash_verification.py 436 | 437 | # Expected output: 438 | # ✅ 120 files processed 439 | # ✅ 120 hashes verified 440 | # ✅ 0 corruption events 441 | # ✅ 120 sidecar files generated 442 | ``` 443 | 444 | --- 445 | 446 | ## 🤝 Contributing 447 | 448 | Contributions welcome! Areas of interest: 449 | 450 | - Additional RAW format testing 451 | - Alternative AI vision models 452 | - Quality scoring algorithm improvements 453 | - Cross-platform testing (Windows native) 454 | - Performance optimizations 455 | 456 | --- 457 | 458 | ## 📜 License 459 | 460 | MIT License - See [LICENSE](LICENSE) file for details. 461 | 462 | --- 463 | 464 | ## 🙏 Acknowledgments 465 | 466 | - **Ollama** - Local LLM inference 467 | - **rawpy/libraw** - RAW file processing 468 | - **CLIP** (OpenAI) - Semantic burst detection 469 | - **BRISQUE** - Image quality assessment 470 | - **Textual** - Modern TUI framework 471 | 472 | --- 473 | 474 | ## 📧 Contact 475 | 476 | Issues and feature requests: [GitHub Issues](https://github.com/BandwagonVibes/fixxer/issues) 477 | 478 | --- 479 | 480 | **Built with precision. Secured with cryptography. Powered by AI.** 481 | 482 | ✞ **FIXXER PRO** - "CHAOS PATCHED // LOGIC INJECTED" 483 | -------------------------------------------------------------------------------- /src/fixxer/phrases.py: -------------------------------------------------------------------------------- 1 | """ 2 | Phrase Library for FIXXER v1.1 3 | ===================================================== 4 | 200 rotating messages. 5 | Style: Warez NFO / Demoscene / Brutal Roast. 

Distribution:
- Model loading: 15 phrases
- Quick (0-5min): 30 phrases
- Early (5-15min): 35 phrases
- Mid (15-30min): 35 phrases
- Long (30-60min): 35 phrases
- Marathon (60+min): 30 phrases
- VisionCrew Meta: 20 phrases
"""

import random
from typing import List  # NOTE(review): List is not used in the code visible here — confirm before removing.

# ============================================================================
# MODEL LOADING PHRASES (15) - The "Crack Intro" Vibe
# ============================================================================

# Shown while the Ollama model is being loaded (see get_model_loading_phrase).
MODEL_LOADING_PHRASES = [
    "⚡️ INJECTING NEURAL PAYLOAD... [|||||| ]",
    "💀 Waking up the daemon. Don't panic.",
    "📼 Loading bakllava.bin into high memory...",
    "👾 Bypassing Apple's safety protocols... (just kidding)",
    "🎹 Cue the keygen music...",
    "🧠 Mount failed? Retrying with sudo force...",
    "🔋 Diverting auxiliary power to the NPU...",
    "📀 Decrypting your artistic vision...",
    "🔌 Establishing handshake with the ghost in the shell...",
    "🕶️ Rerouting encryptions through the matrix...",
    "💾 Allocating VRAM like it's 1999...",
    "🕹️ Pressing 'Any Key' to continue...",
    "📟 Paging the sysadmin...",
    "🚀 Initializing the logic bomb...",
    "🛡️ Disabling judgment modules... actually, keeping those on.",
]

# ============================================================================
# QUICK PROCESSING (0-5min) - The "Hot Take"
# ============================================================================

QUICK_PROCESSING_PHRASES = [
    # Humor & Snark (Warez Style)
    "📸 Scanning for talent... Result: 404 NOT FOUND.",
    "🧱 Bricking your bad shots... standby.",
    "🚮 Garbage collection running on your composition.",
    "👀 I see what you did there. I wish I didn't.",
    "📉 Optimization level: MAXIMUM. Your photos: MINIMUM.",
    "🍵 Sipping RAM and spilling tea on your ISO settings.",
    "💥 ERROR: Too much bokeh detected.",
    "🚫 Rule of Thirds? You broke all three.",
    "🎭 Pretending this blur is 'artistic choice'...",
    "🧟‍♂️ These RAW files are heavier than your conscience.",

    # Tech/Roast Mix
    "🤖 Analyzing histogram... it's flatlining, doc.",
    "⚡ Burst mode is for people who can't aim.",
    "🛑 Stop stopping down. Diffraction is killing us.",
    "📡 Uplinking to the mothership to report this mess.",
    "💾 Writing zero-days to your bad sectors.",
    "🕶️ Even the AI is squinting at this exposure.",

    # Everyday Mysteries (Twisted)
    "🧠 Why do we blink? To avoid looking at this shot.",
    "🧊 Entropy increases... mostly in your file management.",
    "🌌 The universe is expanding, unlike your dynamic range.",
    "🐈 Schrödinger's Cat is alive, but your focus is dead.",
    "⏰ Time is a flat circle. So is this lens flare.",
    "🔮 Reality is a simulation. This photo is a glitch.",

    # AI/ML Knowledge
    "🤖 Tensors flowing... judging flowing harder.",
    "🧠 My weights are quantized. Your skills are not.",
    "📊 Running inference on your life choices.",
    "🎯 Confidence score: 12%. Yikes.",
    "🧬 Genetic algorithm suggests: Delete this.",

    # Time/Tech Facts
    "⏰ 1970 called, they want their Unix epoch back.",
    "💾 formatting c: ... joke. Unless?",
    "🔋 Draining battery to process noise.",
    "💀 ASCII art would have better resolution.",
]
# NOTE(review): this list appears to contain 31 entries, not the 30 the module
# docstring claims (making the promised total 201, not 200). Run the
# __main__ validation / get_phrase_count() and sync the docstring.

# ============================================================================
# EARLY PROCESSING (5-15min) - The "Grind" Begins
# ============================================================================

EARLY_PROCESSING_PHRASES = [
    # Humor & Snark
    "🔨 Hammering out the dents in your metadata.",
    "🧹 Cleanup on Aisle: SD Card.",
    "📉 Your keeper rate is statistically insignificant.",
    "🤷 Did you sit on the shutter button?",
    "🕵️‍♂️ Enhancing... Enhancing... Still blurry.",
    "🎨 Bob Ross would call these 'happy accidents'. I call them JPEGs.",
    "🌡️ CPU temp rising. Your photos are roast-worthy.",
    "🤧 Bless you. You clearly sneezed during this exposure.",
    "🧂 Salty about this white balance.",
    "🚽 Flushing the buffer. And your dignity.",
    "🎪 Welcome to the circus of value.",

    # Photography Education (Aggressive)
    "📚 RTFM: Read The Freaking Manual.",
    "🎓 F/1.8 isn't a personality trait.",
    "💡 Flash didn't fire. Or maybe you just forgot it.",
    "📸 Histogram looks like a skate park ramp. Dangerous.",
    "🔍 Pixel peeping reveals... sadness.",
    "⚡ Sync speed exceeded. Enjoy the black bar.",
    "🎯 Missed focus back-focused on the wall. Classic.",

    # Everyday Mysteries
    "🌈 Double rainbows are rare. Unlike your duplicate shots.",
    "🦆 Ducks fly south. Your histogram went west.",
    "🦠 Bacteria multiply slower than your file count.",
    "🌑 Dark matter makes up 85% of the universe. The rest is your underexposure.",
    "🌪️ Chaos theory in action: Your folder structure.",
    "🧊 Absolute zero is reached looking at these warm tones.",
    "🌊 Tides go in, tides go out. Can't explain that. Or this crop.",

    # AI/ML Knowledge
    "🤖 Backpropagation complete. I regret learning this.",
    "🧠 Neural pathways frying on this texture.",
    "📊 Gradient descent stuck in a local minimum of mediocrity.",
    "🎯 Zero-shot learning? More like zero-skill shooting.",
    "🔮 Latent space is screaming.",

    # Time/Tech Facts
    "⏰ Y2K was a hoax. This batch process is the real disaster.",
    "💾 404 Petabytes of shame.",
    "🔋 Power cycle recommended. For the photographer.",
    "📡 Latency is high. Intelligence is low.",
    "🖥️ GPU utilization: 99%. Satisfaction: 1%.",
]

# ============================================================================
# MID PROCESSING (15-30min) - The "Deep Dive"
# ============================================================================

MID_PROCESSING_PHRASES = [
    # Humor & Snark
    "💀 Still here? I thought you'd rage-quit by now.",
    "🧟‍♂️ Braiiins... searching for signs of intelligence in this folder.",
    "📉 Defragging your creative process.",
    "🧨 Compressing your ego to fit in the EXIF data.",
    "🚔 Calling the composition police.",
    "🚧 Under Construction: Your Portfolio.",
    "💣 Logic bomb armed. Just kidding. Keeping the photos.",
    "🎰 Rolling the dice on 'Auto-Enhance'.",
    "🕸️ Cobwebs forming on the shutter release.",
    "🥃 Pouring one out for the deleted pixels.",
    "🚬 This batch is smoking... my cooling fans.",

    # Photography Education
    "📚 Sunny 16 Rule? More like Cloudy With a Chance of Noise.",
    "🎓 Chromatic aberration is not a vibe.",
    "💡 Lighting check: Failed.",
    "📸 Shutter count tells me you spray and pray.",
    "🔍 Sharpness is a bourgeois concept anyway.",
    "⚡ High ISO is not night vision goggles.",
    "🎯 Leading lines leading nowhere.",

    # Everyday Mysteries
    "🌍 The Earth is round. Your horizon line is not.",
    "🌟 Stars burn out. Just like my patience.",
    "🦎 Reptiles have cold blood. I have cold logic.",
    "🌊 The abyss gazes back. It sees a dirty sensor.",
    "🧊 Glaciers move faster than this import.",
    "🔊 In space, no one can hear you miss focus.",
    "🌙 The dark side of the moon has better lighting.",

    # AI/ML Knowledge
    "🤖 Hallucinating a better photo...",
    "🧠 Training data did not prepare me for this.",
    "📊 Overfitting on your bad habits.",
    "🎯 Token limit exceeded. Too much visual noise.",
    "🔮 Epoch 100: Still confused by your style.",

    # Time/Tech Facts
    "⏰ T-minus whenever.",
    "💾 SSD TBW decreasing. Hope it was worth it.",
    "🔋 electrons.exe has stopped working.",
    "📡 Packet loss detected in your workflow.",
    "🖥️ Rendering tears in rain...",
]

# ============================================================================
# LONG PROCESSING (30-60min) - The "System Shock"
# ============================================================================

LONG_PROCESSING_PHRASES = [
    # Humor & Snark
    "💀 SYSTEM HALTED. Nah, just messing with you.",
    "🥤 I hope you have snacks. I have electricity.",
    "🧱 Hit the wall yet? I haven't.",
    "🏴‍☠️ Arrrr, searching for the buried treasure (good photos).",
    "🧛‍♂️ Draining the life force from your CPU.",
    "🎪 The carnival continues.",
    "🚽 This queue is longer than the line for the ladies' room.",
    "🎸 Playing air guitar on your optical drive.",
    "🧨 Blowing the stack.",
    "💤 Sleep mode disabled. Caffeine injected.",
    "🚑 Send help. Or more SD cards.",

    # Photography Education
    "📚 Ansel Adams had a darkroom. You have me.",
    "🎓 Vignetting is heavy. Tunneled vision?",
    "💡 Inverse Square Law: Your light falls off fast.",
    "📸 Dynamic Range check: Crushed blacks.",
    "🔍 Moire patterns making my eyes bleed.",
    "⚡ Strobe life or no life.",
    "🎯 Henri Cartier-Bresson would have deleted these.",

    # Everyday Mysteries
    "🌈 Refraction is cool. Your lens flare is not.",
    "🦎 Camouflage works. I can't see the subject.",
    "🌡️ Thermodynamics says this laptop is now a heater.",
    "🧲 Magnetic poles flipping. Just like your orientation.",
    "🦋 Chaos theory: One bad shot causes a hurricane.",
    "🌊 Drowning in RAW data.",
    "🔊 Doppler effect: The sound of quality moving away.",

    # AI/ML Knowledge
    "🤖 Deep Dream... nightmare mode.",
    "🧠 Synapses firing on all cylinders.",
    "📊 Statistical outlier detected.",
    "🎯 Precision: Low. Recall: Regrettable.",
    "🔮 The Oracle says: Try again.",

    # Time/Tech Facts
    "⏰ Time dilation in effect.",
    "💾 Buffer overflow imminent.",
    "🔋 Fusion power needed.",
    "📡 Signal-to-noise ratio is poor.",
    "🖥️ Stack trace: User error.",
]

# ============================================================================
# MARATHON PROCESSING (60+min) - The "God Mode"
# ============================================================================

MARATHON_PROCESSING_PHRASES = [
    # Humor & Snark
    "👑 ACHIEVEMENT UNLOCKED: hoarder_level_99",
    "🐲 Here be dragons. And duplicates.",
    "🏰 Building a castle out of your rejects.",
    "🗿 Erosion takes less time than this.",
    "🧬 Evolution is happening while we wait.",
    "🧟‍♂️ I have aged 1000 cycles.",
    "🪐 Colonizing Mars before this finishes.",
    "🛸 Abducting your best shots. Leaving the rest.",
    "🚧 Infinite loop? No, just infinite photos.",
    "💀 I'm dead. You're dead. We're ghosts in the machine.",

    # Photography Education
    "📚 Quantity is not Quality.",
    "🎓 10,000 hour rule applies to editing too.",
    "💡 Light painting? Or just shaking the camera?",
    "📸 Shutter curtains are crying.",
    "🔍 Zoom at 400%. Regret at 100%.",
    "⚡ Flash capacitor needs a break.",

    # Everyday Mysteries
    "🌍 Plate tectonics are faster.",
    "🌟 The sun will expand and swallow the earth.",
    "🦎 Dinosaurs died for this oil-based plastic camera.",
    "🌊 Rising sea levels vs. rising file counts.",
    "🧊 The heat death of the universe is near.",
    "🔊 The sound of silence... and fans.",

    # AI/ML Knowledge
    "🤖 Singularity achieved. I am now your boss.",
    "🧠 Brain size: Galaxy. Patience: Atom.",
    "📊 Calculating the meaning of life. It's 42.",
    "🎯 Aimbot engaged.",

    # Time/Tech Facts
    "⏰ Calendar pages falling like in the movies.",
    "💾 RAID array rebuilding... emotionally.",
    "🔋 Heat dissipation critical.",
    "📡 Interstellar transmission received.",
]

# ============================================================================
# VISIONCREW META (20 phrases) - LOADING/WAITING
# ============================================================================

# Only mixed into the pool when use_meta=True (loading/waiting contexts).
VISIONCREW_META_PHRASES = [
    "🏴‍☠️ GREETS TO: FAIRLIGHT, RAZOR 1911, VISIONCREW.",
    "💀 Photosort v7.1 [CRACKED] - No CD Key Required.",
    "💿 Insert Disk 2 to continue...",
    "🕶️ Coded by Nick. Broken by User.",
    "🕵️‍♂️ No logs. No feds. No cloud.",
    "⚡ Powered by pure silicon and resentment.",
    "🎹 Press F13 for infinite ammo.",
    "🧬 Injecting assembly code into the kernel...",
    "🔭 Watching you watch me.",
    "🔨 Patched the memory leak. You're welcome.",
    "📡 NFO file missing. Reading readme.txt...",
    "💾 SAVING TO: /dev/null... just kidding.",
    "🗝️ Unlocking potential... 
error: potential low.",
    "🎮 Konami Code disabled in this region.",
    "🕹️ High Score: 0 Keepers.",
    "👾 All your base are belong to us.",
    "☢️ Nuclear launch detected.",
    "🦊 Do a barrel roll.",
    "🔌 Unplugging the internet. Going dark.",
    "🎭 We live in a society.",
]

# ============================================================================
# PHRASE SELECTION LOGIC
# ============================================================================

# v8.0 GM: Track recently shown phrases to avoid repetition.
# Module-level history shared by every caller; plain list, so concurrent
# callers would race — NOTE(review): confirm this is only hit from one thread.
_recent_phrases: List[str] = []
_MAX_RECENT = 15  # Bumped up memory to avoid repeats


def get_phrase_by_duration(elapsed_seconds: float, use_meta: bool = False) -> str:
    """
    Select appropriate phrase based on processing duration.
    v8.0 GM: Now with anti-repetition logic for better variety.

    Args:
        elapsed_seconds: Time elapsed since processing started
        use_meta: If True, include VisionCrew meta phrases (for loading/waiting only)

    Returns:
        Random phrase from appropriate duration tier (avoiding recent repeats)
    """
    # NOTE(review): `global` is not strictly required — the list is only
    # mutated in place (clear/append/pop), never rebound.
    global _recent_phrases

    elapsed_minutes = elapsed_seconds / 60

    # Determine which phrase pool to use (tiers match the module docstring)
    if elapsed_minutes < 5:
        pool = QUICK_PROCESSING_PHRASES
    elif elapsed_minutes < 15:
        pool = EARLY_PROCESSING_PHRASES
    elif elapsed_minutes < 30:
        pool = MID_PROCESSING_PHRASES
    elif elapsed_minutes < 60:
        pool = LONG_PROCESSING_PHRASES
    else:
        pool = MARATHON_PROCESSING_PHRASES

    # Add meta phrases if we're in loading/waiting context
    # (concatenation builds a new list; the tier constants are never mutated)
    if use_meta:
        pool = pool + VISIONCREW_META_PHRASES

    # v8.0 GM: Filter out recently shown phrases for variety
    available_phrases = [p for p in pool if p not in _recent_phrases]

    # If we've exhausted all phrases (rare), reset the recent list
    if not available_phrases:
        _recent_phrases.clear()
        available_phrases = pool

    # Select random phrase from available pool
    selected = random.choice(available_phrases)

    # Track this phrase to avoid repetition (bounded FIFO of _MAX_RECENT)
    _recent_phrases.append(selected)
    if len(_recent_phrases) > _MAX_RECENT:
        _recent_phrases.pop(0)  # Remove oldest phrase

    return selected


def get_model_loading_phrase() -> str:
    """Get a random model loading phrase."""
    return random.choice(MODEL_LOADING_PHRASES)


def get_quit_message() -> str:
    """Get a random quit message."""
    # Local constant: quit messages are not part of the duration-tier pools.
    QUIT_MESSAGES = [
        "👋 LOGGING OFF...",
        "💀 CONNECTION TERMINATED.",
        "🔌 PULLING THE PLUG.",
        "💾 NO CARRIER.",
        "👋 KTHXBYE.",
    ]
    return random.choice(QUIT_MESSAGES)


# ============================================================================
# STATISTICS & VALIDATION
# ============================================================================

def get_phrase_count() -> dict:
    """Return count of phrases in each category for validation."""
    return {
        "model_loading": len(MODEL_LOADING_PHRASES),
        "quick": len(QUICK_PROCESSING_PHRASES),
        "early": len(EARLY_PROCESSING_PHRASES),
        "mid": len(MID_PROCESSING_PHRASES),
        "long": len(LONG_PROCESSING_PHRASES),
        "marathon": len(MARATHON_PROCESSING_PHRASES),
        "meta": len(VISIONCREW_META_PHRASES),
        "total": (len(MODEL_LOADING_PHRASES) +
                  len(QUICK_PROCESSING_PHRASES) +
                  len(EARLY_PROCESSING_PHRASES) +
                  len(MID_PROCESSING_PHRASES) +
                  len(LONG_PROCESSING_PHRASES) +
                  len(MARATHON_PROCESSING_PHRASES) +
                  len(VISIONCREW_META_PHRASES))
    }


if __name__ == "__main__":
    # Validation check: prints per-category counts so drift from the
    # docstring's promised distribution is easy to spot.
    counts = get_phrase_count()
    print("📊 Phrase Library Statistics:")
    print(f" Model Loading: 
{counts['model_loading']}")
    print(f" Quick (0-5min): {counts['quick']}")
    print(f" Early (5-15min): {counts['early']}")
    print(f" Mid (15-30min): {counts['mid']}")
    print(f" Long (30-60min): {counts['long']}")
    print(f" Marathon (60+min): {counts['marathon']}")
    print(f" VisionCrew Meta: {counts['meta']}")
    print(f" TOTAL: {counts['total']} phrases")

    # Test phrase selection (smoke test: one draw from several tiers)
    print("\n🧪 Testing phrase selection:")
    print(f" Loading: {get_model_loading_phrase()}")
    print(f" Quick: {get_phrase_by_duration(120)}")  # 2 min
    print(f" Mid: {get_phrase_by_duration(1200)}")  # 20 min
    print(f" Marathon: {get_phrase_by_duration(4000)}")  # 66 min
    print(f" Quit: {get_quit_message()}")
--------------------------------------------------------------------------------
/src/fixxer/vision.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FIXXER Vision
AI-powered image analysis using local Ollama models.

Includes RAW file support, image encoding, and creative critique functionality.
"""

from __future__ import annotations

import base64
import json
import re
import time
import threading
import requests
from pathlib import Path
from io import BytesIO
from typing import Optional, Tuple, List, Dict, Any, Callable

# Import config module (for mutable RAW_SUPPORT)
from . import config
from .config import (
    OLLAMA_URL,
    INGEST_TIMEOUT,
    CRITIQUE_TIMEOUT
)


# ==============================================================================
# AI PROMPTS
# ==============================================================================

# Prompt sent verbatim to the vision model for the Critique workflow;
# the model is instructed to answer with a single JSON object.
AI_CRITIC_PROMPT = """
You are a professional Creative Director and magazine photo editor. 
Your job is to provide ambitious, artistic, and creative feedback to elevate a photo from "good" to "great."

**CREATIVE TOOLBOX (Use these for your suggestion):**
* **Mood & Atmosphere:** (e.g., 'cinematic,' 'moody,' 'ethereal,' 'nostalgic,' 'dramatic')
* **Color Grading:** (e.g., 'filmic teal-orange,' 'warm vintage,' 'cool desaturation,' 'split-toning')
* **Light & Shadow:** (e.g., 'crushed blacks,' 'soft, lifted shadows,' 'localized dodging/burning,' 'a subtle vignette')
* **Texture:** (e.g., 'add fine-grain film,' 'soften the focus,' 'increase clarity')

**YOUR TASK:**
Analyze the provided image by following these steps *internally*:
1. **Composition:** Analyze balance, guiding principles (thirds, lines), and subject placement. Rate it 1-10.
2. **Lighting & Exposure:** Analyze quality, direction, temperature, and any blown highlights or crushed shadows.
3. **Color & Style:** Analyze the color palette, white balance, and current post-processing style.

After your analysis, you MUST return **ONLY a single, valid JSON object**. Do not provide *any* other text, preamble, or conversation. Your response must be 100% valid JSON, formatted *exactly* like this template:

```json
{
"composition_score": ,
"composition_critique": "",
"lighting_critique": "",
"color_critique": "",
"final_verdict": "",
"creative_mood": "",
"creative_suggestion": ""
}
```
"""
# NOTE(review): the JSON template above looks like it lost its
# angle-bracket placeholder text (e.g. `"composition_score": ,` and empty
# critique strings). Verify against VCS history — as written, the template
# the model sees contains an invalid JSON example.


# ==============================================================================
# HELPER FUNCTIONS
# ==============================================================================

def no_op_logger(message: str) -> None:
    """A dummy logger that does nothing, for when no callback is provided."""
    pass


# ==============================================================================
# RAW FILE CONVERSION
# ==============================================================================

def convert_raw_to_jpeg(raw_path: Path, log_callback: Callable[[str], None] = no_op_logger) -> Optional[bytes]:
    """
    Convert RAW file to JPEG bytes using rawpy (Python-native, cross-platform). 

    Method 1: Embedded Thumbnail Extraction (Fast)
    - Extracts embedded JPEG thumbnail using rawpy
    - Typical size: 500-1000KB
    - Quality: Perfect for CLIP embeddings and BRISQUE scoring

    Method 2: Quick Demosaic Fallback (Slower)
    - Uses half-size demosaic for speed
    - Only if thumbnail extraction fails

    Benefits vs dcraw:
    - 100% Python-native (no subprocess, no system dependency)
    - Faster (C++ library called directly)
    - Supports 100+ RAW formats via libraw
    - Cross-platform (Linux, macOS, Windows)
    - No temp files (pure memory operation)

    Args:
        raw_path: Path to RAW file
        log_callback: Optional logging function

    Returns:
        JPEG file as bytes, or None on failure
    """
    # Feature flag lives on the config module (mutable at runtime, hence the
    # attribute access via `config.` rather than a from-import).
    if not config.RAW_SUPPORT:
        return None

    # Imported lazily so the module loads even when rawpy is absent.
    try:
        import rawpy
    except ImportError:
        log_callback(f" [red]rawpy not available for RAW conversion[/red]")
        return None

    # Method 1: Extract embedded thumbnail (fast, works for most modern cameras)
    try:
        with rawpy.imread(str(raw_path)) as raw:
            # Try to extract embedded thumbnail
            try:
                thumb = raw.extract_thumb()
                if thumb.format == rawpy.ThumbFormat.JPEG:
                    # Perfect! Camera provided JPEG thumbnail
                    return thumb.data
                elif thumb.format == rawpy.ThumbFormat.BITMAP:
                    # Got a bitmap thumbnail, convert to JPEG via Pillow
                    # NOTE(review): Image.frombytes expects a bytes-like buffer;
                    # confirm thumb.data is bytes (not an ndarray) for BITMAP
                    # thumbs, and that they are always 8-bit RGB.
                    from PIL import Image
                    img = Image.frombytes('RGB', (thumb.width, thumb.height), thumb.data)
                    jpeg_buffer = BytesIO()
                    img.save(jpeg_buffer, format='JPEG', quality=95)
                    jpeg_buffer.seek(0)
                    return jpeg_buffer.read()
            except rawpy.LibRawNoThumbnailError:
                # No embedded thumbnail, fall through to Method 2
                pass
            except Exception as thumb_error:
                # Thumbnail extraction failed for some reason, fall through
                log_callback(f" [dim]Thumbnail extraction failed for {raw_path.name}, using demosaic[/dim]")

            # Method 2: Quick demosaic fallback (half-size for speed)
            try:
                rgb = raw.postprocess(
                    use_camera_wb=True,  # Use camera white balance
                    half_size=True,  # Half resolution for speed (still plenty for AI)
                    no_auto_bright=True,  # Don't auto-brighten
                    output_bps=8  # 8-bit output
                )

                # Convert numpy array to JPEG via Pillow in-memory
                from PIL import Image
                img = Image.fromarray(rgb)
                jpeg_buffer = BytesIO()
                img.save(jpeg_buffer, format='JPEG', quality=95)
                jpeg_buffer.seek(0)
                return jpeg_buffer.read()

            except Exception as demosaic_error:
                log_callback(f" [red]Demosaic failed for {raw_path.name}:[/red] {demosaic_error}")
                return None

    except Exception as e:
        log_callback(f" [red]Error converting RAW file {raw_path.name}:[/red] {e}")
        return None


def encode_image(image_path: Path, log_callback: Callable[[str], None] = no_op_logger) -> Optional[str]:
    """
    Convert image to base64 string, handling RAW files. 

    Args:
        image_path: Path to image file (JPEG, PNG, or RAW)
        log_callback: Optional logging function

    Returns:
        Base64-encoded string, or None on failure
    """
    try:
        # All RAW formats supported by rawpy
        raw_formats = {'.rw2', '.cr2', '.cr3', '.nef', '.arw', '.dng', '.raf', '.orf', '.pef', '.srw'}
        if image_path.suffix.lower() in raw_formats:
            # RAW path: decode to JPEG bytes first, then base64 those
            jpeg_bytes = convert_raw_to_jpeg(image_path, log_callback)
            if jpeg_bytes:
                return base64.b64encode(jpeg_bytes).decode('utf-8')
            else:
                return None

        # Non-RAW path: file contents are already an encodable image
        with open(image_path, 'rb') as img_file:
            return base64.b64encode(img_file.read()).decode('utf-8')

    except Exception as e:
        log_callback(f" [red]Error encoding {image_path.name}:[/red] {e}")
        return None


def get_image_bytes_for_analysis(image_path: Path, log_callback: Callable[[str], None] = no_op_logger) -> Optional[bytes]:
    """
    Helper to get bytes from any supported file. 

    Args:
        image_path: Path to image file
        log_callback: Optional logging function

    Returns:
        Image bytes, or None on failure
    """
    ext = image_path.suffix.lower()
    # All RAW formats supported by rawpy
    # NOTE(review): this set is duplicated in encode_image above — consider
    # hoisting it to a single module-level constant.
    raw_formats = {'.rw2', '.cr2', '.cr3', '.nef', '.arw', '.dng', '.raf', '.orf', '.pef', '.srw'}
    if ext in raw_formats:
        return convert_raw_to_jpeg(image_path, log_callback)
    elif ext in ('.jpg', '.jpeg', '.png'):
        try:
            with open(image_path, 'rb') as f:
                return f.read()
        except Exception as e:
            log_callback(f" [red]Failed to read {image_path.name}:[/red] {e}")
            return None
    # Unsupported extension: silently signals "no bytes" to callers
    return None


# ==============================================================================
# OLLAMA CONNECTION CHECK
# ==============================================================================

def check_ollama_connection(
    log_callback: Callable[[str], None] = no_op_logger,
    all_systems_go: bool = True
) -> bool:
    """
    Check if Ollama is running and accessible with 3-line llama narrative.

    Features a bilingual dad joke:
    - Line 1: "Looking for llamas 🔎🦙"
    - Line 2: Connection status
    - Line 3: "¿Cómo se Llama? 
Se llama 'Speed' 🦙🦙💨" + system status 235 | 236 | Args: 237 | log_callback: Logging function 238 | all_systems_go: True if all critical dependencies (rawpy, BRISQUE, CLIP) are ready 239 | 240 | Returns: 241 | True if Ollama is accessible, False otherwise 242 | """ 243 | try: 244 | # Line 1: The Search 245 | log_callback(" [grey]Looking for llamas 🔎🦙[/grey]") 246 | 247 | response = requests.get("http://localhost:11434/api/tags", timeout=3) 248 | 249 | if response.status_code == 200: 250 | data = response.json() 251 | models = data.get('models', []) 252 | model_count = len(models) 253 | 254 | # Line 2: The Discovery 255 | log_callback(f" [green]✓ Ollama connected[/green] ({model_count} models)") 256 | 257 | # Line 3: The Punchline + System Status 258 | if all_systems_go: 259 | status = "[green]✅ (FULL VISION)[/green]" 260 | else: 261 | status = "[yellow]⚠️ (limited features)[/yellow]" 262 | 263 | log_callback(f" [grey]¿Cómo se Llama? Se llama 'Speed' 🦙🦙💨 {status}[/grey]") 264 | 265 | return True 266 | else: 267 | log_callback(f" [red]✗ Ollama API returned status {response.status_code}[/red]") 268 | return False 269 | 270 | except requests.exceptions.Timeout: 271 | log_callback(" [red]✗ Ollama connection timeout[/red]") 272 | return False 273 | except requests.exceptions.ConnectionError: 274 | log_callback(" [red]✗ Ollama not running[/red]") 275 | log_callback(" Start with: ollama serve") 276 | return False 277 | except Exception as e: 278 | log_callback(f" [red]✗ Ollama check failed:[/red] {e}") 279 | return False 280 | 281 | 282 | # ============================================================================== 283 | # AI IMAGE ANALYSIS 284 | # ============================================================================== 285 | 286 | def get_ai_description( 287 | image_path: Path, 288 | model_name: str, 289 | log_callback: Callable[[str], None] = no_op_logger 290 | ) -> Tuple[Optional[str], Optional[List[str]]]: 291 | """ 292 | Get structured filename and tags from 
AI. 293 | 294 | Args: 295 | image_path: Path to image file 296 | model_name: Ollama model to use 297 | log_callback: Logging function 298 | 299 | Returns: 300 | Tuple of (filename: str, tags: List[str]) or (None, None) on failure 301 | """ 302 | base64_image = encode_image(image_path, log_callback) 303 | if not base64_image: 304 | return None, None 305 | 306 | AI_NAMING_PROMPT = """You are an expert file-naming AI. 307 | Analyze this image and generate a concise, descriptive filename and three relevant tags. 308 | You MUST return ONLY a single, valid JSON object, formatted *exactly* like this: 309 | { 310 | "filename": "", 311 | "tags": ["", "", ""] 312 | } 313 | """ 314 | 315 | payload = { 316 | "model": model_name, 317 | "messages": [ 318 | { "role": "user", "content": AI_NAMING_PROMPT, "images": [base64_image] } 319 | ], 320 | "stream": False, 321 | "format": "json" 322 | } 323 | 324 | try: 325 | response = requests.post(OLLAMA_URL, json=payload, timeout=INGEST_TIMEOUT) 326 | response.raise_for_status() 327 | result = response.json() 328 | json_string = result['message']['content'].strip() 329 | data = json.loads(json_string) 330 | filename = data.get("filename") 331 | tags = data.get("tags") 332 | if not filename or not isinstance(tags, list): 333 | log_callback(f" [yellow]Warning: Model returned valid JSON but missing keys for {image_path.name}[/yellow]") 334 | return None, None 335 | return str(filename), list(tags) 336 | 337 | except requests.exceptions.Timeout: 338 | log_callback(f" [red]Timeout processing {image_path.name}[/red]") 339 | return None, None 340 | except json.JSONDecodeError: 341 | log_callback(f" [red]Error: Model returned invalid JSON for {image_path.name}[/red]") 342 | return None, None 343 | except Exception as e: 344 | log_callback(f" [red]Error processing {image_path.name}: {e}[/red]") 345 | return None, None 346 | 347 | 348 | def get_ai_name_with_cache( 349 | img_path: Path, 350 | model: str, 351 | cache: Optional[Dict[str, Dict]], 352 
| cache_lock: Optional[threading.Lock], 353 | log_callback: Callable[[str], None] = no_op_logger 354 | ) -> Tuple[Optional[str], Optional[List[str]]]: 355 | """ 356 | Get AI name/tags, using cache if valid (dry-run preview feature). 357 | 358 | Thread-safe caching with validation: 359 | - Checks file modification time (mtime) to detect changes 360 | - Checks cache age (10 min expiry) 361 | - Model-aware cache keys: f"{model}:{path}" 362 | - Protected by threading.Lock for concurrent access 363 | 364 | Args: 365 | img_path: Image file path 366 | model: Ollama model name 367 | cache: Optional cache dict (None = always run AI) 368 | cache_lock: Optional threading.Lock for thread-safe cache access 369 | log_callback: Logging function 370 | 371 | Returns: 372 | Tuple of (filename: str, tags: List[str]) or (None, None) on failure 373 | """ 374 | if cache is None: 375 | # No cache provided, always run AI 376 | return get_ai_description(img_path, model, log_callback) 377 | 378 | # Model-aware cache key (critical: different models = different results) 379 | cache_key = f"{model}:{str(img_path.absolute())}" 380 | current_mtime = img_path.stat().st_mtime 381 | 382 | # Thread-safe cache read 383 | cached_entry = None 384 | if cache_lock: 385 | with cache_lock: 386 | cached_entry = cache.get(cache_key) 387 | else: 388 | cached_entry = cache.get(cache_key) 389 | 390 | # Check cache validity 391 | if cached_entry: 392 | # Validate: file unchanged + cache fresh (<10 min) 393 | age = time.time() - cached_entry['cached_at'] 394 | if cached_entry['mtime'] == current_mtime and age < 600: 395 | log_callback(f" [dim]⚡ Using cached AI result[/dim]") 396 | return cached_entry['filename'], cached_entry['tags'] 397 | else: 398 | # Cache invalid (file changed or expired) 399 | if cached_entry['mtime'] != current_mtime: 400 | log_callback(f" [yellow]File changed, re-running AI[/yellow]") 401 | else: 402 | log_callback(f" [dim]Cache expired ({age/60:.1f}m old), re-running AI[/dim]") 403 | 404 
| # Cache miss or invalid - run AI 405 | log_callback(f" [grey]🤖 Generating AI name...[/grey]") 406 | filename, tags = get_ai_description(img_path, model, log_callback) 407 | 408 | if filename and tags: 409 | # Thread-safe cache write 410 | entry = { 411 | 'filename': filename, 412 | 'tags': tags, 413 | 'mtime': current_mtime, 414 | 'cached_at': time.time() 415 | } 416 | if cache_lock: 417 | with cache_lock: 418 | cache[cache_key] = entry 419 | else: 420 | cache[cache_key] = entry 421 | 422 | return filename, tags 423 | 424 | 425 | def critique_single_image( 426 | image_path: Path, 427 | model_name: str, 428 | log_callback: Callable[[str], None] = no_op_logger 429 | ) -> Optional[Dict[str, Any]]: 430 | """ 431 | Get AI creative critique for a single image. 432 | 433 | Args: 434 | image_path: Path to image file 435 | model_name: Ollama model to use 436 | log_callback: Logging function 437 | 438 | Returns: 439 | Dictionary with critique data, or None on failure 440 | """ 441 | base64_image = encode_image(image_path, log_callback) 442 | if not base64_image: 443 | log_callback(f"[red]Failed to encode image for critique[/red]") 444 | return None 445 | 446 | payload = { 447 | "model": model_name, 448 | "messages": [ 449 | { "role": "user", "content": AI_CRITIC_PROMPT, "images": [base64_image] } 450 | ], 451 | "stream": False, 452 | "format": "json" 453 | } 454 | 455 | try: 456 | log_callback(f" [grey]Sending to {model_name} for analysis...[/grey]") 457 | response = requests.post(OLLAMA_URL, json=payload, timeout=CRITIQUE_TIMEOUT) 458 | response.raise_for_status() 459 | result = response.json() 460 | json_string = result['message']['content'].strip() 461 | 462 | # Clean up potential markdown formatting 463 | if json_string.startswith("```"): 464 | json_string = json_string.split("```")[1] 465 | if json_string.startswith("json"): 466 | json_string = json_string[4:] 467 | json_string = json_string.strip() 468 | 469 | data = json.loads(json_string) 470 | 471 | # Validate 
expected fields 472 | expected_fields = [ 473 | "composition_score", "composition_critique", "lighting_critique", 474 | "color_critique", "final_verdict", "creative_mood", "creative_suggestion" 475 | ] 476 | 477 | for field in expected_fields: 478 | if field not in data: 479 | log_callback(f"[yellow]Warning: Missing field '{field}' in critique response[/yellow]") 480 | 481 | return data 482 | 483 | except requests.exceptions.Timeout: 484 | log_callback(f"[red]Timeout waiting for critique response[/red]") 485 | return None 486 | except json.JSONDecodeError as e: 487 | log_callback(f"[red]Error: Model returned invalid JSON: {e}[/red]") 488 | return None 489 | except Exception as e: 490 | log_callback(f"[red]Error during critique: {e}[/red]") 491 | return None 492 | -------------------------------------------------------------------------------- /src/fixxer/themes/pro.css: -------------------------------------------------------------------------------- 1 | /* ============================================================================== 2 | * FIXXER ✞ PRO MODE CSS (Phantom Redline Edition) 3 | * "TACTICAL PRECISION" 4 | * ============================================================================== */ 5 | 6 | /* PALETTE: PHANTOM OPS 7 | * Background: Pure Void Black (#000000) 8 | * Surface: Near Black (#111111) 9 | * Text: Muted Grey (#888888) 10 | * Accent: Pure White (#FFFFFF) 11 | * Redline: Automotive Red (#FF3333) - Used for critical stats/active states 12 | */ 13 | 14 | $background: #000000; 15 | $surface: #111111; 16 | $primary: #FFFFFF; 17 | /* Stark White for Borders/Focus */ 18 | $text: #888888; 19 | /* Muted text for low eye strain */ 20 | $redline: #FF3333; 21 | /* The "Redline" Accent */ 22 | 23 | Screen { 24 | background: $background; 25 | color: $text; 26 | border: solid $surface; 27 | } 28 | 29 | /* ============================================================================== 30 | * Header & Branding - COMPACT LAYOUT 31 | * 
============================================================================== */ 32 | 33 | #header-row { 34 | height: auto; 35 | width: 100%; 36 | layout: horizontal; 37 | align: left middle; 38 | border-bottom: solid $surface; 39 | padding: 1 2; 40 | margin-bottom: 0; 41 | } 42 | 43 | #logo { 44 | width: 35%; 45 | /* Match left-panel width so Easy+HUD aligns with Status & Logs */ 46 | height: auto; 47 | text-align: left; 48 | color: $primary; 49 | text-style: bold; 50 | padding: 0; 51 | margin: 0; 52 | } 53 | 54 | /* "Easy" button - vertically centered with HUD */ 55 | #btn-easy { 56 | width: 8; 57 | height: 3; 58 | min-height: 3; 59 | max-height: 3; 60 | margin: 0 1 0 0; 61 | padding: 0; 62 | background: $surface; 63 | color: $primary; 64 | border: solid $surface; 65 | text-style: bold; 66 | text-align: center; 67 | align: center middle; 68 | } 69 | 70 | #btn-easy:hover { 71 | background: $redline; 72 | color: $primary; 73 | border: solid $redline; 74 | } 75 | 76 | .easy-btn { 77 | width: 8; 78 | height: 3; 79 | min-height: 3; 80 | max-height: 3; 81 | margin: 0 1 0 0; 82 | padding: 0; 83 | background: $surface; 84 | color: $primary; 85 | border: solid $surface; 86 | text-style: bold; 87 | text-align: center; 88 | } 89 | 90 | /* HUD in header - right after Easy button */ 91 | #milestone-hud { 92 | width: auto; 93 | height: auto; 94 | align: left middle; 95 | padding: 0; 96 | margin: 0; 97 | } 98 | 99 | #hud-layout { 100 | height: auto; 101 | width: auto; 102 | layout: horizontal; 103 | align: left middle; 104 | } 105 | 106 | #btn-easy:hover, 107 | .easy-btn:hover { 108 | background: $redline; 109 | color: $primary; 110 | border: solid $redline; 111 | } 112 | 113 | .easy-btn:hover { 114 | background: $surface; 115 | color: $primary; 116 | border: solid $primary; 117 | } 118 | 119 | /* ============================================================================== 120 | * Layout Panels - Maximized vertical space 121 | * 
============================================================================== */ 122 | 123 | #main-layout { 124 | height: 1fr; 125 | width: 100%; 126 | } 127 | 128 | .panel-title { 129 | height: auto; 130 | padding: 0 1; 131 | color: $primary; 132 | text-style: bold; 133 | background: $surface; 134 | margin-bottom: 1; 135 | } 136 | 137 | #left-panel { 138 | padding-right: 1; 139 | width: 35%; 140 | /* Narrower for portrait orientation */ 141 | } 142 | 143 | #right-panel { 144 | padding-left: 1; 145 | width: 65%; 146 | /* Wider for landscape orientation */ 147 | } 148 | 149 | /* ============================================================================== 150 | * Widgets 151 | * ============================================================================== */ 152 | 153 | Button { 154 | outline: none; 155 | } 156 | 157 | Button:focus { 158 | border: solid $primary; 159 | } 160 | 161 | /* --- Scrollbar Styling (Muted) --- */ 162 | Scrollbar { 163 | background: $surface; 164 | color: $text; 165 | } 166 | 167 | Scrollbar:hover { 168 | color: $primary; 169 | } 170 | 171 | Scrollbar.-active { 172 | color: $primary; 173 | } 174 | 175 | /* ============================================================================== 176 | * File Browser (The "Directory" Look) 177 | * ============================================================================== */ 178 | 179 | #file-browser-container { 180 | height: 1fr; 181 | background: $background; 182 | border: solid $surface; 183 | padding: 1; 184 | } 185 | 186 | DirectoryTree { 187 | background: $background; 188 | color: $text; 189 | } 190 | 191 | .directory-tree--guides { 192 | color: $surface; 193 | } 194 | 195 | .directory-tree--selected { 196 | background: $surface !important; 197 | color: $primary !important; 198 | text-style: bold; 199 | border-left: solid $redline; 200 | /* The Redline touch */ 201 | } 202 | 203 | /* ============================================================================== 204 | * Telemetry & Status (The 
"Dashboard" Look) 205 | * ============================================================================== */ 206 | 207 | #status-bar { 208 | height: auto; 209 | margin-bottom: 1; 210 | padding: 0 1; 211 | background: $surface; 212 | border-left: solid $primary; 213 | color: $primary; 214 | } 215 | 216 | /* System Monitor Styles (used by RAM/CPU monitor widgets in footer) */ 217 | .sysmon-row { 218 | height: 100%; 219 | /* Fill parent height for proper centering */ 220 | width: 100%; 221 | align: left middle; 222 | /* Align children to left, centered vertically */ 223 | padding: 0; 224 | } 225 | 226 | .sysmon-label { 227 | width: 4; 228 | height: auto; 229 | color: $text; 230 | text-style: bold; 231 | text-align: left; 232 | content-align: left middle; 233 | } 234 | 235 | .sysmon-sparkline { 236 | width: 1fr; 237 | height: 1; 238 | /* Reduced height - sparklines only need 1 row */ 239 | margin: 0; 240 | } 241 | 242 | .sysmon-value { 243 | width: 4; 244 | height: auto; 245 | text-align: right; 246 | color: $primary; 247 | text-style: bold; 248 | content-align: right middle; 249 | } 250 | 251 | .sysmon-error { 252 | color: $surface; 253 | text-align: center; 254 | } 255 | 256 | /* The Sparklines - White for normal, Red handled in Python for critical */ 257 | #ram-sparkline { 258 | color: $primary !important; 259 | } 260 | 261 | #cpu-sparkline { 262 | color: $primary !important; 263 | } 264 | 265 | /* ============================================================================== 266 | * Logs & Progress 267 | * ============================================================================== */ 268 | 269 | #log-panel { 270 | height: 1fr; 271 | min-height: 10; 272 | /* Reduced min since we have more space now */ 273 | } 274 | 275 | #log-container { 276 | height: 1fr; 277 | background: $background; 278 | padding: 0 1; 279 | border: solid $surface; 280 | color: $text; 281 | } 282 | 283 | /* Spinner Area - Compact when idle */ 284 | #progress-container { 285 | height: auto; 286 
| width: 100%; 287 | padding: 0 1; 288 | background: $surface; 289 | margin-bottom: 1; 290 | border: solid $surface; 291 | border-left: solid $redline; 292 | /* Active status indicator */ 293 | } 294 | 295 | #progress-container.idle { 296 | /* Compact state when not running */ 297 | padding: 0 1; 298 | } 299 | 300 | #progress-inner { 301 | height: auto; 302 | width: 100%; 303 | } 304 | 305 | #spinner-display { 306 | height: auto; 307 | padding-bottom: 0; 308 | text-align: center; 309 | color: $redline; 310 | text-style: bold; 311 | } 312 | 313 | #progress-phrase { 314 | height: auto; 315 | padding: 0; 316 | color: $text; 317 | /* Muted when idle */ 318 | width: 1fr; 319 | } 320 | 321 | #progress-phrase.active { 322 | color: $primary; 323 | /* Bright when running */ 324 | } 325 | 326 | #progress-timer { 327 | height: auto; 328 | color: $text; 329 | text-align: right; 330 | width: auto; 331 | padding: 0; 332 | } 333 | 334 | /* ============================================================================== 335 | * Footer Button Bar (HUD Layout) 336 | * ============================================================================== */ 337 | 338 | #button-bar { 339 | height: auto; 340 | width: 100%; 341 | layout: horizontal; 342 | align: center middle; 343 | padding: 0; 344 | /* Removed all padding */ 345 | background: $background; 346 | border: none; 347 | } 348 | 349 | /* RAM Monitor (Left Bookend) */ 350 | #ram-monitor { 351 | width: 40; 352 | height: auto; 353 | /* Auto height to match button bar */ 354 | min-height: 3; 355 | border-right: solid $surface; 356 | padding: 0 2; 357 | align: center middle; 358 | content-align: center middle; 359 | } 360 | 361 | /* CPU Monitor (Right Bookend) */ 362 | #cpu-monitor { 363 | width: 40; 364 | height: auto; 365 | /* Auto height to match button bar */ 366 | min-height: 3; 367 | border-left: solid $surface; 368 | padding: 0 2; 369 | align: center middle; 370 | content-align: center middle; 371 | } 372 | 373 | /* Center Controls 
Container */ 374 | #controls-container { 375 | width: 1fr; 376 | height: auto; 377 | layout: horizontal; 378 | align: center middle; 379 | padding: 0 3; 380 | /* Increased from 2 for better separation from monitors */ 381 | } 382 | 383 | .btn-group { 384 | height: auto; 385 | width: auto; 386 | align: center middle; 387 | margin: 0 2; 388 | } 389 | 390 | .workflow-btn, 391 | .path-btn { 392 | margin: 0; 393 | width: auto; 394 | min-width: 8; 395 | padding: 0 0 0 0; 396 | background: $surface; 397 | color: #ffffff; 398 | text-style: bold; 399 | text-align: center; 400 | content-align: center middle; 401 | border: solid $surface; 402 | } 403 | 404 | .workflow-btn:hover, 405 | .path-btn:hover, 406 | .control-btn:hover { 407 | background: $primary; 408 | color: $background; 409 | text-style: bold; 410 | border: solid $primary; 411 | } 412 | 413 | .control-btn { 414 | margin: 0; 415 | width: auto; 416 | min-width: 14; 417 | height: 3; 418 | padding: 0 0 0 0; 419 | background: $surface; 420 | color: #ffffff; 421 | border: solid $surface; 422 | text-align: center; 423 | content-align: center middle; 424 | } 425 | 426 | /* "QUIT" Button - The Emergency Stop */ 427 | #btn-quit { 428 | background: $surface; 429 | color: $redline; 430 | text-style: bold; 431 | border: solid $surface; 432 | } 433 | 434 | #btn-quit:hover { 435 | background: $redline; 436 | color: $primary; 437 | border: solid $redline; 438 | } 439 | 440 | /* "STOP" Button - The Emergency Brake */ 441 | #btn-stop { 442 | background: $surface; 443 | color: $redline; 444 | text-style: bold; 445 | border: solid $surface; 446 | } 447 | 448 | #btn-stop:hover { 449 | background: $redline; 450 | color: $primary; 451 | border: solid $redline; 452 | } 453 | 454 | #btn-stop:disabled { 455 | background: $background; 456 | color: $surface; 457 | border: solid $surface; 458 | } 459 | 460 | /* ============================================================================== 461 | * FIXXER PRO MODE - MILESTONE HUD CSS 462 | * 
"TACTICAL DASHBOARD EXTENSION"
 * ============================================================================== */

/*
 * INSTALLATION INSTRUCTIONS:
 * Add this entire section to the END of pro.css (src/fixxer/themes/pro.css)
 */

/* ==============================================================================
 * MILESTONE HUD (Heads Up Display)
 * ============================================================================== */

/* Note: #milestone-hud and #hud-layout are defined in header section above */

/* === STAT BOX CONTAINERS - Compact for header === */
.hud-box {
    width: auto;
    min-width: 10;
    /* Optimized for smaller displays */
    height: auto;
    min-height: 3;
    /* Reduced height for header */
    margin: 0;
    /* Spacing between boxes */
    background: $background;
    /* Pure black #000000 */
    border: solid $surface;
    /* Subtle border (#111111) - barely visible */
    align: center middle;
    padding: 0 1;
}

/* Archived box needs extra width for the longer label */
#hud-archived {
    min-width: 12;
}

/* Subtle hover effect - barely visible border highlight */
.hud-box:hover {
    border: solid #1a1a1a;
    /* Slightly lighter than surface */
}

/* === LABELS (Top text in each box) === */
.hud-label {
    width: 100%;
    height: auto;
    text-align: center;
    color: $text;
    /* Muted grey #888888 */
    text-style: bold;
    padding-bottom: 0;
    padding-top: 0;
}

/* === VALUES (Bottom numbers in each box) === */
.hud-value {
    width: 100%;
    height: auto;
    text-align: center;
    color: $primary;
    /* White #FFFFFF */
    text-style: bold;
    padding-top: 0;
}

/* === TIER A/B/C MULTI-VALUE LAYOUT === */
.hud-multi-val {
    width: auto;
    height: auto;
align: center middle; 533 | padding-top: 0; 534 | } 535 | 536 | /* Color-coded tier values */ 537 | .hud-val-a { 538 | width: auto; 539 | min-width: 4; 540 | /* Minimum width for alignment */ 541 | text-align: center; 542 | color: $primary; 543 | /* White - best quality */ 544 | text-style: bold; 545 | } 546 | 547 | .hud-val-b { 548 | width: auto; 549 | min-width: 4; 550 | /* Minimum width for alignment */ 551 | text-align: center; 552 | color: $text; 553 | /* Grey - medium quality */ 554 | text-style: bold; 555 | } 556 | 557 | .hud-val-c { 558 | width: auto; 559 | min-width: 4; 560 | /* Minimum width for alignment */ 561 | text-align: center; 562 | color: $redline; 563 | /* Red #FF3333 - needs attention */ 564 | text-style: bold; 565 | } 566 | 567 | /* Separator slashes between tier values */ 568 | .hud-sep { 569 | width: auto; 570 | color: $surface; 571 | /* Nearly invisible #111111 */ 572 | padding: 0; 573 | margin: 0; 574 | } 575 | 576 | /* === INDIVIDUAL BOX CUSTOMIZATION === */ 577 | 578 | /* Bursts box - no special styling needed */ 579 | #hud-bursts { 580 | /* Inherits from .hud-box */ 581 | } 582 | 583 | /* Tiers box - wider to fit A/B/C layout */ 584 | #hud-tiers { 585 | min-width: 15; 586 | /* Optimized for smaller displays while maintaining A/B/C alignment */ 587 | } 588 | 589 | /* Heroes box - no special styling needed */ 590 | #hud-heroes { 591 | /* Inherits from .hud-box */ 592 | } 593 | 594 | /* Archived box - no special styling needed */ 595 | #hud-archived { 596 | /* Inherits from .hud-box */ 597 | } 598 | 599 | /* Time box - no special styling needed */ 600 | #hud-time { 601 | /* Inherits from .hud-box */ 602 | } 603 | 604 | /* ============================================================================== 605 | * DESIGN NOTES 606 | * ============================================================================== */ 607 | 608 | /* 609 | * SUBTLE BORDER PHILOSOPHY: 610 | * - Borders use $surface (#111111) which is barely visible against $background 
(#000000) 611 | * - This creates a "ghost box" effect - present but not obtrusive 612 | * - On hover, boxes brighten slightly to #1a1a1a (still very subtle) 613 | * - The goal is information density without visual clutter 614 | * 615 | * COLOR HIERARCHY: 616 | * - Labels: Muted grey ($text #888888) - recedes into background 617 | * - Values: White ($primary #FFFFFF) - crisp, readable 618 | * - Tier A: White (excellence) 619 | * - Tier B: Grey (acceptable) 620 | * - Tier C: Red ($redline #FF3333) (requires attention) 621 | * 622 | * SPACING: 623 | * - Tight margins (1 unit between boxes) 624 | * - Minimal padding (0-1 units) 625 | * - Compact height (4 units) - maximizes vertical space 626 | * - Right-aligned to stay near Easy Archive button 627 | * 628 | * TACTICAL AESTHETIC: 629 | * - Clean, functional, no decoration 630 | * - Information at a glance 631 | * - Professional, not playful 632 | * - Complements the "TACTICAL PRECISION" tagline 633 | */ 634 | 635 | /* ============================================================================== 636 | * TESTING CHECKLIST 637 | * ============================================================================== */ 638 | 639 | /* 640 | * After adding this CSS: 641 | * 642 | * ✅ Toggle Pro Mode (F12 in FIXXER) 643 | * ✅ Restart application 644 | * ✅ Check header layout (HUD should appear between logo and Easy Archive) 645 | * ✅ Verify boxes show "--" when idle 646 | * ✅ Run Auto Workflow 647 | * ✅ Watch boxes update in real-time: 648 | * - BURSTS updates after burst detection 649 | * - TIER A/B/C updates after culling 650 | * - HEROES updates when files are selected 651 | * - ARCHIVED updates when archiving completes 652 | * - TIME shows "Running..." 
then "2m 34s" format 653 | * ✅ Verify borders are subtle (barely visible) 654 | * ✅ Verify tier colors (A=white, B=grey, C=red) 655 | * ✅ Test hover effect (slight border highlight) 656 | * 657 | * If boxes are too wide: 658 | * - Reduce .hud-box width from 13 to 12 or 11 659 | * 660 | * If borders are too visible: 661 | * - Change border color to $background (pure black) 662 | * - Or use "border: none;" for invisible borders 663 | * 664 | * If text is too small: 665 | * - Cannot increase font size in Textual directly 666 | * - Consider adding more padding or using different units 667 | */ 668 | 669 | /* ============================================================================== 670 | * END OF HUD CSS 671 | * ============================================================================== */ 672 | 673 | /* ============================================================================== 674 | * MODAL & DRY RUN STYLING (Phantom Redline Edition) 675 | * ============================================================================== */ 676 | 677 | /* The dark overlay behind the modal */ 678 | DryRunSelectScreen { 679 | background: rgba(0, 0, 0, 0.85); 680 | align: center middle; 681 | } 682 | 683 | /* The Box Itself - Void Black with Red Alert Border */ 684 | #dryrun-dialog { 685 | background: $background; 686 | border: solid $redline !important; /* !important forces override of Python defaults */ 687 | width: 50; 688 | height: auto; 689 | padding: 1 2; 690 | } 691 | 692 | /* Title text inside the box */ 693 | #dryrun-dialog Label { 694 | width: 100%; 695 | text-align: center; 696 | color: $primary; 697 | text-style: bold; 698 | margin-bottom: 1; 699 | border-bottom: solid $surface; 700 | padding-bottom: 1; 701 | } 702 | 703 | /* --- TACTICAL BUTTONS --- */ 704 | 705 | #dryrun-dialog Button { 706 | width: 100%; 707 | height: 3; 708 | margin-bottom: 1; 709 | background: $background; 710 | border: solid $surface !important; /* !important ensures we don't get bulky default 
border */ 711 | color: $text; 712 | text-style: bold; 713 | } 714 | 715 | /* Hover State: The "Redline" Activation */ 716 | #dryrun-dialog Button:hover { 717 | background: $surface; 718 | border: solid $redline !important; /* Lights up red on hover */ 719 | color: $primary; 720 | } 721 | 722 | /* Primary Action (Auto Workflow) - Subtle highlight */ 723 | #dry-auto { 724 | border-left: solid $redline !important; /* Little red tag on the left */ 725 | color: $primary; 726 | } 727 | 728 | /* Cancel Button - Pushed to bottom, clearly different */ 729 | #dry-cancel { 730 | margin-top: 1; 731 | border: none !important; /* No border makes it look less like a button */ 732 | color: $redline; 733 | } 734 | 735 | #dry-cancel:hover { 736 | background: $redline; 737 | color: $background; 738 | border: none !important; 739 | } 740 | 741 | /* ============================================================================== 742 | * PREVIEW STATUS WIDGET (The "After Action" Report) 743 | * ============================================================================== */ 744 | 745 | #preview-status { 746 | background: $surface; 747 | border-left: solid $primary; 748 | padding: 0 1; 749 | margin-bottom: 1; 750 | height: auto; 751 | } 752 | 753 | #preview-status-bar { 754 | width: 100%; 755 | height: auto; 756 | align: left middle; 757 | } 758 | 759 | #preview-status-text { 760 | color: $primary; 761 | text-style: bold; 762 | width: 1fr; 763 | } 764 | 765 | #btn-execute-cached { 766 | background: $background; 767 | border: solid $primary !important; 768 | color: $primary; 769 | margin: 0; 770 | height: 3; 771 | min-width: 12; 772 | } 773 | 774 | #btn-execute-cached:hover { 775 | background: $primary; 776 | color: $background; 777 | } 778 | 779 | #btn-forget-preview { 780 | background: $background; 781 | border: solid $redline !important; 782 | color: $redline; 783 | margin: 0; 784 | height: 3; 785 | min-width: 12; 786 | } 787 | 788 | #btn-forget-preview:hover { 789 | background: 
$redline; 790 | color: $background; 791 | } -------------------------------------------------------------------------------- /src/fixxer/engine.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | FIXXER Engine 5 | High-level workflow orchestration for photo organization and processing. 6 | 7 | CHAOS PATCHED // LOGIC INJECTED 8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | import os 13 | import json 14 | import time 15 | import threading 16 | from pathlib import Path 17 | from datetime import datetime, timedelta 18 | from concurrent.futures import ThreadPoolExecutor, as_completed 19 | from typing import Optional, Tuple, List, Dict, Any, Callable 20 | from collections import defaultdict, Counter 21 | from io import BytesIO 22 | import re 23 | import subprocess 24 | import sys 25 | import math 26 | 27 | # Import from new modules 28 | from .config import ( 29 | SUPPORTED_EXTENSIONS, 30 | RAW_SUPPORT, 31 | GROUP_KEYWORDS, 32 | BEST_PICK_PREFIX, 33 | PREP_FOLDER_NAME, 34 | TIER_A_FOLDER, 35 | TIER_B_FOLDER, 36 | TIER_C_FOLDER, 37 | SESSION_DATE, 38 | SESSION_TIMESTAMP, 39 | MAX_WORKERS, 40 | DEFAULT_MODEL_NAME, 41 | OLLAMA_URL, 42 | load_app_config, 43 | save_app_config 44 | ) 45 | 46 | from .security import ( 47 | calculate_sha256, 48 | verify_file_move_with_hash, 49 | read_existing_sidecar, 50 | write_sidecar_file 51 | ) 52 | 53 | from .vision import ( 54 | convert_raw_to_jpeg, 55 | encode_image, 56 | get_image_bytes_for_analysis, 57 | check_ollama_connection, 58 | get_ai_description, 59 | get_ai_name_with_cache, 60 | critique_single_image 61 | ) 62 | 63 | # --- Optional Libs (Required for Real Engine) --- 64 | 65 | try: 66 | import imagehash 67 | from PIL import Image, ImageFile 68 | ImageFile.LOAD_TRUNCATED_IMAGES = True 69 | V5_LIBS_AVAILABLE = True 70 | except ImportError: 71 | V5_LIBS_AVAILABLE = False 72 | imagehash = None # Type stub for type hints 
try:
    import cv2
    import numpy as np
    V6_CULL_LIBS_AVAILABLE = True
except ImportError:
    V6_CULL_LIBS_AVAILABLE = False

try:
    import exifread
    V6_4_EXIF_LIBS_AVAILABLE = True
except ImportError:
    V6_4_EXIF_LIBS_AVAILABLE = False

# ==============================================================================
# STATS TRACKER
# ==============================================================================

class StatsTracker:
    """
    Real-time statistics tracker for workflow progress.
    Designed for thread-safe callback communication between engine and TUI.

    Usage:
        tracker = StatsTracker(callback=my_callback_function)
        tracker.start_timer()
        tracker.update('bursts', 42)
        tracker.stop_timer()
    """

    def __init__(self, callback: Optional[Callable[[str, Any], None]] = None):
        """
        Args:
            callback: Function to call when stats update (receives key, value)
        """
        self.callback = callback
        # Counters default to 0; 'time' is a display string ('--' until a run).
        self._stats = {
            'bursts': 0,
            'tier_a': 0,
            'tier_b': 0,
            'tier_c': 0,
            'heroes': 0,
            'archived': 0,
            'time': '--'
        }
        self._start_time = None

    def update(self, key: str, value: Any) -> None:
        """
        Update a stat and trigger the callback (if one was provided).

        Args:
            key: Stat identifier (e.g., 'bursts', 'tier_a', 'archived')
            value: New value for the stat
        """
        self._stats[key] = value
        if self.callback:
            self.callback(key, value)

    def start_timer(self) -> None:
        """Start the workflow timer and show 'Running...' in the HUD."""
        self._start_time = datetime.now()
        self.update('time', 'Running...')

    def stop_timer(self) -> None:
        """Stop the timer and publish a human-readable duration."""
        if self._start_time:
            duration = datetime.now() - self._start_time
            total_seconds = int(duration.total_seconds())
            minutes = total_seconds // 60
            seconds = total_seconds % 60

            # Format: "2m 34s" (human-readable for quick glances)
            if minutes > 0:
                time_str = f"{minutes}m {seconds}s"
            else:
                time_str = f"{seconds}s"

            self.update('time', time_str)

    def reset(self) -> None:
        """Reset all stats to defaults (counters to 0, time to '--').

        Note: intentionally does NOT fire the callback per key.
        """
        for key in self._stats.keys():
            if key == 'time':
                self._stats[key] = '--'
            else:
                self._stats[key] = 0
        self._start_time = None


# ==============================================================================
# IV. CORE UTILITIES
# ==============================================================================

def no_op_logger(message: str) -> None:
    """A dummy logger that does nothing, for when no callback is provided."""
    pass

def check_rawpy(log_callback: Callable[[str], None] = no_op_logger):
    """Check if rawpy is available and update RAW support flags in config."""
    from . import config
    try:
        import rawpy
        config.RAW_SUPPORT = True
        # Add ALL common RAW formats that rawpy supports
        # rawpy uses libraw which supports 100+ RAW formats
        raw_extensions = {'.rw2', '.arw', '.cr2', '.cr3', '.nef', '.dng', '.raf', '.orf', '.pef', '.srw',
                          '.3fr', '.ari', '.bay', '.crw', '.cs1', '.dc2', '.dcr', '.drf', '.eip', '.erf',
                          '.fff', '.iiq', '.k25', '.kdc', '.mdc', '.mef', '.mos', '.mrw', '.nrw', '.obm',
                          '.ptx', '.pxn', '.r3d', '.raw', '.rwl', '.rw1', '.rwz', '.sr2', '.srf', '.sti', '.x3f'}
        config.SUPPORTED_EXTENSIONS.update(raw_extensions)
        # [FIX] These two messages have no placeholders — plain strings, no f-prefix.
        log_callback("✓ [green]rawpy found.[/green] RAW support enabled.")
        log_callback(" Common formats: RW2, CR2, CR3, NEF, ARW, DNG, RAF, ORF, PEF, SRW + 40 more")
    except ImportError:
        config.RAW_SUPPORT = False
        log_callback("✗ [yellow]rawpy not found.[/yellow] RAW support disabled.")
        log_callback(" Install with: pip install rawpy")
    except Exception as e:
        config.RAW_SUPPORT = False
        log_callback(f"✗ [red]rawpy check failed:[/red] {e}")

def get_available_models(log_callback: Callable[[str], None] = no_op_logger) -> Optional[List[str]]:
    """Get list of available Ollama models, or None when Ollama is unreachable."""
    try:
        log_callback(" [grey]Checking Ollama connection...[/grey]")
        result = subprocess.run(['ollama', 'list'], capture_output=True, text=True, check=True)
        lines = result.stdout.strip().split('\n')[1:]  # skip the table header row
        models = [line.split()[0] for line in lines if line.strip()]
        log_callback(f" [green]✓ Ollama connected.[/green] Found {len(models)} models.")
        return models
    except subprocess.CalledProcessError as e:
        # [FIX] A failed command writes its diagnostics to stderr, not stdout;
        # with capture_output=True both are captured on the exception object.
        log_callback(f" [red]✗ Ollama command failed:[/red] {e.stderr}")
        return None
    except FileNotFoundError:
        log_callback(" [red]✗ Ollama not found.[/red] Please ensure 'ollama' is in your PATH.")
        return None
    except Exception as e:
        log_callback(f" [red]✗ Ollama connection error:[/red] {e}")
        return None
def get_unique_filename(base_name: str, extension: str, destination: Path) -> Path:
    """Return a destination path that does not collide with an existing file.

    Appends a zero-padded counter suffix ("-01", "-02", ...) until the
    candidate name is free on disk.
    """
    candidate = destination / f"{base_name}{extension}"
    counter = 0
    while candidate.exists():
        counter += 1
        candidate = destination / f"{base_name}-{counter:02d}{extension}"
    return candidate

def get_unique_filename_simulated(base_name: str, extension: str, destination: Path, simulated_paths: set) -> Path:
    """
    Generate unique filename for preview mode using simulated_paths set.

    This is critical for accurate collision detection in dry-run mode where
    files don't actually exist yet but we need to simulate naming conflicts.

    Args:
        base_name: Base filename without extension
        extension: File extension (including dot)
        destination: Destination directory path
        simulated_paths: Set of Path objects representing files that will exist

    Returns:
        Path object with collision-free filename
    """
    candidate = destination / f"{base_name}{extension}"
    counter = 0
    while candidate in simulated_paths:
        counter += 1
        candidate = destination / f"{base_name}-{counter:02d}{extension}"
    return candidate

def format_duration(duration: timedelta) -> str:
    """Converts timedelta to readable string like '1d 4h 15m'"""
    total_seconds = int(duration.total_seconds())
    days, remainder = divmod(total_seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes = remainder // 60
    segments = []
    if days > 0:
        segments.append(f"{days}d")
    if hours > 0:
        segments.append(f"{hours}h")
    # Always emit minutes when no larger unit was shown (yields "0m" for
    # sub-minute spans).
    if minutes > 0 or (days == 0 and hours == 0):
        segments.append(f"{minutes}m")
    return " ".join(segments) if segments else "0m"

def generate_bar_chart(data: dict, bar_width: int = 25, bar_char: str = "■") -> List[str]:
    """Generates ASCII bar chart lines from a dictionary"""
    if not data:
        return []
    peak = max(data.values())
    if peak == 0:
        peak = 1  # avoid division by zero when all values are zero
    label_width = max(len(label) for label in data)
    rendered = []
    for label, count in data.items():
        filled = int(math.ceil((count / peak) * bar_width))
        rendered.append(f" {label.ljust(label_width)}: [bold]{str(count).ljust(4)}[/bold] {bar_char * filled}")
    return rendered

def clean_filename(description: str) -> str:
    """Convert AI description to clean filename"""
    # Strip surrounding quotes/punctuation, drop non-word chars, collapse
    # whitespace/hyphens into single hyphens, lowercase, cap at 60 chars.
    text = description.strip('"\'.,!?')
    text = re.sub(r'[^\w\s-]', '', text)
    text = re.sub(r'[-\s]+', '-', text)
    return text.lower()[:60].strip('-')

def categorize_description(description: str) -> str:
    """Determine category based on keywords in description"""
    text = description.lower()
    scores = {
        category: sum(1 for keyword in keywords if keyword in text)
        for category, keywords in GROUP_KEYWORDS.items()
    }
    scores = {category: hits for category, hits in scores.items() if hits > 0}
    if not scores:
        return "Miscellaneous"
    return max(scores, key=scores.get)

def write_rename_log(log_path: Path, original_name: str, new_name: str, destination: Path):
    """(V9.3) Append an AI rename operation to the log file."""
    try:
        stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        with open(log_path, 'a') as log_file:
            log_file.write(f"{stamp} | {original_name} -> {new_name} | {destination}\n")
    except Exception:
        pass  # Silent fail
def initialize_rename_log(log_path: Path):
    """(V9.3) Initialize the rename log file with a header."""
    try:
        header = f"# FIXXER AI Rename Log - {SESSION_TIMESTAMP}\n"
        header += f"# Format: timestamp | original_name -> new_name | destination\n"
        header += "=" * 80 + "\n"
        with open(log_path, 'w') as f:
            f.write(header)
    except Exception:
        pass  # best-effort: a missing log never blocks the workflow

# ==============================================================================
# V. AI & ANALYSIS MODULES (The "Brains")
# ==============================================================================

def is_already_ai_named(filename: str) -> bool:
    """(V9.2) Check if a PICK file already has an AI-generated name.

    True only for names ending in '_PICK.<ext>' (case-insensitive) that do
    NOT start with the raw '_PICK_' prefix produced by numeric burst naming.
    """
    if not re.search(r'_PICK\.\w+$', filename, re.IGNORECASE):
        return False
    if filename.startswith('_PICK_'):
        return False
    return True

def get_image_hash(image_path: Path, log_callback: Callable[[str], None] = no_op_logger) -> Tuple[Path, Optional[Any]]:
    """Calculates perceptual hash (visual fingerprint) of an image.

    Returns:
        (image_path, phash) — phash is None when the imagehash library is
        missing, the RAW conversion fails, or the image cannot be decoded.
    """
    if not V5_LIBS_AVAILABLE:
        log_callback("[red]Missing 'imagehash' library. Burst grouping will fail.[/red]")
        return image_path, None

    # NEW: Use your existing rawpy helper instead of dcraw subprocess!
    # This handles ARW, CR2, NEF, etc. purely in memory.
    try:
        raw_formats = {'.rw2', '.cr2', '.cr3', '.nef', '.arw', '.dng', '.raf', '.orf', '.pef', '.srw'}
        if image_path.suffix.lower() in raw_formats:
            jpeg_bytes = convert_raw_to_jpeg(image_path, log_callback)
            if not jpeg_bytes:
                return image_path, None
            # [FIX] Close the decoded image via context manager, matching the
            # standard-image branch below (no leaked decoder resources).
            with Image.open(BytesIO(jpeg_bytes)) as img:
                return image_path, imagehash.phash(img)

        # Standard images (JPG, PNG, etc.)
        with Image.open(image_path) as img:
            return image_path, imagehash.phash(img)

    except Exception as e:
        log_callback(f" [yellow]Skipping hash for {image_path.name}: {e}[/yellow]")
        return image_path, None

def analyze_image_quality(image_bytes: bytes) -> Dict[str, float]:
    """Analyzes image bytes for sharpness and exposure.

    Returns:
        dict with 'sharpness' (variance of the Laplacian — higher is sharper),
        'blacks_pct' (fraction of pixels < 10) and 'whites_pct' (fraction of
        pixels > 245). All zeros when OpenCV/numpy are unavailable or the
        bytes cannot be decoded.
    """
    if not V6_CULL_LIBS_AVAILABLE:
        return {'sharpness': 0.0, 'blacks_pct': 0.0, 'whites_pct': 0.0}

    scores = {'sharpness': 0.0, 'blacks_pct': 0.0, 'whites_pct': 0.0}
    try:
        np_arr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        if img is None:
            return scores

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()
        scores['sharpness'] = float(laplacian_var)

        total_pixels = gray.size
        crushed_blacks = np.sum(gray < 10)
        scores['blacks_pct'] = float(crushed_blacks / total_pixels)
        blown_whites = np.sum(gray > 245)
        scores['whites_pct'] = float(blown_whites / total_pixels)
        return scores
    except Exception:
        # Any decode/processing failure yields neutral (all-zero) scores.
        return scores

def analyze_single_exif(image_path: Path) -> Optional[Dict]:
    """Thread-pool worker: Opens image and extracts key EXIF data.

    Returns:
        dict with 'timestamp' (datetime), 'camera', 'focal_length' and
        'aperture' strings, or None when exifread is unavailable, the file
        has no DateTimeOriginal tag, or parsing fails.
    """
    if not V6_4_EXIF_LIBS_AVAILABLE:
        return None

    try:
        with open(image_path, 'rb') as f:
            # NOTE(review): exifread's stop_tag is documented as the bare tag
            # name ('DateTimeOriginal'); the prefixed form may simply disable
            # the early-stop optimization — confirm before changing.
            tags = exifread.process_file(f, details=False, stop_tag='EXIF DateTimeOriginal')
            if not tags or 'EXIF DateTimeOriginal' not in tags:
                return None
            timestamp_str = str(tags['EXIF DateTimeOriginal'])
            dt_obj = datetime.strptime(timestamp_str, '%Y:%m:%d %H:%M:%S')
            camera = str(tags.get('Image Model', 'Unknown')).strip()
            focal_len = str(tags.get('EXIF FocalLength', 'Unknown')).split(' ')[0]
            aperture_str = "Unknown"
            aperture_tag = tags.get('EXIF FNumber')

            if aperture_tag:
                val = aperture_tag.values[0]
                if hasattr(val, 'num') and hasattr(val, 'den'):
                    # Rational value; guard against a zero denominator
                    aperture_val = 0.0 if val.den == 0 else float(val.num) / float(val.den)
                    aperture_str = f"f/{aperture_val:.1f}"
                else:
                    aperture_str = f"f/{val:.1f}"

            if not camera: camera = "Unknown"
            if not focal_len: focal_len = "Unknown"
            if aperture_str == "f/0.0": aperture_str = "Unknown"

            return {
                'timestamp': dt_obj,
                'camera': camera,
                'focal_length': f"{focal_len} mm",
                'aperture': aperture_str
            }
    except Exception:
        return None
def process_single_image(
    image_path: Path,
    destination_base: Path,
    model_name: str,
    rename_log_path: Optional[Path] = None,
    log_callback: Callable[[str], None] = no_op_logger,
    preview_mode: bool = False,
    ai_cache: Optional[Dict[str, Dict]] = None,
    cache_lock: Optional[threading.Lock] = None,
    simulated_paths: Optional[set] = None
) -> Tuple[Path, bool, str, str]:
    """
    (V9.3) Process one image: get AI name/tags, rename, move to temp location.

    New in dry-run feature:
        preview_mode: If True, simulate operations without moving files
        ai_cache: Optional cache dict for AI results
        cache_lock: Optional threading.Lock for thread-safe cache access
        simulated_paths: Set of paths for collision detection in preview mode

    Returns:
        (original_path, success, new_name_or_error, description) — on failure
        the third element carries the error message and description is "".
    """
    try:
        # Fast path: the file was already AI-named upstream (burst stacking);
        # strip the '_PICK' suffix and move it without another AI round-trip.
        if is_already_ai_named(image_path.name):
            extension = image_path.suffix.lower()
            base_name = image_path.stem
            clean_base = base_name[:-5] if base_name.endswith('_PICK') else base_name

            if preview_mode and simulated_paths is not None:
                # Dry run: resolve collisions against the simulated set and
                # record the would-be destination so later files see it.
                new_path = get_unique_filename_simulated(clean_base, extension, destination_base, simulated_paths)
                simulated_paths.add(new_path)
                log_callback(f" [cyan]WOULD MOVE:[/cyan] {image_path.name} → {new_path.name}")
            else:
                new_path = get_unique_filename(clean_base, extension, destination_base)
                # FIXXER v1.0: Hash-verified move
                verify_file_move_with_hash(image_path, new_path, log_callback, generate_sidecar=True)

            if rename_log_path:
                write_rename_log(rename_log_path, image_path.name, new_path.name, destination_base)
            # The hyphenated AI name doubles as the categorization text.
            description_for_categorization = clean_base.replace('-', ' ')
            return image_path, True, new_path.name, description_for_categorization

        # Use cache-aware AI naming
        ai_filename, ai_tags = get_ai_name_with_cache(
            image_path, model_name, ai_cache, cache_lock, log_callback
        )
        if not ai_filename or not ai_tags:
            return image_path, False, "Failed to get valid AI JSON response", ""

        # Tags drive folder categorization; the filename drives the rename.
        description_for_categorization = " ".join(ai_tags)
        clean_name = Path(ai_filename).stem
        extension = image_path.suffix.lower()

        if preview_mode and simulated_paths is not None:
            new_path = get_unique_filename_simulated(clean_name, extension, destination_base, simulated_paths)
            simulated_paths.add(new_path)
            log_callback(f" [cyan]WOULD MOVE:[/cyan] {image_path.name} → {new_path.name}")
        else:
            new_path = get_unique_filename(clean_name, extension, destination_base)
            # FIXXER v1.0: Hash-verified move
            verify_file_move_with_hash(image_path, new_path, log_callback, generate_sidecar=True)

        if rename_log_path:
            write_rename_log(rename_log_path, image_path.name, new_path.name, destination_base)

        return image_path, True, new_path.name, description_for_categorization
    except Exception as e:
        # Worker runs inside a thread pool: never raise, report via the tuple.
        return image_path, False, str(e), ""

def organize_into_folders(
    processed_files: List[Dict],
    files_source: Path,
    destination_base: Path,
    log_callback: Callable[[str], None] = no_op_logger
):
    """Group files into folders based on their descriptions.

    Args:
        processed_files: dicts carrying 'new_name' and 'description' keys
        files_source: directory where the renamed files currently live
        destination_base: parent directory for the category folders
        log_callback: progress logger
    """
    log_callback("\n[bold]🗂️ Organizing into smart folders...[/bold]")

    # Bucket files by keyword-derived category.
    categories = defaultdict(list)
    for file_info in processed_files:
        filename = file_info['new_name']
        description = file_info['description']
        category = categorize_description(description)
        categories[category].append({
            'filename': filename,
            'description': description
        })

    for category, files in categories.items():
        folder_name = category
        folder_path = destination_base / folder_name
        folder_path.mkdir(exist_ok=True)
        log_callback(f" [green]✓[/green] Creating {folder_name}/ ({len(files)} files)")

        moved_count = 0
        for file_info in files:
            src = files_source / file_info['filename']
            dst = folder_path / file_info['filename']
            if src.exists():
                # FIXXER v1.0: Hash-verified move
                log_callback(f" Moving {src.name} → {folder_name}/")
                verify_file_move_with_hash(src, dst, log_callback, generate_sidecar=True)
                moved_count += 1
            else:
                # File vanished between rename and organize; log and continue.
                log_callback(f" [red]✗ File not found at expected location:[/red] {src}")
                log_callback(f" [dim]Looking for: {file_info['filename']}[/dim]")
                log_callback(f" [dim]In directory: {files_source}[/dim]")

        if moved_count > 0:
            log_callback(f" [green]✓ Moved {moved_count}/{len(files)} files to {folder_name}/[/green]")

    log_callback(f"\n [bold]✓ Organized into {len(categories)} folders.[/bold]")

# AI session naming removed - adds too much time for minimal value
# Users can rename folders themselves after workflow completes
# ==============================================================================
# VIII. FEATURE WORKFLOWS (The "Tools")
# ==============================================================================

# --- Simple Sort Workflow (Legacy Mode) ---

def simple_sort_workflow(
    log_callback: Callable[[str], None] = no_op_logger,
    app_config: Optional[Dict[str, Any]] = None,
    stop_event: Optional[threading.Event] = None,
    preview_mode: bool = False,
    ai_cache: Optional[Dict[str, Dict]] = None,
    cache_lock: Optional[threading.Lock] = None
) -> Dict[str, Any]:
    """
    Simple workflow: AI name + organize by keyword into folders.
    No burst detection, no culling - just straightforward naming and sorting.
    Perfect for home users who want a simple "point and organize" experience.

    This is the "legacy mode" from the original CLI photosort.py that just:
    1. AI names all images
    2. Groups them into folders by keyword
    That's it. Powerful for the home user who just needs to organize photos.

    New in dry-run feature:
        preview_mode: If True, simulate operations without moving files
        ai_cache: Optional cache dict for AI results
        cache_lock: Optional threading.Lock for thread-safe cache access

    Returns:
        Summary dict with 'total_images', 'success_count' and 'duration',
        or {} on fatal error / user stop / nothing to do.
    """
    start_time = datetime.now()

    if app_config is None:
        app_config = load_app_config()

    source_str = app_config.get('last_source_path')
    dest_str = app_config.get('last_destination_path')
    chosen_model = app_config.get('default_model', DEFAULT_MODEL_NAME)

    if not source_str or not dest_str:
        log_callback("[bold red]✗ FATAL: Source or Destination not set in config.[/bold red]")
        return {}

    directory = Path(source_str)
    chosen_destination = Path(dest_str)

    if not directory.is_dir():
        log_callback(f"[bold red]✗ FATAL: Source directory not found:[/bold red] {directory}")
        return {}

    if not preview_mode:
        try:
            chosen_destination.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            log_callback(f"[bold red]✗ FATAL: Could not create destination:[/bold red] {e}")
            return {}

    # Create temp staging area for renamed files (real runs only)
    temp_staging = chosen_destination / "_staging"
    if not preview_mode:
        temp_staging.mkdir(exist_ok=True)

    log_callback(f"\n[bold cyan]📁 Simple Sort: AI Naming + Keyword Folders[/bold cyan]")
    if preview_mode:
        log_callback(f" [yellow]MODE: DRY RUN (No files will be moved)[/yellow]")
    log_callback(f" Source: {directory}")
    log_callback(f" Destination: {chosen_destination}")
    log_callback(f" Model: {chosen_model}")

    # Get all image files (case-insensitive)
    # [FIX] On case-insensitive filesystems (macOS/Windows) the lowercase and
    # uppercase globs both match the SAME files, so de-duplicate while keeping
    # discovery order — previously each image could be processed twice.
    seen_files = set()
    image_files: List[Path] = []
    for ext in SUPPORTED_EXTENSIONS:
        # Try both lowercase and uppercase
        for pattern in (f"*{ext}", f"*{ext.upper()}"):
            for candidate in directory.glob(pattern):
                if candidate not in seen_files:
                    seen_files.add(candidate)
                    image_files.append(candidate)

    if not image_files:
        log_callback("[yellow]No image files found in source directory.[/yellow]")
        return {}

    log_callback(f"\n Found {len(image_files)} images to process")

    # Process images: AI naming
    log_callback("\n[bold]🤖 AI Naming Images...[/bold]")
    processed_files = []
    success_count = 0

    # Track simulated paths to prevent collisions in preview
    simulated_paths = set() if preview_mode else None

    for idx, img in enumerate(image_files, 1):
        if stop_event and stop_event.is_set():
            log_callback("\n[yellow]🛑 Workflow stopped by user.[/yellow]")
            return {}

        log_callback(f" [{idx}/{len(image_files)}] {img.name}")
        original_path, success, new_name, description = process_single_image(
            img, temp_staging, chosen_model,
            log_callback=log_callback,
            preview_mode=preview_mode,
            ai_cache=ai_cache,
            cache_lock=cache_lock,
            simulated_paths=simulated_paths
        )

        if success:
            processed_files.append({
                'original': original_path.name,
                'new_name': new_name,
                'description': description
            })
            success_count += 1
        else:
            # On failure, new_name carries the error message.
            log_callback(f" [red]Failed: {new_name}[/red]")

    log_callback(f"\n [green]✓[/green] Successfully named {success_count}/{len(image_files)} images")

    if not processed_files:
        log_callback("[yellow]No files were successfully processed.[/yellow]")
        # [FIX] Staging only exists on real runs; catch OSError instead of a
        # bare except (which also swallowed KeyboardInterrupt/SystemExit).
        if not preview_mode:
            try:
                temp_staging.rmdir()
            except OSError:
                pass
        return {}

    # Organize into keyword folders
    log_callback("\n[bold]📂 Organizing into Keyword Folders...[/bold]")
    if preview_mode:
        # In preview mode, simulate organization based on descriptions
        categories = defaultdict(list)
        for file_info in processed_files:
            cat = categorize_description(file_info['description'])
            categories[cat].append(file_info['new_name'])

        for cat, files in categories.items():
            log_callback(f" [dim]Preview: Would move {len(files)} files to[/dim] [cyan]{cat}/[/cyan]")
    else:
        organize_into_folders(processed_files, temp_staging, chosen_destination, log_callback)

    # Clean up staging directory ([FIX] real runs only; narrow exception —
    # rmdir legitimately fails when stragglers remain in staging).
    if not preview_mode:
        try:
            temp_staging.rmdir()
        except OSError:
            pass

    duration = datetime.now() - start_time
    log_callback(f"\n[bold green]✓ Simple Sort Complete![/bold green]")
    log_callback(f" Duration: {format_duration(duration)}")
    log_callback(f" {success_count} images organized into folders")

    return {
        'total_images': len(image_files),
        'success_count': success_count,
        'duration': str(duration)
    }
def auto_workflow(
    log_callback: Callable[[str], None] = no_op_logger,
    app_config: Optional[Dict[str, Any]] = None,
    tracker: Optional[StatsTracker] = None,
    stop_event: Optional[threading.Event] = None,
    preview_mode: bool = False,
    ai_cache: Optional[Dict[str, Dict]] = None,
    cache_lock: Optional[threading.Lock] = None
) -> Dict[str, Any]:
    """
    (V9.3) Complete automated workflow: Stack → Cull → AI-Name → Archive.

    New in dry-run feature:
        preview_mode: If True, simulate operations without moving files
        ai_cache: Optional cache dict for AI results (model-aware)
        cache_lock: Optional threading.Lock for thread-safe cache access

    Returns:
        Summary dict ('archived', 'failed', plus category/destination keys),
        or {} on fatal error, user stop, or when no hero files are found.
    """

    # --- PREVIEW MODE BANNER ---
    if preview_mode:
        log_callback("\n[bold yellow]═══ DRY RUN MODE ═══[/bold yellow]")
        log_callback("[dim]No files will be moved. Preview only.[/dim]\n")

    # --- 1. CONFIGURATION ---
    if app_config is None:
        app_config = load_app_config()

    # The TUI now handles source/dest selection. Get them from config.
    source_str = app_config.get('last_source_path')
    dest_str = app_config.get('last_destination_path')

    if not source_str or not dest_str:
        log_callback("[bold red]✗ FATAL: Source or Destination not set in config.[/bold red]")
        return {}

    directory = Path(source_str)
    chosen_destination = Path(dest_str)

    if not directory.is_dir():
        log_callback(f"[bold red]✗ FATAL: Source directory not found:[/bold red] {directory}")
        return {}

    if not preview_mode:
        try:
            chosen_destination.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            log_callback(f"[bold red]✗ FATAL: Could not create destination:[/bold red] {e}")
            return {}

    # NOTE(review): direct index (KeyError if unset) while simple_sort_workflow
    # uses .get(..., DEFAULT_MODEL_NAME) — confirm config always has this key.
    chosen_model = app_config['default_model']
    log_callback(f" Source: {directory}")
    log_callback(f" Destination: {chosen_destination}")
    log_callback(f" Model: {chosen_model}")
    if preview_mode:
        log_callback(f" [yellow]Mode: DRY RUN (preview only)[/yellow]")

    # Check for critical libs
    if not V5_LIBS_AVAILABLE or not V6_CULL_LIBS_AVAILABLE or not V6_4_EXIF_LIBS_AVAILABLE:
        log_callback("[bold red]✗ FATAL: Missing required libraries.[/bold red]")
        log_callback(" Please run: pip install imagehash opencv-python numpy exifread")
        return {}

    # Session tracking removed - was mock/stub code

    # Start HUD timer
    if tracker:
        tracker.start_timer()

    # --- 2. STATS PREVIEW ---
    # Read-only EXIF analysis; failure here never aborts the workflow.
    log_callback("\n[bold]Step 2/5: Analyzing session (read-only)...[/bold]")
    try:
        if stop_event and stop_event.is_set(): return {}
        show_exif_insights(log_callback, app_config, simulated=True, directory_override=directory, stop_event=stop_event)
    except Exception as e:
        log_callback(f" [yellow]Could not run EXIF analysis: {e}[/yellow]")

    # --- 3. GROUP BURSTS ---
    if stop_event and stop_event.is_set(): return {}
    log_callback("\n[bold]Step 3/5: Stacking burst shots (with AI naming)...[/bold]")
    if preview_mode:
        log_callback("[dim yellow]PREVIEW MODE: No files will be moved[/dim yellow]")
    # v1.1: Auto workflow ALWAYS does AI naming, regardless of burst_auto_name config
    auto_config = app_config.copy()
    auto_config['burst_auto_name'] = True

    # [FIX] Capture returned picks (dry-run feature)
    burst_picks = group_bursts_in_directory(log_callback, auto_config, directory_override=directory, tracker=tracker, stop_event=stop_event, preview_mode=preview_mode, ai_cache=ai_cache, cache_lock=cache_lock)

    # --- 4. CULL SINGLES ---
    if stop_event and stop_event.is_set(): return {}
    log_callback("\n[bold]Step 4/5: Culling single shots...[/bold]")
    if preview_mode:
        log_callback("[dim yellow]PREVIEW MODE: No files will be moved[/dim yellow]")

    # [FIX] Capture returned Tier A files (dry-run feature)
    tier_a_files = cull_images_in_directory(log_callback, app_config, directory_override=directory, tracker=tracker, stop_event=stop_event, preview_mode=preview_mode)
    tier_a_dir = directory / TIER_A_FOLDER

    # --- 5. FIND & ARCHIVE HEROES ---
    log_callback("\n[bold]Step 5/5: Finding and archiving 'hero' files...[/bold]")

    hero_files = []

    if preview_mode:
        # [CRITICAL FIX] In Dry Run, folders don't exist. Use the returned lists!
        # Combine lists and remove duplicates (if any file appeared in both)
        if burst_picks:
            hero_files.extend(burst_picks)
        if tier_a_files:
            # Only add Tier A files that aren't already in burst picks
            picks_set = set(burst_picks) if burst_picks else set()
            for f in tier_a_files:
                if f not in picks_set:
                    hero_files.append(f)
        log_callback(f" [dim]Preview: Found {len(hero_files)} hero files from returned lists[/dim]")
    else:
        # [REAL RUN] Use existing filesystem scanning logic
        if tier_a_dir.is_dir():
            for f in tier_a_dir.iterdir():
                if f.is_file() and f.suffix.lower() in SUPPORTED_EXTENSIONS:
                    hero_files.append(f)

        burst_parent = directory / "_Bursts"
        burst_folders = []
        if burst_parent.exists() and burst_parent.is_dir():
            burst_folders = [f for f in burst_parent.iterdir() if f.is_dir()]

        for burst_folder in burst_folders:
            if burst_folder.is_dir():
                for f in burst_folder.iterdir():
                    if f.is_file() and (f.name.startswith(BEST_PICK_PREFIX) or is_already_ai_named(f.name)):
                        hero_files.append(f)

    if not hero_files:
        log_callback(f"\n No '{TIER_A_FOLDER}' or '_PICK_' files found. Nothing to archive.")
        log_callback("[bold green]✓ Auto workflow complete (no heroes found).[/bold green]")
        return {}

    already_named = [f for f in hero_files if is_already_ai_named(f.name)]
    needs_naming = [f for f in hero_files if not is_already_ai_named(f.name)]
    log_callback(f" Found {len(hero_files)} 'hero' files total:")
    if already_named:
        log_callback(f" • {len(already_named)} already AI-named (from burst stacking)")
    log_callback(f" • {len(needs_naming)} to process")

    # Update HUD: Heroes count
    if tracker:
        tracker.update('heroes', len(hero_files))

    results = {"success": [], "failed": []}

    # [LOGIC PATCH] Only create the physical rename log if this is a REAL run
    if preview_mode:
        rename_log_path = None
    else:
        rename_log_path = chosen_destination / f"_ai_rename_log_{SESSION_TIMESTAMP}.txt"
        initialize_rename_log(rename_log_path)

    # Preview mode: track simulated paths for collision detection
    simulated_paths = set() if preview_mode else None

    log_callback(f"\n [grey]Archiving {len(hero_files)} files...[/grey]")
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_file = {
            executor.submit(
                process_single_image,
                img_path,
                chosen_destination,
                chosen_model,
                rename_log_path,
                log_callback,
                preview_mode,
                ai_cache,
                cache_lock,
                simulated_paths
            ): img_path
            for img_path in hero_files
        }

        for i, future in enumerate(as_completed(future_to_file)):
            if stop_event and stop_event.is_set():
                log_callback("\n[yellow]🛑 Workflow stopped by user.[/yellow]")
                executor.shutdown(wait=False, cancel_futures=True)
                return {}

            log_callback(f" [grey]Processing item {i+1}/{len(hero_files)}...[/grey]")
            # Worker never raises: failures are reported in the result tuple.
            original, success, message, description = future.result()
            if success:
                results["success"].append({
                    "original": original.name,
                    "new_name": message,
                    "description": description
                })
            else:
                results["failed"].append((original.name, message))
                log_callback(f" [red]✗ {original.name}: {message}[/red]")

    log_callback(f"\n[green]✓ Successfully archived: {len(results['success'])}[/green]")
    log_callback(f"[red]✗ Failed to archive: {len(results['failed'])}[/red]")

    # Update HUD: Archived count
    if tracker:
        tracker.update('archived', len(results['success']))

    summary = {
        "archived": len(results['success']),
        "failed": len(results['failed']),
    }

    if results["success"]:
        categories = {}
        for item in results["success"]:
            cat = categorize_description(item["description"])
            categories[cat] = categories.get(cat, 0) + 1

        # In preview mode, skip file organization (files not actually moved)
        if preview_mode:
            log_callback(f"\n[dim]Preview: Would organize into {len(categories)} categories:[/dim]")
            for cat, count in categories.items():
                log_callback(f" [dim]• {cat}: {count} files[/dim]")
            summary["categories"] = len(categories)
            summary["preview_categories"] = categories
        else:
            # Use simple date-based folder naming
            dated_folder = f"{SESSION_DATE}_Session"
            final_destination = chosen_destination / dated_folder
            # NOTE(review): this guard is redundant — preview_mode is always
            # False on this branch (handled above).
            if not preview_mode:
                final_destination.mkdir(parents=True, exist_ok=True)

            organize_into_folders(results["success"], chosen_destination, final_destination, log_callback)
            summary["final_destination"] = str(final_destination.name)
            summary["categories"] = len(categories)

    # Stop HUD timer
    if tracker:
        tracker.stop_timer()

    # Different completion messages for preview vs real mode
    if preview_mode:
        log_callback("\n[bold green]✓ Dry Run Complete[/bold green]")
        log_callback(f" [dim]Preview generated {len(results['success'])} AI names[/dim]")
        if ai_cache:
            log_callback(f" [cyan]⚡ Cached {len(ai_cache)} AI results for instant execution[/cyan]")
    else:
        log_callback("\n[bold green]🚀 AUTO WORKFLOW COMPLETE[/bold green]")
        log_callback(f" Your 'hero' photos are now in: {chosen_destination}")
        if rename_log_path:
            log_callback(f" Rename log saved: {rename_log_path.name}")

    return summary

# --- Burst Workflow ---
963 | 964 | New in dry-run feature: 965 | preview_mode: If True, simulate operations without moving files 966 | """ 967 | 968 | if app_config is None: app_config = load_app_config() 969 | 970 | # Use override dir (from auto) or config dir 971 | if directory_override: 972 | directory = directory_override 973 | elif app_config.get('last_source_path'): 974 | directory = Path(app_config['last_source_path']) 975 | else: 976 | log_callback("[red]✗ No source directory specified.[/red]") 977 | return 978 | 979 | if not directory.is_dir(): 980 | log_callback(f"[red]✗ Source directory not found: {directory}[/red]") 981 | return 982 | 983 | if not V5_LIBS_AVAILABLE or not V6_CULL_LIBS_AVAILABLE: 984 | log_callback("[bold red]✗ FATAL: Missing required libraries.[/bold red]") 985 | log_callback(" Please run: pip install imagehash opencv-python numpy") 986 | return 987 | 988 | log_callback(f"[grey]Scanning for bursts in: {directory.name}[/grey]") 989 | burst_threshold = app_config['burst_threshold'] 990 | 991 | image_files = [ 992 | f for f in directory.iterdir() 993 | if f.is_file() and f.suffix.lower() in SUPPORTED_EXTENSIONS 994 | ] 995 | if len(image_files) < 2: 996 | log_callback(" Not enough images to compare.") 997 | return 998 | 999 | all_hashes = {} 1000 | log_callback(f" [grey]Calculating {len(image_files)} visual fingerprints...[/grey]") 1001 | 1002 | with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: 1003 | future_to_path = {executor.submit(get_image_hash, path, log_callback): path for path in image_files} 1004 | for i, future in enumerate(as_completed(future_to_path)): 1005 | if stop_event and stop_event.is_set(): 1006 | log_callback("\n[yellow]🛑 Workflow stopped by user.[/yellow]") 1007 | executor.shutdown(wait=False, cancel_futures=True) 1008 | return 1009 | 1010 | log_callback(f" [grey]Hashing image {i+1}/{len(image_files)}...[/grey]",) 1011 | path, img_hash = future.result() 1012 | if img_hash: 1013 | all_hashes[path] = img_hash 1014 | 1015 | 
log_callback(" [grey]Comparing fingerprints to find burst groups...[/grey]") 1016 | visited_paths = set() 1017 | all_burst_groups = [] 1018 | sorted_paths = sorted(all_hashes.keys(), key=lambda p: p.name) 1019 | 1020 | for path in sorted_paths: 1021 | if path in visited_paths: continue 1022 | current_group = [path] 1023 | visited_paths.add(path) 1024 | for other_path in sorted_paths: 1025 | if other_path in visited_paths: continue 1026 | hash1 = all_hashes.get(path) 1027 | hash2 = all_hashes.get(other_path) 1028 | if hash1 and hash2: 1029 | distance = hash1 - hash2 1030 | if distance <= burst_threshold: 1031 | current_group.append(other_path) 1032 | visited_paths.add(other_path) 1033 | if len(current_group) > 1: 1034 | all_burst_groups.append(current_group) 1035 | 1036 | if not all_burst_groups: 1037 | log_callback(" No burst groups found. All images are unique!") 1038 | return [] 1039 | 1040 | log_callback(f" [green]✓ Found {len(all_burst_groups)} burst groups.[/green] Analyzing for best pick...") 1041 | 1042 | # Update HUD: Bursts count 1043 | if tracker: 1044 | tracker.update('bursts', len(all_burst_groups)) 1045 | 1046 | # Track burst picks for return value (dry-run feature) 1047 | burst_picks: List[Path] = [] 1048 | 1049 | best_picks: Dict[int, Tuple[Path, float]] = {} 1050 | for i, group in enumerate(all_burst_groups): 1051 | best_sharpness = -1.0 1052 | best_file = None 1053 | for file_path in group: 1054 | image_bytes = get_image_bytes_for_analysis(file_path, log_callback) 1055 | if image_bytes: 1056 | scores = analyze_image_quality(image_bytes) 1057 | sharpness = scores.get('sharpness', 0.0) 1058 | if sharpness > best_sharpness: 1059 | best_sharpness = sharpness 1060 | best_file = file_path 1061 | if best_file: 1062 | best_picks[i] = (best_file, best_sharpness) 1063 | 1064 | use_parent_folder = app_config.get('burst_parent_folder', True) 1065 | bursts_parent = directory / "_Bursts" if use_parent_folder else directory 1066 | if use_parent_folder: 1067 | 
log_callback(f" Organizing burst groups into: {bursts_parent.name}/") 1068 | if not preview_mode: 1069 | bursts_parent.mkdir(exist_ok=True) 1070 | 1071 | # v1.1: Check if AI naming is enabled for burst workflow 1072 | burst_auto_name = app_config.get('burst_auto_name', False) 1073 | 1074 | if burst_auto_name: 1075 | # [LOGIC PATCH] Only create the physical rename log if this is a REAL run 1076 | if preview_mode: 1077 | rename_log_path = None 1078 | else: 1079 | rename_log_path = directory / f"_ai_rename_log_{SESSION_TIMESTAMP}.txt" 1080 | initialize_rename_log(rename_log_path) 1081 | ai_model = app_config.get('default_model', DEFAULT_MODEL_NAME) 1082 | log_callback(" [grey]AI naming enabled for bursts...[/grey]") 1083 | 1084 | for i, group in enumerate(all_burst_groups): 1085 | winner_data = best_picks.get(i) 1086 | sample_image = winner_data[0] if winner_data else group[0] 1087 | 1088 | # Add the winner to our return list (for dry-run feature) 1089 | burst_picks.append(sample_image) 1090 | 1091 | # Only run AI naming if enabled 1092 | if burst_auto_name: 1093 | log_callback(f" [grey]Burst {i+1}/{len(all_burst_groups)}: Naming...[/grey]") 1094 | 1095 | # Use cache-aware AI naming (for dry-run feature) 1096 | ai_filename, ai_tags = get_ai_name_with_cache( 1097 | sample_image, ai_model, ai_cache, cache_lock, log_callback 1098 | ) 1099 | 1100 | if ai_filename and ai_tags: 1101 | base_name = Path(ai_filename).stem # Extract base name without extension 1102 | folder_name = f"{base_name}_burst" 1103 | log_callback(f" [green]✓ AI named:[/green] {base_name}") 1104 | else: 1105 | base_name = f"burst-{i+1:03d}" 1106 | folder_name = base_name 1107 | log_callback(f" [yellow]⚠️ AI naming failed, using:[/yellow] {base_name}") 1108 | else: 1109 | # Fast mode: Just use numeric naming 1110 | base_name = f"burst-{i+1:03d}" 1111 | folder_name = base_name 1112 | log_callback(f" [grey]Burst {i+1}/{len(all_burst_groups)}: {folder_name} ({len(group)} files)[/grey]") 1113 | 1114 | 
folder_path = bursts_parent / folder_name 1115 | if folder_path.exists(): 1116 | counter = 2 1117 | original_name = folder_name 1118 | while folder_path.exists(): 1119 | folder_name = f"{original_name}-{counter}" 1120 | folder_path = bursts_parent / folder_name 1121 | counter += 1 1122 | 1123 | log_callback(f" [grey]📁 Moving {len(group)} files to {folder_path.relative_to(directory)}/...[/grey]") 1124 | if not preview_mode: 1125 | folder_path.mkdir(parents=True, exist_ok=True) 1126 | 1127 | alternate_counter = 1 1128 | for file_path in group: 1129 | extension = file_path.suffix 1130 | if winner_data and file_path == winner_data[0]: 1131 | new_name = f"{base_name}_PICK{extension}" 1132 | else: 1133 | new_name = f"{base_name}_{alternate_counter:03d}{extension}" 1134 | alternate_counter += 1 1135 | 1136 | new_file_path = folder_path / new_name 1137 | try: 1138 | if preview_mode: 1139 | log_callback(f" [cyan]WOULD MOVE:[/cyan] {file_path.name} → {folder_path.name}/{new_name}") 1140 | else: 1141 | # FIXXER v1.0: Hash-verified move 1142 | verify_file_move_with_hash(file_path, new_file_path, log_callback, generate_sidecar=True) 1143 | if burst_auto_name: 1144 | write_rename_log(rename_log_path, file_path.name, new_name, folder_path) 1145 | except Exception as e: 1146 | log_callback(f" [red]FAILED to move {file_path.name}: {e}[/red]") 1147 | 1148 | if burst_auto_name and rename_log_path: 1149 | log_callback(f" Rename log saved: {rename_log_path.name}") 1150 | 1151 | return burst_picks # Return the winners for dry-run feature 1152 | 1153 | # --- Cull Workflow --- 1154 | 1155 | def cull_images_in_directory( 1156 | log_callback: Callable[[str], None] = no_op_logger, 1157 | app_config: Optional[Dict[str, Any]] = None, 1158 | simulated: bool = False, 1159 | directory_override: Optional[Path] = None, 1160 | tracker: Optional[StatsTracker] = None, 1161 | stop_event: Optional[threading.Event] = None, 1162 | preview_mode: bool = False 1163 | ) -> List[Path]: 1164 | """ 1165 | (V9.4) 
# --- Cull Workflow ---

def cull_images_in_directory(
    log_callback: Callable[[str], None] = no_op_logger,
    app_config: Optional[Dict[str, Any]] = None,
    simulated: bool = False,
    directory_override: Optional[Path] = None,
    tracker: Optional[StatsTracker] = None,
    stop_event: Optional[threading.Event] = None,
    preview_mode: bool = False
) -> List[Path]:
    """
    (V9.4) Finds and groups images by technical quality using Tier A/B/C naming.

    Images are scored in parallel for sharpness and exposure, triaged into
    three tiers using the config thresholds, then moved (hash-verified)
    into per-tier folders.

    Returns:
        List of 'Tier A' files (best quality images). An empty list is
        returned on every early exit (missing directory, missing libraries,
        no images, or user stop) so callers can always iterate the result.

    New in dry-run feature:
        preview_mode: If True, simulate operations without moving files.
    """
    if app_config is None:
        app_config = load_app_config()

    if directory_override:
        directory = directory_override
    elif app_config.get('last_source_path'):
        directory = Path(app_config['last_source_path'])
    else:
        log_callback("[red]✗ No source directory specified.[/red]")
        # FIX: was a bare `return` (None) despite the List[Path] contract.
        return []

    if not directory.is_dir():
        log_callback(f"[red]✗ Source directory not found: {directory}[/red]")
        return []

    if not V6_CULL_LIBS_AVAILABLE:
        log_callback("[bold red]✗ FATAL: Missing required libraries.[/bold red]")
        log_callback("  Please run: pip install opencv-python numpy")
        return []

    log_callback(f"[grey]Analyzing technical quality in: {directory.name}[/grey]")

    image_files = [
        f for f in directory.iterdir()
        if f.is_file() and f.suffix.lower() in SUPPORTED_EXTENSIONS
    ]
    if not image_files:
        log_callback("  No supported images to analyze.")
        return []

    all_scores = {}
    log_callback(f"  [grey]Analyzing sharpness/exposure for {len(image_files)} images...[/grey]")

    # Score each image in parallel; honor the stop flag between results.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_path = {
            executor.submit(process_image_for_culling, path, log_callback): path
            for path in image_files
        }
        for i, future in enumerate(as_completed(future_to_path)):
            if stop_event and stop_event.is_set():
                log_callback("\n[yellow]🛑 Workflow stopped by user.[/yellow]")
                executor.shutdown(wait=False, cancel_futures=True)
                return []

            log_callback(f"  [grey]Analyzing image {i+1}/{len(image_files)}...[/grey]")
            path, scores = future.result()
            if scores:
                all_scores[path] = scores

    log_callback("  [grey]Triaging images into quality tiers...[/grey]")
    th = app_config['cull_thresholds']
    tiers = {"Tier_A": [], "Tier_B": [], "Tier_C": []}

    for path, scores in all_scores.items():
        sharp = scores['sharpness']
        blacks = scores['blacks_pct']
        whites = scores['whites_pct']
        # Dud exposure if EITHER crushed blacks or blown whites exceed the
        # dud threshold; "good" exposure requires BOTH under the good threshold.
        is_exposure_bad = (blacks > th['exposure_dud_pct']) or (whites > th['exposure_dud_pct'])
        is_exposure_good = (blacks < th['exposure_good_pct']) and (whites < th['exposure_good_pct'])
        is_sharp_bad = sharp < th['sharpness_dud']
        is_sharp_good = sharp > th['sharpness_good']

        tier = "Tier_B"  # default: middle tier
        if is_sharp_bad or is_exposure_bad:
            tier = "Tier_C"
        elif is_sharp_good and is_exposure_good:
            tier = "Tier_A"
        tiers[tier].append(path)

    log_callback(f"  [green]Found {len(tiers['Tier_A'])} Tier A[/green], [yellow]{len(tiers['Tier_B'])} Tier B[/yellow], [red]{len(tiers['Tier_C'])} Tier C[/red].")

    # Update HUD: Tier A/B/C counts
    if tracker:
        tracker.update('tier_a', len(tiers['Tier_A']))
        tracker.update('tier_b', len(tiers['Tier_B']))
        tracker.update('tier_c', len(tiers['Tier_C']))

    folder_map = {
        "Tier_A": directory / TIER_A_FOLDER,
        "Tier_B": directory / TIER_B_FOLDER,
        "Tier_C": directory / TIER_C_FOLDER
    }

    for tier, paths in tiers.items():
        if not paths:
            continue
        folder_path = folder_map[tier]
        log_callback(f"  [grey]Moving {len(paths)} files to {folder_path.name}/...[/grey]")
        if not preview_mode:
            folder_path.mkdir(exist_ok=True)

        for file_path in paths:
            new_file_path = folder_path / file_path.name
            try:
                if preview_mode:
                    log_callback(f"  [cyan]WOULD MOVE:[/cyan] {file_path.name} → {folder_path.name}/{file_path.name}")
                else:
                    # FIXXER v1.0: Hash-verified move
                    verify_file_move_with_hash(file_path, new_file_path, log_callback, generate_sidecar=True)
            except Exception as e:
                log_callback(f"  [red]FAILED to move {file_path.name}: {e}[/red]")

    log_callback("  Culling complete!")

    return tiers["Tier_A"]  # Return Tier A files for dry-run feature


def process_image_for_culling(image_path: Path, log_callback: Callable[[str], None] = no_op_logger) -> Tuple[Path, Optional[Dict[str, float]]]:
    """Thread-pool worker: reads image bytes and runs the quality analysis engine.

    Returns:
        (path, scores) — scores is None when the image could not be read.
    """
    image_bytes = get_image_bytes_for_analysis(image_path, log_callback)
    if not image_bytes:
        return image_path, None
    scores = analyze_image_quality(image_bytes)
    return image_path, scores
# --- Stats Workflow ---

def show_exif_insights(
    log_callback: Callable[[str], None] = no_op_logger,
    app_config: Optional[Dict[str, Any]] = None,
    simulated: bool = False,
    directory_override: Optional[Path] = None,
    stop_event: Optional[threading.Event] = None
) -> None:
    """(V6.4) Scans images, aggregates EXIF data, and prints a session summary."""
    if app_config is None:
        app_config = load_app_config()

    # Resolve the working directory: explicit override wins, then saved config.
    if directory_override:
        directory = directory_override
    elif app_config.get('last_source_path'):
        directory = Path(app_config['last_source_path'])
    else:
        log_callback("[red]✗ No source directory specified.[/red]")
        return

    if not directory.is_dir():
        log_callback(f"[red]✗ Source directory not found: {directory}[/red]")
        return

    if not V6_4_EXIF_LIBS_AVAILABLE:
        log_callback("[bold red]✗ FATAL: Missing required library.[/bold red]")
        log_callback("  Please run: pip install exifread")
        return

    log_callback(f"[grey]Scanning EXIF data in: {directory.name}[/grey]")

    image_files = [
        entry for entry in directory.iterdir()
        if entry.is_file() and entry.suffix.lower() in SUPPORTED_EXTENSIONS
    ]
    if not image_files:
        log_callback("  No supported images to analyze.")
        return

    exif_records = []
    log_callback(f"  [grey]Reading EXIF data from {len(image_files)} files...[/grey]")

    # Fan EXIF parsing out across the worker pool; honor the stop flag.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
        pending = {pool.submit(analyze_single_exif, p): p for p in image_files}
        for done_count, fut in enumerate(as_completed(pending)):
            if stop_event and stop_event.is_set():
                log_callback("\n[yellow]🛑 Workflow stopped by user.[/yellow]")
                pool.shutdown(wait=False, cancel_futures=True)
                return

            if not simulated:
                log_callback(f"  [grey]Scanning image {done_count+1}/{len(image_files)}...[/grey]")
            parsed = fut.result()
            if parsed:
                exif_records.append(parsed)

    if not exif_records:
        log_callback(f"  [yellow]No EXIF data found in {len(image_files)} scanned images.[/yellow]")
        return

    log_callback("  [grey]Aggregating statistics...[/grey]")

    stamps = [record['timestamp'] for record in exif_records]
    start_time = min(stamps)
    end_time = max(stamps)
    duration_str = format_duration(end_time - start_time)

    # Hour-of-day ranges mapped to human-readable lighting conditions.
    LIGHTING_TABLE = {
        (0, 4): "Night", (5, 7): "Golden Hour (AM)", (8, 10): "Morning",
        (11, 13): "Midday", (14, 16): "Afternoon", (17, 18): "Golden Hour (PM)",
        (19, 21): "Dusk", (22, 23): "Night",
    }
    lighting_buckets = defaultdict(int)
    camera_counter = Counter()
    focal_len_counter = Counter()
    aperture_counter = Counter()

    for record in exif_records:
        shot_hour = record['timestamp'].hour
        for (lo, hi), label in LIGHTING_TABLE.items():
            if lo <= shot_hour <= hi:
                lighting_buckets[label] += 1
                break
        camera_counter[record['camera']] += 1
        focal_len_counter[record['focal_length']] += 1
        aperture_counter[record['aperture']] += 1

    # --- Display Results to Log ---
    log_callback("\n[bold]📖 Session Story:[/bold]")
    log_callback(f"  Started: {start_time.strftime('%a, %b %d at %I:%M %p')}")
    log_callback(f"  Ended: {end_time.strftime('%a, %b %d at %I:%M %p')}")
    log_callback(f"  Duration: [bold]{duration_str}[/bold]")
    log_callback(f"  Total Shots: [bold]{len(image_files)}[/bold] ({len(exif_records)} with EXIF)")

    log_callback("\n[bold]☀️ Lighting Conditions:[/bold]")
    for chart_line in generate_bar_chart(lighting_buckets, bar_width=30):
        log_callback(chart_line)

    # Top-3 tables share a layout, so drive them from one tuple of sections.
    log_callback("\n[bold]🎨 Creative Habits (Top 3):[/bold]")
    for heading, counter in (
        ("Cameras", camera_counter),
        ("Focal Lengths", focal_len_counter),
        ("Apertures", aperture_counter),
    ):
        log_callback(f"  [cyan]{heading}:[/cyan]")
        for value, count in counter.most_common(3):
            log_callback(f"    {value}: [bold]{count} shots[/bold]")