├── .gitignore ├── CHANGELOG.md ├── LICENSE.md ├── README.md ├── ROADMAP.md ├── civitai_manager ├── __init__.py ├── main.py └── src │ ├── __init__.py │ ├── core │ ├── __init__.py │ └── metadata_manager.py │ ├── migrations │ └── v1_5_0 │ │ ├── README.md │ │ ├── __init__.py │ │ └── migrate_filenames.py │ └── utils │ ├── __init__.py │ ├── config.py │ ├── file_tracker.py │ ├── html_generators │ ├── __init__.py │ ├── browser_page.py │ └── model_page.py │ └── string_utils.py ├── config.json ├── config_examples ├── config.first-use.json └── config.update.json ├── poetry.lock ├── pyproject.toml └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | .Python 6 | build/ 7 | develop-eggs/ 8 | dist/ 9 | downloads/ 10 | eggs/ 11 | .eggs/ 12 | lib/ 13 | lib64/ 14 | parts/ 15 | sdist/ 16 | var/ 17 | wheels/ 18 | *.egg-info/ 19 | .installed.cfg 20 | *.egg 21 | 22 | # Virtual Environment 23 | .env 24 | .venv 25 | env/ 26 | venv/ 27 | ENV/ 28 | 29 | # IDE 30 | .idea/ 31 | .vscode/ 32 | *.swp 33 | *.swo 34 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/). 6 | 7 | ## [1.6.3] - 2025-02-16 8 | ### Chore 9 | - Migrate project to Poetry for dependency management. 10 | 11 | ## [1.6.2] - 2025-02-16 12 | ### Refactor 13 | - Centralize version management in package root. 14 | 15 | ## [1.6.1] - 2025-02-16 16 | ### Fix 17 | - Update `sanitize_filename` for new project structure. 18 | 19 | ## [1.6.0] - 2025-02-16 20 | ### Refactor 21 | - Reorganize project structure to follow Python conventions. 22 | 23 | ## [1.5.5] - 2025-02-09 24 | ### Fix 25 | - Handle `None` values in global summary generation. 26 | 27 | ## [1.5.4] - 2024-12-30 28 | ### Feature 29 | - Persist sort preference in `localStorage`. 30 | 31 | ## [1.5.3] - 2024-12-30 32 | ### Feature 33 | - Add file size display and sorting. 34 | 35 | ## [1.5.2] - 2024-12-30 36 | ### Feature 37 | - Add migration script for filename sanitization (fix breaking changes from v1.5.0). 38 | 39 | ## [1.5.1] - 2024-12-26 40 | ### Feature 41 | - Add sorting options to browser page. 42 | 43 | ## [1.5.0] - 2024-12-26 44 | ### Fix 45 | - Improve filename handling. 46 | 47 | ## [1.4.4] - 2024-12-25 48 | ### Fix 49 | - Prioritize `config.json` over CLI arguments. 50 | 51 | ## [1.4.3] - 2024-12-20 52 | ### Fix 53 | - Path handling in main function (fixed TypeError when processing command line arguments). 54 | 55 | ## [1.4.2] - 2024-12-20 56 | ### Feature 57 | - Add `--noconfig` flag to override config file. 58 | 59 | ## [1.4.1] - 2024-12-20 60 | ### Fix 61 | - Fixed event listener persistence for arrow key navigation between images. 62 | 63 | ## [1.4.0] - 2024-12-20 64 | ### Feature 65 | - Add JSON configuration file support. 66 | 67 | ## [1.3.6] - 2024-12-20 68 | ### Feature 69 | - Add keyboard navigation for preview images. 70 | 71 | ## [1.3.5] - 2024-12-20 72 | ### Refactor 73 | - Decrease the default delay between models (from 6-12 to 3-6 seconds). 74 | 75 | ## [1.3.4] - 2024-12-20 76 | ### Fix 77 | - Improve preview image metadata handling. 
78 | 79 | ## [1.3.3] - 2024-11-29 80 | ### Refactor 81 | - Increase model's thumbnail size in the model browser. 82 | 83 | ## [1.3.2] - 2024-11-29 84 | ### Fix 85 | - Handle duplicate file message in clean operation. 86 | 87 | ## [1.3.1] - 2024-11-29 88 | ### Fix 89 | - The scale effect in the model's gallery now affects videos. 90 | 91 | ## [1.3.0] - 2024-11-29 92 | ### Feature 93 | - Enhance image modal with detailed metadata display (seed, prompt used, etc.). 94 | 95 | ## [1.2.5] - 2024-11-29 96 | ### Feature 97 | - Enhance model browser search functionality. 98 | 99 | ## [1.2.4] - 2024-11-29 100 | ### Feature 101 | - Enhance `--clean` to detect and handle duplicate models. 102 | 103 | ## [1.2.3] - 2024-11-29 104 | ### Feature 105 | - Add toggleable cover images to model browser. 106 | 107 | ## [1.2.2] - 2024-11-29 108 | ### Feature 109 | - Add `--skipmissing` option for optimizing model checks. 110 | 111 | ## [1.2.1] - 2024-11-29 112 | ### Fix 113 | - Prevent missing models from appearing in multiple sections. 114 | 115 | ## [1.2.0] - 2024-11-29 116 | ### Feature 117 | - Add video preview support for model galleries. 118 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Jeremy M. Sultan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Civitai Data Manager 2 | 3 | A lightweight tool to locally manage, back up, and organize SafeTensors model metadata from Civitai. 4 | 5 | ## 🖥️ Demo 6 | 7 | ![Civitai Data Manager Demo](https://i.imgur.com/FfmROxQ.gif) 8 | 9 | ## ✨ Features 10 | 11 | - **📂 Backup Crucial Info**: Save model metadata, trigger words, usage notes, examples and authors. 12 | - **🖼️ HTML Browser**: Generate interactive HTML pages for browsing your collection. 13 | - **🚀 Smart Updates**: Update only when new data is available. 14 | - **✔️ Broad Compatibility**: Supports `.safetensors` models on Civitai (Checkpoints, LoRA, LyCORIS, etc.). 15 | - **🔒 Lightweight & Free**: No API key required, and highly efficient. 16 | 17 | ## 🚀 Getting Started 18 | 19 | ### Requirements 20 | - Python 3.10 or higher 21 | 22 | ### Installation 23 | 24 | 1. 
Clone the repository:
25 | ```bash
26 | git clone https://github.com/jeremysltn/civitai-data-manager.git
27 | ```
28 | 
29 | 2. Navigate to the project directory:
30 | ```bash
31 | cd civitai-data-manager
32 | ```
33 | 
34 | 3. Install dependencies:
35 | ```bash
36 | # Using Poetry (recommended):
37 | poetry install
38 | 
39 | # Using pip:
40 | pip install -r requirements.txt
41 | ```
42 | 
43 | 4. To verify the installation, run the script with the help flag:
44 | ```bash
45 | python -m civitai_manager.main --help
46 | ```
47 | 
48 | You should see the available command-line options displayed.
49 | 
50 | ## 📖 Usage Guide
51 | 
52 | You can use this tool in two ways: with a `config.json` file or with command-line arguments.
53 | 
54 | ### Configuration File (Recommended)
55 | 
56 | Edit the `config.json` file in the script directory with your preferred settings. If present, the config file takes precedence over command-line arguments.
57 | 
58 | Example config for first use (save as `config.json`):
59 | ```json
60 | {
61 |     "all": "path/to/models/directory",
62 |     "output": "path/to/output/directory",
63 |     "images": true
64 | }
65 | ```
66 | 
67 | Example configurations are located in the `config_examples` directory.
68 | 
69 | ### Basic Usage
70 | 
71 | - The first time, edit the `config.json` file and simply run:
72 | ```bash
73 | python -m civitai_manager.main
```
74 | - To pick up newly added models and metadata updates, run the tool periodically with the update config (see `config_examples/config.update.json` and the example after the option list below)
75 | 
76 | ### Command Options
77 | #### Input and Output:
78 | - `--single`: Process a single model file:
79 | ```bash
80 | python -m civitai_manager.main --single "path/to/model.safetensors"
81 | ```
82 | - `--all`: Process all models in a directory:
83 | ```bash
84 | python -m civitai_manager.main --all "path/to/model/directory"
85 | ```
86 | - `--output`: Set an output directory:
87 | ```bash
88 | python -m civitai_manager.main --all "path/to/models" --output "path/to/output"
89 | ```
90 | 
91 | #### Image Options:
92 | - `--images`: Download all preview images.
93 | - `--noimages`: Skip downloading preview images.
94 | - `--generateimagejson`: Create JSON metadata for preview images.
95 | 
96 | #### Processing Options:
97 | - `--onlynew`: Process only new files.
98 | - `--skipmissing`: Skip previously missing models.
99 | - `--onlyupdate`: Update metadata for processed models.
100 | - `--clean`: Remove data for models no longer in the source directory.
101 | 
102 | #### HTML Generation:
103 | - `--onlyhtml`: Generate HTML files from existing data only.
104 | 
105 | #### Advanced Options:
106 | - `--noconfig`: Ignore `config.json` and use only command-line arguments.
107 | - `--notimeout`: Disable rate limiting protection (use cautiously). 
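#### Example: Update Configuration

The keys accepted in `config.json` mirror the command-line flags above, with boolean flags written as `true` or `false`. As a rough sketch of an update-oriented configuration (the exact contents of `config_examples/config.update.json` may differ), the following would process only newly added models and skip preview image downloads:

```json
{
    "all": "path/to/models/directory",
    "output": "path/to/output/directory",
    "onlynew": true,
    "noimages": true
}
```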
108 | 
109 | 
110 | ### Recommended Organization
111 | 
112 | For better organization, run separately for each model category:
113 | 
114 | ```bash
115 | # For checkpoints
116 | python -m civitai_manager.main --all "path/to/checkpoints/sdxl" --output "path/to/backup/checkpoints/sdxl" --noconfig
117 | python -m civitai_manager.main --all "path/to/checkpoints/flux" --output "path/to/backup/checkpoints/flux" --noconfig
118 | 
119 | # For Loras
120 | python -m civitai_manager.main --all "path/to/loras/sdxl" --output "path/to/backup/loras/sdxl" --noconfig
121 | python -m civitai_manager.main --all "path/to/loras/flux" --output "path/to/backup/loras/flux" --noconfig
122 | ```
123 | 
124 | ### Best Practices
125 | 
126 | - If you want to update only the Civitai data, use the options `--onlyupdate` and `--noimages`
127 | - As a precaution, always back up the generated data directory along with your models
128 | - Monitor `missing_from_civitai.txt` and `duplicate_models.txt` for models that need manual documentation
129 | 
130 | ## 🗃️ Output Structure
131 | 
132 | ### Directory Layout
133 | 
134 | ```
135 | output_directory/
136 | ├── model_name/
137 | │   ├── model_name_metadata.json              # SafeTensors metadata
138 | │   ├── model_name_hash.json                  # SHA256 hash
139 | │   ├── model_name_civitai_model_version.json # Model-versions endpoint data from Civitai
140 | │   ├── model_name_civitai_model.json         # Full model data from Civitai
141 | │   ├── model_name_preview_0.jpeg             # First preview image
142 | │   ├── model_name_preview_0.json             # Metadata for first preview image
143 | │   ├── model_name_preview_x.jpeg             # Additional preview images (if --images used)
144 | │   ├── model_name_preview_x.json             # Metadata for additional preview images (if --images used)
145 | │   └── model_name.html                       # Model-specific HTML page
146 | ├── index.html                                # Model browser
147 | ├── missing_from_civitai.txt                  # List of models not found on Civitai
148 | ├── duplicate_models.txt                      # List of duplicate models
149 | └── processed_files.json                      # List of processed files
150 | ```
151 | 
152 | ## 🔍 Features in Detail
153 | 
154 | ### Rate Limiting Protection
155 | - Default: a random 3-6 second delay after each model and a 1 second delay between pictures
156 | - Disable with `--notimeout` flag (use cautiously)
157 | 
158 | For example, processing 10 models (with 10 pictures each) adds roughly:
159 | - Minimum delay: ~117 seconds (9 inter-model delays of 3 s plus 90 one-second delays between pictures)
160 | - Maximum delay: ~144 seconds (9 inter-model delays of 6 s plus 90 one-second delays)
161 | - Average delay: ~130 seconds, on top of the actual download time
162 | 
163 | **Note about Rate Limiting:** While Civitai's exact rate limiting policies are not publicly documented, these delays are implemented as a precautionary measure to:
164 | - Be respectful to Civitai's servers
165 | - Avoid getting your requests blocked
166 | 
167 | If you do not have a lot of files to process, you can disable these delays using the `--notimeout` flag.
168 | 
169 | ### Update Checking
170 | - The script compares Civitai's `updatedAt` timestamp with local data and only processes models with new versions
171 | - Prevents unnecessary API calls and downloads
172 | 
173 | ### Download Images
174 | - The script can download all preview images for your models using the `--images` flag.
175 | - You can disable image downloading entirely with the `--noimages` flag.
176 | - By default, only the first preview image will be downloaded.
177 | 
178 | ### HTML Generation
179 | - Individual HTML files for each model showing detailed information and an image gallery with generation details
180 | - Global model browser with:
181 |     - Models grouped by type (checkpoint, lora, etc.) 
182 |     - Search functionality (tag, filename, model name)
183 |     - By default, models are sorted by download count
184 |     - Links to individual model and direct download pages
185 | 
186 | ### Processed Files Tracking
187 | - Maintains a JSON file listing all processed models
188 | - Enables selective processing of new files with `--onlynew`
189 | - Records processing timestamp for each file
190 | 
191 | ## ❓ FAQ
192 | 
193 | ### How can I retrieve trigger words for a Lora that was deleted from Civitai?
194 | 
195 | If the Lora model has been deleted from Civitai, the script can still generate a `metadata.json` file. Inside this file, look for the JSON properties `"ss_datasets.tag_frequency"` and `"ss_tag_frequency"`, where you'll find the tags associated with the model. These properties are not guaranteed to be present, however, so consider running this script regularly to archive all useful information while it is still available.
196 | 
197 | ### How does this tool compare to other model managers or Civitai tools?
198 | 
199 | This tool stands out for its simplicity and lightweight design. It requires no configuration and operates independently of any WebUI (such as A1111, ComfyUI, etc.). With a single command, it scans your models directory, gathers information from Civitai, and generates an interactive model browser (`index.html`).
200 | 
201 | ## 📘 Additional Information
202 | 
203 | ### Features
204 | [Changelog](CHANGELOG.md) / [Roadmap](ROADMAP.md)
205 | 
206 | ### Contributing
207 | Feel free to open issues or submit pull requests with improvements.
208 | 
209 | ### License
210 | [MIT License](LICENSE.md)
211 | 
212 | ### Acknowledgments
213 | In accordance with Civitai's Terms of Service, this tool adheres to the restriction of not accessing, searching, or utilizing any part of the service through unauthorized engines, software, or tools. It only uses the endpoints provided by Civitai through their [official open API](https://github.com/civitai/civitai/wiki/REST-API-Reference), ensuring full compliance with the terms.
214 | 
--------------------------------------------------------------------------------
/ROADMAP.md:
--------------------------------------------------------------------------------
1 | # Roadmap
2 | 
3 | This roadmap outlines the planned features and improvements for this project.
4 | 
5 | ## 🚀 Upcoming Features
6 | 
7 | - **🔥 Manual Model Page**: Add a way for users to manually add models (via a JSON file and directory scan). This is useful for models that were never available on Civitai but were downloaded elsewhere.
8 | - **Handling Multiple `config.json` Files**: Implement the best way to efficiently handle multiple configuration files for testing and various use cases.
9 | - **File Sorting**: Add an option to select the default sort order in the generated model browser, with the future possibility of configuring this through a `config.json` file.
10 | - **Dark Mode**: Integrate dark mode in templates for a more user-friendly and customizable experience.
11 | 
12 | ## 🛠️ Miscellaneous Improvements
13 | 
14 | - **Implement Logging**: Add better logging functionality to improve tracking and debugging processes.
15 | - **Add Progress Tracking**: Integrate a progress bar to display the status of file processing, providing users with clearer feedback.
16 | - **Code Refactor**: Clean up and move certain functions to a `utils` file to improve code structure and maintainability. 
17 | -------------------------------------------------------------------------------- /civitai_manager/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Civitai Data Manager - A lightweight tool to locally manage, back up, and organize SafeTensors model metadata from Civitai. 3 | """ 4 | 5 | __version__ = "1.6.3" 6 | -------------------------------------------------------------------------------- /civitai_manager/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import argparse 6 | from pathlib import Path 7 | from civitai_manager import __version__ 8 | from civitai_manager.src.core.metadata_manager import ( 9 | process_single_file, 10 | process_directory, 11 | clean_output_directory, 12 | generate_image_json_files, 13 | get_output_path 14 | ) 15 | from civitai_manager.src.utils.html_generators.browser_page import generate_global_summary 16 | from civitai_manager.src.utils.config import load_config, ConfigValidationError 17 | 18 | def parse_cli_args(require_args=False): 19 | """Parse and validate command line arguments.""" 20 | parser = argparse.ArgumentParser(description='Process SafeTensors files and fetch Civitai data') 21 | group = parser.add_mutually_exclusive_group(required=require_args) 22 | group.add_argument('--single', type=str, help='Path to a single .safetensors file') 23 | group.add_argument('--all', type=str, help='Path to directory containing .safetensors files') 24 | parser.add_argument('--notimeout', action='store_true', 25 | help='Disable timeout between files (warning: may trigger rate limiting)') 26 | parser.add_argument('--output', type=str, 27 | help='Output directory path (if not specified, will prompt for it)') 28 | parser.add_argument('--images', action='store_true', 29 | help='Download all available preview images instead of just the first one') 30 | parser.add_argument('--generateimagejson', action='store_true', 31 | help='Generate JSON files for all preview images from existing model version data') 32 | parser.add_argument('--noimages', action='store_true', 33 | help='Skip downloading any preview images') 34 | parser.add_argument('--onlynew', action='store_true', 35 | help='Only process new files that haven\'t been processed before') 36 | parser.add_argument('--skipmissing', action='store_true', 37 | help='Skip previously missing models when used with --onlynew') 38 | parser.add_argument('--onlyhtml', action='store_true', 39 | help='Only generate HTML files from existing JSON data') 40 | parser.add_argument('--onlyupdate', action='store_true', 41 | help='Only update previously processed files, skipping hash calculation') 42 | parser.add_argument('--clean', action='store_true', 43 | help='Remove data for models that no longer exist in the target directory') 44 | parser.add_argument('--noconfig', action='store_true', 45 | help='Ignore config.json and use command line arguments only') 46 | 47 | 48 | args = parser.parse_args() 49 | 50 | # Validate arguments 51 | if args.images and args.noimages: 52 | print("Error: Cannot use both --images and --noimages at the same time") 53 | sys.exit(1) 54 | if args.onlynew and args.onlyhtml: 55 | print("Error: Cannot use both --onlynew and --onlyhtml at the same time") 56 | sys.exit(1) 57 | if args.onlyupdate and args.onlynew: 58 | print("Error: Cannot use both --onlyupdate and --onlynew at the same time") 59 | sys.exit(1) 60 | if args.onlyupdate and args.onlyhtml: 61 | 
print("Error: Cannot use both --onlyupdate and --onlyhtml at the same time") 62 | sys.exit(1) 63 | if args.clean and args.single: 64 | print("Error: --clean option can only be used with --all") 65 | sys.exit(1) 66 | if args.clean and (args.onlyhtml or args.onlyupdate or args.onlynew): 67 | print("Error: --clean cannot be used with --onlyhtml, --onlyupdate, or --onlynew") 68 | sys.exit(1) 69 | 70 | return args 71 | 72 | def get_config(): 73 | """ 74 | Try to load config from file first, fall back to CLI args if no config found 75 | or if config is invalid. Respect --noconfig flag. 76 | """ 77 | # First check if --noconfig is specified without requiring other arguments 78 | args = parse_cli_args(require_args=False) 79 | 80 | if not args.noconfig: 81 | print("Attempting to load config.json...") 82 | try: 83 | config = load_config() 84 | if config: 85 | print("Successfully loaded configuration from config.json") 86 | print(f"Config contents: {config}") 87 | return config 88 | except ConfigValidationError as e: 89 | print(f"Error in config file: {str(e)}") 90 | except Exception as e: 91 | print(f"Error loading config file: {str(e)}") 92 | else: 93 | print("Using command line arguments (--noconfig specified)") 94 | 95 | # If we get here, either --noconfig was used or config loading failed 96 | # Now we require CLI arguments 97 | args = parse_cli_args(require_args=True) 98 | config = vars(args) 99 | config.pop('noconfig') 100 | return config 101 | 102 | def main(): 103 | config = get_config() 104 | 105 | # Get base output path either from config/argument or user input 106 | if config.get('output'): 107 | base_output_path = Path(config['output']) 108 | if not base_output_path.exists(): 109 | try: 110 | base_output_path.mkdir(parents=True, exist_ok=True) 111 | print(f"Created output directory: {base_output_path}") 112 | except Exception as e: 113 | print(f"Error creating output directory: {str(e)}") 114 | sys.exit(1) 115 | if not os.access(base_output_path, os.W_OK): 116 | print(f"Error: No write permission for directory {base_output_path}") 117 | sys.exit(1) 118 | else: 119 | base_output_path = get_output_path(clean=config.get('clean', False)) 120 | 121 | if config.get('single'): 122 | safetensors_path = Path(config['single']) 123 | process_single_file(safetensors_path, base_output_path, 124 | download_all_images=config.get('images', False), 125 | skip_images=config.get('noimages', False), 126 | html_only=config.get('onlyhtml', False), 127 | only_update=config.get('onlyupdate', False)) 128 | elif config.get('all'): 129 | directory_path = Path(config['all']) 130 | 131 | if config.get('clean', False): 132 | clean_output_directory(directory_path, base_output_path) 133 | elif config.get('generateimagejson', False): 134 | generate_image_json_files(base_output_path) 135 | else: 136 | process_directory(directory_path, base_output_path, 137 | config.get('notimeout', False), 138 | download_all_images=config.get('images', False), 139 | skip_images=config.get('noimages', False), 140 | only_new=config.get('onlynew', False), 141 | html_only=config.get('onlyhtml', False), 142 | only_update=config.get('onlyupdate', False), 143 | skip_missing=config.get('skipmissing', False)) 144 | 145 | if ('single' in config or 'all' in config): 146 | generate_global_summary(base_output_path) 147 | 148 | if __name__ == "__main__": 149 | main() 150 | -------------------------------------------------------------------------------- /civitai_manager/src/__init__.py: 
-------------------------------------------------------------------------------- 1 | from civitai_manager import __version__ 2 | -------------------------------------------------------------------------------- /civitai_manager/src/core/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Core functionality for the Civitai Data Manager. 3 | """ 4 | 5 | from .metadata_manager import ( 6 | process_single_file, 7 | process_directory, 8 | clean_output_directory, 9 | generate_image_json_files, 10 | get_output_path 11 | ) 12 | 13 | __all__ = [ 14 | 'process_single_file', 15 | 'process_directory', 16 | 'clean_output_directory', 17 | 'generate_image_json_files', 18 | 'get_output_path' 19 | ] 20 | -------------------------------------------------------------------------------- /civitai_manager/src/core/metadata_manager.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | import json 4 | import sys 5 | import hashlib 6 | import shutil 7 | from datetime import datetime 8 | import time 9 | import random 10 | 11 | from civitai_manager import __version__ 12 | from ..utils.file_tracker import ProcessedFilesManager 13 | from ..utils.string_utils import sanitize_filename 14 | from ..utils.html_generators.model_page import generate_html_summary 15 | 16 | try: 17 | import requests 18 | except ImportError: 19 | print("Error: 'requests' module not found. Please install it using:") 20 | print("pip install requests") 21 | sys.exit(1) 22 | 23 | def get_output_path(clean=False): 24 | """ 25 | Get output path from user and create necessary directories. 26 | If no path is provided (empty input), use current directory. 27 | 28 | Returns: 29 | Path: Base output directory path 30 | """ 31 | while True: 32 | if clean: 33 | output_path = input("Enter the path you want to clean (press Enter for current directory): ").strip() 34 | else: 35 | output_path = input("Enter the path where you want to save the exported files (press Enter for current directory): ").strip() 36 | 37 | # Use current directory if input is empty 38 | if not output_path: 39 | path = Path.cwd() / 'output' 40 | print(f"Using current directory: {path}") 41 | else: 42 | path = Path(output_path) 43 | 44 | if not path.exists(): 45 | try: 46 | create = input(f"Directory {path} doesn't exist. Create it? 
(y/n): ").lower() 47 | if create == 'y': 48 | path.mkdir(parents=True, exist_ok=True) 49 | else: 50 | continue 51 | except Exception as e: 52 | print(f"Error creating directory: {str(e)}") 53 | continue 54 | 55 | if not os.access(path, os.W_OK): 56 | print("Error: No write permission for this directory") 57 | continue 58 | 59 | return path 60 | 61 | def setup_export_directories(base_path, safetensors_path): 62 | """ 63 | Create dated export directory and model-specific subdirectory 64 | 65 | Args: 66 | base_path (Path): Base output directory path 67 | safetensors_path (Path): Path to the safetensors file 68 | 69 | Returns: 70 | Path: Path to the model-specific directory 71 | """ 72 | 73 | # Create dated directory 74 | # current_date = datetime.now() 75 | # dated_dir = base_path / f"safetensors-export-{current_date.year}-{current_date.month:02d}-{current_date.day:02d}" 76 | # dated_dir.mkdir(exist_ok=True) 77 | 78 | # Create model-specific directory using sanitized safetensors filename 79 | sanitized_name = sanitize_filename(safetensors_path.stem) 80 | model_dir = base_path / sanitized_name 81 | model_dir.mkdir(exist_ok=True) 82 | 83 | return model_dir 84 | 85 | def calculate_sha256(file_path, buffer_size=65536): 86 | """Calculate SHA256 hash of a file""" 87 | sha256_hash = hashlib.sha256() 88 | with open(file_path, 'rb') as f: 89 | while True: 90 | data = f.read(buffer_size) 91 | if not data: 92 | break 93 | sha256_hash.update(data) 94 | return sha256_hash.hexdigest() 95 | 96 | def extract_metadata(file_path, output_dir): 97 | """ 98 | Extract metadata from a .safetensors file 99 | 100 | Args: 101 | file_path (str): Path to the .safetensors file 102 | output_dir (Path): Directory to save the output 103 | Returns: 104 | bool: True if successful, False otherwise 105 | """ 106 | try: 107 | path = Path(file_path) 108 | 109 | if not path.exists(): 110 | raise FileNotFoundError(f"File {path} not found") 111 | 112 | if path.suffix != '.safetensors': 113 | raise ValueError("File must have .safetensors extension") 114 | 115 | base_name = sanitize_filename(path.stem) 116 | metadata_path = output_dir / f"{base_name}_metadata.json" 117 | 118 | # Read just the first line for metadata 119 | with open(path, 'rb') as f: 120 | # Read header length (8 bytes, little-endian) 121 | header_length = int.from_bytes(f.read(8), 'little') 122 | 123 | # Read the header 124 | header_bytes = f.read(header_length) 125 | header_str = header_bytes.decode('utf-8') 126 | 127 | try: 128 | # Parse the JSON header 129 | header_data = json.loads(header_str) 130 | 131 | # Write metadata to JSON file 132 | with open(metadata_path, 'w', encoding='utf-8') as f: 133 | if "__metadata__" in header_data: 134 | json.dump(header_data["__metadata__"], f, indent=4) 135 | else: 136 | json.dump(header_data, f, indent=4) 137 | print(f"Metadata successfully extracted to {metadata_path}") 138 | return True 139 | 140 | except json.JSONDecodeError: 141 | print("Error: Could not parse metadata JSON") 142 | return False 143 | 144 | except Exception as e: 145 | print(f"Error: {str(e)}") 146 | return False 147 | 148 | def extract_hash(file_path, output_dir): 149 | """ 150 | Calculate hash of a .safetensors file and save it as JSON 151 | 152 | Args: 153 | file_path (str): Path to the .safetensors file 154 | output_dir (Path): Directory to save the output 155 | Returns: 156 | str: Hash value if successful, None otherwise 157 | """ 158 | try: 159 | path = Path(file_path) 160 | 161 | if not path.exists(): 162 | raise FileNotFoundError(f"File {path} not 
found") 163 | 164 | hash_value = calculate_sha256(path) 165 | base_name = sanitize_filename(path.stem) # Gets sanitized filename without extension 166 | hash_path = output_dir / f"{base_name}_hash.json" 167 | 168 | # Create hash JSON object 169 | hash_data = { 170 | "hash_type": "SHA256", 171 | "hash_value": hash_value, 172 | "filename": path.name, 173 | "timestamp": datetime.now().isoformat() 174 | } 175 | 176 | # Write hash to JSON file 177 | with open(hash_path, 'w', encoding='utf-8') as f: 178 | json.dump(hash_data, f, indent=4) 179 | print(f"Hash successfully saved to {hash_path}") 180 | 181 | return hash_value 182 | 183 | except Exception as e: 184 | print(f"Error: {str(e)}") 185 | return None 186 | 187 | def download_preview_image(image_url, output_dir, base_name, index=None, is_video=False, image_data=None): 188 | """ 189 | Download a preview image from Civitai 190 | 191 | Args: 192 | image_url (str): URL of the image to download 193 | output_dir (Path): Directory to save the image 194 | base_name (str): Base name of the safetensors file 195 | index (int, optional): Image index for multiple images 196 | Returns: 197 | bool: True if successful, False otherwise 198 | """ 199 | try: 200 | if not image_url: 201 | return False 202 | 203 | # Remove the width parameter to get full size image 204 | url_parts = image_url.split('/') 205 | if 'width=' in url_parts[-2]: 206 | url_parts.pop(-2) 207 | full_size_url = '/'.join(url_parts) 208 | 209 | print(f"\nDownloading preview image:") 210 | print(f"URL: {full_size_url}") 211 | 212 | response = requests.get(full_size_url, stream=True) 213 | if response.status_code == 200: 214 | # Get the extension from the URL 215 | # image_name = url_parts[-1] 216 | ext = '.mp4' if is_video else Path(url_parts[-1]).suffix 217 | # Add index to sanitized filename if provided 218 | sanitized_base = sanitize_filename(base_name) 219 | image_filename = f"{sanitized_base}_preview{f'_{index}' if index is not None else ''}{ext}" 220 | image_path = output_dir / image_filename 221 | 222 | # Download and save the image 223 | with open(image_path, 'wb') as f: 224 | for chunk in response.iter_content(chunk_size=8192): 225 | if chunk: 226 | f.write(chunk) 227 | 228 | # Download and save the metadata associated with the image 229 | if image_data: 230 | json_filename = f"{Path(image_filename).stem}.json" 231 | json_path = output_dir / json_filename 232 | with open(json_path, 'w', encoding='utf-8') as f: 233 | json.dump(image_data, f, indent=4) 234 | 235 | print(f"Preview image successfully saved to {image_path}") 236 | return True 237 | else: 238 | print(f"Error: Could not download image (Status code: {response.status_code})") 239 | return False 240 | 241 | except Exception as e: 242 | print(f"Error downloading preview image: {str(e)}") 243 | return False 244 | 245 | def generate_image_json_files(base_output_path): 246 | """ 247 | Generate JSON files for all preview images from existing model version data 248 | 249 | Args: 250 | base_output_path (Path): Base output directory path 251 | """ 252 | print("\nGenerating JSON files for preview images...") 253 | 254 | # Find all model version JSON files 255 | version_files = list(Path(base_output_path).glob('*/*_civitai_model_version.json')) 256 | total_generated = 0 257 | 258 | for version_file in version_files: 259 | try: 260 | # Read version data 261 | with open(version_file, 'r', encoding='utf-8') as f: 262 | version_data = json.load(f) 263 | 264 | # Get model directory 265 | model_dir = version_file.parent 266 | 267 | # Process 
each image in the version data 268 | if 'images' in version_data: 269 | for i, image_data in enumerate(version_data['images']): 270 | # Determine the preview file extension 271 | ext = '.mp4' if image_data.get('type') == 'video' else '.jpeg' 272 | preview_file = model_dir / f"{model_dir.name}_preview_{i}{ext}" 273 | 274 | # Only create JSON if the preview file exists 275 | if preview_file.exists(): 276 | json_file = preview_file.with_suffix('.json') 277 | 278 | # Write image metadata 279 | with open(json_file, 'w', encoding='utf-8') as f: 280 | json.dump(image_data, f, indent=4) 281 | total_generated += 1 282 | 283 | except Exception as e: 284 | print(f"Error processing {version_file}: {str(e)}") 285 | continue 286 | 287 | print(f"\nGenerated {total_generated} JSON files for preview images") 288 | return True 289 | 290 | def update_missing_files_list(base_path, safetensors_path, status_code): 291 | """ 292 | Update the list of files missing from Civitai 293 | 294 | Args: 295 | base_path (Path): Base output directory path 296 | safetensors_path (Path): Path to the safetensors file 297 | status_code (int): HTTP status code from Civitai API 298 | """ 299 | missing_file = base_path / "missing_from_civitai.txt" 300 | timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") 301 | 302 | # Read existing entries if file exists, skipping header lines 303 | entries = [] 304 | if missing_file.exists(): 305 | with open(missing_file, 'r', encoding='utf-8') as f: 306 | for line in f: 307 | line = line.strip() 308 | if line and not line.startswith('#'): 309 | # Extract filename from the entry 310 | filename = line.split(' | ')[-1] 311 | # Keep entries for other files 312 | if filename != safetensors_path.name: 313 | entries.append(line) 314 | 315 | # Add new entry if file is missing 316 | if status_code is not None: 317 | new_entry = f"{timestamp} | Status {status_code} | {safetensors_path.name}" 318 | entries.append(new_entry) 319 | 320 | if entries: 321 | # Write updated list with headers 322 | with open(missing_file, 'w', encoding='utf-8') as f: 323 | f.write("# Files not found on Civitai\n") 324 | f.write("# Format: Timestamp | Status Code | Filename\n") 325 | f.write("# This file is automatically updated when the script runs\n") 326 | f.write("# A file is removed from this list when it becomes available again\n\n") 327 | 328 | # Write entries sorted by timestamp (newest first) 329 | for entry in sorted(entries, reverse=True): 330 | f.write(f"{entry}\n") 331 | elif missing_file.exists(): 332 | # Delete the file if there are no entries 333 | missing_file.unlink() 334 | print("\nAll models are now available on Civitai. 
Removed missing_from_civitai.txt")
335 | 
336 | def find_duplicate_models(directory_path, base_output_path):
337 |     """
338 |     Find models with duplicate hashes
339 | 
340 |     Args:
341 |         directory_path (Path): Directory containing safetensors files
342 |         base_output_path (Path): Base output directory path
343 | 
344 |     Returns:
345 |         dict: Dictionary mapping hashes to lists of model info
346 |     """
347 |     hash_map = {}
348 | 
349 |     # Scan all processed models
350 |     for model_dir in base_output_path.iterdir():
351 |         if not model_dir.is_dir():
352 |             continue
353 | 
354 |         hash_file = model_dir / f"{model_dir.name}_hash.json"
355 |         if not hash_file.exists():
356 |             continue
357 | 
358 |         try:
359 |             with open(hash_file, 'r', encoding='utf-8') as f:
360 |                 hash_data = json.load(f)
361 |                 hash_value = hash_data.get('hash_value')
362 |                 if not hash_value:
363 |                     continue
364 | 
365 |                 # Find corresponding safetensors file
366 |                 safetensors_file = None
367 |                 for file in Path(directory_path).glob('**/*.safetensors'):
368 |                     if sanitize_filename(file.stem) == model_dir.name:  # output dirs use sanitized names
369 |                         safetensors_file = file
370 |                         break
371 | 
372 |                 if not safetensors_file:
373 |                     continue
374 | 
375 |                 if hash_value not in hash_map:
376 |                     hash_map[hash_value] = []
377 | 
378 |                 hash_map[hash_value].append({
379 |                     'model_dir': model_dir,
380 |                     'safetensors_file': safetensors_file,
381 |                     'processed_time': hash_data.get('timestamp')
382 |                 })
383 | 
384 |         except Exception as e:
385 |             print(f"Error reading hash file {hash_file}: {e}")
386 |             continue
387 | 
388 |     return {k: v for k, v in hash_map.items() if len(v) > 1}
389 | 
390 | def clean_output_directory(directory_path, base_output_path):
391 |     """
392 |     Clean up output directory by removing data for models that no longer exist
393 | 
394 |     Args:
395 |         directory_path (Path): Directory containing the safetensors files
396 |         base_output_path (Path): Base output directory path
397 |     """
398 | 
399 |     print("\nStarting cleanup process (duplicates)...")
400 | 
401 |     # First handle duplicates
402 |     duplicates = find_duplicate_models(directory_path, base_output_path)
403 |     duplicate_file = None
404 | 
405 |     if duplicates:
406 |         duplicate_file = base_output_path / "duplicate_models.txt"
407 |         with open(duplicate_file, 'w', encoding='utf-8') as f:
408 |             f.write("# Duplicate models found in input directory\n")
409 |             f.write("# Format: Hash | Kept Model | Removed Duplicates\n")
410 |             f.write("# This file is automatically updated when running --clean\n\n")
411 | 
412 |             for hash_value, models in duplicates.items():
413 |                 # Sort by processed time, newest first
414 |                 sorted_models = sorted(models,
415 |                     key=lambda x: x['processed_time'] if x['processed_time'] else '',
416 |                     reverse=True
417 |                 )
418 | 
419 |                 # Keep the newest one, remove others
420 |                 kept_model = sorted_models[0]
421 |                 removed_models = sorted_models[1:]
422 | 
423 |                 # Write to duplicates file
424 |                 f.write(f"Hash: {hash_value}\n")
425 |                 f.write(f"Kept: {kept_model['safetensors_file']}\n")
426 |                 f.write("Removed:\n")
427 | 
428 |                 for model in removed_models:
429 |                     f.write(f"  - {model['safetensors_file']}\n")
430 |                     print(f"Removing duplicate model: {model['model_dir'].name}")
431 |                     try:
432 |                         shutil.rmtree(model['model_dir'])
433 |                     except Exception as e:
434 |                         print(f"Error removing directory {model['model_dir']}: {e}")
435 |                 f.write("\n")
436 | 
437 |         print(f"\nDuplicate models list saved to {duplicate_file}")
438 |     else:
439 |         print("\nNo duplicates to remove")
440 | 
441 |     print("\nStarting cleanup process (removed models)...")
442 |     # Then handle missing models
443 |     # Get list of all current safetensors files (without extension), sanitized to match output directory names
444 |     existing_models = {
445 |         sanitize_filename(Path(file).stem)
446 |         for file in Path(directory_path).glob('**/*.safetensors')
447 |     }
448 | 
449 |     # Check each directory in output
450 |     output_dirs = [d for d in base_output_path.iterdir() if d.is_dir()]
451 |     cleaned_dirs = []
452 | 
453 |     for output_dir in output_dirs:
454 |         if output_dir.name not in existing_models:
455 |             print(f"Removing directory: {output_dir.name} (model not found)")
456 |             try:
457 |                 shutil.rmtree(output_dir)
458 |                 cleaned_dirs.append(str(output_dir))
459 |             except Exception as e:
460 |                 print(f"Error removing directory {output_dir}: {e}")
461 | 
462 |     # Update processed_files.json
463 |     if cleaned_dirs:
464 |         files_manager = ProcessedFilesManager(base_output_path)
465 |         new_processed_files = [
466 |             f for f in files_manager.processed_files['files']
467 |             if sanitize_filename(Path(f).stem) in existing_models
468 |         ]
469 |         files_manager.processed_files['files'] = new_processed_files
470 |         files_manager.save_processed_files()
471 | 
472 |         print(f"\nCleaned up {len(cleaned_dirs)} directories")
473 |     else:
474 |         print("\nNo directories to clean")
475 | 
476 |     return True
477 | 
478 | def fetch_version_data(hash_value, output_dir, base_path, safetensors_path, download_all_images=False, skip_images=False):
479 |     """
480 |     Fetch version data from Civitai API using file hash
481 | 
482 |     Args:
483 |         hash_value (str): SHA256 hash of the file
484 |         output_dir (Path): Directory to save the output
485 |         base_path (Path): Base output directory path
486 |         safetensors_path (Path): Path to the safetensors file
487 |         download_all_images (bool): Whether to download all available preview images
488 |         skip_images (bool): Whether to skip downloading images completely
489 |     Returns:
490 |         int or None: modelId if successful, None otherwise
491 |     """
492 |     try:
493 |         civitai_url = f"https://civitai.com/api/v1/model-versions/by-hash/{hash_value}"
494 |         print(f"\nFetching version data from Civitai API:")
495 |         print(civitai_url)
496 | 
497 |         response = requests.get(civitai_url)
498 |         base_name = sanitize_filename(safetensors_path.stem)
499 |         civitai_path = output_dir / f"{base_name}_civitai_model_version.json"
500 | 
501 |         if response.status_code == 200:
502 |             with open(civitai_path, 'w', encoding='utf-8') as f:
503 |                 response_data = response.json()
504 |                 json.dump(response_data, f, indent=4)
505 |             print(f"Version data successfully saved to {civitai_path}")
506 | 
507 |             # Remove from missing files list if it was there before
508 |             update_missing_files_list(base_path, safetensors_path, None)  # Pass None to indicate file is back
509 | 
510 |             # Handle image downloads based on flags
511 |             if not skip_images and 'images' in response_data and response_data['images']:
512 |                 if download_all_images:
513 |                     print(f"\nDownloading all preview images ({len(response_data['images'])} images found)")
514 |                     for i, image_data in enumerate(response_data['images']):
515 |                         if 'url' in image_data:
516 |                             is_video = image_data.get('type') == 'video'
517 |                             download_preview_image(image_data['url'], output_dir, base_name, i, is_video, image_data)
518 |                             # Add a small delay between downloads to be nice to the server
519 |                             if i < len(response_data['images']) - 1:
520 |                                 time.sleep(1)
521 |                 else:
522 |                     # Download only the first image
523 |                     if 'url' in response_data['images'][0]:
524 |                         is_video = response_data['images'][0].get('type') == 'video'
525 |                         download_preview_image(response_data['images'][0]['url'], output_dir, base_name, 0, is_video, 
response_data['images'][0])
526 | 
527 | 
528 |             # Return modelId if it exists
529 |             return response_data.get('modelId')
530 |         else:
531 |             error_message = {
532 |                 "error": "Failed to fetch Civitai data",
533 |                 "status_code": response.status_code,
534 |                 "timestamp": datetime.now().isoformat()
535 |             }
536 |             with open(civitai_path, 'w', encoding='utf-8') as f:
537 |                 json.dump(error_message, f, indent=4)
538 |             print(f"Error: Failed to fetch Civitai data (Status code: {response.status_code})")
539 | 
540 |             # Update missing files list
541 |             update_missing_files_list(base_path, safetensors_path, response.status_code)
542 |             return None
543 | 
544 |     except Exception as e:
545 |         print(f"Error fetching version data: {str(e)}")
546 |         # Update missing files list for connection errors
547 |         # update_missing_files_list(base_path, safetensors_path, 0)  # Use 0 for connection errors
548 |         return None
549 | 
550 | def fetch_model_details(model_id, output_dir, safetensors_path):
551 |     """
552 |     Fetch detailed model information from Civitai API
553 | 
554 |     Args:
555 |         model_id (int): The model ID from Civitai
556 |         output_dir (Path): Directory to save the output
557 |         safetensors_path (Path): Path to the safetensors file
558 |     Returns:
559 |         bool: True if successful, False otherwise
560 |     """
561 |     try:
562 |         civitai_model_url = f"https://civitai.com/api/v1/models/{model_id}"
563 |         print(f"\nFetching model details from Civitai API:")
564 |         print(civitai_model_url)
565 | 
566 |         response = requests.get(civitai_model_url)
567 |         base_name = sanitize_filename(safetensors_path.stem)
568 |         model_data_path = output_dir / f"{base_name}_civitai_model.json"
569 | 
570 |         with open(model_data_path, 'w', encoding='utf-8') as f:
571 |             if response.status_code == 200:
572 |                 json.dump(response.json(), f, indent=4)
573 |                 print(f"Model details successfully saved to {model_data_path}")
574 |                 return True
575 |             else:
576 |                 error_data = {
577 |                     "error": "Failed to fetch model details",
578 |                     "status_code": response.status_code,
579 |                     "timestamp": datetime.now().isoformat()
580 |                 }
581 |                 json.dump(error_data, f, indent=4)
582 |                 print(f"Error: Could not fetch model details (Status code: {response.status_code})")
583 |                 return False
584 | 
585 |     except Exception as e:
586 |         print(f"Error fetching model details: {str(e)}")
587 |         return False
588 | 
589 | def check_for_updates(safetensors_path, output_dir, hash_value):
590 |     """
591 |     Check if the model needs to be updated by comparing updatedAt timestamps
592 | 
593 |     Args:
594 |         safetensors_path (Path): Path to the safetensors file
595 |         output_dir (Path): Directory where files are saved
596 |         hash_value (str): SHA256 hash of the safetensors file
597 | 
598 |     Returns:
599 |         bool: True if update is needed, False if files are up to date
600 |     """
601 |     try:
602 |         # Check the version file written by fetch_version_data
603 |         civitai_version_file = output_dir / f"{sanitize_filename(safetensors_path.stem)}_civitai_model_version.json"
604 |         if not civitai_version_file.exists():
605 |             return True
606 | 
607 |         # Read existing version data
608 |         try:
609 |             with open(civitai_version_file, 'r', encoding='utf-8') as f:
610 |                 existing_data = json.load(f)
611 |                 existing_updated_at = existing_data.get('updatedAt')
612 |                 if not existing_updated_at:
613 |                     return True
614 |         except (json.JSONDecodeError, KeyError):
615 |             return True
616 | 
617 |         # Fetch current version data from Civitai
618 |         civitai_url = f"https://civitai.com/api/v1/model-versions/by-hash/{hash_value}"
619 |         print(f"\nChecking for updates from Civitai API:")
620 |         print(civitai_url)
621 | 
622 |         response = requests.get(civitai_url) 
623 |         if response.status_code != 200:
624 |             print(f"Error checking for updates (Status code: {response.status_code})")
625 |             return True
626 | 
627 |         current_data = response.json()
628 |         current_updated_at = current_data.get('updatedAt')
629 | 
630 |         if not current_updated_at:
631 |             return True
632 | 
633 |         # Compare timestamps
634 |         if current_updated_at == existing_updated_at:
635 |             print(f"\nModel {safetensors_path.name} is up to date (Last updated: {existing_updated_at})")
636 |             return False
637 |         else:
638 |             print(f"\nUpdate available for {safetensors_path.name}")
639 |             print(f"Current version: {existing_updated_at}")
640 |             print(f"New version: {current_updated_at}")
641 |             return True
642 | 
643 |     except Exception as e:
644 |         print(f"Error checking for updates: {str(e)}")
645 |         return True  # If there's any error, proceed with update
646 | 
647 | def process_single_file(safetensors_path, base_output_path, download_all_images=False,
648 |                         skip_images=False, html_only=False, only_update=False):
649 |     """
650 |     Process a single safetensors file
651 | 
652 |     Args:
653 |         safetensors_path (Path): Path to the safetensors file
654 |         base_output_path (Path): Base path for output
655 |         download_all_images (bool): Whether to download all available preview images
656 |         skip_images (bool): Whether to skip downloading images completely
657 |         html_only (bool): Whether to only generate HTML files
658 |         only_update (bool): Whether to only update existing processed files
659 |     """
660 | 
661 |     if not safetensors_path.exists():
662 |         print(f"Error: File {safetensors_path} not found")
663 |         return False
664 | 
665 |     if safetensors_path.suffix != '.safetensors':
666 |         print(f"Error: File {safetensors_path} is not a safetensors file")
667 |         return False
668 | 
669 |     # Setup export directories
670 |     model_output_dir = setup_export_directories(base_output_path, safetensors_path)
671 |     print(f"\nProcessing: {safetensors_path.name}")
672 |     if not html_only:
673 |         print(f"Files will be saved in: {model_output_dir}")
674 | 
675 |     if html_only:
676 |         # Check if required files exist
677 |         base_name = sanitize_filename(safetensors_path.stem)
678 |         required_files = [
679 |             model_output_dir / f"{base_name}_civitai_model.json",
680 |             model_output_dir / f"{base_name}_civitai_model_version.json",
681 |             model_output_dir / f"{base_name}_hash.json"
682 |         ]
683 | 
684 |         if not all(f.exists() for f in required_files):
685 |             print(f"Error: Missing required JSON files for {safetensors_path.name}")
686 |             return False
687 | 
688 |         # Generate HTML only
689 |         generate_html_summary(model_output_dir, safetensors_path)
690 |         return True
691 | 
692 |     if only_update:
693 |         # Check if hash file exists (extract_hash writes the sanitized name)
694 |         hash_file = model_output_dir / f"{sanitize_filename(safetensors_path.stem)}_hash.json"
695 |         if not hash_file.exists():
696 |             print(f"Skipping {safetensors_path.name} (not previously processed)")
697 |             return False
698 | 
699 |         # Read existing hash
700 |         try:
701 |             with open(hash_file, 'r') as f:
702 |                 hash_data = json.load(f)
703 |                 hash_value = hash_data.get('hash_value')
704 |                 if not hash_value:
705 |                     raise ValueError("Invalid hash file")
706 |         except Exception as e:
707 |             print(f"Error reading hash file: {e}")
708 |             return False
709 |     else:
710 |         # Normal processing mode
711 |         hash_value = extract_hash(safetensors_path, model_output_dir)
712 |         if not hash_value:
713 |             print("Error: Failed to extract hash")
714 |             return False
715 | 
716 |     # Check if update is needed
717 |     if not check_for_updates(safetensors_path, model_output_dir, hash_value):
718 |         print("Skipping 
file (no updates available)")
719 |         return True
720 | 
721 |     # Process the file
722 |     if only_update or extract_metadata(safetensors_path, model_output_dir):
723 |         model_id = fetch_version_data(hash_value, model_output_dir, base_output_path,
724 |                                       safetensors_path, download_all_images, skip_images)
725 |         if model_id:
726 |             fetch_model_details(model_id, model_output_dir, safetensors_path)
727 |         generate_html_summary(model_output_dir, safetensors_path)
728 |         return True
729 | 
730 |     return False
731 | 
732 | def process_directory(directory_path, base_output_path, no_timeout=False,
733 |                       download_all_images=False, skip_images=False, only_new=False,
734 |                       html_only=False, only_update=False, skip_missing=False):
735 |     """
736 |     Process all safetensors files in a directory
737 | 
738 |     Args:
739 |         directory_path (Path): Path to the directory containing safetensors files
740 |         base_output_path (Path): Base path for output
741 |         no_timeout (bool): If True, disable timeout between files
742 |         download_all_images (bool): Whether to download all available preview images
743 |         skip_images (bool): Whether to skip downloading images completely
744 |         only_new (bool): Whether to only process new models
745 |         html_only (bool): Whether to only generate HTML files
746 |         only_update (bool): Whether to only update existing processed files
747 |     """
748 | 
749 |     if not directory_path.exists():
750 |         print(f"Error: Directory {directory_path} not found")
751 |         return False
752 | 
753 |     # Initialize processed files manager if needed
754 |     files_manager = None if html_only else ProcessedFilesManager(base_output_path)
755 | 
756 |     # Get files to process
757 |     if only_new and not html_only:
758 |         safetensors_files = files_manager.get_new_files(directory_path)
759 |         if not safetensors_files:
760 |             print("No new files to process")
761 |             return True
762 | 
763 |         if skip_missing:
764 |             # Read missing models file
765 |             missing_file = Path(base_output_path) / 'missing_from_civitai.txt'
766 |             missing_models = set()
767 |             if missing_file.exists():
768 |                 with open(missing_file, 'r', encoding='utf-8') as f:
769 |                     for line in f:
770 |                         if line.strip() and not line.startswith('#'):
771 |                             filename = line.strip().split(' | ')[-1]
772 |                             missing_models.add(filename)
773 | 
774 |             # Filter out previously missing models
775 |             safetensors_files = [
776 |                 f for f in safetensors_files
777 |                 if f.name not in missing_models
778 |             ]
779 |             if not safetensors_files:
780 |                 print("No new non-missing files to process")
781 |                 return True
782 | 
783 |         print(f"\nFound {len(safetensors_files)} new .safetensors files")
784 |     elif only_update:
785 |         # Only get previously processed files (hash files live under sanitized directory names)
786 |         safetensors_files = []
787 |         all_files = list(directory_path.glob('**/*.safetensors'))
788 |         for file_path in all_files:
789 |             hash_file = Path(base_output_path) / sanitize_filename(file_path.stem) / f"{sanitize_filename(file_path.stem)}_hash.json"
790 |             if hash_file.exists():
791 |                 safetensors_files.append(file_path)
792 |         print(f"\nFound {len(safetensors_files)} previously processed files")
793 |     else:
794 |         safetensors_files = list(directory_path.glob('**/*.safetensors'))
795 |         if not safetensors_files:
796 |             print(f"No .safetensors files found in {directory_path}")
797 |             return False
798 |         print(f"\nFound {len(safetensors_files)} .safetensors files")
799 | 
800 |     if html_only:
801 |         print("HTML only mode: Skipping data fetching")
802 | 
803 |     files_processed = 0
804 |     for i, file_path in enumerate(safetensors_files, 1):
805 |         print(f"\n[{i}/{len(safetensors_files)}] Processing: 
{file_path.relative_to(directory_path)}")
806 |         success = process_single_file(file_path, base_output_path,
807 |                                       download_all_images, skip_images, html_only, only_update)
808 | 
809 |         if success:
810 |             files_processed += 1
811 |             if not html_only:
812 |                 if not only_update:
813 |                     files_manager.add_processed_file(file_path)
814 |                 else:
815 |                     files_manager.update_timestamp()
816 | 
817 |         # Add timeout between files (except for the last file) if not in HTML only mode
818 |         if not html_only and not no_timeout and i < len(safetensors_files):
819 |             timeout = random.uniform(3, 6)
820 |             print(f"\nWaiting {timeout:.1f} seconds before processing next file (rate limiting protection)...")
821 |             print("(You can use --notimeout to disable this waiting time)")
822 |             time.sleep(timeout)
823 | 
824 |     # Save the updated processed files list if not in HTML only mode
825 |     if not (html_only or only_update):
826 |         files_manager.save_processed_files()
827 | 
828 |     return True
829 | 
--------------------------------------------------------------------------------
/civitai_manager/src/migrations/v1_5_0/README.md:
--------------------------------------------------------------------------------
1 | # Migrations v1.5.0
2 | 
3 | This directory contains migration scripts for handling breaking changes in version 1.5.0.
4 | 
5 | ## migrate_filenames.py
6 | 
7 | This script handles the migration of filenames to use the new sanitization rules. It renames both directories and files to match the new format while preserving the original model structure.
8 | 
9 | ### Usage
10 | 
11 | From the project root directory (the one containing `pyproject.toml`):
12 | 
13 | ```bash
14 | # First run in dry-run mode to see what changes would be made
15 | python -m civitai_manager.src.migrations.v1_5_0.migrate_filenames "path/to/models/directory" "path/to/output/directory"
16 | 
17 | # Then run with --execute to actually perform the changes
18 | python -m civitai_manager.src.migrations.v1_5_0.migrate_filenames "path/to/models/directory" "path/to/output/directory" --execute
19 | 
20 | # Finally, run the main script with the --onlyhtml flag to regenerate all the paths and HTML files
21 | python -m civitai_manager.main --all "path/to/models/directory" --output "path/to/output/directory" --onlyhtml
22 | ```
23 | 
24 | Arguments:
25 | - `input_dir`: Directory containing the original model files (safetensors)
26 | - `output_dir`: Directory containing the processed files (where HTML and JSON files are)
27 | - `--execute`: Optional flag to actually perform the changes. Without this flag, runs in dry-run mode
28 | 
29 | Note: Make sure you run this from the project root directory (the one containing `pyproject.toml`), not from inside the package directory.
30 | 
31 | ### What it does
32 | 
33 | For each model in the input directory, the script:
34 | 
35 | 1. Gets the sanitized version of the model name
36 | 2. Finds the corresponding directory in the output folder
37 | 3. Renames the following files to use the sanitized name:
38 |     - model_name.html
39 |     - model_name_civitai_model.json
40 |     - model_name_civitai_model_version.json
41 |     - model_name_hash.json
42 |     - model_name_metadata.json
43 |     - model_name_preview_*.jpeg/jpg/png/mp4
44 |     - model_name_preview_*.json
45 |     - model_name_preview_*.jpeg/jpg/png/mp4.json
46 | 
47 | 4. 
57 | ### Examples of changes
58 |
59 | Original files:
60 | ```
61 | "Model Name SDXL.html"
62 | "/model-name-%20F/model-name-%20F_preview_1.jpeg"
63 | "model-name- F_preview_1.jpeg.json"
64 | ```
65 |
66 | After migration:
67 | ```
68 | "Model_Name_SDXL.html"
69 | "/model-name-_F/model-name-_F_preview_1.jpeg"
70 | "model-name-_F_preview_1.jpeg.json"
71 | ```
72 |
73 | The script handles both JSON file patterns:
74 | 1. `model_name_preview_N.json` - Standard pattern
75 | 2. `model_name_preview_N.ext.json` - Pattern with a media extension, caused by a previous bug (whichever pattern an existing file uses is preserved when renaming)
76 |
77 | ### Safety Features
78 |
79 | 1. Dry-run mode by default
80 | - Shows what changes would be made without actually making them
81 | - Use the --execute flag to actually perform the changes
82 |
83 | 2. Validation checks
84 | - Verifies input and output directories exist
85 | - Checks if files exist before attempting to rename
86 | - Skips models that don't need changes
87 |
88 | 3. Error handling
89 | - Handles errors for individual files without stopping the entire process
90 | - Reports errors clearly for troubleshooting
91 |
-------------------------------------------------------------------------------- /civitai_manager/src/migrations/v1_5_0/__init__.py: --------------------------------------------------------------------------------
1 | """Migration scripts for handling breaking changes"""
2 |
-------------------------------------------------------------------------------- /civitai_manager/src/migrations/v1_5_0/migrate_filenames.py: --------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import argparse
3 | import json
4 | from civitai_manager.src.utils.string_utils import sanitize_filename
5 |
6 | def update_processed_files(output_dir: Path, dry_run: bool = True):
7 | """
8 | Update processed_files.json with sanitized filenames
9 |
10 | Args:
11 | output_dir (Path): Directory containing processed_files.json
12 | dry_run (bool): If True, only print what would be done without making changes
13 | """
14 | processed_files_path = output_dir / "processed_files.json"
15 | if not processed_files_path.exists():
16 | print("\nNo processed_files.json found - skipping")
17 | return
18 |
19 | print("\nUpdating processed_files.json...")
20 |
21 | try:
22 | with open(processed_files_path, 'r', encoding='utf-8') as f:
23 | data = json.load(f)
24 |
25 | updated_files = []
26 | for file_path in data.get('files', []):
27 | # Split path into directory and filename
28 | path = Path(file_path)
29 | dir_path = path.parent
30 |
31 | # Sanitize just the filename portion
32 | sanitized_name = sanitize_filename(path.stem) + path.suffix
33 | new_path = str(dir_path / sanitized_name)
34 |
35 | if new_path != file_path:
36 | if dry_run:
37 | print(f"Would update path:\nFrom: {file_path}\nTo: {new_path}\n")
38 | else:
39 | print(f"Updating path:\nFrom: {file_path}\nTo: {new_path}\n")
40 |
41 | updated_files.append(new_path)
42 |
43 | if not dry_run:
44 | data['files'] = updated_files
45 | with open(processed_files_path, 'w', encoding='utf-8') as f:
46 | json.dump(data, f, indent=4)
47 | print("Updated processed_files.json")
48 |
49 | except Exception as e:
| print(f"Error updating processed_files.json: {e}") 51 | 52 | def migrate_model_files(input_dir: Path, output_dir: Path, dry_run: bool = True): 53 | """ 54 | Migrate model files to use sanitized filenames 55 | 56 | Args: 57 | input_dir (Path): Directory containing the model files 58 | output_dir (Path): Directory containing the output files 59 | dry_run (bool): If True, only print what would be done without making changes 60 | """ 61 | print(f"\nScanning input directory: {input_dir}") 62 | print(f"Output directory: {output_dir}") 63 | if dry_run: 64 | print("\nDRY RUN MODE - No files will be modified") 65 | 66 | # Get all safetensors files 67 | safetensors_files = list(input_dir.glob('**/*.safetensors')) 68 | 69 | for safetensors_file in safetensors_files: 70 | base_name = safetensors_file.stem 71 | sanitized_name = sanitize_filename(base_name) 72 | 73 | # Check if output directory exists 74 | model_dir = output_dir / base_name 75 | if not model_dir.exists(): 76 | print(f"\nSkipping {base_name} - No output directory found") 77 | continue 78 | 79 | print(f"\nProcessing: {base_name}") 80 | print(f"Sanitized name: {sanitized_name}") 81 | 82 | if base_name == sanitized_name: 83 | print("No changes needed - name already sanitized") 84 | continue 85 | 86 | # Get all files in the model directory 87 | files_to_rename = [] 88 | 89 | # Core files 90 | core_files = [ 91 | f"{base_name}.html", 92 | f"{base_name}_civitai_model.json", 93 | f"{base_name}_civitai_model_version.json", 94 | f"{base_name}_hash.json", 95 | f"{base_name}_metadata.json" 96 | ] 97 | 98 | # Find preview files by checking what exists 99 | preview_index = 0 100 | while True: 101 | preview_file = model_dir / f"{base_name}_preview_{preview_index}.jpeg" 102 | json_file = model_dir / f"{base_name}_preview_{preview_index}.json" 103 | 104 | if not preview_file.exists(): 105 | # Try other extensions 106 | found = False 107 | for ext in ['.jpg', '.png', '.mp4']: 108 | alt_file = model_dir / f"{base_name}_preview_{preview_index}{ext}" 109 | if alt_file.exists(): 110 | preview_file = alt_file 111 | found = True 112 | break 113 | if not found: 114 | break 115 | 116 | files_to_rename.append(( 117 | preview_file.name, 118 | f"{sanitized_name}_preview_{preview_index}{preview_file.suffix}" 119 | )) 120 | 121 | # Check for both JSON file patterns 122 | json_file_patterns = [ 123 | f"{base_name}_preview_{preview_index}.json", # Standard pattern 124 | f"{base_name}_preview_{preview_index}{preview_file.suffix}.json" # With extension pattern (a previous bug added image extension to the json filename) 125 | ] 126 | 127 | for json_pattern in json_file_patterns: 128 | json_file = model_dir / json_pattern 129 | if json_file.exists(): 130 | new_json_name = (f"{sanitized_name}_preview_{preview_index}{preview_file.suffix}.json" 131 | if '.json' in json_pattern 132 | else f"{sanitized_name}_preview_{preview_index}.json") 133 | files_to_rename.append((json_file.name, new_json_name)) 134 | 135 | preview_index += 1 136 | 137 | # Add core files that exist 138 | for file in core_files: 139 | file_path = model_dir / file 140 | if file_path.exists(): 141 | new_name = file.replace(base_name, sanitized_name) 142 | files_to_rename.append((file, new_name)) 143 | 144 | # Rename directory first 145 | new_dir = output_dir / sanitized_name 146 | if not dry_run: 147 | try: 148 | model_dir.rename(new_dir) 149 | print(f"Renamed directory: {model_dir.name} -> {new_dir.name}") 150 | except Exception as e: 151 | print(f"Error renaming directory: {e}") 152 | continue 153 | else: 
154 | print(f"Would rename directory: {model_dir.name} -> {new_dir.name}") 155 | 156 | # Then rename all files 157 | for old_name, new_name in files_to_rename: 158 | old_path = model_dir if dry_run else new_dir 159 | old_path = old_path / old_name 160 | new_path = new_dir / new_name 161 | 162 | if not dry_run: 163 | try: 164 | old_path.rename(new_path) 165 | print(f"Renamed file: {old_name} -> {new_name}") 166 | except Exception as e: 167 | print(f"Error renaming {old_name}: {e}") 168 | else: 169 | print(f"Would rename file: {old_name} -> {new_name}") 170 | 171 | def main(): 172 | parser = argparse.ArgumentParser(description='Migrate model files to use sanitized filenames') 173 | parser.add_argument('input_dir', help='Directory containing the model files') 174 | parser.add_argument('output_dir', help='Directory containing the output files') 175 | parser.add_argument('--execute', action='store_true', help='Execute the migration (without this flag, runs in dry-run mode)') 176 | 177 | args = parser.parse_args() 178 | 179 | input_dir = Path(args.input_dir) 180 | output_dir = Path(args.output_dir) 181 | 182 | if not input_dir.exists(): 183 | print(f"Error: Input directory {input_dir} does not exist") 184 | return 185 | 186 | if not output_dir.exists(): 187 | print(f"Error: Output directory {output_dir} does not exist") 188 | return 189 | 190 | migrate_model_files(input_dir, output_dir, dry_run=not args.execute) 191 | update_processed_files(output_dir, dry_run=not args.execute) 192 | 193 | if __name__ == '__main__': 194 | main() 195 | -------------------------------------------------------------------------------- /civitai_manager/src/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jeremysltn/civitai-data-manager/5660dcb63d4caaf9bd897728cb31265ad9bb114c/civitai_manager/src/utils/__init__.py -------------------------------------------------------------------------------- /civitai_manager/src/utils/config.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | from typing import Dict, Optional, Union 4 | 5 | class ConfigValidationError(Exception): 6 | pass 7 | 8 | def validate_config(config: Dict) -> Dict: 9 | """Validates the configuration and returns a normalized config dict.""" 10 | # Required: either single or all path 11 | if not ('single' in config) ^ ('all' in config): # XOR - exactly one must be present 12 | raise ConfigValidationError("Config must specify either 'single' or 'all' path, but not both") 13 | 14 | # Convert paths to strings if they're not already 15 | if 'single' in config: 16 | config['single'] = str(config['single']) 17 | if 'all' in config: 18 | config['all'] = str(config['all']) 19 | if 'output' in config: 20 | config['output'] = str(config['output']) 21 | 22 | # Boolean flags 23 | bool_flags = [ 24 | 'notimeout', 'images', 'generateimagejson', 'noimages', 'onlynew', 25 | 'skipmissing', 'onlyhtml', 'onlyupdate', 'clean' 26 | ] 27 | for flag in bool_flags: 28 | if flag in config and not isinstance(config[flag], bool): 29 | raise ConfigValidationError(f"'{flag}' must be a boolean value") 30 | 31 | # Validate conflicting options - same checks as CLI arguments 32 | if config.get('images', False) and config.get('noimages', False): 33 | raise ConfigValidationError("Cannot use both 'images' and 'noimages' at the same time") 34 | 35 | if config.get('onlynew', False) and config.get('onlyhtml', False): 36 | raise 
ConfigValidationError("Cannot use both 'onlynew' and 'onlyhtml' at the same time") 37 | 38 | if config.get('onlyupdate', False) and config.get('onlynew', False): 39 | raise ConfigValidationError("Cannot use both 'onlyupdate' and 'onlynew' at the same time") 40 | 41 | if config.get('onlyupdate', False) and config.get('onlyhtml', False): 42 | raise ConfigValidationError("Cannot use both 'onlyupdate' and 'onlyhtml' at the same time") 43 | 44 | if config.get('clean', False): 45 | if 'single' in config: 46 | raise ConfigValidationError("'clean' option can only be used with 'all'") 47 | if any(config.get(opt, False) for opt in ['onlyhtml', 'onlyupdate', 'onlynew']): 48 | raise ConfigValidationError("'clean' cannot be used with 'onlyhtml', 'onlyupdate', or 'onlynew'") 49 | 50 | return config 51 | 52 | def load_config(config_path: Optional[Union[str, Path]] = None) -> Optional[Dict]: 53 | """ 54 | Load and validate configuration from a JSON file. 55 | If no path is provided, looks for 'config.json' in the script directory. 56 | Returns None if no config file is found. 57 | """ 58 | if config_path is None: 59 | # Get the absolute path to the project root directory 60 | project_root = Path.cwd() 61 | config_path = project_root / 'config.json' 62 | import sys 63 | sys.stderr.write(f"Looking for config at: {config_path.absolute()}\n") 64 | sys.stderr.flush() 65 | else: 66 | config_path = Path(config_path) 67 | 68 | if not config_path.exists(): 69 | import sys 70 | sys.stderr.write(f"Config file not found at: {config_path}\n") 71 | sys.stderr.flush() 72 | return None 73 | 74 | try: 75 | with open(config_path, 'r') as f: 76 | config = json.load(f) 77 | return validate_config(config) 78 | except json.JSONDecodeError as e: 79 | raise ConfigValidationError(f"Invalid JSON in config file: {str(e)}") 80 | except Exception as e: 81 | raise ConfigValidationError(f"Error loading config: {str(e)}") 82 | -------------------------------------------------------------------------------- /civitai_manager/src/utils/file_tracker.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import json 3 | from datetime import datetime 4 | 5 | class ProcessedFilesManager: 6 | def __init__(self, output_dir): 7 | self.output_dir = Path(output_dir) 8 | self.processed_file = self.output_dir / 'processed_files.json' 9 | self.processed_files = self._load_processed_files() 10 | 11 | def _load_processed_files(self): 12 | """Load the list of processed files from JSON""" 13 | if self.processed_file.exists(): 14 | try: 15 | with open(self.processed_file, 'r') as f: 16 | return json.load(f) 17 | except: 18 | return {'files': [], 'last_update': None} 19 | return {'files': [], 'last_update': None} 20 | 21 | def save_processed_files(self): 22 | """Save the current list of processed files""" 23 | with open(self.processed_file, 'w') as f: 24 | self.processed_files['last_update'] = datetime.now().isoformat() 25 | json.dump(self.processed_files, f, indent=4) 26 | 27 | def is_file_processed(self, file_path): 28 | """Check if a file has been processed before""" 29 | return str(file_path) in self.processed_files['files'] 30 | 31 | def add_processed_file(self, file_path): 32 | """Add a file to the processed list""" 33 | if str(file_path) not in self.processed_files['files']: 34 | self.processed_files['files'].append(str(file_path)) 35 | 36 | def get_new_files(self, directory_path): 37 | """Get list of new safetensors files that haven't been processed""" 38 | all_files = 
list(Path(directory_path).glob('**/*.safetensors')) 39 | return [f for f in all_files if not self.is_file_processed(f)] 40 | 41 | def update_timestamp(self): 42 | """Update the last_update timestamp without modifying the files list""" 43 | self.processed_files['last_update'] = datetime.now().isoformat() 44 | self.save_processed_files() -------------------------------------------------------------------------------- /civitai_manager/src/utils/html_generators/__init__.py: -------------------------------------------------------------------------------- 1 | from .model_page import generate_html_summary 2 | from .browser_page import generate_global_summary -------------------------------------------------------------------------------- /civitai_manager/src/utils/html_generators/browser_page.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import html 3 | from ..string_utils import sanitize_filename 4 | import json 5 | from datetime import datetime 6 | from civitai_manager import __version__ 7 | 8 | def generate_global_summary(output_dir): 9 | """ 10 | Generate an HTML summary of all models in the output directory 11 | 12 | Args: 13 | output_dir (Path): Directory containing the JSON files 14 | """ 15 | try: 16 | # Find all model.json files 17 | model_files = list(Path(output_dir).glob('*/*/civitai_model.json')) + \ 18 | list(Path(output_dir).glob('*/*_civitai_model.json')) 19 | 20 | # Read missing models file 21 | missing_models = set() 22 | missing_file = Path(output_dir) / 'missing_from_civitai.txt' 23 | if missing_file.exists(): 24 | with open(missing_file, 'r', encoding='utf-8') as f: 25 | for line in f: 26 | if line.strip() and not line.startswith('#'): 27 | # Extract filename from the line 28 | filename = line.strip().split(' | ')[-1] 29 | missing_models.add(filename) 30 | 31 | # Dictionary to store models by type 32 | models_by_type = {} 33 | 34 | for model_file in model_files: 35 | try: 36 | # Get paths for model, version, and hash files 37 | base_name = model_file.parent.name 38 | version_file = model_file.parent / f"{base_name}_civitai_model_version.json" 39 | hash_file = model_file.parent / f"{base_name}_hash.json" 40 | html_file = model_file.parent / f"{base_name}.html" 41 | 42 | # Read all files 43 | with open(model_file, 'r', encoding='utf-8') as f: 44 | model_data = json.load(f) 45 | 46 | version_data = {} 47 | if version_file.exists(): 48 | with open(version_file, 'r', encoding='utf-8') as f: 49 | version_data = json.load(f) 50 | 51 | hash_data = {} 52 | if hash_file.exists(): 53 | with open(hash_file, 'r', encoding='utf-8') as f: 54 | hash_data = json.load(f) 55 | 56 | model_type = model_data.get('type', 'Unknown') 57 | 58 | if model_type not in models_by_type: 59 | models_by_type[model_type] = [] 60 | 61 | models_by_type[model_type].append({ 62 | # Add model data 63 | 'name': model_data.get('name') or 'Unknown', 64 | 'creator': model_data.get('creator', {}).get('username', 'Unknown'), 65 | 'base_name': base_name, 66 | 'html_file': f"{base_name}.html", 67 | 'tags': model_data.get('tags', []), 68 | # Add version data 69 | 'version_name': version_data.get('name', ''), 70 | 'downloads': version_data.get('stats', {}).get('downloadCount', 0), 71 | 'has_html': html_file.exists(), 72 | 'added_date': hash_data.get('timestamp', ''), 73 | 'file_size': version_data.get('files', [{}])[0].get('sizeKB', None) 74 | }) 75 | except: 76 | continue 77 | 78 | # Process missing models 79 | if missing_models: 80 | if 'Missing from 
Civitai' not in models_by_type:
81 | models_by_type['Missing from Civitai'] = []
82 |
83 | for filename in missing_models:
84 | base_name = Path(filename).stem
85 | html_file = Path(output_dir) / base_name / f"{base_name}.html"
86 | html_exists = html_file.exists()
87 |
88 | models_by_type['Missing from Civitai'].append({
89 | 'name': base_name,
90 | 'creator': 'Unknown',
91 | 'downloads': 0,
92 | 'base_name': base_name,
93 | 'html_file': f"{base_name}.html" if html_exists else '',
94 | 'tags': [],
95 | 'baseModel': 'Unknown',
96 | 'trainedWords': [],
97 | 'createdAt': 'Unknown',
98 | 'updatedAt': 'Unknown',
99 | 'missing': True,
100 | 'has_html': html_exists
101 | })
102 |
103 | # Sort each type's models
104 | for model_type in models_by_type:
105 | if model_type != 'Missing from Civitai':
106 | models_by_type[model_type].sort(key=lambda x: x['downloads'], reverse=True)
107 | else:
108 | models_by_type[model_type].sort(key=lambda x: (x['name'] or '').lower())
109 |
110 | # Create sections HTML for each type
111 | type_sections = ''
112 | total_models = sum(len(models) for models in models_by_type.values())
113 |
114 | for model_type, models in sorted(models_by_type.items()):
115 | # Create model cards first
116 | model_cards = []
117 | for model in models:
118 | sanitized_base = sanitize_filename(model["base_name"])
119 | html_name = f"{sanitized_base}.html"
120 | model_name = (
121 | f'<a href="{html_name}">{html.escape(model["name"])}</a>'
122 | if model.get('has_html', False) or not model.get('missing')
123 | else '<span class="missing-name">' + html.escape(model['name']) + '</span>'
124 | )
125 |
126 | dates_section = ''
127 | if model.get('createdAt', "Unknown") != "Unknown":
128 | created_date = model['createdAt'][:10]
129 | updated_date = f" | Updated: {model.get('updatedAt', 'Unknown')[:10]}" if model.get('updatedAt', 'Unknown') != "Unknown" else ''
130 | dates_section = f'<div class="model-dates">Created: {created_date}{updated_date}</div>'
131 |
132 | base_model_section = ''
133 | if model.get('baseModel', 'Unknown') != 'Unknown':
134 | base_model_section = f'<div class="base-model">Base Model: {model["baseModel"]}</div>'
135 |
136 | downloads_section = ''
137 | if not model.get('missing'):
138 | downloads_section = f'<div class="downloads">Downloads: {model["downloads"]:,}</div>'
139 |
140 | filesize_section = ''
141 | if model.get('file_size'):
142 | filesize_section = f'<div class="file-size">Size: {model.get("file_size", 0)/1024:.2f} MB</div>'
143 |
144 | trained_words_section = ''
145 | if model.get('trainedWords'):
146 | trained_words_section = f'<div class="trained-words">{", ".join(model["trainedWords"])}</div>'
147 |
148 | tags_html = ''.join(f'<span class="tag">{tag}</span>' for tag in model['tags'])
149 |
150 | preview_path = f"{sanitized_base}/{sanitized_base}_preview_0.jpeg"
151 |
152 |
153 | card_html = f"""
154 | <div class="model-card">
155 | <img class="model-cover" src="{preview_path}" loading="lazy" onerror="this.style.display='none'">
156 | <div class="model-info">
157 | <h3 class="model-name">
158 | {model_name}
159 | </h3>
160 | <span class="model-version">{model.get('version_name', '')}</span>
161 | <div class="model-creator">by {model['creator']}</div>
162 | {base_model_section}
163 | {downloads_section}
164 | {filesize_section}
165 | {dates_section}
166 | <div class="model-tags">
167 | {tags_html}
168 | </div>
169 | {trained_words_section}
170 | </div>
171 | </div>
172 |
173 |
174 |
175 | """
176 | model_cards.append(card_html)
177 |
178 | # Create section with all model cards
179 | type_sections += f"""
180 | <div class="type-section">
181 | <h2 class="type-header">{model_type} ({len(models)} models)</h2>
182 | <div class="model-grid">
183 | {''.join(model_cards)}
184 | </div>
185 | </div>
186 | """
187 |
188 | # Update the HTML content with type sections
189 | html_content = f"""<!DOCTYPE html>
190 | <html lang="en">
191 | <head>
192 | <meta charset="UTF-8">
193 | <meta name="viewport" content="width=device-width, initial-scale=1.0">
194 |
195 | <title>Civitai Data Manager</title>
196 | <style>
/* ... full page stylesheet ... */
345 | </style>
346 | </head>
347 | <body>
348 | <div class="container">
349 | <h1>Civitai Data Manager</h1>
350 |
351 | <div class="model-count">({total_models} models)</div>
352 | <input type="text" id="search-input" placeholder="Search models, creators, tags...">
353 |
354 | <select id="sort-select">
355 | <option value="downloads">Sort by downloads</option>
356 | <option value="name">Sort by name</option>
357 | <option value="date">Sort by date added</option>
358 | <option value="size">Sort by file size</option>
366 | </select>
367 |
368 |
369 | <div id="model-sections">
370 | {type_sections}
371 | </div>
372 |
373 |
374 | <footer>Generated by civitai-data-manager v{__version__} - {datetime.now().strftime('%Y-%m-%d %H:%M')}</footer>
378 |
379 | <script>
/* ... search, sorting (persisted to localStorage), and cover-toggle logic ... */
505 | </script>
506 | </body>
507 | </html>
508 | """
509 |
510 | # Write the summary file
511 | summary_path = Path(output_dir) / 'index.html'
512 | with open(summary_path, 'w', encoding='utf-8') as f:
513 | f.write(html_content)
514 |
515 | print(f"\nGlobal summary generated: {summary_path}")
516 | return True
517 |
518 | except Exception as e:
519 | print(f"Error generating global summary: {str(e)}")
520 | return False
521 |
-------------------------------------------------------------------------------- /civitai_manager/src/utils/html_generators/model_page.py: --------------------------------------------------------------------------------
1 | import json
2 | import html
3 | from ..string_utils import sanitize_filename
4 | from datetime import datetime
5 | from civitai_manager import __version__
6 |
7 | def generate_html_summary(output_dir, safetensors_path):
8 | """
9 | Generate an HTML summary of the model information
10 |
11 | Args:
12 | output_dir (Path): Directory containing the JSON files
13 | safetensors_path (Path): Path to the safetensors file
14 | """
15 | try:
16 | base_name = sanitize_filename(safetensors_path.stem)
17 | model_path = output_dir / f"{base_name}_civitai_model.json"
18 | version_path = output_dir / f"{base_name}_civitai_model_version.json"
19 | hash_path = output_dir / f"{base_name}_hash.json"
20 | html_path = output_dir / f"{base_name}.html"
21 |
22 | # Find all preview images
23 | preview_images = sorted(output_dir.glob(f"{base_name}_preview*.jpg")) + \
24 | sorted(output_dir.glob(f"{base_name}_preview*.jpeg")) + \
25 | sorted(output_dir.glob(f"{base_name}_preview*.png")) + \
26 | sorted(output_dir.glob(f"{base_name}_preview*.mp4"))
27 |
28 |
29 | # Check if all required files exist
30 | if not all(p.exists() for p in [model_path, version_path, hash_path]):
31 | print("Error: Missing required JSON files for HTML generation")
32 | return False
33 |
34 | # Read JSON data
35 | try:
36 | with open(model_path, 'r', encoding='utf-8') as f:
37 | model_data = json.load(f)
38 | with open(version_path, 'r', encoding='utf-8') as f:
39 | version_data = json.load(f)
40 | with open(hash_path, 'r', encoding='utf-8') as f:
41 | hash_data = json.load(f)
42 |
43 | # Get stats data
44 | model_version = next((version for version in model_data["modelVersions"] if version["id"] == version_data.get('id')), None)
45 | stats = model_version.get('stats', {}) if model_version else {}
46 | fileSizeKB = model_version.get('files', [{}])[0].get('sizeKB', None) if model_version else None
47 | fileSizeMB = fileSizeKB / 1024 if fileSizeKB else None
48 |
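# Note: model_version can legitimately be None here (e.g. the version id in the
# version JSON no longer appears in the model JSON's modelVersions), and sizeKB
# can be absent, so the lookups above fall back to {} / None instead of raising.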
49 | # Generate image gallery HTML
50 | gallery_html = ""
51 | if preview_images:
52 | gallery_html = """
53 | <div class="gallery-section">
54 | <h2>Preview Images</h2>
55 | <div class="gallery">
56 | """
57 | for preview in preview_images:
58 | # Both images and videos appear in the gallery
59 | if preview.suffix == '.mp4':
60 | gallery_html += f'<video src="{preview.name}" controls loop muted></video>'
61 | else:
62 | gallery_html += f'<img src="{preview.name}" loading="lazy">'
86 |
87 | gallery_html += "</div></div>"
88 |
89 | # CSS helpers
90 | color_map = {
91 | 0: '#27ae60',
92 | 1: '#e93826'
93 | }
94 | background_color = color_map.get(model_data.get('nsfw', 'N/A'), '#95a5a6')
95 |
96 | # HTML template
97 | html_content = f"""<!DOCTYPE html>
98 | <html lang="en">
99 | <head>
100 | <meta charset="UTF-8">
101 | <meta name="viewport" content="width=device-width, initial-scale=1.0">
102 |
103 | <title>{model_data.get('name', 'Model Information')}</title>
104 | <style>
/* ... full page stylesheet ... */
384 | </style>
385 | </head>
386 | <body>
387 | <div class="container">
388 | <header>
389 | <h1>{model_data.get('name', 'Model Name')}</h1>
390 | <div class="version-name">{version_data.get('name', 'Version Name')}</div>
391 | <div class="creator">by
392 | {'<a href="https://civitai.com/user/' + model_data.get('creator', {}).get('username', '') + '" target="_blank">' + model_data.get('creator', {}).get('username', 'Unknown Creator') + '</a>' if model_data.get('creator', {}).get('username', 'Unknown Creator') != 'Unknown Creator' else model_data.get('creator', {}).get('username', 'Unknown Creator')}
393 | </div>
394 | </header>
395 |
396 | {gallery_html}
397 |
398 |
399 | <div class="section">
400 | <h2>Model Information</h2>
401 | <div class="label">Type:</div>
402 | <div class="value">{model_data.get('type', 'N/A')}</div>
403 |
404 | <div class="label">Model ID:</div>
405 | <div class="value">{model_data.get('id', 'N/A')}</div>
406 |
407 | <div class="label">Version ID:</div>
408 | <div class="value">{version_data.get('id', 'N/A')}</div>
409 |
410 | <div class="label">NSFW:</div>
411 | <div class="value">
412 | <span class="nsfw-badge" style="background-color: {background_color}">{model_data.get('nsfw', 'N/A')}</span>
417 | </div>
418 |
419 | <div class="label">Description:</div>
420 | <div class="value">{model_data.get('description', 'No description available')}</div>
421 |
422 | <div class="label">Tags:</div>
423 | <div class="value tags">
424 | {' '.join(f'<span class="tag">{tag}</span>' for tag in model_data.get('tags', []))}
425 | </div>
426 | </div>
427 |
428 | <div class="section">
429 | <h2>Model Statistics</h2>
430 | <div class="stats-grid">
431 | <div class="stat">
432 | <div class="stat-label">Downloads</div>
433 | <div class="stat-value">{stats.get('downloadCount', 0):,}</div>
434 | </div>
435 | <div class="stat">
436 | <div class="stat-label">Favorites</div>
437 | <div class="stat-value">{stats.get('favoriteCount', 0):,}</div>
438 | </div>
439 | <div class="stat">
440 | <div class="stat-label">Comments</div>
441 | <div class="stat-value">{stats.get('commentCount', 0):,}</div>
442 | </div>
443 | <div class="stat">
444 | <div class="stat-label">Tips Received</div>
445 | <div class="stat-value">{stats.get('tippedAmountCount', 0):,}</div>
446 | </div>
447 | </div>
448 |
449 | <div class="rating">
450 | <div class="rating-title">Rating Distribution</div>
451 | <div class="rating-row">
452 | <span class="thumbs-up">👍 {stats.get('thumbsUpCount', 0):,}</span>
453 |
456 | <span class="thumbs-down">👎 {stats.get('thumbsDownCount', 0):,}</span>
457 | </div>
458 | </div>
459 | </div>
460 |
461 |
462 | <div class="section">
463 | <h2>Version Information</h2>
464 | <div class="label">Created At:</div>
465 | <div class="value">{version_data.get('createdAt', 'N/A')}</div>
466 |
467 | <div class="label">Updated At:</div>
468 | <div class="value">{version_data.get('updatedAt', 'N/A')}</div>
469 |
470 | <div class="label">Base Model:</div>
471 | <div class="value">{version_data.get('baseModel', 'N/A')}</div>
472 | {'<div class="label">Trained Words:</div><div class="value">' + ' '.join(f'<span class="trigger-word">{word}</span>' for word in version_data.get('trainedWords', [])) + '</div>' if version_data.get('trainedWords') else ''}
473 | </div>
474 |
475 | <div class="section">
476 | <h2>File Information</h2>
477 | <div class="label">Filename:</div>
478 | <div class="value">{base_name}</div>
479 |
480 | <div class="label">SHA256 Hash:</div>
481 | <div class="value">{hash_data.get('hash_value', 'N/A')}</div>
482 |
483 | <div class="label">File size:</div>
484 | <div class="value">{f'{fileSizeMB:.2f} MB' if fileSizeMB is not None else 'N/A'}</div>
485 | </div>
486 |
487 | <div class="section">
488 | <h2>Links</h2>
489 | <div class="link-row">
490 | <a href="https://civitai.com/models/{model_data.get('id', '')}" target="_blank">Civitai Model Page</a>
491 | </div>
492 | <div class="link-row">
493 | <a href="{version_data.get('downloadUrl', '')}" target="_blank">Civitai Download URL</a>
494 | </div>
495 | </div>
496 | </div>
497 |
498 | <footer>Generated by civitai-data-manager v{__version__} - {datetime.now().strftime('%Y-%m-%d %H:%M')}</footer>
502 |
503 |
504 |
505 | <div id="image-modal" class="modal">
506 | <span class="modal-close">&times;</span>
507 | <div class="modal-content"></div>
519 | </div>
520 |
521 | <script>
/* ... image modal, metadata display, and keyboard navigation logic ... */
747 | </script>
748 | </body>
749 | </html>
750 | """
751 | # Write HTML file
752 | with open(html_path, 'w', encoding='utf-8') as f:
753 | f.write(html_content)
754 |
755 | print(f"HTML summary generated: {html_path}")
756 | return True
757 |
758 | except json.JSONDecodeError as e:
759 | print(f"Error parsing JSON data: {str(e)}")
760 | return False
761 |
762 | except Exception as e:
763 | print(f"Error generating HTML summary: {str(e)}")
764 | return False
765 |
-------------------------------------------------------------------------------- /civitai_manager/src/utils/string_utils.py: --------------------------------------------------------------------------------
1 | import re
2 |
3 | def sanitize_filename(filename):
4 | """
5 | Create a clean, filesystem-friendly filename
6 |
7 | Args:
8 | filename (str): Original filename
9 |
10 | Returns:
11 | str: Sanitized filename
12 | """
13 | # Remove or replace problematic characters
14 | # 1. Replace brackets, quotes, and special characters with underscores
15 | sanitized = re.sub(r'[\[\]\(\)\{\}\'"#]', '_', filename)
16 | # 2. Replace Windows-unsafe characters
17 | sanitized = re.sub(r'[<>:"/\\|?*]', '_', sanitized)
18 | # 3. Replace other problematic characters (spaces, dots, etc.)
19 | sanitized = re.sub(r'[^\w\-]', '_', sanitized)
20 |
21 | # Remove any leading/trailing underscores or dots
22 | sanitized = sanitized.strip('._')
23 |
24 | # Replace multiple underscores with a single one
25 | sanitized = re.sub(r'_+', '_', sanitized)
26 |
27 | return sanitized
28 |
-------------------------------------------------------------------------------- /config.json: --------------------------------------------------------------------------------
1 | {
2 | "all": "path/to/models/directory",
3 | "output": "path/to/output/directory",
4 | "images": true
5 | }
6 |
-------------------------------------------------------------------------------- /config_examples/config.first-use.json: --------------------------------------------------------------------------------
1 | {
2 | "all": "path/to/models/directory",
3 | "output": "path/to/output/directory",
4 | "images": true
5 | }
6 |
-------------------------------------------------------------------------------- /config_examples/config.update.json: --------------------------------------------------------------------------------
1 | {
2 | "all": "path/to/models/directory",
3 | "output": "path/to/output/directory",
4 | "images": true,
5 | "onlynew": true,
6 | "skipmissing": true
7 | }
8 |
-------------------------------------------------------------------------------- /poetry.lock: --------------------------------------------------------------------------------
1 | # This file is automatically @generated by Poetry 2.1.0 and should not be changed by hand.
2 |
3 | [[package]]
4 | name = "certifi"
5 | version = "2025.1.31"
6 | description = "Python package for providing Mozilla's CA Bundle."
7 | optional = false
8 | python-versions = ">=3.6"
9 | groups = ["main"]
10 | files = [
11 | {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
12 | {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
13 | ]
14 |
15 | [[package]]
16 | name = "charset-normalizer"
17 | version = "3.4.1"
18 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
19 | optional = false 20 | python-versions = ">=3.7" 21 | groups = ["main"] 22 | files = [ 23 | {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, 24 | {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, 25 | {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, 26 | {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, 27 | {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, 28 | {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, 29 | {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, 30 | {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, 31 | {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, 32 | {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, 33 | {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, 34 | {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, 35 | {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, 36 | {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, 37 | {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, 38 | {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, 39 | {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, 40 | {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, 41 | {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, 42 | {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, 43 | {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, 44 | {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, 45 | {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, 46 | {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, 47 | {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, 48 | {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, 49 | {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, 50 | {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, 51 | {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, 52 | {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, 53 | {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, 54 | {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, 55 | {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, 56 | {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, 57 | {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, 58 | {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, 59 | {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, 60 | {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, 61 | {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, 62 | {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, 63 | {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, 64 | {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, 65 | {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, 66 | {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, 67 | {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, 68 | {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, 69 | {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, 70 | {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, 71 | {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, 72 | {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, 73 | {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, 74 | {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, 75 | {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, 76 | {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, 77 | {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, 78 | {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, 79 | {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, 80 | {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, 81 | {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, 82 | {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, 83 | {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, 84 | {file = 
"charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, 85 | {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, 86 | {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, 87 | {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, 88 | {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, 89 | {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, 90 | {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, 91 | {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, 92 | {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, 93 | {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, 94 | {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, 95 | {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, 96 | {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, 97 | {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, 98 | {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, 99 | {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, 100 | {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, 101 | {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, 102 | {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, 103 | {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, 104 | {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, 105 | {file = 
"charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, 106 | {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, 107 | {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, 108 | {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, 109 | {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, 110 | {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, 111 | {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, 112 | {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, 113 | {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, 114 | {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, 115 | ] 116 | 117 | [[package]] 118 | name = "idna" 119 | version = "3.10" 120 | description = "Internationalized Domain Names in Applications (IDNA)" 121 | optional = false 122 | python-versions = ">=3.6" 123 | groups = ["main"] 124 | files = [ 125 | {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, 126 | {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, 127 | ] 128 | 129 | [package.extras] 130 | all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] 131 | 132 | [[package]] 133 | name = "requests" 134 | version = "2.32.3" 135 | description = "Python HTTP for Humans." 136 | optional = false 137 | python-versions = ">=3.8" 138 | groups = ["main"] 139 | files = [ 140 | {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, 141 | {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, 142 | ] 143 | 144 | [package.dependencies] 145 | certifi = ">=2017.4.17" 146 | charset-normalizer = ">=2,<4" 147 | idna = ">=2.5,<4" 148 | urllib3 = ">=1.21.1,<3" 149 | 150 | [package.extras] 151 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 152 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 153 | 154 | [[package]] 155 | name = "urllib3" 156 | version = "2.3.0" 157 | description = "HTTP library with thread-safe connection pooling, file post, and more." 
158 | optional = false
159 | python-versions = ">=3.9"
160 | groups = ["main"]
161 | files = [
162 | {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},
163 | {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"},
164 | ]
165 |
166 | [package.extras]
167 | brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
168 | h2 = ["h2 (>=4,<5)"]
169 | socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
170 | zstd = ["zstandard (>=0.18.0)"]
171 |
172 | [metadata]
173 | lock-version = "2.1"
174 | python-versions = ">=3.10"
175 | content-hash = "66d0fe0a9a5af1ad1a1a567daf53025ce16bb17fb276c09cd936cd1a68ea2853"
176 |
-------------------------------------------------------------------------------- /pyproject.toml: --------------------------------------------------------------------------------
1 | [project]
2 | name = "civitai-data-manager"
3 | version = "1.6.3"
4 | description = "A lightweight tool to locally manage, back up, and organize SafeTensors model metadata from Civitai."
5 | authors = [
6 | {name = "Jeremy",email = "185207395+jeremysltn@users.noreply.github.com"}
7 | ]
8 | license = {text = "MIT"}
9 | readme = "README.md"
10 | requires-python = ">=3.10"
11 | dependencies = [
12 | "requests (>=2.32.3,<3.0.0)"
13 | ]
14 |
15 | [tool.poetry]
16 | package-mode = false
17 |
18 |
19 | [build-system]
20 | requires = ["poetry-core>=2.0.0,<3.0.0"]
21 | build-backend = "poetry.core.masonry.api"
22 |
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | requests>=2.32.3
--------------------------------------------------------------------------------