├── LICENSE ├── README.md ├── Release Notes 0.8.12.md ├── data ├── YOLO11n-model-yaml │ ├── coco8.yaml │ └── download_YOLO_models.txt ├── download_SAM_models.txt └── project1 │ └── images │ ├── 3D-EM-platelet-train-8bit_Z1.png │ ├── dogs.jpg │ ├── horse_on_street.jpg │ ├── marbles.jpg │ └── newton.jpg ├── requirements.txt ├── screenshots ├── SAM_Model_loaded.jpg ├── czi_dimensions_window.jpg ├── digitalsreeni-image-annotator-dark.jpg ├── digitalsreeni-image-annotator-demo.gif ├── digitalsreeni-image-annotator.jpg └── tiff_dimensions_window.jpg ├── setup.py └── src └── digitalsreeni_image_annotator ├── __init__.py ├── __pycache__ ├── __init__.cpython-310.pyc ├── annotation_statistics.cpython-310.pyc ├── annotator_window.cpython-310.pyc ├── coco_json_combiner.cpython-310.pyc ├── dataset_splitter.cpython-310.pyc ├── default_stylesheet.cpython-310.pyc ├── dicom_converter.cpython-310.pyc ├── export_formats.cpython-310.pyc ├── help_window.cpython-310.pyc ├── image_augmenter.cpython-310.pyc ├── image_label.cpython-310.pyc ├── image_patcher.cpython-310.pyc ├── import_formats.cpython-310.pyc ├── main.cpython-310.pyc ├── project_details.cpython-310.pyc ├── project_search.cpython-310.pyc ├── sam_utils.cpython-310.pyc ├── slice_registration.cpython-310.pyc ├── snake_game.cpython-310.pyc ├── soft_dark_stylesheet.cpython-310.pyc ├── stack_interpolator.cpython-310.pyc ├── stack_to_slices.cpython-310.pyc ├── utils.cpython-310.pyc └── yolo_trainer.cpython-310.pyc ├── annotation_statistics.py ├── annotation_utils.py ├── annotator_window.py ├── coco_json_combiner.py ├── constants.py ├── dataset_splitter.py ├── default_stylesheet.py ├── dicom_converter.py ├── export_formats.py ├── help_window.py ├── image_augmenter.py ├── image_label.py ├── image_patcher.py ├── import_formats.py ├── main.py ├── project_details.py ├── project_search.py ├── sam_utils.py ├── slice_registration.py ├── snake_game.py ├── soft_dark_stylesheet.py ├── stack_interpolator.py ├── stack_to_slices.py ├── utils.py └── yolo_trainer.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Dr. Sreenivas Bhattiprolu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- 24 | 25 | CITATION REQUEST: 26 | 27 | If you use this software in your research, please cite it as follows: 28 | 29 | Bhattiprolu, S. (2024). Image Annotator [Computer software]. 
30 | https://github.com/bnsreenu/digitalsreeni-image-annotator 31 | 32 | BibTeX: 33 | @software{image_annotator, 34 | author = {Bhattiprolu, Sreenivas}, 35 | title = {Image Annotator}, 36 | year = {2024}, 37 | url = {https://github.com/bnsreenu/digitalsreeni-image-annotator} 38 | } 39 | 40 | While not required by the license, citation is appreciated and helps support the 41 | continued development and maintenance of this software. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DigitalSreeni Image Annotator and Toolkit 2 | 3 |  4 |  5 |  6 | 7 | A powerful and user-friendly tool for annotating images with polygons and rectangles, built with PyQt5. Now with additional supporting tools for comprehensive image processing and dataset management. 8 | 9 | ## Support the Project 10 | 11 | If you find this project helpful, consider supporting it: 12 | 13 | [Donate via PayPal](https://www.paypal.com/donate/?business=FGQL3CNJGJP9C&no_recurring=0&item_name=If+you+find+this+Image+Annotator+project+helpful%2C+consider+supporting+it%3A&currency_code=USD) 14 | 15 |  16 | 17 | ## Watch the demo (of v0.8.0): 18 | [Watch on YouTube](https://youtu.be/aArn1f1YIQk) 19 | 20 | @DigitalSreeni 21 | Dr. Sreenivas Bhattiprolu 22 | 23 | ## Features 24 | 25 | - Semi-automated annotations with SAM-2 assistance (Segment Anything Model) — Because who doesn't love a helpful AI sidekick? 26 | - Manual annotations with polygons and rectangles — For when you want to show SAM-2 who's really in charge. 27 | - Paint brush and Eraser tools with adjustable pen sizes (use - and = on your keyboard) 28 | - Merge annotations - For when SAM-2's guesswork needs a little human touch. 29 | - Save and load projects for continued work. 30 | - Save As... and Autosave functionality. 31 | - A secret game, for when you are bored. 32 | - Import existing COCO JSON annotations with images. 33 | - Export annotations to various formats (COCO JSON, YOLO v8/v11, Labeled images, Semantic labels, Pascal VOC). 34 | - Handle multi-dimensional images (TIFF stacks and CZI files). 35 | - Zoom and pan for detailed annotations. 36 | - Support for multiple classes with customizable colors. 37 | - User-friendly interface with intuitive controls. 38 | - Change the application font size on the fly — Make your annotations as big or small as your caffeine level requires. 39 | - Dark mode for those late-night annotation marathons — Who needs sleep when you have dark mode? 40 | - Pick an appropriate pre-trained SAM2 model for flexible and improved semi-automated annotations. 41 | - Change the class of an annotation to a different class. 42 | - Turn visibility of a class ON and OFF. 43 | - YOLO (beta) training using current annotations, and loading the trained model to segment images. 44 | - Area measurements for annotations displayed next to the Annotation name. 45 | - Sort annotations by name/number or area. 46 | - Additional supporting tools: 47 | - Annotation statistics for current annotations 48 | - COCO JSON combiner 49 | - Dataset splitter 50 | - Stack to slices converter 51 | - Image patcher 52 | - Image augmenter 53 | - Project Details: View and edit project metadata, including creation date, last modified date, image information, and custom notes. 54 | - Advanced Project Search: Search through multiple projects using complex queries with logical operators (AND, OR) and parentheses.
55 | - Slice Registration 56 | - Align image slices in a stack with multiple registration methods 57 | - Support for various reference frames and transformation types 58 | - Stack Interpolation 59 | - Adjust Z-spacing in image stacks 60 | - Multiple interpolation methods with memory-efficient processing 61 | - DICOM Converter 62 | - Convert DICOM files to TIFF format (single stack or individual slices) 63 | - Preserve metadata and physical dimensions 64 | - Export metadata to JSON for reference 65 | 66 | 67 | ## Operating System Requirements 68 | This application is built using PyQt5 and has been tested on macOS and Windows. It may experience compatibility issues on Linux systems, particularly related to the XCB plugin for PyQt5; extensive testing on Linux has not yet been done. 69 | 70 | ## Installation 71 | 72 | ### Watch the installation walkthrough video: 73 | [Watch on YouTube](https://youtu.be/VI6V95eUUpY) 74 | 75 | You can install the DigitalSreeni Image Annotator directly from PyPI: 76 | 77 | ```bash 78 | pip install digitalsreeni-image-annotator 79 | ``` 80 | 81 | The application uses the Ultralytics library, so there's no need to separately install SAM2 or PyTorch, or to download SAM2 models manually. 82 | 83 | ## Usage 84 | 85 | 1. Run the DigitalSreeni Image Annotator application: 86 | ```bash 87 | digitalsreeni-image-annotator 88 | ``` 89 | or 90 | ```bash 91 | sreeni 92 | ``` 93 | or 94 | ```bash 95 | python -m digitalsreeni_image_annotator.main 96 | ``` 97 | 98 | 2. Using the application: 99 | - Click "New Project" or use Ctrl+N to start a new project. 100 | - Use "Add New Images" to import images, including TIFF stacks and CZI files. 101 | - Add classes using the "Add Classes" button. 102 | - Select a class and use the Polygon, Rectangle, or Paint Brush tool to create manual annotations. 103 | - To use SAM2-assisted annotation: 104 | - Select a model from the "Pick a SAM Model" dropdown. It's recommended to use smaller models like SAM2 tiny or SAM2 small; SAM2 large is not recommended, as it may crash the application on systems with limited resources. 105 | - Note: When you select a model for the first time, the application needs to download it. This may take a few seconds to a minute, depending on your internet connection speed. Subsequent uses of the same model will be faster, as it will already be cached locally in your working directory. 106 | - Click the "SAM-Assisted" button to activate the tool. 107 | - Draw a rectangle around objects of interest to allow SAM2 to automatically detect them. 108 | - Note that SAM2 produces multiple candidate regions with different scores, and only the top-scoring region is displayed. If the desired result isn't achieved on the first try, draw again. 109 | - For low-quality images where SAM2 may not auto-detect objects, manual tools may be necessary. 110 | - When SAM2 auto-detects only part of an object, use the polygon or paint brush tools to manually define the remaining region, then use the Merge tool to combine both annotations into one. 111 | - When SAM2 over-annotates an object, extending the annotation beyond the object's boundaries, use the Eraser tool to clean up the edges. 112 | - Both the paint brush and eraser tools can be adjusted for pen size using the - or = keys on your keyboard. 113 | - Edit existing annotations by double-clicking on them. 114 | - Edit existing annotations using the Eraser tool. Adjust the eraser size using the - or = keys on your keyboard.
115 | - Merge connected annotations by selecting them from the Annotations list and clicking the Merge button. 116 | - Change the class of an annotation to a different class. 117 | - Turn visibility of a class ON and OFF. 118 | - Use YOLO (beta) training with current annotations and load the trained model to segment images and convert segmentations to annotations. (Currently not implemented for slices or stacks, just single images.) 119 | - Accept or reject predictions one at a time, or all predictions of a selected class at once, to add them as annotations. 120 | - View area measurements for annotations displayed next to the Annotation name. 121 | - Sort annotations by name/number or area. 122 | - Save your project using "Save Project" or Ctrl+S. Alternatively, you can use Save As... to save the project with a different name. 123 | - Use "Open Project" or Ctrl+O to load a previously saved project. 124 | - Click "Import Annotations with Images" to load existing COCO JSON annotations along with their images. 125 | - Use "Export Annotations" to save annotations in various formats (COCO JSON, YOLO v8/v11, Labeled images, Semantic labels, Pascal VOC). 126 | - Note: YOLO export (and import) is now compatible with the YOLOv11 structure. (The project directory includes data.yaml, train, and valid directories, with train and valid both having images and labels subdirectories; see the layout sketch after this list.) 127 | - Project Details: 128 | - Access project details by selecting "Project Details" from the Project menu. 129 | - View project metadata such as creation date, last modified date, and image information. 130 | - Add or edit custom project notes. 131 | - Project details are automatically saved when you make changes to the notes. 132 | - Advanced Project Search: 133 | - Access the search functionality by selecting "Search Projects" from the Project menu. 134 | - Search through multiple projects using complex queries. 135 | - Use logical operators (AND, OR) and parentheses for advanced search criteria. 136 | - Search covers project name, class names, image names, and project notes. 137 | - Example queries: 138 | - "cells AND dog": Find projects containing both "cells" and "dog" 139 | - "cells OR bacteria": Find projects containing either "cells" or "bacteria" 140 | - "cells AND (dog OR monkey)": Find projects containing "cells" and either "dog" or "monkey" 141 | - "(project1 OR project2) AND (cells OR bacteria)": More complex nested queries 142 | - Double-click on search results to open the corresponding project. 143 | - Access additional tools under the Tools menu: 144 | - Annotation Statistics 145 | - COCO JSON Combiner 146 | - Dataset Splitter 147 | - Stack to Slices Converter 148 | - Image Patcher 149 | - Image Augmenter 150 | - Each tool opens a separate UI to guide you through the respective task. 151 | - Access the help documentation by clicking the "Help" button or pressing F1. 152 | - Explore the interface – you might stumble upon some hidden gems and secret features! 153 | 154 |
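For reference, the YOLO v8/v11-style export described above produces a project layout like the following (directory names are illustrative):

```
exported_project/
├── data.yaml
├── train/
│   ├── images/
│   └── labels/
└── valid/
    ├── images/
    └── labels/
```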
3. Keyboard shortcuts: 155 | - Ctrl + N: Create a new project 156 | - Ctrl + O: Open an existing project 157 | - Ctrl + S: Save the current project 158 | - Ctrl + W: Close the current project 159 | - Ctrl + Shift + S: Open Annotation Statistics 160 | - F1: Open the help window 161 | - Ctrl + Wheel: Zoom in/out 162 | - Hold Ctrl and drag: Pan the image 163 | - Esc: Cancel current annotation, exit edit mode, or exit SAM-assisted annotation 164 | - Enter: Finish current annotation, exit edit mode, or accept SAM-generated mask 165 | - Up/Down Arrow Keys: Navigate through slices in multi-dimensional images 166 | - Minus (-) and Equals (=): Adjust pen size for paint brush and eraser tools 167 | 168 | ## Known Issues and Bug Fixes 169 | 170 | - The application may not work correctly on Linux systems. Extensive testing has not been done yet. 171 | - When loading a YOLO model trained on classes that differ from those in the loaded YAML file, the application now shows the user a message about the mismatch instead of crashing. 172 | - Various other bugs have been addressed to improve overall stability and performance. 173 | 174 | ## Development 175 | 176 | For development purposes, you can clone the repository and install it in editable mode: 177 | 178 | 1. Clone the repository: 179 | ```bash 180 | git clone https://github.com/bnsreenu/digitalsreeni-image-annotator.git 181 | cd digitalsreeni-image-annotator 182 | ``` 183 | 184 | 2. Create a virtual environment (optional but recommended): 185 | ```bash 186 | python -m venv venv 187 | source venv/bin/activate  # On Windows, use `venv\Scripts\activate` 188 | ``` 189 | 190 | 3. Install the package and its dependencies in editable mode: 191 | ```bash 192 | pip install -e . 193 | ``` 194 | 195 | ## Contributing 196 | 197 | Contributions are welcome! Please feel free to submit a Pull Request. 198 | 199 | 1. Fork the repository 200 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 201 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 202 | 4. Push to the branch (`git push origin feature/AmazingFeature`) 203 | 5. Open a Pull Request 204 | 205 | ## License 206 | 207 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 208 | 209 | ## Acknowledgments 210 | 211 | - Thanks to all my [YouTube](http://www.youtube.com/c/DigitalSreeni) subscribers who inspired me to work on this project 212 | - Inspired by the need for efficient image annotation in computer vision tasks 213 | 214 | ## Contact 215 | 216 | Dr. Sreenivas Bhattiprolu - [@DigitalSreeni](https://twitter.com/DigitalSreeni) 217 | 218 | Project Link: [https://github.com/bnsreenu/digitalsreeni-image-annotator](https://github.com/bnsreenu/digitalsreeni-image-annotator) 219 | 220 | ## Citing 221 | 222 | If you use this software in your research, please cite it as follows: 223 | 224 | Bhattiprolu, S. (2024). DigitalSreeni Image Annotator [Computer software].
225 | https://github.com/bnsreenu/digitalsreeni-image-annotator 226 | 227 | ```bibtex 228 | @software{digitalsreeni_image_annotator, 229 | author = {Bhattiprolu, Sreenivas}, 230 | title = {DigitalSreeni Image Annotator}, 231 | year = {2024}, 232 | url = {https://github.com/bnsreenu/digitalsreeni-image-annotator} 233 | } 234 | ``` 235 | -------------------------------------------------------------------------------- /Release Notes 0.8.12.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | ## Version 0.8.12 3 | 4 | ### New Features and Enhancements 5 | - Same as version 0.8.9, except that the requirements file now defines specific version numbers for the libraries used. 6 | - The following bug fixes and optimizations correspond to version 0.8.9. 7 | 8 | ### Bug Fixes and Optimizations 9 | 1. **Project Corruption Prevention** 10 | - Fixed a critical issue where projects could become corrupted if the application was terminated during loading 11 | - Disabled auto-save functionality during the project loading process 12 | - Enhanced project loading stability for large datasets 13 | - Protected project integrity when handling multiple classes and images 14 | 15 | ### Notes 16 | - All existing tools continue to support both Windows and macOS operating systems 17 | - Improved reliability of project file handling 18 | - Critical update recommended for users working with large projects -------------------------------------------------------------------------------- /data/YOLO11n-model-yaml/coco8.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 3 | path: ../datasets/coco8 # dataset root dir 4 | train: images/train # train images (relative to 'path') 4 images 5 | val: images/val # val images (relative to 'path') 4 images 6 | test: # test images (optional) 7 | 8 | # Classes 9 | names: 10 | 0: person 11 | 1: bicycle 12 | 2: car 13 | 3: motorcycle 14 | 4: airplane 15 | 5: bus 16 | 6: train 17 | 7: truck 18 | 8: boat 19 | 9: traffic light 20 | 10: fire hydrant 21 | 11: stop sign 22 | 12: parking meter 23 | 13: bench 24 | 14: bird 25 | 15: cat 26 | 16: dog 27 | 17: horse 28 | 18: sheep 29 | 19: cow 30 | 20: elephant 31 | 21: bear 32 | 22: zebra 33 | 23: giraffe 34 | 24: backpack 35 | 25: umbrella 36 | 26: handbag 37 | 27: tie 38 | 28: suitcase 39 | 29: frisbee 40 | 30: skis 41 | 31: snowboard 42 | 32: sports ball 43 | 33: kite 44 | 34: baseball bat 45 | 35: baseball glove 46 | 36: skateboard 47 | 37: surfboard 48 | 38: tennis racket 49 | 39: bottle 50 | 40: wine glass 51 | 41: cup 52 | 42: fork 53 | 43: knife 54 | 44: spoon 55 | 45: bowl 56 | 46: banana 57 | 47: apple 58 | 48: sandwich 59 | 49: orange 60 | 50: broccoli 61 | 51: carrot 62 | 52: hot dog 63 | 53: pizza 64 | 54: donut 65 | 55: cake 66 | 56: chair 67 | 57: couch 68 | 58: potted plant 69 | 59: bed 70 | 60: dining table 71 | 61: toilet 72 | 62: tv 73 | 63: laptop 74 | 64: mouse 75 | 65: remote 76 | 66: keyboard 77 | 67: cell phone 78 | 68: microwave 79 | 69: oven 80 | 70: toaster 81 | 71: sink 82 | 72: refrigerator 83 | 73: book 84 | 74: clock 85 | 75: vase 86 | 76: scissors 87 | 77: teddy bear 88 | 78: hair drier 89 | 79: toothbrush 90 | 91 | # Download script/URL (optional) 92 | download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8.zip 93 |
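# Example: how a dataset YAML like this is consumed by the Ultralytics API.
# A minimal, hypothetical sketch (not part of this repository), assuming the
# recommended yolo11n-seg.pt weights from download_YOLO_models.txt are present:
#
#   from ultralytics import YOLO
#   model = YOLO("yolo11n-seg.pt")                        # pretrained segmentation weights
#   model.train(data="coco8.yaml", epochs=3, imgsz=640)   # trains on the classes listed above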
-------------------------------------------------------------------------------- /data/YOLO11n-model-yaml/download_YOLO_models.txt: -------------------------------------------------------------------------------- 1 | https://docs.ultralytics.com/tasks/segment/#models 2 | 3 | Recommended model: https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt -------------------------------------------------------------------------------- /data/download_SAM_models.txt: -------------------------------------------------------------------------------- 1 | It is recommended to pre-download SAM models and place them in your working directory - the directory from which you start this application. This avoids downloading the models multiple times. 2 | 3 | Download models from: https://docs.ultralytics.com/models/sam-2/ 4 | 5 | Direct download links: 6 | Tiny: https://github.com/ultralytics/assets/releases/download/v8.2.0/sam2_t.pt 7 | Small: https://github.com/ultralytics/assets/releases/download/v8.2.0/sam2_s.pt 8 | Base: https://github.com/ultralytics/assets/releases/download/v8.2.0/sam2_b.pt 9 | Large: https://github.com/ultralytics/assets/releases/download/v8.2.0/sam2_l.pt 10 | 11 | Be cautious with the large model, as it demands higher computing and memory resources from your system. -------------------------------------------------------------------------------- /data/project1/images/3D-EM-platelet-train-8bit_Z1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/data/project1/images/3D-EM-platelet-train-8bit_Z1.png -------------------------------------------------------------------------------- /data/project1/images/dogs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/data/project1/images/dogs.jpg -------------------------------------------------------------------------------- /data/project1/images/horse_on_street.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/data/project1/images/horse_on_street.jpg -------------------------------------------------------------------------------- /data/project1/images/marbles.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/data/project1/images/marbles.jpg -------------------------------------------------------------------------------- /data/project1/images/newton.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/data/project1/images/newton.jpg -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | PyQt5==5.15.11 2 | Pillow==11.0.0 3 | numpy==2.1.3 4 | tifffile==2023.3.15 5 | czifile==2019.7.2 6 | opencv-python==4.10.0.84 7 | pyyaml==6.0.2 8 | scikit-image==0.24.0 9 | ultralytics==8.3.27 10 | plotly==5.24.1 11 | shapely==2.0.6 12 | pystackreg==0.2.8 13 |
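# DICOM reading/metadata support for the DICOM Converter tool (used by dicom_converter.py)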
pydicom==3.0.1 -------------------------------------------------------------------------------- /screenshots/SAM_Model_loaded.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/SAM_Model_loaded.jpg -------------------------------------------------------------------------------- /screenshots/czi_dimensions_window.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/czi_dimensions_window.jpg -------------------------------------------------------------------------------- /screenshots/digitalsreeni-image-annotator-dark.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/digitalsreeni-image-annotator-dark.jpg -------------------------------------------------------------------------------- /screenshots/digitalsreeni-image-annotator-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/digitalsreeni-image-annotator-demo.gif -------------------------------------------------------------------------------- /screenshots/digitalsreeni-image-annotator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/digitalsreeni-image-annotator.jpg -------------------------------------------------------------------------------- /screenshots/tiff_dimensions_window.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/screenshots/tiff_dimensions_window.jpg -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup file for the DigitalSreeni Image Annotator package. 3 | @DigitalSreeni 4 | Dr. Sreenivas Bhattiprolu 5 | """ 6 | from setuptools import setup, find_packages 7 | 8 | with open("README.md", "r", encoding="utf-8") as fh: 9 | long_description = fh.read() 10 | 11 | setup( 12 | name="digitalsreeni-image-annotator", 13 | version="0.8.12", # Updated version number 14 | author="Dr. 
Sreenivas Bhattiprolu", 15 | author_email="digitalsreeni@gmail.com", 16 | description="A tool for annotating images using manual and automated tools, supporting multi-dimensional images and SAM2-assisted annotations", 17 | long_description=long_description, 18 | long_description_content_type="text/markdown", 19 | url="https://github.com/bnsreenu/digitalsreeni-image-annotator", 20 | packages=find_packages(where="src"), 21 | package_dir={"": "src"}, 22 | classifiers=[ 23 | "Development Status :: 3 - Alpha", 24 | "Intended Audience :: Science/Research", 25 | "Intended Audience :: Developers", 26 | "License :: OSI Approved :: MIT License", 27 | "Operating System :: OS Independent", 28 | "Programming Language :: Python :: 3.10", 29 | ], 30 | python_requires=">=3.10", 31 | install_requires=[ 32 | "PyQt5==5.15.11", 33 | "numpy==2.1.3", 34 | "Pillow==11.0.0", 35 | "tifffile==2023.3.15", 36 | "czifile==2019.7.2", 37 | "opencv-python==4.10.0.84", 38 | "pyyaml==6.0.2", 39 | "scikit-image==0.24.0", 40 | "ultralytics==8.3.27", 41 | "plotly==5.24.1", 42 | "shapely==2.0.6", 43 | "pystackreg==0.2.8", 44 | "pydicom==3.0.1" 45 | ], 46 | entry_points={ 47 | "console_scripts": [ 48 | "digitalsreeni-image-annotator=digitalsreeni_image_annotator.main:main", 49 | "sreeni=digitalsreeni_image_annotator.main:main", 50 | ], 51 | }, 52 | ) -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Image Annotator 3 | =============== 4 | A tool for annotating images with polygons and rectangles. 5 | This package provides a GUI application for image annotation, 6 | supporting polygon and rectangle annotations in a COCO-compatible format. 7 | @DigitalSreeni 8 | Dr. Sreenivas Bhattiprolu 9 | """ 10 | __version__ = "0.8.12" 11 | __author__ = "Dr. 
Sreenivas Bhattiprolu" 12 | 13 | from .annotator_window import ImageAnnotator 14 | from .image_label import ImageLabel 15 | from .utils import calculate_area, calculate_bbox 16 | from .sam_utils import SAMUtils 17 | 18 | __all__ = ['ImageAnnotator', 'ImageLabel', 'calculate_area', 'calculate_bbox', 'SAMUtils'] # Add 'SAMUtils' to this list -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/annotation_statistics.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/annotation_statistics.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/annotator_window.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/annotator_window.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/coco_json_combiner.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/coco_json_combiner.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/dataset_splitter.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/dataset_splitter.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/default_stylesheet.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/default_stylesheet.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/dicom_converter.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/dicom_converter.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/export_formats.cpython-310.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/export_formats.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/help_window.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/help_window.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/image_augmenter.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/image_augmenter.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/image_label.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/image_label.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/image_patcher.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/image_patcher.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/import_formats.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/import_formats.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/main.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/main.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/project_details.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/project_details.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/project_search.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/project_search.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/sam_utils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/sam_utils.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/slice_registration.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/slice_registration.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/snake_game.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/snake_game.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/soft_dark_stylesheet.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/soft_dark_stylesheet.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/stack_interpolator.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/stack_interpolator.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/stack_to_slices.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/stack_to_slices.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/utils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/utils.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/__pycache__/yolo_trainer.cpython-310.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bnsreenu/digitalsreeni-image-annotator/77b8b12b17d16d9dffe1049f43e637db70e2f2a4/src/digitalsreeni_image_annotator/__pycache__/yolo_trainer.cpython-310.pyc -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/annotation_statistics.py: -------------------------------------------------------------------------------- 1 | import plotly.graph_objects as go 2 | from plotly.subplots import make_subplots 3 | from PyQt5.QtWidgets import QDialog, QVBoxLayout, QTextBrowser, QPushButton, QHBoxLayout 4 | from PyQt5.QtCore import Qt 5 | import tempfile 6 | import os 7 | import webbrowser 8 | 9 | class AnnotationStatisticsDialog(QDialog): 10 | def __init__(self, parent=None): 11 | super().__init__(parent) 12 | self.setWindowTitle("Annotation Statistics") 13 | self.setGeometry(100, 100, 600, 400) 14 | self.setWindowFlags(self.windowFlags() | Qt.Window) 15 | self.initUI() 16 | 17 | def initUI(self): 18 | layout = QVBoxLayout() 19 | self.text_browser = QTextBrowser() 20 | layout.addWidget(self.text_browser) 21 | 22 | button_layout = QHBoxLayout() 23 | self.show_plot_button = QPushButton("Show Interactive Plot") 24 | self.show_plot_button.clicked.connect(self.show_interactive_plot) 25 | button_layout.addWidget(self.show_plot_button) 26 | 27 | layout.addLayout(button_layout) 28 | self.setLayout(layout) 29 | 30 | self.plot_file = None 31 | 32 | def show_centered(self, parent): 33 | parent_geo = parent.geometry() 34 | self.move(parent_geo.center() - self.rect().center()) 35 | self.show() 36 | 37 | def generate_statistics(self, annotations): 38 | try: 39 | # Class distribution 40 | class_distribution = {} 41 | objects_per_image = {} 42 | total_objects = 0 43 | 44 | for image, image_annotations in annotations.items(): 45 | objects_in_image = 0 46 | for class_name, class_annotations in image_annotations.items(): 47 | class_count = len(class_annotations) 48 | class_distribution[class_name] = class_distribution.get(class_name, 0) + class_count 49 | objects_in_image += class_count 50 | total_objects += class_count 51 | objects_per_image[image] = objects_in_image 52 | 53 | avg_objects_per_image = total_objects / len(annotations) if annotations else 0 54 | 55 | # Create plots 56 | fig = make_subplots(rows=2, cols=1, subplot_titles=("Class Distribution", "Objects per Image")) 57 | 58 | # Class distribution plot 59 | fig.add_trace(go.Bar(x=list(class_distribution.keys()), y=list(class_distribution.values()), name="Classes"), 60 | row=1, col=1) 61 | 62 | # Objects per image plot 63 | fig.add_trace(go.Bar( 64 | x=list(objects_per_image.keys()), 65 | y=list(objects_per_image.values()), 66 | name="Images", 67 | hovertext=[f"{img}: {count}" for img, count in objects_per_image.items()], 68 | hoverinfo="text" 69 | ), row=2, col=1) 70 | 71 | # Update layout 72 | fig.update_layout(height=800, title_text="Annotation Statistics") 73 | 74 | # Hide x-axis labels for the second subplot (Objects per Image) 75 | fig.update_xaxes(showticklabels=False, title_text="Images", row=2, col=1) 76 | 77 | # Update y-axis title for the second subplot 78 | fig.update_yaxes(title_text="Number of Objects", row=2, col=1) 79 | 80 | # Save the plot to a temporary HTML file 81 | with tempfile.NamedTemporaryFile(mode="w", suffix=".html", delete=False) as tmp: 82 | fig.write_html(tmp.name) 83 | self.plot_file = tmp.name 84 | 85 | # Display statistics in the text browser 86 | stats_text = f"Total objects: {total_objects}\n" 87 | stats_text += f"Average objects per 
image: {avg_objects_per_image:.2f}\n\n" 88 | stats_text += "Class distribution:\n" 89 | for class_name, count in class_distribution.items(): 90 | stats_text += f" {class_name}: {count}\n" 91 | 92 | self.text_browser.setPlainText(stats_text) 93 | 94 | except Exception as e: 95 | self.text_browser.setPlainText(f"An error occurred while generating statistics: {str(e)}") 96 | self.show_plot_button.setEnabled(False) 97 | 98 | def show_interactive_plot(self): 99 | if self.plot_file and os.path.exists(self.plot_file): 100 | webbrowser.open('file://' + os.path.realpath(self.plot_file)) 101 | else: 102 | self.text_browser.append("Error: Plot file not found.") 103 | 104 | def closeEvent(self, event): 105 | if self.plot_file and os.path.exists(self.plot_file): 106 | os.unlink(self.plot_file) 107 | super().closeEvent(event) 108 | 109 | def show_annotation_statistics(parent, annotations): 110 | dialog = AnnotationStatisticsDialog(parent) 111 | dialog.generate_statistics(annotations) 112 | dialog.show_centered(parent) 113 | return dialog -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/annotation_utils.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtWidgets import QListWidgetItem 2 | from PyQt5.QtGui import QColor 3 | from PyQt5.QtCore import Qt 4 | 5 | class AnnotationUtils: 6 | @staticmethod 7 | def update_annotation_list(self, image_name=None): 8 | self.annotation_list.clear() 9 | current_name = image_name or self.current_slice or self.image_file_name 10 | annotations = self.all_annotations.get(current_name, {}) 11 | for class_name, class_annotations in annotations.items(): 12 | color = self.image_label.class_colors.get(class_name, QColor(Qt.white)) 13 | for i, annotation in enumerate(class_annotations, start=1): 14 | item_text = f"{class_name} - {i}" 15 | item = QListWidgetItem(item_text) 16 | item.setData(Qt.UserRole, annotation) 17 | item.setForeground(color) 18 | self.annotation_list.addItem(item) 19 | 20 | @staticmethod 21 | def update_slice_list_colors(self): 22 | for i in range(self.slice_list.count()): 23 | item = self.slice_list.item(i) 24 | slice_name = item.text() 25 | if slice_name in self.all_annotations and any(self.all_annotations[slice_name].values()): 26 | item.setForeground(QColor(Qt.green)) 27 | else: 28 | item.setForeground(QColor(Qt.black) if not self.dark_mode else QColor(Qt.white)) 29 | 30 | @staticmethod 31 | def update_annotation_list_colors(self, class_name=None, color=None): 32 | for i in range(self.annotation_list.count()): 33 | item = self.annotation_list.item(i) 34 | annotation = item.data(Qt.UserRole) 35 | if class_name is None or annotation['category_name'] == class_name: 36 | item_color = color if class_name else self.image_label.class_colors.get(annotation['category_name'], QColor(Qt.white)) 37 | item.setForeground(item_color) 38 | 39 | @staticmethod 40 | def load_image_annotations(self): 41 | self.image_label.annotations.clear() 42 | current_name = self.current_slice or self.image_file_name 43 | if current_name in self.all_annotations: 44 | self.image_label.annotations = self.all_annotations[current_name].copy() 45 | self.image_label.update() 46 | 47 | @staticmethod 48 | def save_current_annotations(self): 49 | current_name = self.current_slice or self.image_file_name 50 | if current_name: 51 | if self.image_label.annotations: 52 | self.all_annotations[current_name] = self.image_label.annotations.copy() 53 | elif current_name in 
self.all_annotations: 54 | del self.all_annotations[current_name] 55 | AnnotationUtils.update_slice_list_colors(self) 56 | 57 | @staticmethod 58 | def add_annotation_to_list(self, annotation): 59 | class_name = annotation['category_name'] 60 | color = self.image_label.class_colors.get(class_name, QColor(Qt.white)) 61 | annotations = self.image_label.annotations.get(class_name, []) 62 | item_text = f"{class_name} - {len(annotations)}" 63 | item = QListWidgetItem(item_text) 64 | item.setData(Qt.UserRole, annotation) 65 | item.setForeground(color) 66 | self.annotation_list.addItem(item) -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/coco_json_combiner.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, 4 | QFileDialog, QLabel, QMessageBox, QApplication) 5 | from PyQt5.QtCore import Qt 6 | 7 | class COCOJSONCombinerDialog(QDialog): 8 | def __init__(self, parent=None): 9 | super().__init__(parent) 10 | self.setWindowTitle("COCO JSON Combiner") 11 | self.setGeometry(100, 100, 400, 300) 12 | self.setWindowFlags(self.windowFlags() | Qt.Window) 13 | self.setWindowModality(Qt.ApplicationModal) 14 | self.json_files = [] 15 | self.initUI() 16 | 17 | def initUI(self): 18 | layout = QVBoxLayout() 19 | 20 | self.file_labels = [] 21 | for i in range(5): 22 | file_layout = QHBoxLayout() 23 | label = QLabel(f"File {i+1}: Not selected") 24 | self.file_labels.append(label) 25 | file_layout.addWidget(label) 26 | select_button = QPushButton(f"Select File {i+1}") 27 | select_button.clicked.connect(lambda checked, x=i: self.select_file(x)) 28 | file_layout.addWidget(select_button) 29 | layout.addLayout(file_layout) 30 | 31 | self.combine_button = QPushButton("Combine JSON Files") 32 | self.combine_button.clicked.connect(self.combine_json_files) 33 | self.combine_button.setEnabled(False) 34 | layout.addWidget(self.combine_button) 35 | 36 | self.setLayout(layout) 37 | 38 | def select_file(self, index): 39 | file_name, _ = QFileDialog.getOpenFileName(self, f"Select COCO JSON File {index+1}", "", "JSON Files (*.json)") 40 | if file_name: 41 | if file_name not in self.json_files: 42 | self.json_files.append(file_name) 43 | self.file_labels[index].setText(f"File {index+1}: {os.path.basename(file_name)}") 44 | self.combine_button.setEnabled(True) 45 | else: 46 | QMessageBox.warning(self, "Duplicate File", "This file has already been selected.") 47 | QApplication.processEvents() 48 | 49 | 50 | def combine_json_files(self): 51 | if not self.json_files: 52 | QMessageBox.warning(self, "No Files", "Please select at least one JSON file to combine.") 53 | return 54 | 55 | combined_data = { 56 | "images": [], 57 | "annotations": [], 58 | "categories": [] 59 | } 60 | image_file_names = set() 61 | next_image_id = 1 62 | next_annotation_id = 1 63 | 64 | try: 65 | for file_path in self.json_files: 66 | with open(file_path, 'r') as f: 67 | data = json.load(f) 68 | 69 | # Combine categories 70 | category_id_map = {} 71 | for category in data.get('categories', []): 72 | existing_category = next((c for c in combined_data['categories'] if c['name'] == category['name']), None) 73 | if existing_category: 74 | category_id_map[category['id']] = existing_category['id'] 75 | else: 76 | new_id = len(combined_data['categories']) + 1 77 | category_id_map[category['id']] = new_id 78 | category['id'] = new_id 79 | 
combined_data['categories'].append(category) 80 | 81 | # Combine images and annotations 82 | image_id_map = {} 83 | for image in data.get('images', []): 84 | if image['file_name'] not in image_file_names: 85 | image_file_names.add(image['file_name']) 86 | image_id_map[image['id']] = next_image_id 87 | image['id'] = next_image_id 88 | combined_data['images'].append(image) 89 | next_image_id += 1 90 | 91 | for annotation in data.get('annotations', []): 92 | if annotation['image_id'] in image_id_map: 93 | annotation['id'] = next_annotation_id 94 | annotation['image_id'] = image_id_map[annotation['image_id']] 95 | annotation['category_id'] = category_id_map[annotation['category_id']] 96 | combined_data['annotations'].append(annotation) 97 | next_annotation_id += 1 98 | 99 | output_file, _ = QFileDialog.getSaveFileName(self, "Save Combined JSON", "", "JSON Files (*.json)") 100 | if output_file: 101 | with open(output_file, 'w') as f: 102 | json.dump(combined_data, f, indent=2) 103 | QMessageBox.information(self, "Success", f"Combined JSON saved to {output_file}") 104 | 105 | except Exception as e: 106 | QMessageBox.critical(self, "Error", f"An error occurred while combining JSON files: {str(e)}") 107 | 108 | 109 | 110 | def show_centered(self, parent): 111 | parent_geo = parent.geometry() 112 | self.move(parent_geo.center() - self.rect().center()) 113 | self.show() 114 | 115 | def show_coco_json_combiner(parent): 116 | dialog = COCOJSONCombinerDialog(parent) 117 | dialog.show_centered(parent) 118 | return dialog -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Constants for the Image Annotator application. 3 | 4 | This module contains constant values used across the application. 5 | 6 | @DigitalSreeni 7 | Dr. 
Sreenivas Bhattiprolu 8 | """ 9 | 10 | # File dialog filters 11 | IMAGE_FILE_FILTER = "Image Files (*.png *.jpg *.bmp)" 12 | JSON_FILE_FILTER = "JSON Files (*.json)" 13 | 14 | # Default window size 15 | DEFAULT_WINDOW_WIDTH = 1400 16 | DEFAULT_WINDOW_HEIGHT = 800 17 | 18 | # Zoom settings 19 | MIN_ZOOM = 10 20 | MAX_ZOOM = 500 21 | DEFAULT_ZOOM = 100 22 | 23 | # Annotation settings 24 | DEFAULT_FILL_OPACITY = 0.3 25 | 26 | -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/dataset_splitter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import shutil 4 | import random 5 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, 6 | QLabel, QSpinBox, QRadioButton, QButtonGroup, QMessageBox, QComboBox) 7 | from PyQt5.QtCore import Qt 8 | import yaml 9 | from PIL import Image 10 | 11 | class DatasetSplitterTool(QDialog): 12 | def __init__(self, parent=None): 13 | super().__init__(parent) 14 | self.setWindowTitle("Dataset Splitter") 15 | self.setGeometry(100, 100, 500, 300) 16 | self.setWindowFlags(self.windowFlags() | Qt.Window) 17 | self.initUI() 18 | 19 | def initUI(self): 20 | layout = QVBoxLayout() 21 | 22 | # Option selection 23 | options_layout = QVBoxLayout() 24 | self.images_only_radio = QRadioButton("Images Only") 25 | options_layout.addWidget(self.images_only_radio) 26 | 27 | images_annotations_layout = QHBoxLayout() 28 | self.images_annotations_radio = QRadioButton("Images and Annotations") 29 | images_annotations_layout.addWidget(self.images_annotations_radio) 30 | self.select_json_button = QPushButton("Upload COCO JSON File") 31 | self.select_json_button.clicked.connect(self.select_json_file) 32 | self.select_json_button.setEnabled(False) 33 | images_annotations_layout.addWidget(self.select_json_button) 34 | options_layout.addLayout(images_annotations_layout) 35 | 36 | layout.addLayout(options_layout) 37 | 38 | option_group = QButtonGroup(self) 39 | option_group.addButton(self.images_only_radio) 40 | option_group.addButton(self.images_annotations_radio) 41 | 42 | self.images_only_radio.setChecked(True) 43 | 44 | # Percentage inputs 45 | train_layout = QHBoxLayout() 46 | train_layout.addWidget(QLabel("Train %:")) 47 | self.train_percent = QSpinBox() 48 | self.train_percent.setRange(0, 100) 49 | self.train_percent.setValue(70) 50 | train_layout.addWidget(self.train_percent) 51 | layout.addLayout(train_layout) 52 | 53 | val_layout = QHBoxLayout() 54 | val_layout.addWidget(QLabel("Validation %:")) 55 | self.val_percent = QSpinBox() 56 | self.val_percent.setRange(0, 100) 57 | self.val_percent.setValue(30) 58 | val_layout.addWidget(self.val_percent) 59 | layout.addLayout(val_layout) 60 | 61 | test_layout = QHBoxLayout() 62 | test_layout.addWidget(QLabel("Test %:")) 63 | self.test_percent = QSpinBox() 64 | self.test_percent.setRange(0, 100) 65 | self.test_percent.setValue(0) 66 | test_layout.addWidget(self.test_percent) 67 | layout.addLayout(test_layout) 68 | 69 | # Format selection 70 | self.format_selection_layout = QHBoxLayout() 71 | self.format_label = QLabel("Output Format:") 72 | self.format_combo = QComboBox() 73 | self.format_combo.addItems(["COCO JSON", "YOLO"]) 74 | self.format_combo.setEnabled(False) 75 | self.format_selection_layout.addWidget(self.format_label) 76 | self.format_selection_layout.addWidget(self.format_combo) 77 | options_layout.addLayout(self.format_selection_layout) 78 | 79 | # Buttons 80 | 
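        # The two directory pickers below populate self.input_directory and
        # self.output_directory via QFileDialog; "Split Dataset" then validates
        # that the train/val/test percentages sum to 100 before dispatching to
        # split_images_only() or split_images_and_annotations().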
self.select_input_button = QPushButton("Select Input Directory") 81 | self.select_input_button.clicked.connect(self.select_input_directory) 82 | layout.addWidget(self.select_input_button) 83 | 84 | self.select_output_button = QPushButton("Select Output Directory") 85 | self.select_output_button.clicked.connect(self.select_output_directory) 86 | layout.addWidget(self.select_output_button) 87 | 88 | self.split_button = QPushButton("Split Dataset") 89 | self.split_button.clicked.connect(self.split_dataset) 90 | layout.addWidget(self.split_button) 91 | 92 | self.setLayout(layout) 93 | 94 | self.input_directory = "" 95 | self.output_directory = "" 96 | self.json_file = "" 97 | 98 | # Connect radio buttons to enable/disable JSON selection 99 | self.images_only_radio.toggled.connect(self.toggle_json_selection) 100 | self.images_annotations_radio.toggled.connect(self.toggle_json_selection) 101 | 102 | def toggle_json_selection(self): 103 | is_annotations = self.images_annotations_radio.isChecked() 104 | self.select_json_button.setEnabled(is_annotations) 105 | self.format_combo.setEnabled(is_annotations) 106 | 107 | def select_input_directory(self): 108 | self.input_directory = QFileDialog.getExistingDirectory(self, "Select Input Directory") 109 | 110 | def select_output_directory(self): 111 | self.output_directory = QFileDialog.getExistingDirectory(self, "Select Output Directory") 112 | 113 | def select_json_file(self): 114 | self.json_file, _ = QFileDialog.getOpenFileName(self, "Select COCO JSON File", "", "JSON Files (*.json)") 115 | 116 | def split_dataset(self): 117 | if not self.input_directory or not self.output_directory: 118 | QMessageBox.warning(self, "Error", "Please select input and output directories.") 119 | return 120 | 121 | if self.images_annotations_radio.isChecked() and not self.json_file: 122 | QMessageBox.warning(self, "Error", "Please select a COCO JSON file.") 123 | return 124 | 125 | train_percent = self.train_percent.value() 126 | val_percent = self.val_percent.value() 127 | test_percent = self.test_percent.value() 128 | 129 | if train_percent + val_percent + test_percent != 100: 130 | QMessageBox.warning(self, "Error", "Percentages must add up to 100%.") 131 | return 132 | 133 | if self.images_only_radio.isChecked(): 134 | self.split_images_only() 135 | else: 136 | self.split_images_and_annotations() 137 | 138 | def split_images_only(self): 139 | image_files = [f for f in os.listdir(self.input_directory) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.tiff'))] 140 | random.shuffle(image_files) 141 | 142 | train_split = int(len(image_files) * self.train_percent.value() / 100) 143 | val_split = int(len(image_files) * self.val_percent.value() / 100) 144 | 145 | train_images = image_files[:train_split] 146 | val_images = image_files[train_split:train_split + val_split] 147 | test_images = image_files[train_split + val_split:] 148 | 149 | for subset, images in [("train", train_images), 150 | ("val", val_images), 151 | ("test", test_images)]: 152 | if images: # Only create directories and copy images if there are images for this split 153 | subset_dir = os.path.join(self.output_directory, subset) 154 | os.makedirs(subset_dir, exist_ok=True) 155 | self.copy_images(images, subset, images_only=True) 156 | 157 | QMessageBox.information(self, "Success", "Dataset split successfully!") 158 | 159 | def split_images_and_annotations(self): 160 | with open(self.json_file, 'r') as f: 161 | coco_data = json.load(f) 162 | 163 | image_files = [img['file_name'] for img in 
coco_data['images']] 164 | random.shuffle(image_files) 165 | 166 | train_split = int(len(image_files) * self.train_percent.value() / 100) 167 | val_split = int(len(image_files) * self.val_percent.value() / 100) 168 | 169 | train_images = image_files[:train_split] 170 | val_images = image_files[train_split:train_split + val_split] 171 | test_images = image_files[train_split + val_split:] 172 | 173 | # Create main directories 174 | os.makedirs(self.output_directory, exist_ok=True) 175 | 176 | if self.format_combo.currentText() == "COCO JSON": 177 | self.split_coco_format(coco_data, train_images, val_images, test_images) 178 | else: # YOLO format 179 | self.split_yolo_format(coco_data, train_images, val_images, test_images) 180 | 181 | def copy_images(self, image_list, subset, images_only=False): 182 | if not image_list: 183 | return 184 | 185 | if images_only: 186 | subset_dir = os.path.join(self.output_directory, subset) 187 | else: 188 | subset_dir = os.path.join(self.output_directory, subset, "images") 189 | os.makedirs(subset_dir, exist_ok=True) 190 | 191 | for image in image_list: 192 | src = os.path.join(self.input_directory, image) 193 | dst = os.path.join(subset_dir, image) 194 | shutil.copy2(src, dst) 195 | 196 | def create_subset_annotations(self, coco_data, subset_images): 197 | subset_images_data = [img for img in coco_data['images'] if img['file_name'] in subset_images] 198 | subset_image_ids = [img['id'] for img in subset_images_data] 199 | 200 | return { 201 | "images": subset_images_data, 202 | "annotations": [ann for ann in coco_data['annotations'] if ann['image_id'] in subset_image_ids], 203 | "categories": coco_data['categories'] 204 | } 205 | 206 | def split_coco_format(self, coco_data, train_images, val_images, test_images): 207 | # Only create directories and save annotations for non-empty splits 208 | for subset, images in [("train", train_images), 209 | ("val", val_images), 210 | ("test", test_images)]: 211 | if images: # Only process if there are images in this split 212 | subset_dir = os.path.join(self.output_directory, subset) 213 | os.makedirs(subset_dir, exist_ok=True) # Create the subset directory first 214 | os.makedirs(os.path.join(subset_dir, "images"), exist_ok=True) 215 | self.copy_images(images, subset, images_only=False) 216 | 217 | # Create and save annotations for this subset 218 | subset_data = self.create_subset_annotations(coco_data, images) 219 | self.save_coco_annotations(subset_data, subset) 220 | 221 | QMessageBox.information(self, "Success", "Dataset and COCO annotations split successfully!") 222 | 223 | def save_coco_annotations(self, data, subset): 224 | subset_dir = os.path.join(self.output_directory, subset) 225 | os.makedirs(subset_dir, exist_ok=True) 226 | output_file = os.path.join(subset_dir, f"{subset}_annotations.json") 227 | with open(output_file, 'w') as f: 228 | json.dump(data, f, indent=2) 229 | 230 | def split_yolo_format(self, coco_data, train_images, val_images, test_images): 231 | # Create directories only for non-empty splits 232 | yaml_paths = {} 233 | for subset, images in [("train", train_images), 234 | ("val", val_images), 235 | ("test", test_images)]: 236 | if images: # Only create directories if there are images for this split 237 | subset_dir = os.path.join(self.output_directory, subset) 238 | os.makedirs(os.path.join(subset_dir, "images"), exist_ok=True) 239 | os.makedirs(os.path.join(subset_dir, "labels"), exist_ok=True) 240 | yaml_paths[subset] = f'./{subset}/images' 241 | 242 | # Create class mapping (COCO to YOLO 
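The comment split across this boundary introduces the class-id remapping, and right after it split_yolo_format rewrites each COCO [x, y, width, height] box (pixels, top-left origin) as a class index plus normalized center coordinates. The same conversion as a tiny sketch, with hypothetical names:

```python
def coco_bbox_to_yolo(bbox, img_w, img_h):
    """COCO [x, y, w, h] in pixels -> YOLO (x_center, y_center, w, h) in 0..1."""
    x, y, w, h = bbox
    return ((x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h)

# A 100x50 box with top-left corner at (200, 150) in a 640x480 image:
xc, yc, w, h = coco_bbox_to_yolo([200, 150, 100, 50], 640, 480)
print(f"0 {xc:.6f} {yc:.6f} {w:.6f} {h:.6f}")  # one label line, class index 0
```

YOLO expects one such line per object in the image's .txt label file, which is exactly what the loop below writes.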
indices) 243 | categories = {cat["id"]: i for i, cat in enumerate(coco_data["categories"])} 244 | 245 | # Process each non-empty subset 246 | for subset, images in [("train", train_images), 247 | ("val", val_images), 248 | ("test", test_images)]: 249 | if not images: # Skip if no images in this split 250 | continue 251 | 252 | images_dir = os.path.join(self.output_directory, subset, "images") 253 | labels_dir = os.path.join(self.output_directory, subset, "labels") 254 | 255 | for image_file in images: 256 | # Copy image 257 | src = os.path.join(self.input_directory, image_file) 258 | shutil.copy2(src, os.path.join(images_dir, image_file)) 259 | 260 | # Get image dimensions 261 | img = Image.open(src) 262 | img_width, img_height = img.size 263 | 264 | # Get annotations for this image 265 | image_id = next(img["id"] for img in coco_data["images"] if img["file_name"] == image_file) 266 | annotations = [ann for ann in coco_data["annotations"] if ann["image_id"] == image_id] 267 | 268 | # Create YOLO format labels 269 | label_file = os.path.join(labels_dir, os.path.splitext(image_file)[0] + ".txt") 270 | with open(label_file, "w") as f: 271 | for ann in annotations: 272 | # Convert COCO class id to YOLO class id 273 | yolo_class = categories[ann["category_id"]] 274 | 275 | # Convert COCO bbox to YOLO format 276 | x, y, w, h = ann["bbox"] 277 | x_center = (x + w/2) / img_width 278 | y_center = (y + h/2) / img_height 279 | w = w / img_width 280 | h = h / img_height 281 | 282 | f.write(f"{yolo_class} {x_center:.6f} {y_center:.6f} {w:.6f} {h:.6f}\n") 283 | 284 | # Create data.yaml with only the relevant paths 285 | yaml_data = { 286 | 'nc': len(categories), 287 | 'names': [cat["name"] for cat in sorted(coco_data["categories"], key=lambda x: categories[x["id"]])] 288 | } 289 | yaml_data.update(yaml_paths) # Add only paths for non-empty splits 290 | 291 | with open(os.path.join(self.output_directory, 'data.yaml'), 'w') as f: 292 | yaml.dump(yaml_data, f, default_flow_style=False) 293 | 294 | QMessageBox.information(self, "Success", "Dataset and YOLO annotations split successfully!") 295 | 296 | def show_centered(self, parent): 297 | parent_geo = parent.geometry() 298 | self.move(parent_geo.center() - self.rect().center()) 299 | self.show() -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/default_stylesheet.py: -------------------------------------------------------------------------------- 1 | default_stylesheet = """ 2 | QWidget { 3 | background-color: #F0F0F0; 4 | color: #333333; 5 | font-family: Arial, sans-serif; 6 | } 7 | 8 | QMainWindow { 9 | background-color: #FFFFFF; 10 | } 11 | 12 | QPushButton { 13 | background-color: #E0E0E0; 14 | border: 1px solid #BBBBBB; 15 | padding: 5px 10px; 16 | border-radius: 3px; 17 | color: #333333; 18 | } 19 | 20 | QPushButton:hover { 21 | background-color: #D0D0D0; 22 | } 23 | 24 | QPushButton:pressed { 25 | background-color: #C0C0C0; 26 | } 27 | 28 | QPushButton:checked { 29 | background-color: #A0A0A0; 30 | border: 2px solid #808080; 31 | color: #FFFFFF; 32 | } 33 | 34 | 35 | QListWidget, QTreeWidget { 36 | background-color: #FFFFFF; 37 | border: 1px solid #CCCCCC; 38 | border-radius: 3px; 39 | } 40 | 41 | 42 | QListWidget::item:selected { 43 | background-color: #E0E0E0; 44 | color: #333333; 45 | } 46 | 47 | 48 | QLabel { 49 | color: #333333; 50 | } 51 | 52 | QLabel.section-header { 53 | font-weight: bold; 54 | font-size: 14px; 55 | padding: 5px 0; 56 | color: #333333; /* Dark color for 
visibility in light mode */ 57 | } 58 | 59 | 60 | QLineEdit, QTextEdit, QPlainTextEdit { 61 | background-color: #FFFFFF; 62 | border: 1px solid #CCCCCC; 63 | color: #333333; 64 | padding: 2px; 65 | border-radius: 3px; 66 | } 67 | 68 | QSlider::groove:horizontal { 69 | background: #CCCCCC; 70 | height: 8px; 71 | border-radius: 4px; 72 | } 73 | 74 | QSlider::handle:horizontal { 75 | background: #888888; 76 | width: 18px; 77 | margin-top: -5px; 78 | margin-bottom: -5px; 79 | border-radius: 9px; 80 | } 81 | 82 | QSlider::handle:horizontal:hover { 83 | background: #666666; 84 | } 85 | 86 | QScrollBar:vertical, QScrollBar:horizontal { 87 | background-color: #F0F0F0; 88 | width: 12px; 89 | height: 12px; 90 | } 91 | 92 | QScrollBar::handle:vertical, QScrollBar::handle:horizontal { 93 | background-color: #CCCCCC; 94 | border-radius: 6px; 95 | min-height: 20px; 96 | } 97 | 98 | QScrollBar::handle:vertical:hover, QScrollBar::handle:horizontal:hover { 99 | background-color: #BBBBBB; 100 | } 101 | 102 | QScrollBar::add-line, QScrollBar::sub-line { 103 | background: none; 104 | } 105 | 106 | QMenuBar { 107 | background-color: #F0F0F0; 108 | } 109 | 110 | QMenuBar::item { 111 | padding: 5px 10px; 112 | background-color: transparent; 113 | } 114 | 115 | QMenuBar::item:selected { 116 | background-color: #E0E0E0; 117 | } 118 | 119 | QMenu { 120 | background-color: #FFFFFF; 121 | border: 1px solid #CCCCCC; 122 | } 123 | 124 | QMenu::item { 125 | padding: 5px 20px 5px 20px; 126 | } 127 | 128 | QMenu::item:selected { 129 | background-color: #E0E0E0; 130 | } 131 | 132 | QToolTip { 133 | background-color: #FFFFFF; 134 | color: #333333; 135 | border: 1px solid #CCCCCC; 136 | } 137 | 138 | QStatusBar { 139 | background-color: #F0F0F0; 140 | color: #666666; 141 | } 142 | 143 | QListWidget::item { 144 | color: none; 145 | } 146 | """ -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/dicom_converter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import numpy as np 4 | from datetime import datetime 5 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, 6 | QLabel, QProgressDialog, QRadioButton, QButtonGroup, 7 | QMessageBox, QApplication, QGroupBox) 8 | from PyQt5.QtCore import Qt 9 | import pydicom 10 | from pydicom.pixel_data_handlers.util import apply_voi_lut 11 | import tifffile 12 | 13 | class DicomConverter(QDialog): 14 | def __init__(self, parent=None): 15 | super().__init__(parent) 16 | self.setWindowTitle("DICOM to TIFF Converter") 17 | self.setGeometry(100, 100, 600, 300) 18 | self.setWindowFlags(self.windowFlags() | Qt.Window) 19 | self.setWindowModality(Qt.ApplicationModal) # Add modal behavior 20 | 21 | # Initialize variables first 22 | self.input_file = "" 23 | self.output_directory = "" 24 | 25 | self.initUI() 26 | 27 | def initUI(self): 28 | layout = QVBoxLayout() 29 | layout.setSpacing(10) # Add consistent spacing 30 | 31 | # File Selection Group 32 | file_group = QGroupBox("File Selection") 33 | file_layout = QVBoxLayout() 34 | 35 | # Input file selection 36 | input_layout = QHBoxLayout() 37 | self.input_label = QLabel("No DICOM file selected") 38 | self.input_label.setMinimumWidth(100) 39 | self.input_label.setMaximumWidth(300) 40 | self.input_label.setWordWrap(True) 41 | self.select_input_btn = QPushButton("Select DICOM File") 42 | self.select_input_btn.clicked.connect(self.select_input) 43 | 
input_layout.addWidget(self.select_input_btn) 44 | input_layout.addWidget(self.input_label, 1) 45 | file_layout.addLayout(input_layout) 46 | 47 | # Output directory selection 48 | output_layout = QHBoxLayout() 49 | self.output_label = QLabel("No output directory selected") 50 | self.output_label.setMinimumWidth(100) 51 | self.output_label.setMaximumWidth(300) 52 | self.output_label.setWordWrap(True) 53 | self.select_output_btn = QPushButton("Select Output Directory") 54 | self.select_output_btn.clicked.connect(self.select_output) 55 | output_layout.addWidget(self.select_output_btn) 56 | output_layout.addWidget(self.output_label, 1) 57 | file_layout.addLayout(output_layout) 58 | 59 | file_group.setLayout(file_layout) 60 | layout.addWidget(file_group) 61 | 62 | # Output Format Group 63 | format_group = QGroupBox("Output Format") 64 | format_layout = QVBoxLayout() 65 | 66 | self.stack_radio = QRadioButton("Single TIFF Stack") 67 | self.individual_radio = QRadioButton("Individual TIFF Files") 68 | self.stack_radio.setChecked(True) 69 | 70 | format_layout.addWidget(self.stack_radio) 71 | format_layout.addWidget(self.individual_radio) 72 | format_group.setLayout(format_layout) 73 | layout.addWidget(format_group) 74 | 75 | # Metadata info 76 | metadata_group = QGroupBox("Metadata Information") 77 | metadata_layout = QVBoxLayout() 78 | metadata_label = QLabel("DICOM metadata will be saved as JSON file in the output directory") 79 | metadata_label.setStyleSheet("color: gray; font-style: italic;") 80 | metadata_label.setWordWrap(True) 81 | metadata_layout.addWidget(metadata_label) 82 | metadata_group.setLayout(metadata_layout) 83 | layout.addWidget(metadata_group) 84 | 85 | # Convert button 86 | self.convert_btn = QPushButton("Convert") 87 | self.convert_btn.clicked.connect(self.convert_dicom) 88 | layout.addWidget(self.convert_btn) 89 | 90 | self.setLayout(layout) 91 | 92 | def select_input(self): 93 | try: 94 | file_filter = "DICOM files (*.dcm *.DCM);;All files (*.*)" 95 | file_name, _ = QFileDialog.getOpenFileName( 96 | self, 97 | "Select DICOM File", 98 | "", 99 | file_filter, 100 | options=QFileDialog.Options() 101 | ) 102 | 103 | if file_name: 104 | self.input_file = file_name 105 | self.input_label.setText(self.truncate_path(file_name)) 106 | self.input_label.setToolTip(file_name) 107 | QApplication.processEvents() 108 | 109 | except Exception as e: 110 | QMessageBox.critical(self, "Error", f"Error selecting input file: {str(e)}") 111 | 112 | def select_output(self): 113 | try: 114 | directory = QFileDialog.getExistingDirectory( 115 | self, 116 | "Select Output Directory", 117 | "", 118 | QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks 119 | ) 120 | 121 | if directory: 122 | self.output_directory = directory 123 | self.output_label.setText(self.truncate_path(directory)) 124 | self.output_label.setToolTip(directory) 125 | QApplication.processEvents() 126 | 127 | except Exception as e: 128 | QMessageBox.critical(self, "Error", f"Error selecting output directory: {str(e)}") 129 | 130 | def truncate_path(self, path, max_length=40): 131 | if len(path) <= max_length: 132 | return path 133 | 134 | filename = os.path.basename(path) 135 | directory = os.path.dirname(path) 136 | 137 | if len(filename) > max_length - 5: 138 | return f"...{filename[-(max_length-5):]}" 139 | 140 | available_length = max_length - len(filename) - 5 141 | return f"...{directory[-available_length:]}{os.sep}{filename}" 142 | 143 | def extract_metadata(self, ds): 144 | """Extract relevant metadata from DICOM 
dataset.""" 145 | metadata = { 146 | "PatientID": getattr(ds, "PatientID", "Unknown"), 147 | "PatientName": str(getattr(ds, "PatientName", "Unknown")), 148 | "StudyDate": getattr(ds, "StudyDate", "Unknown"), 149 | "SeriesDescription": getattr(ds, "SeriesDescription", "Unknown"), 150 | "Modality": getattr(ds, "Modality", "Unknown"), 151 | "Manufacturer": getattr(ds, "Manufacturer", "Unknown"), 152 | "InstitutionName": getattr(ds, "InstitutionName", "Unknown"), 153 | "PixelSpacing": getattr(ds, "PixelSpacing", [1, 1]), 154 | "SliceThickness": getattr(ds, "SliceThickness", 1), 155 | "ImageOrientation": getattr(ds, "ImageOrientationPatient", [1,0,0,0,1,0]), 156 | "ImagePosition": getattr(ds, "ImagePositionPatient", [0,0,0]), 157 | "WindowCenter": getattr(ds, "WindowCenter", None), 158 | "WindowWidth": getattr(ds, "WindowWidth", None), 159 | "RescaleIntercept": getattr(ds, "RescaleIntercept", 0), 160 | "RescaleSlope": getattr(ds, "RescaleSlope", 1), 161 | "BitsAllocated": getattr(ds, "BitsAllocated", 16), 162 | "PixelRepresentation": getattr(ds, "PixelRepresentation", 0), 163 | "ConversionDate": datetime.now().strftime("%Y-%m-%d %H:%M:%S") 164 | } 165 | return metadata 166 | 167 | def apply_window_level(self, image, ds): 168 | """Apply window/level if present in DICOM.""" 169 | try: 170 | if hasattr(ds, 'WindowCenter') and hasattr(ds, 'WindowWidth'): 171 | return apply_voi_lut(image, ds) 172 | except: 173 | pass 174 | return image 175 | 176 | 177 | def convert_dicom(self): 178 | if not self.input_file or not self.output_directory: 179 | QMessageBox.warning(self, "Error", "Please select both input file and output directory") 180 | return 181 | 182 | try: 183 | # Create progress dialog 184 | progress = QProgressDialog("Processing DICOM file...", "Cancel", 0, 100, self) 185 | progress.setWindowModality(Qt.WindowModal) 186 | progress.setMinimumWidth(400) 187 | progress.show() 188 | 189 | # Verify DICOM file 190 | if not pydicom.misc.is_dicom(self.input_file): 191 | raise ValueError("Selected file is not a valid DICOM file") 192 | 193 | # Read DICOM data 194 | print("Reading DICOM file...") 195 | progress.setLabelText("Reading DICOM file...") 196 | progress.setValue(20) 197 | 198 | ds = pydicom.dcmread(self.input_file) 199 | series_metadata = self.extract_metadata(ds) 200 | 201 | # Process pixel data 202 | print("Processing pixel data...") 203 | progress.setLabelText("Processing pixel data...") 204 | progress.setValue(40) 205 | 206 | pixel_array = ds.pixel_array 207 | original_dtype = pixel_array.dtype 208 | print(f"Original data type: {original_dtype}") 209 | print(f"Original data range: {pixel_array.min()} to {pixel_array.max()}") 210 | 211 | # Apply rescale slope and intercept 212 | if hasattr(ds, 'RescaleSlope') or hasattr(ds, 'RescaleIntercept'): 213 | slope = getattr(ds, 'RescaleSlope', 1) 214 | intercept = getattr(ds, 'RescaleIntercept', 0) 215 | print(f"Applying rescale slope ({slope}) and intercept ({intercept})") 216 | pixel_array = (pixel_array * slope + intercept) 217 | 218 | # Apply window/level 219 | print("Applying window/level adjustments...") 220 | pixel_array = self.apply_window_level(pixel_array, ds) 221 | print(f"Adjusted data range: {pixel_array.min()} to {pixel_array.max()}") 222 | 223 | print(f"Image shape: {pixel_array.shape}") 224 | print(f"Original dtype: {original_dtype}") 225 | 226 | # Save metadata 227 | progress.setLabelText("Saving metadata...") 228 | progress.setValue(60) 229 | 230 | metadata_file = os.path.join(self.output_directory, 231 | 
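The conversion pipeline continuing below follows the standard pydicom recipe: map stored values to modality units with RescaleSlope/RescaleIntercept, then window them with the VOI LUT when the tags exist (note that the bare except in apply_window_level above silently skips files with malformed window tags). A minimal sketch of that recipe; "scan.dcm" is a hypothetical path:

```python
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut

ds = pydicom.dcmread("scan.dcm")               # hypothetical input file
arr = ds.pixel_array.astype("float32")

# Stored values -> modality units (e.g. Hounsfield units for CT).
arr = arr * float(getattr(ds, "RescaleSlope", 1)) + float(getattr(ds, "RescaleIntercept", 0))

# Window/level, only when the tags are present (mirrors apply_window_level above).
if hasattr(ds, "WindowCenter") and hasattr(ds, "WindowWidth"):
    arr = apply_voi_lut(arr, ds)
```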
os.path.splitext(os.path.basename(self.input_file))[0] + 232 | "_metadata.json") 233 | with open(metadata_file, 'w') as f: 234 | json.dump(series_metadata, f, indent=2) 235 | 236 | # Get physical sizes from metadata 237 | pixel_spacing = series_metadata.get("PixelSpacing", [1, 1]) 238 | slice_thickness = series_metadata.get("SliceThickness", 1) 239 | 240 | print(f"Pixel spacing: {pixel_spacing}") 241 | print(f"Slice thickness: {slice_thickness}") 242 | 243 | # Save TIFF 244 | progress.setLabelText("Saving TIFF file(s)...") 245 | progress.setValue(80) 246 | 247 | # Convert back to original dtype if needed 248 | if np.issubdtype(original_dtype, np.integer): 249 | print("Converting back to original integer dtype...") 250 | data_min = pixel_array.min() 251 | data_max = pixel_array.max() 252 | 253 | if data_max != data_min: 254 | pixel_array = ((pixel_array - data_min) / (data_max - data_min) * 255 | np.iinfo(original_dtype).max).astype(original_dtype) 256 | else: 257 | pixel_array = np.zeros_like(pixel_array, dtype=original_dtype) 258 | 259 | print(f"Final data range: {pixel_array.min()} to {pixel_array.max()}") 260 | 261 | # Prepare ImageJ metadata 262 | imagej_metadata = { 263 | 'axes': 'YX', # Will be updated to ZYX for 3D data 264 | 'spacing': float(slice_thickness), # Only used for 3D data 265 | 'unit': 'um', 266 | 'finterval': float(pixel_spacing[0]) # XY pixel size 267 | } 268 | 269 | base_name = os.path.splitext(os.path.basename(self.input_file))[0] 270 | 271 | if self.stack_radio.isChecked(): 272 | # Save as single stack 273 | output_file = os.path.join(self.output_directory, f"{base_name}.tif") 274 | 275 | # Update axes for 3D data 276 | if len(pixel_array.shape) > 2: 277 | imagej_metadata['axes'] = 'ZYX' 278 | 279 | print(f"Saving stack with metadata: {imagej_metadata}") 280 | 281 | tifffile.imwrite( 282 | output_file, 283 | pixel_array, 284 | imagej=True, 285 | metadata=imagej_metadata, 286 | resolution=(1.0/float(pixel_spacing[0]), 1.0/float(pixel_spacing[1])) 287 | ) 288 | 289 | print(f"Saved stack to: {output_file}") 290 | print(f"Stack shape: {pixel_array.shape}") 291 | 292 | # Replace the individual slices saving section in convert_dicom method with this: 293 | else: 294 | # For multi-slice DICOM, save individual slices 295 | if len(pixel_array.shape) > 2: 296 | imagej_metadata['axes'] = 'YX' # Reset to 2D for individual slices 297 | 298 | total_slices = pixel_array.shape[0] 299 | for i in range(total_slices): 300 | progress.setLabelText(f"Saving slice {i+1}/{total_slices}...") 301 | # Fix: Convert float to integer for progress value 302 | progress_value = int(80 + (i/total_slices)*15) 303 | progress.setValue(progress_value) 304 | QApplication.processEvents() 305 | 306 | if progress.wasCanceled(): 307 | print("Operation cancelled by user") 308 | return 309 | 310 | output_file = os.path.join(self.output_directory, 311 | f"{base_name}_slice_{i+1:03d}.tif") 312 | 313 | print(f"Saving slice {i+1} with metadata: {imagej_metadata}") 314 | 315 | tifffile.imwrite( 316 | output_file, 317 | pixel_array[i], 318 | imagej=True, 319 | metadata=imagej_metadata, 320 | resolution=(1.0/float(pixel_spacing[0]), 1.0/float(pixel_spacing[1])) 321 | ) 322 | 323 | print(f"Saved {total_slices} individual slices") 324 | 325 | else: 326 | # Single slice DICOM 327 | output_file = os.path.join(self.output_directory, f"{base_name}.tif") 328 | 329 | print(f"Saving single slice with metadata: {imagej_metadata}") 330 | 331 | tifffile.imwrite( 332 | output_file, 333 | pixel_array, 334 | imagej=True, 335 | 
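The imwrite call assembled here leans on tifffile's ImageJ mode: axes, z spacing, and unit travel in the ImageJ metadata, while the XY pixel size is carried by the TIFF resolution tags (pixels per unit). One caveat worth knowing: 'finterval' is ImageJ's frame interval (time), not a pixel size, despite the inline comment above. A self-contained sketch with made-up spacings:

```python
import numpy as np
import tifffile

stack = np.random.randint(0, 65535, (5, 256, 256), dtype=np.uint16)  # Z, Y, X
xy_um, z_um = 0.25, 1.0  # hypothetical pixel size and slice spacing in microns

tifffile.imwrite(
    "stack.tif",
    stack,
    imagej=True,
    metadata={"axes": "ZYX", "spacing": z_um, "unit": "um"},
    resolution=(1.0 / xy_um, 1.0 / xy_um),  # pixels per micron in X and Y
)
```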
metadata=imagej_metadata, 336 | resolution=(1.0/float(pixel_spacing[0]), 1.0/float(pixel_spacing[1])) 337 | ) 338 | 339 | print(f"Saved single slice to: {output_file}") 340 | 341 | progress.setValue(100) 342 | 343 | # Construct success message 344 | msg = "Conversion complete!\n\n" 345 | msg += f"DICOM file: {os.path.basename(self.input_file)}\n" 346 | msg += f"Output directory: {self.truncate_path(self.output_directory)}\n\n" 347 | 348 | if self.stack_radio.isChecked(): 349 | msg += f"Saved as: {os.path.basename(output_file)}\n" 350 | else: 351 | if len(pixel_array.shape) > 2: 352 | msg += f"Saved {pixel_array.shape[0]} individual slices\n" 353 | else: 354 | msg += f"Saved as: {os.path.basename(output_file)}\n" 355 | 356 | msg += f"\nMetadata saved as: {os.path.basename(metadata_file)}\n" 357 | msg += f"Pixel spacing: {pixel_spacing[0]}x{pixel_spacing[1]} µm\n" 358 | if len(pixel_array.shape) > 2: 359 | msg += f"Slice thickness: {slice_thickness} µm" 360 | 361 | QMessageBox.information(self, "Success", msg) 362 | 363 | except Exception as e: 364 | QMessageBox.critical(self, "Error", str(e)) 365 | print(f"Error occurred: {str(e)}") 366 | import traceback 367 | traceback.print_exc() 368 | 369 | 370 | def show_centered(self, parent): 371 | parent_geo = parent.geometry() 372 | self.move(parent_geo.center() - self.rect().center()) 373 | self.show() 374 | QApplication.processEvents() # Ensure window displays properly 375 | 376 | def show_dicom_converter(parent): 377 | dialog = DicomConverter(parent) 378 | dialog.show_centered(parent) 379 | return dialog 380 | -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/help_window.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtWidgets import QDialog, QVBoxLayout, QTextBrowser 2 | from PyQt5.QtCore import Qt 3 | from .soft_dark_stylesheet import soft_dark_stylesheet 4 | from .default_stylesheet import default_stylesheet 5 | 6 | class HelpWindow(QDialog): 7 | def __init__(self, dark_mode=False, font_size=10): 8 | super().__init__() 9 | self.setWindowTitle("Help") 10 | self.setModal(False) # Make it non-modal 11 | self.setGeometry(100, 100, 800, 600) 12 | layout = QVBoxLayout() 13 | self.text_browser = QTextBrowser() 14 | self.text_browser.setOpenExternalLinks(True) 15 | layout.addWidget(self.text_browser) 16 | self.setLayout(layout) 17 | 18 | if dark_mode: 19 | self.setStyleSheet(soft_dark_stylesheet) 20 | else: 21 | self.setStyleSheet(default_stylesheet) 22 | 23 | self.font_size = font_size 24 | self.apply_font_size() 25 | self.load_help_content() 26 | 27 | def show_centered(self, parent): 28 | parent_geo = parent.geometry() 29 | self.move(parent_geo.center() - self.rect().center()) 30 | self.show() 31 | 32 | def apply_font_size(self): 33 | self.setStyleSheet(f"QWidget {{ font-size: {self.font_size}pt; }}") 34 | font = self.text_browser.font() 35 | font.setPointSize(self.font_size) 36 | self.text_browser.setFont(font) 37 | 38 | def load_help_content(self): 39 | help_text = """ 40 |
Image Annotator is a user-friendly GUI tool designed for generating masks for image segmentation and object detection. It allows users to create, edit, and save annotations in various formats, including COCO-style JSON, YOLO v8, and Pascal VOC. Annotations can be drawn manually with tools like the polygon tool, or semi-automatically with the assistance of the pre-trained Segment Anything Model (SAM-2). The tool supports multi-dimensional images such as TIFF stacks and CZI files, and provides a dark mode and adjustable application font sizes for an enhanced GUI experience.
44 | 45 |The Tools menu provides access to various useful tools for dataset management and image processing. Each tool opens an intuitive GUI to guide you through the process:
127 |If you encounter any issues or have suggestions for improvement, please open an issue on our GitHub repository or contact the development team.
155 | """ 156 | self.text_browser.setHtml(help_text) 157 | -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/image_patcher.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QLabel, QFileDialog, 4 | QSpinBox, QProgressBar, QMessageBox, QListWidget, QDialogButtonBox, 5 | QGridLayout, QComboBox, QApplication, QScrollArea, QWidget) 6 | 7 | 8 | from PyQt5.QtCore import Qt, QThread, pyqtSignal 9 | from PyQt5.QtCore import QTimer, QEventLoop 10 | from tifffile import TiffFile, imsave 11 | from PIL import Image 12 | import traceback 13 | 14 | class DimensionDialog(QDialog): 15 | def __init__(self, shape, file_name, parent=None): 16 | super().__init__(parent) 17 | self.shape = shape 18 | self.file_name = file_name 19 | self.initUI() 20 | 21 | def initUI(self): 22 | layout = QVBoxLayout() 23 | self.setLayout(layout) 24 | 25 | layout.addWidget(QLabel(f"File: {self.file_name}")) 26 | layout.addWidget(QLabel(f"Image shape: {self.shape}")) 27 | layout.addWidget(QLabel("Assign dimensions:")) 28 | 29 | grid_layout = QGridLayout() 30 | self.combos = [] 31 | dimensions = ['T', 'Z', 'C', 'H', 'W'] 32 | for i, dim in enumerate(self.shape): 33 | grid_layout.addWidget(QLabel(f"Dimension {i} (size {dim}):"), i, 0) 34 | combo = QComboBox() 35 | combo.addItems(dimensions) 36 | grid_layout.addWidget(combo, i, 1) 37 | self.combos.append(combo) 38 | layout.addLayout(grid_layout) 39 | 40 | self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) 41 | self.button_box.accepted.connect(self.accept) 42 | self.button_box.rejected.connect(self.reject) 43 | layout.addWidget(self.button_box) 44 | 45 | def get_dimensions(self): 46 | return [combo.currentText() for combo in self.combos] 47 | 48 | class PatchingThread(QThread): 49 | progress = pyqtSignal(int) 50 | error = pyqtSignal(str) 51 | finished = pyqtSignal() 52 | dimension_required = pyqtSignal(object, str) 53 | 54 | 55 | def __init__(self, input_files, output_dir, patch_size, overlap, dimensions): 56 | super().__init__() 57 | self.input_files = input_files 58 | self.output_dir = output_dir 59 | self.patch_size = patch_size 60 | self.overlap = overlap # Changed to tuple (to handle overlap_x, overlap_y independently) - Sreeni 61 | self.dimensions = dimensions 62 | 63 | def run(self): 64 | try: 65 | total_files = len(self.input_files) 66 | for i, file_path in enumerate(self.input_files): 67 | self.patch_image(file_path) 68 | self.progress.emit(int((i + 1) / total_files * 100)) 69 | self.finished.emit() 70 | except Exception as e: 71 | self.error.emit(str(e)) 72 | traceback.print_exc() 73 | 74 | def patch_image(self, file_path): 75 | file_name = os.path.basename(file_path) 76 | file_name_without_ext, file_extension = os.path.splitext(file_name) 77 | 78 | if file_extension.lower() in ['.tif', '.tiff']: 79 | with TiffFile(file_path) as tif: 80 | images = tif.asarray() 81 | if images.ndim > 2: 82 | if file_path not in self.dimensions: 83 | self.dimension_required.emit(images.shape, file_name) 84 | self.wait() 85 | dimensions = self.dimensions.get(file_path) 86 | if dimensions: 87 | if 'H' in dimensions and 'W' in dimensions: 88 | h_index = dimensions.index('H') 89 | w_index = dimensions.index('W') 90 | for idx in np.ndindex(images.shape[:h_index] + images.shape[h_index+2:]): 91 | slice_idx = idx[:h_index] + (slice(None), slice(None)) + 
idx[h_index:] 92 | image = images[slice_idx] 93 | slice_name = '_'.join([f'{dim}{i+1}' for dim, i in zip(dimensions, idx) if dim not in ['H', 'W']]) 94 | self.save_patches(image, f"{file_name_without_ext}_{slice_name}", file_extension) 95 | else: 96 | raise ValueError("You must assign both H and W dimensions.") 97 | else: 98 | raise ValueError("Dimensions were not properly assigned.") 99 | else: 100 | self.save_patches(images, file_name_without_ext, file_extension) 101 | else: 102 | with Image.open(file_path) as img: 103 | image = np.array(img) 104 | self.save_patches(image, file_name_without_ext, file_extension) 105 | 106 | def save_patches(self, image, base_name, extension): 107 | h, w = image.shape[:2] 108 | patch_h, patch_w = self.patch_size 109 | overlap_x, overlap_y = self.overlap 110 | 111 | for i in range(0, h - overlap_y, patch_h - overlap_y): 112 | for j in range(0, w - overlap_x, patch_w - overlap_x): 113 | if i + patch_h <= h and j + patch_w <= w: # Only save full-sized patches 114 | patch = image[i:i+patch_h, j:j+patch_w] 115 | patch_name = f"{base_name}_patch_{i}_{j}{extension}" 116 | output_path = os.path.join(self.output_dir, patch_name) 117 | 118 | if extension.lower() in ['.tif', '.tiff']: 119 | imsave(output_path, patch) 120 | else: 121 | Image.fromarray(patch).save(output_path) 122 | 123 | class ImagePatcherTool(QDialog): 124 | def __init__(self, parent=None): 125 | super().__init__(parent) 126 | self.setWindowModality(Qt.ApplicationModal) 127 | self.dimensions = {} 128 | self.input_files = [] 129 | self.output_dir = "" 130 | self.initUI() 131 | 132 | def initUI(self): 133 | layout = QVBoxLayout() 134 | self.setLayout(layout) 135 | 136 | # Input files selection 137 | input_layout = QHBoxLayout() 138 | self.input_label = QLabel("Input Files:") 139 | self.input_button = QPushButton("Select Files") 140 | self.input_button.clicked.connect(self.select_input_files) 141 | input_layout.addWidget(self.input_label) 142 | input_layout.addWidget(self.input_button) 143 | layout.addLayout(input_layout) 144 | 145 | # Output directory selection 146 | output_layout = QHBoxLayout() 147 | self.output_label = QLabel("Output Directory:") 148 | self.output_button = QPushButton("Select Directory") 149 | self.output_button.clicked.connect(self.select_output_directory) 150 | output_layout.addWidget(self.output_label) 151 | output_layout.addWidget(self.output_button) 152 | layout.addLayout(output_layout) 153 | 154 | # Patch size inputs 155 | patch_layout = QHBoxLayout() 156 | patch_layout.addWidget(QLabel("Patch Size (W x H):")) 157 | self.patch_w = QSpinBox() 158 | self.patch_w.setRange(1, 10000) 159 | self.patch_w.setValue(256) 160 | self.patch_h = QSpinBox() 161 | self.patch_h.setRange(1, 10000) 162 | self.patch_h.setValue(256) 163 | patch_layout.addWidget(self.patch_w) 164 | patch_layout.addWidget(self.patch_h) 165 | layout.addLayout(patch_layout) 166 | 167 | # Overlap inputs 168 | overlap_layout = QHBoxLayout() 169 | overlap_layout.addWidget(QLabel("Overlap (X, Y):")) 170 | self.overlap_x = QSpinBox() 171 | self.overlap_x.setRange(0, 1000) 172 | self.overlap_x.setValue(0) 173 | self.overlap_y = QSpinBox() 174 | self.overlap_y.setRange(0, 1000) 175 | self.overlap_y.setValue(0) 176 | overlap_layout.addWidget(self.overlap_x) 177 | overlap_layout.addWidget(self.overlap_y) 178 | layout.addLayout(overlap_layout) 179 | 180 | # Create a scroll area for patch info 181 | scroll_area = QScrollArea() 182 | scroll_area.setWidgetResizable(True) 183 | scroll_area.setMinimumHeight(200) # Set a minimum 
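save_patches above walks the image with a stride of patch size minus overlap and keeps only full tiles, so edge remainders are silently dropped. The core indexing as a standalone sketch:

```python
import numpy as np

def iter_patches(image, patch_h, patch_w, overlap_y=0, overlap_x=0):
    """Yield (row, col, tile) for every full patch_h x patch_w tile."""
    h, w = image.shape[:2]
    for i in range(0, h - overlap_y, patch_h - overlap_y):
        for j in range(0, w - overlap_x, patch_w - overlap_x):
            if i + patch_h <= h and j + patch_w <= w:  # drop partial edge tiles
                yield i, j, image[i:i + patch_h, j:j + patch_w]

img = np.zeros((512, 512), dtype=np.uint8)
starts = [(i, j) for i, j, _ in iter_patches(img, 256, 256, overlap_y=128, overlap_x=128)]
print(len(starts))  # 9: tiles start at 0, 128, 256 along each axis
```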
height for the scroll area 184 | 185 | # Create a widget to hold the patch info label 186 | self.patch_info_container = QWidget() 187 | patch_info_layout = QVBoxLayout(self.patch_info_container) 188 | 189 | # Add the patch info label to the container 190 | self.patch_info_label = QLabel() 191 | self.patch_info_label.setAlignment(Qt.AlignLeft | Qt.AlignTop) 192 | patch_info_layout.addWidget(self.patch_info_label) 193 | 194 | # Set the container as the scroll area's widget 195 | scroll_area.setWidget(self.patch_info_container) 196 | 197 | # Add the scroll area to the main layout 198 | layout.addWidget(scroll_area) 199 | 200 | # Start button 201 | self.start_button = QPushButton("Start Patching") 202 | self.start_button.clicked.connect(self.start_patching) 203 | layout.addWidget(self.start_button) 204 | 205 | # Progress bar 206 | self.progress_bar = QProgressBar() 207 | layout.addWidget(self.progress_bar) 208 | 209 | self.setWindowTitle('Image Patcher Tool') 210 | self.setMinimumWidth(500) # Set a minimum width for the dialog 211 | self.setMinimumHeight(600) # Set a minimum height for the dialog 212 | 213 | # Connect value changed signals 214 | self.patch_w.valueChanged.connect(self.update_patch_info) 215 | self.patch_h.valueChanged.connect(self.update_patch_info) 216 | self.overlap_x.valueChanged.connect(self.update_patch_info) 217 | self.overlap_y.valueChanged.connect(self.update_patch_info) 218 | 219 | def select_input_files(self): 220 | file_dialog = QFileDialog() 221 | self.input_files, _ = file_dialog.getOpenFileNames(self, "Select Input Files", "", "Image Files (*.png *.jpg *.bmp *.tif *.tiff)") 222 | self.input_label.setText(f"Input Files: {len(self.input_files)} selected") 223 | QApplication.processEvents() 224 | self.process_tiff_files() 225 | self.update_patch_info() 226 | 227 | def process_tiff_files(self): 228 | for file_path in self.input_files: 229 | if file_path.lower().endswith(('.tif', '.tiff')): 230 | self.check_tiff_dimensions(file_path) 231 | QApplication.processEvents() 232 | 233 | 234 | 235 | def check_tiff_dimensions(self, file_path): 236 | with TiffFile(file_path) as tif: 237 | images = tif.asarray() 238 | if images.ndim > 2: 239 | file_name = os.path.basename(file_path) 240 | dialog = DimensionDialog(images.shape, file_name, self) 241 | dialog.setWindowModality(Qt.ApplicationModal) 242 | result = dialog.exec_() 243 | if result == QDialog.Accepted: 244 | dimensions = dialog.get_dimensions() 245 | if 'H' in dimensions and 'W' in dimensions: 246 | self.dimensions[file_path] = dimensions 247 | else: 248 | QMessageBox.warning(self, "Invalid Dimensions", f"You must assign both H and W dimensions for {file_name}.") 249 | QApplication.processEvents() 250 | 251 | 252 | 253 | def select_output_directory(self): 254 | file_dialog = QFileDialog() 255 | self.output_dir = file_dialog.getExistingDirectory(self, "Select Output Directory") 256 | dir_name = os.path.basename(self.output_dir) if self.output_dir else "" 257 | self.output_label.setText(f"Output Directory: {dir_name}") 258 | QApplication.processEvents() 259 | self.update_patch_info() 260 | 261 | 262 | def start_patching(self): 263 | if not self.input_files: 264 | QMessageBox.warning(self, "No Input Files", "Please select input files.") 265 | return 266 | if not self.output_dir: 267 | QMessageBox.warning(self, "No Output Directory", "Please select an output directory.") 268 | return 269 | 270 | patch_size = (self.patch_h.value(), self.patch_w.value()) 271 | overlap = (self.overlap_x.value(), self.overlap_y.value()) 272 | 273 
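Before the thread wiring below, a caveat: PatchingThread.run() emits dimension_required and then calls self.wait(), and get_dimensions later calls self.patching_thread.wake(), but QThread supports neither use (wait() blocks until a thread finishes and returns immediately when a thread waits on itself, and QThread has no wake()), so the intended pause is a no-op. The usual Qt handshake is a QWaitCondition guarded by a QMutex; a minimal sketch of that pattern, not the tool's code:

```python
from PyQt5.QtCore import QMutex, QThread, QWaitCondition, pyqtSignal

class Worker(QThread):
    need_input = pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.mutex = QMutex()
        self.resumed = QWaitCondition()
        self.answer = None

    def run(self):
        self.mutex.lock()
        self.need_input.emit("which axes are H and W?")
        while self.answer is None:          # predicate guards against a lost wake-up
            self.resumed.wait(self.mutex)
        self.mutex.unlock()
        print("got:", self.answer)

    def resume(self, answer):               # call this from the GUI thread
        self.mutex.lock()
        self.answer = answer
        self.resumed.wakeAll()
        self.mutex.unlock()
```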
| self.patching_thread = PatchingThread(self.input_files, self.output_dir, patch_size, overlap, self.dimensions) 274 | self.patching_thread.progress.connect(self.update_progress) 275 | self.patching_thread.error.connect(self.show_error) 276 | self.patching_thread.finished.connect(self.patching_finished) 277 | self.patching_thread.dimension_required.connect(self.get_dimensions) 278 | self.patching_thread.start() 279 | 280 | self.start_button.setEnabled(False) 281 | 282 | 283 | def get_dimensions(self, shape, file_name): 284 | dialog = DimensionDialog(shape, file_name, self) 285 | dialog.setWindowModality(Qt.ApplicationModal) 286 | result = dialog.exec_() 287 | 288 | if result == QDialog.Accepted: 289 | dimensions = dialog.get_dimensions() 290 | if 'H' in dimensions and 'W' in dimensions: 291 | self.dimensions[file_name] = dimensions 292 | else: 293 | QMessageBox.warning(self, "Invalid Dimensions", f"You must assign both H and W dimensions for {file_name}.") 294 | QApplication.processEvents() 295 | self.patching_thread.wake() 296 | 297 | 298 | 299 | def get_patch_info(self): 300 | patch_info = {} 301 | patch_w = self.patch_w.value() 302 | patch_h = self.patch_h.value() 303 | overlap_x = self.overlap_x.value() 304 | overlap_y = self.overlap_y.value() 305 | 306 | for file_path in self.input_files: 307 | file_name = os.path.basename(file_path) 308 | if file_path.lower().endswith(('.tif', '.tiff')): 309 | with TiffFile(file_path) as tif: 310 | images = tif.asarray() 311 | if images.ndim > 2: 312 | dimensions = self.dimensions.get(file_path) 313 | if dimensions: 314 | h_index = dimensions.index('H') 315 | w_index = dimensions.index('W') 316 | h, w = images.shape[h_index], images.shape[w_index] 317 | else: 318 | h, w = images.shape[-2], images.shape[-1] 319 | else: 320 | h, w = images.shape 321 | else: 322 | with Image.open(file_path) as img: 323 | w, h = img.size 324 | 325 | patches_x = (w - overlap_x) // (patch_w - overlap_x) 326 | patches_y = (h - overlap_y) // (patch_h - overlap_y) 327 | leftover_x = w - (patches_x * (patch_w - overlap_x) + overlap_x) 328 | leftover_y = h - (patches_y * (patch_h - overlap_y) + overlap_y) 329 | 330 | patch_info[file_name] = { 331 | 'patches_x': patches_x, 332 | 'patches_y': patches_y, 333 | 'leftover_x': leftover_x, 334 | 'leftover_y': leftover_y 335 | } 336 | 337 | return patch_info 338 | 339 | def update_patch_info(self): 340 | if not self.input_files: 341 | self.patch_info_label.setText("No input files selected") 342 | return 343 | 344 | patch_info = self.get_patch_info() 345 | if patch_info: 346 | info_text = "Patch Information:
" 347 | for file_name, info in patch_info.items(): 348 | info_text += f"File: {file_name}
"
349 | info_text += f"Patches: X: {info['patches_x']}, Y: {info['patches_y']}
"
350 | info_text += f"Leftover pixels: X: {info['leftover_x']}, Y: {info['leftover_y']}
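The counts shown by update_patch_info come from get_patch_info above: integer division gives the grid size, and the leftovers are exactly the pixels that the full-tile rule in save_patches discards. The formulas checked on a small worked example:

```python
w, h = 1000, 600          # image size
patch_w, patch_h = 256, 256
overlap_x = overlap_y = 32

patches_x = (w - overlap_x) // (patch_w - overlap_x)              # 968 // 224 = 4
patches_y = (h - overlap_y) // (patch_h - overlap_y)              # 568 // 224 = 2
leftover_x = w - (patches_x * (patch_w - overlap_x) + overlap_x)  # 1000 - 928 = 72
leftover_y = h - (patches_y * (patch_h - overlap_y) + overlap_y)  # 600 - 480 = 120
```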
{key}:{value}<br>") 87 | else: 88 | formatted_stats.append(f"{line}<br>
") 89 | stats_label = QLabel("".join(formatted_stats)) 90 | stats_label.setTextFormat(Qt.RichText) 91 | stats_label.setWordWrap(True) 92 | scroll_layout.addWidget(stats_label) 93 | 94 | scroll_area.setWidget(scroll_content) 95 | layout.addWidget(scroll_area) 96 | 97 | # Project notes 98 | layout.addWidget(bold_label("Project Notes:")) 99 | self.notes_edit = QTextEdit() 100 | self.notes_edit.setPlainText(getattr(self.parent, 'project_notes', '')) 101 | layout.addWidget(self.notes_edit) 102 | 103 | # Buttons 104 | button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) 105 | button_box.accepted.connect(self.accept) 106 | button_box.rejected.connect(self.reject) 107 | layout.addWidget(button_box) 108 | 109 | def get_notes(self): 110 | return self.notes_edit.toPlainText() 111 | 112 | def were_changes_made(self): 113 | return self.get_notes() != self.original_notes -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/project_search.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QLineEdit, QPushButton, 2 | QDateEdit, QLabel, QListWidget, QDialogButtonBox, QFormLayout, 3 | QFileDialog, QMessageBox) 4 | from PyQt5.QtCore import Qt, QDate 5 | import os 6 | import json 7 | from datetime import datetime 8 | 9 | class ProjectSearchDialog(QDialog): 10 | def __init__(self, parent=None): 11 | super().__init__(parent) 12 | self.parent = parent 13 | self.setWindowTitle("Search Projects") 14 | self.setModal(True) 15 | self.setMinimumSize(600, 400) 16 | self.search_directory = "" 17 | self.setup_ui() 18 | 19 | def setup_ui(self): 20 | layout = QVBoxLayout(self) 21 | 22 | # Search criteria 23 | form_layout = QFormLayout() 24 | self.keyword_edit = QLineEdit() 25 | self.keyword_edit.setPlaceholderText("Enter search query (e.g., monkey AND dog AND (project_animals OR project_zoo))") 26 | form_layout.addRow("Search Query:", self.keyword_edit) 27 | 28 | self.start_date = QDateEdit() 29 | self.start_date.setCalendarPopup(True) 30 | self.start_date.setDate(QDate.currentDate().addYears(-1)) 31 | form_layout.addRow("Start Date:", self.start_date) 32 | 33 | self.end_date = QDateEdit() 34 | self.end_date.setCalendarPopup(True) 35 | self.end_date.setDate(QDate.currentDate()) 36 | form_layout.addRow("End Date:", self.end_date) 37 | 38 | layout.addLayout(form_layout) 39 | 40 | # Directory selection 41 | dir_layout = QHBoxLayout() 42 | self.dir_edit = QLineEdit() 43 | dir_layout.addWidget(self.dir_edit) 44 | dir_button = QPushButton("Browse") 45 | dir_button.clicked.connect(self.browse_directory) 46 | dir_layout.addWidget(dir_button) 47 | layout.addLayout(dir_layout) 48 | 49 | # Search button 50 | search_button = QPushButton("Search") 51 | search_button.clicked.connect(self.perform_search) 52 | layout.addWidget(search_button) 53 | 54 | # Results list 55 | self.results_list = QListWidget() 56 | self.results_list.itemDoubleClicked.connect(self.open_selected_project) 57 | layout.addWidget(self.results_list) 58 | 59 | # Buttons 60 | button_box = QDialogButtonBox(QDialogButtonBox.Close) 61 | button_box.rejected.connect(self.reject) 62 | layout.addWidget(button_box) 63 | 64 | def browse_directory(self): 65 | directory = QFileDialog.getExistingDirectory(self, "Select Directory to Search") 66 | if directory: 67 | self.search_directory = directory 68 | self.dir_edit.setText(directory) 69 | 70 | def perform_search(self): 71 | if not 
self.search_directory: 72 | QMessageBox.warning(self, "No Directory", "Please select a directory to search.") 73 | return 74 | 75 | query = self.keyword_edit.text() 76 | start_date = self.start_date.date().toPyDate() 77 | end_date = self.end_date.date().toPyDate() 78 | 79 | self.results_list.clear() 80 | 81 | for root, dirs, files in os.walk(self.search_directory): 82 | for filename in files: 83 | if filename.endswith('.iap'): 84 | project_path = os.path.join(root, filename) 85 | try: 86 | with open(project_path, 'r') as f: 87 | project_data = json.load(f) 88 | 89 | if self.project_matches(project_data, query, start_date, end_date): 90 | self.results_list.addItem(project_path) 91 | except Exception as e: 92 | print(f"Error reading project file {filename}: {str(e)}") 93 | 94 | if self.results_list.count() == 0: 95 | QMessageBox.information(self, "Search Results", "No matching projects found.") 96 | else: 97 | QMessageBox.information(self, "Search Results", f"{self.results_list.count()} matching projects found.") 98 | 99 | def project_matches(self, project_data, query, start_date, end_date): 100 | # Check date range 101 | creation_date = project_data.get('creation_date', '') 102 | if creation_date: 103 | try: 104 | creation_date = datetime.fromisoformat(creation_date).date() 105 | if creation_date < start_date or creation_date > end_date: 106 | return False 107 | except ValueError: 108 | print(f"Invalid date format in project: {creation_date}") 109 | 110 | if not query: 111 | return True 112 | 113 | return self.evaluate_query(query.lower(), project_data) 114 | 115 | def term_matches(self, term, project_data): 116 | # Search in project name 117 | if term in os.path.basename(project_data.get('current_project_file', '')).lower(): 118 | return True 119 | 120 | # Search in classes 121 | if any(term in class_info['name'].lower() for class_info in project_data.get('classes', [])): 122 | return True 123 | 124 | # Search in image names 125 | if any(term in img['file_name'].lower() for img in project_data.get('images', [])): 126 | return True 127 | 128 | # Search in project notes 129 | if term in project_data.get('notes', '').lower(): 130 | return True 131 | 132 | return False 133 | 134 | 135 | def evaluate_query(self, query, project_data): 136 | tokens = self.tokenize_query(query) 137 | return self.evaluate_tokens(tokens, project_data) 138 | 139 | def tokenize_query(self, query): 140 | tokens = [] 141 | current_token = "" 142 | for char in query: 143 | if char in '()': 144 | if current_token: 145 | tokens.append(current_token) 146 | current_token = "" 147 | tokens.append(char) 148 | elif char.isspace(): 149 | if current_token: 150 | tokens.append(current_token) 151 | current_token = "" 152 | else: 153 | current_token += char 154 | if current_token: 155 | tokens.append(current_token) 156 | return tokens 157 | 158 | def evaluate_tokens(self, tokens, project_data): 159 | def evaluate_expression(): 160 | nonlocal i 161 | result = True 162 | current_op = 'and' 163 | 164 | while i < len(tokens): 165 | if tokens[i] == '(': 166 | i += 1 167 | sub_result = evaluate_expression() 168 | if current_op == 'and': 169 | result = result and sub_result 170 | else: 171 | result = result or sub_result 172 | elif tokens[i] == ')': 173 | return result 174 | elif tokens[i].lower() in ['and', 'or']: 175 | current_op = tokens[i].lower() 176 | else: 177 | term_result = self.term_matches(tokens[i], project_data) 178 | if current_op == 'and': 179 | result = result and term_result 180 | else: 181 | result = result or term_result 
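Together, tokenize_query and evaluate_tokens (whose loop finishes just below) implement a small boolean query language with no operator precedence: AND and OR apply left to right as they appear, and parentheses group subexpressions. A free-standing copy of the tokenizer shows what the evaluator consumes; since project_matches lowercases the query first, operators arrive here as 'and'/'or':

```python
def tokenize_query(query):
    # Same behaviour as the method above, free-standing for illustration.
    tokens, current = [], ""
    for ch in query:
        if ch in "()":
            if current:
                tokens.append(current)
                current = ""
            tokens.append(ch)
        elif ch.isspace():
            if current:
                tokens.append(current)
                current = ""
        else:
            current += ch
    if current:
        tokens.append(current)
    return tokens

print(tokenize_query("monkey and (project_animals or project_zoo)"))
# ['monkey', 'and', '(', 'project_animals', 'or', 'project_zoo', ')']
```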
182 | i += 1 183 | return result 184 | 185 | i = 0 186 | return evaluate_expression() 187 | 188 | 189 | def keyword_matches(self, keyword, project_data): 190 | # Search in project name 191 | if keyword in os.path.basename(project_data.get('current_project_file', '')).lower().split(): 192 | return True 193 | 194 | # Search in classes 195 | if any(keyword in class_info['name'].lower().split() for class_info in project_data.get('classes', [])): 196 | return True 197 | 198 | # Search in image names 199 | if any(keyword in img['file_name'].lower().split() for img in project_data.get('images', [])): 200 | return True 201 | 202 | # Search in project notes 203 | if keyword in project_data.get('notes', '').lower().split(): 204 | return True 205 | 206 | # Search in creation date and last modified date 207 | if keyword in project_data.get('creation_date', '').lower().split() or keyword in project_data.get('last_modified', '').lower().split(): 208 | return True 209 | 210 | return False 211 | 212 | def open_selected_project(self, item): 213 | project_file = item.text() 214 | self.parent.open_specific_project(project_file) 215 | self.accept() 216 | 217 | def show_project_search(parent): 218 | dialog = ProjectSearchDialog(parent) 219 | dialog.exec_() -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/sam_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PyQt5.QtGui import QImage, QColor 3 | from ultralytics import SAM 4 | 5 | class SAMUtils: 6 | def __init__(self): 7 | self.sam_models = { 8 | "SAM 2 tiny": "sam2_t.pt", 9 | "SAM 2 small": "sam2_s.pt", 10 | "SAM 2 base": "sam2_b.pt", 11 | "SAM 2 large": "sam2_l.pt" 12 | } 13 | self.current_sam_model = None 14 | self.sam_model = None 15 | 16 | def change_sam_model(self, model_name): 17 | if model_name != "Pick a SAM Model": 18 | self.current_sam_model = model_name 19 | self.sam_model = SAM(self.sam_models[self.current_sam_model]) 20 | print(f"Changed SAM model to: {model_name}") 21 | else: 22 | self.current_sam_model = None 23 | self.sam_model = None 24 | print("SAM model unset") 25 | 26 | def qimage_to_numpy(self, qimage): 27 | width = qimage.width() 28 | height = qimage.height() 29 | fmt = qimage.format() 30 | 31 | if fmt == QImage.Format_Grayscale16: 32 | buffer = qimage.constBits().asarray(height * width * 2) 33 | image = np.frombuffer(buffer, dtype=np.uint16).reshape((height, width)) 34 | image_8bit = self.normalize_16bit_to_8bit(image) 35 | return np.stack((image_8bit,) * 3, axis=-1) 36 | 37 | elif fmt == QImage.Format_RGB16: 38 | buffer = qimage.constBits().asarray(height * width * 2) 39 | image = np.frombuffer(buffer, dtype=np.uint16).reshape((height, width)) 40 | image_8bit = self.normalize_16bit_to_8bit(image) 41 | return np.stack((image_8bit,) * 3, axis=-1) 42 | 43 | elif fmt == QImage.Format_Grayscale8: 44 | buffer = qimage.constBits().asarray(height * width) 45 | image = np.frombuffer(buffer, dtype=np.uint8).reshape((height, width)) 46 | return np.stack((image,) * 3, axis=-1) 47 | 48 | elif fmt in [QImage.Format_RGB32, QImage.Format_ARGB32, QImage.Format_ARGB32_Premultiplied]: 49 | buffer = qimage.constBits().asarray(height * width * 4) 50 | image = np.frombuffer(buffer, dtype=np.uint8).reshape((height, width, 4)) 51 | return image[:, :, :3] 52 | 53 | elif fmt == QImage.Format_RGB888: 54 | buffer = qimage.constBits().asarray(height * width * 3) 55 | image = np.frombuffer(buffer, 
dtype=np.uint8).reshape((height, width, 3)) 56 | return image 57 | 58 | elif fmt == QImage.Format_Indexed8: 59 | buffer = qimage.constBits().asarray(height * width) 60 | image = np.frombuffer(buffer, dtype=np.uint8).reshape((height, width)) 61 | color_table = qimage.colorTable() 62 | rgb_image = np.zeros((height, width, 3), dtype=np.uint8) 63 | for y in range(height): 64 | for x in range(width): 65 | rgb_image[y, x] = QColor(color_table[image[y, x]]).getRgb()[:3] 66 | return rgb_image 67 | 68 | else: 69 | converted_image = qimage.convertToFormat(QImage.Format_RGB32) 70 | buffer = converted_image.constBits().asarray(height * width * 4) 71 | image = np.frombuffer(buffer, dtype=np.uint8).reshape((height, width, 4)) 72 | return image[:, :, :3] 73 | 74 | def normalize_16bit_to_8bit(self, array): 75 | return ((array - array.min()) / (array.max() - array.min()) * 255).astype(np.uint8) 76 | 77 | def apply_sam_prediction(self, image, bbox): 78 | try: 79 | image_np = self.qimage_to_numpy(image) 80 | results = self.sam_model(image_np, bboxes=[bbox]) 81 | mask = results[0].masks.data[0].cpu().numpy() 82 | 83 | if mask is not None: 84 | print(f"Mask shape: {mask.shape}, Mask sum: {mask.sum()}") 85 | contours = self.mask_to_polygon(mask) 86 | print(f"Contours generated: {len(contours)} contour(s)") 87 | 88 | if not contours: 89 | print("No valid contours found") 90 | return None 91 | 92 | prediction = { 93 | "segmentation": contours[0], 94 | "score": float(results[0].boxes.conf[0]) 95 | } 96 | return prediction 97 | else: 98 | print("Failed to generate mask") 99 | return None 100 | except Exception as e: 101 | print(f"Error in applying SAM prediction: {str(e)}") 102 | import traceback 103 | traceback.print_exc() 104 | return None 105 | 106 | def mask_to_polygon(self, mask): 107 | import cv2 108 | contours, _ = cv2.findContours((mask > 0).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 109 | polygons = [] 110 | for contour in contours: 111 | if cv2.contourArea(contour) > 10: 112 | polygon = contour.flatten().tolist() 113 | if len(polygon) >= 6: 114 | polygons.append(polygon) 115 | print(f"Generated {len(polygons)} valid polygons") 116 | return polygons -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/slice_registration.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, 2 | QLabel, QComboBox, QMessageBox, QProgressDialog, QRadioButton, 3 | QButtonGroup, QSpinBox, QApplication, QGroupBox, QDoubleSpinBox) 4 | from PyQt5.QtCore import Qt 5 | from pystackreg import StackReg 6 | from skimage import io 7 | import tifffile 8 | from PIL import Image 9 | import numpy as np 10 | import os 11 | 12 | class SliceRegistrationTool(QDialog): 13 | def __init__(self, parent=None): 14 | super().__init__(parent) 15 | self.setWindowTitle("Slice Registration") 16 | self.setGeometry(100, 100, 600, 400) 17 | self.setWindowFlags(self.windowFlags() | Qt.Window) 18 | self.setWindowModality(Qt.ApplicationModal) # Add modal behavior 19 | 20 | # Initialize variables first 21 | self.input_path = "" 22 | self.output_directory = "" 23 | 24 | self.initUI() 25 | 26 | def initUI(self): 27 | layout = QVBoxLayout() 28 | layout.setSpacing(10) # Add consistent spacing 29 | 30 | # Input selection 31 | input_group = QGroupBox("Input Selection") 32 | input_layout = QVBoxLayout() 33 | 34 | self.dir_radio = QRadioButton("Directory of Image 
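apply_sam_prediction above is the whole SAM round trip: prompt an Ultralytics SAM 2 model with a single box, take the first mask, and flatten it into COCO-style polygons with OpenCV. A condensed sketch of the same flow; the checkpoint downloads on first use, and the zero image is only a stand-in (a blank frame may well produce no mask at all):

```python
import cv2
import numpy as np
from ultralytics import SAM

model = SAM("sam2_b.pt")                          # checkpoint name used by the tool
image = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in RGB frame
results = model(image, bboxes=[[100, 100, 300, 300]])

polygons = []
if results[0].masks is not None:                  # nothing segmented -> masks is None
    mask = results[0].masks.data[0].cpu().numpy()
    contours, _ = cv2.findContours((mask > 0).astype(np.uint8),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Keep contours with some area and at least 3 points (6 flat coordinates).
    polygons = [c.flatten().tolist() for c in contours
                if cv2.contourArea(c) > 10 and c.size >= 6]
```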
Files") 35 | self.stack_radio = QRadioButton("TIFF Stack") 36 | 37 | input_group = QButtonGroup(self) 38 | input_group.addButton(self.dir_radio) 39 | input_group.addButton(self.stack_radio) 40 | 41 | input_layout.addWidget(self.dir_radio) 42 | input_layout.addWidget(self.stack_radio) 43 | self.dir_radio.setChecked(True) 44 | 45 | # Input/Output file selection with labels 46 | self.input_label = QLabel("No input selected") 47 | self.output_label = QLabel("No output directory selected") 48 | 49 | file_select_layout = QVBoxLayout() 50 | 51 | input_file_layout = QHBoxLayout() 52 | self.select_input_btn = QPushButton("Select Input") 53 | self.select_input_btn.clicked.connect(self.select_input) 54 | input_file_layout.addWidget(self.select_input_btn) 55 | input_file_layout.addWidget(self.input_label) 56 | 57 | output_file_layout = QHBoxLayout() 58 | self.select_output_btn = QPushButton("Select Output Directory") 59 | self.select_output_btn.clicked.connect(self.select_output) 60 | output_file_layout.addWidget(self.select_output_btn) 61 | output_file_layout.addWidget(self.output_label) 62 | 63 | file_select_layout.addLayout(input_file_layout) 64 | file_select_layout.addLayout(output_file_layout) 65 | input_layout.addLayout(file_select_layout) 66 | 67 | layout.addLayout(input_layout) 68 | 69 | # Transform type 70 | transform_group = QGroupBox("Transformation Settings") 71 | transform_layout = QVBoxLayout() 72 | 73 | transform_combo_layout = QHBoxLayout() 74 | transform_combo_layout.addWidget(QLabel("Type:")) 75 | self.transform_combo = QComboBox() 76 | self.transform_combo.addItems([ 77 | "Translation (X-Y Translation Only)", 78 | "Rigid Body (Translation + Rotation)", 79 | "Scaled Rotation (Translation + Rotation + Scaling)", 80 | "Affine (Translation + Rotation + Scaling + Shearing)", 81 | "Bilinear (Non-linear; Does not preserve straight lines)" 82 | ]) 83 | transform_combo_layout.addWidget(self.transform_combo) 84 | transform_layout.addLayout(transform_combo_layout) 85 | transform_group.setLayout(transform_layout) 86 | layout.addWidget(transform_group) 87 | 88 | # Reference type 89 | ref_group = QGroupBox("Reference Settings") 90 | ref_layout = QVBoxLayout() 91 | 92 | ref_combo_layout = QHBoxLayout() 93 | ref_combo_layout.addWidget(QLabel("Reference:")) 94 | self.ref_combo = QComboBox() 95 | self.ref_combo.addItems([ 96 | "Previous Frame", 97 | "First Frame", 98 | "Mean of All Frames", 99 | "Mean of First N Frames", 100 | "Mean of First N Frames + Moving Average" 101 | ]) 102 | ref_combo_layout.addWidget(self.ref_combo) 103 | ref_layout.addLayout(ref_combo_layout) 104 | 105 | # N frames settings 106 | n_frames_layout = QHBoxLayout() 107 | n_frames_layout.addWidget(QLabel("N Frames:")) 108 | self.n_frames_spin = QSpinBox() 109 | self.n_frames_spin.setRange(1, 100) 110 | self.n_frames_spin.setValue(10) 111 | self.n_frames_spin.setEnabled(False) 112 | n_frames_layout.addWidget(self.n_frames_spin) 113 | ref_layout.addLayout(n_frames_layout) 114 | 115 | # Moving average settings 116 | moving_avg_layout = QHBoxLayout() 117 | moving_avg_layout.addWidget(QLabel("Moving Average Window:")) 118 | self.moving_avg_spin = QSpinBox() 119 | self.moving_avg_spin.setRange(1, 100) 120 | self.moving_avg_spin.setValue(10) 121 | self.moving_avg_spin.setEnabled(False) 122 | moving_avg_layout.addWidget(self.moving_avg_spin) 123 | ref_layout.addLayout(moving_avg_layout) 124 | 125 | ref_group.setLayout(ref_layout) 126 | layout.addWidget(ref_group) 127 | 128 | # Connect reference combo box 129 | 
self.ref_combo.currentTextChanged.connect(self.on_ref_changed)
130 |         self.transform_combo.currentTextChanged.connect(self.on_transform_changed)
131 |         # Add spacing group
132 |         spacing_group = QGroupBox("Pixel/Voxel Size")
133 |         spacing_layout = QVBoxLayout()
134 | 
135 |         # XY pixel size
136 |         xy_size_layout = QHBoxLayout()
137 |         xy_size_layout.addWidget(QLabel("XY Pixel Size:"))
138 |         self.xy_size_value = QDoubleSpinBox()
139 |         self.xy_size_value.setRange(0.001, 1000.0)
140 |         self.xy_size_value.setValue(1.0)
141 |         self.xy_size_value.setDecimals(3)
142 |         xy_size_layout.addWidget(self.xy_size_value)
143 | 
144 |         # Z spacing
145 |         z_size_layout = QHBoxLayout()
146 |         z_size_layout.addWidget(QLabel("Z Spacing:"))
147 |         self.z_size_value = QDoubleSpinBox()
148 |         self.z_size_value.setRange(0.001, 1000.0)
149 |         self.z_size_value.setValue(1.0)
150 |         self.z_size_value.setDecimals(3)
151 |         z_size_layout.addWidget(self.z_size_value)
152 | 
153 |         # Unit selector
154 |         unit_layout = QHBoxLayout()
155 |         unit_layout.addWidget(QLabel("Unit:"))
156 |         self.size_unit = QComboBox()
157 |         self.size_unit.addItems(["nm", "µm", "mm"])
158 |         self.size_unit.setCurrentText("µm")
159 |         unit_layout.addWidget(self.size_unit)
160 | 
161 |         spacing_layout.addLayout(xy_size_layout)
162 |         spacing_layout.addLayout(z_size_layout)
163 |         spacing_layout.addLayout(unit_layout)
164 |         spacing_group.setLayout(spacing_layout)
165 |         layout.addWidget(spacing_group)
166 | 
167 |         # Register button
168 |         self.register_btn = QPushButton("Register")
169 |         self.register_btn.clicked.connect(self.register_slices)
170 |         layout.addWidget(self.register_btn)
171 | 
172 |         self.setLayout(layout)
173 | 
174 |     def on_ref_changed(self, text):
175 |         uses_n_frames = text in ["Mean of First N Frames", "Mean of First N Frames + Moving Average"]
176 |         self.n_frames_spin.setEnabled(uses_n_frames)
177 |         self.moving_avg_spin.setEnabled(text == "Mean of First N Frames + Moving Average")
178 |         QApplication.processEvents()  # Ensure UI updates
179 | 
180 | 
181 |     def on_transform_changed(self, text):
182 |         # Compare against the full item texts actually used in the combo boxes
183 |         if text.startswith("Bilinear") and self.ref_combo.currentText() == "Previous Frame":
184 |             QMessageBox.warning(self, "Warning",
185 |                                 "Bilinear transformation cannot be used with 'Previous Frame' reference. "
186 |                                 "Please select a different reference type.")
187 |             self.transform_combo.setCurrentText("Rigid Body (Translation + Rotation)")
188 | 
189 |     def select_input(self):
190 |         try:
191 |             if self.dir_radio.isChecked():
192 |                 path = QFileDialog.getExistingDirectory(
193 |                     self,
194 |                     "Select Directory with Images",
195 |                     "",
196 |                     QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks
197 |                 )
198 |             else:
199 |                 path, _ = QFileDialog.getOpenFileName(
200 |                     self,
201 |                     "Select TIFF Stack",
202 |                     "",
203 |                     "TIFF Files (*.tif *.tiff)",
204 |                     options=QFileDialog.Options()
205 |                 )
206 | 
207 |             if path:
208 |                 self.input_path = path
209 |                 self.input_label.setText(f"Selected: {os.path.basename(path)}")
210 |                 self.input_label.setToolTip(path)
211 |                 QApplication.processEvents()
212 | 
213 |         except Exception as e:
214 |             QMessageBox.critical(self, "Error", f"Error selecting input: {str(e)}")
215 | 
216 |     def select_output(self):
217 |         try:
218 |             directory = QFileDialog.getExistingDirectory(
219 |                 self,
220 |                 "Select Output Directory",
221 |                 "",
222 |                 QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks
223 |             )
224 | 
225 |             if directory:
226 |                 self.output_directory = directory
227 |                 self.output_label.setText(f"Selected: {os.path.basename(directory)}")
228 |                 self.output_label.setToolTip(directory)
229 |                 QApplication.processEvents()
230 | 
231 |         except Exception as e:
232 |             QMessageBox.critical(self, "Error", f"Error selecting output directory: {str(e)}")
233 | 
234 |     def register_slices(self):
235 |         if not self.input_path or not self.output_directory:
236 |             QMessageBox.warning(self, "Error", "Please select both input and output paths")
237 |             return
238 | 
239 |         try:
240 |             progress = QProgressDialog(self)
241 |             progress.setWindowTitle("Registration Progress")
242 |             progress.setLabelText("Loading images...")
243 |             progress.setMinimum(0)
244 |             progress.setMaximum(100)
245 |             progress.setWindowModality(Qt.WindowModal)
246 |             progress.setMinimumWidth(400)
247 |             progress.show()
248 |             QApplication.processEvents()
249 | 
250 |             # Load images using scikit-image's imread
251 |             if self.stack_radio.isChecked():
252 |                 progress.setLabelText("Loading TIFF stack...")
253 |                 img0 = io.imread(self.input_path)
254 |             else:
255 |                 progress.setLabelText("Loading images from directory...")
256 |                 image_files = sorted([f for f in os.listdir(self.input_path)
257 |                                       if f.lower().endswith(('.png', '.jpg', '.jpeg', '.tif', '.tiff'))])
258 |                 first_img = io.imread(os.path.join(self.input_path, image_files[0]))
259 |                 img0 = np.zeros((len(image_files), *first_img.shape), dtype=first_img.dtype)
260 |                 img0[0] = first_img
261 |                 for i, fname in enumerate(image_files[1:], 1):
262 |                     img0[i] = io.imread(os.path.join(self.input_path, fname))
263 | 
264 |             # Store original properties
265 |             original_dtype = img0.dtype
266 |             print("Original image properties:")
267 |             print(f"Dtype: {original_dtype}")
268 |             print(f"Range: {img0.min()} - {img0.max()}")
269 |             print(f"Shape: {img0.shape}")
270 | 
271 |             progress.setValue(30)
272 |             progress.setLabelText("Performing registration...")
273 |             QApplication.processEvents()
274 | 
275 |             # Set up StackReg with selected transformation
276 |             transform_types = {
277 |                 "Translation (X-Y Translation Only)": StackReg.TRANSLATION,
278 |                 "Rigid Body (Translation + Rotation)": StackReg.RIGID_BODY,
279 |                 "Scaled Rotation (Translation + Rotation + Scaling)": StackReg.SCALED_ROTATION,
280 |                 "Affine (Translation + Rotation + Scaling + Shearing)": StackReg.AFFINE,
281 |                 "Bilinear (Non-linear; Does not preserve straight lines)": StackReg.BILINEAR
282 |             }
283 | 284 | transform_type = transform_types[self.transform_combo.currentText()] 285 | sr = StackReg(transform_type) 286 | 287 | # Register images 288 | selected_ref = self.ref_combo.currentText() 289 | progress.setLabelText(f"Registering images using {selected_ref}...") 290 | progress.setValue(40) 291 | QApplication.processEvents() 292 | 293 | # Register and transform 294 | if selected_ref == "Previous Frame": 295 | out_registered = sr.register_transform_stack(img0, reference='previous') 296 | elif selected_ref == "First Frame": 297 | out_registered = sr.register_transform_stack(img0, reference='first') 298 | elif selected_ref == "Mean of All Frames": 299 | out_registered = sr.register_transform_stack(img0, reference='mean') 300 | elif selected_ref == "Mean of First N Frames": 301 | n_frames = self.n_frames_spin.value() 302 | out_registered = sr.register_transform_stack(img0, reference='first', n_frames=n_frames) 303 | elif selected_ref == "Mean of First N Frames + Moving Average": 304 | n_frames = self.n_frames_spin.value() 305 | moving_avg = self.moving_avg_spin.value() 306 | out_registered = sr.register_transform_stack(img0, reference='first', 307 | n_frames=n_frames, 308 | moving_average=moving_avg) 309 | 310 | progress.setValue(80) 311 | progress.setLabelText("Saving registered images...") 312 | QApplication.processEvents() 313 | 314 | # Convert back to original dtype without changing values 315 | out_registered = out_registered.astype(original_dtype) 316 | 317 | print(f"Output image properties:") 318 | print(f"Dtype: {out_registered.dtype}") 319 | print(f"Range: {out_registered.min()} - {out_registered.max()}") 320 | print(f"Shape: {out_registered.shape}") 321 | 322 | # Save output 323 | if self.stack_radio.isChecked(): 324 | output_name = os.path.splitext(os.path.basename(self.input_path))[0] 325 | else: 326 | output_name = "registered_stack" 327 | 328 | output_path = os.path.join(self.output_directory, f"{output_name}_registered.tif") 329 | 330 | # Get pixel sizes in micrometers (convert if necessary) 331 | xy_size = self.xy_size_value.value() 332 | z_size = self.z_size_value.value() 333 | unit = self.size_unit.currentText() 334 | 335 | # Convert to micrometers based on selected unit 336 | if unit == "nm": 337 | xy_size = xy_size / 1000 338 | z_size = z_size / 1000 339 | elif unit == "mm": 340 | xy_size = xy_size * 1000 341 | z_size = z_size * 1000 342 | 343 | # Save the stack 344 | tifffile.imwrite( 345 | output_path, 346 | out_registered, 347 | imagej=True, 348 | metadata={ 349 | 'axes': 'ZYX', 350 | 'spacing': z_size, # Z spacing in micrometers 351 | 'unit': 'um', 352 | 'finterval': xy_size # XY pixel size in micrometers 353 | }, 354 | resolution=(1.0/xy_size, 1.0/xy_size) # XY Resolution in pixels per unit 355 | ) 356 | 357 | progress.setValue(100) 358 | QApplication.processEvents() 359 | 360 | # Include both XY and Z size info in success message 361 | QMessageBox.information(self, "Success", 362 | f"Registration completed successfully!\n" 363 | f"Output saved to:\n{output_path}\n" 364 | f"XY Pixel size: {self.xy_size_value.value()} {unit}\n" 365 | f"Z Spacing: {self.z_size_value.value()} {unit}") 366 | 367 | except Exception as e: 368 | print(f"Error occurred: {str(e)}") 369 | import traceback 370 | traceback.print_exc() 371 | QMessageBox.critical(self, "Error", str(e)) 372 | 373 | 374 | def update_progress(self, progress_dialog, current_iteration, end_iteration): 375 | """Helper function to update progress during registration""" 376 | if end_iteration > 0: 377 | percent = 
int(40 + (current_iteration / end_iteration) * 40) # Scale to 40-80% range 378 | progress_dialog.setValue(percent) 379 | progress_dialog.setLabelText(f"Processing image {current_iteration}/{end_iteration}...") 380 | QApplication.processEvents() 381 | 382 | 383 | def load_images(self): 384 | print("Starting image loading...") 385 | try: 386 | if self.stack_radio.isChecked(): 387 | print(f"Loading TIFF stack from: {self.input_path}") 388 | # Explicitly use scikit-image's imread for TIFF stacks 389 | stack = io.imread(self.input_path) 390 | if stack.dtype != np.float32: 391 | stack = stack.astype(np.float32) 392 | print(f"Loaded TIFF stack shape: {stack.shape}") 393 | return stack 394 | else: 395 | # Load individual images 396 | valid_extensions = ('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff') 397 | images = [] 398 | files = sorted([f for f in os.listdir(self.input_path) 399 | if f.lower().endswith(valid_extensions)]) 400 | 401 | print(f"Found {len(files)} image files") 402 | 403 | if not files: 404 | raise ValueError("No valid image files found in directory") 405 | 406 | # Check first image size 407 | first_path = os.path.join(self.input_path, files[0]) 408 | print(f"Loading first image: {first_path}") 409 | first_img = np.array(Image.open(first_path)) 410 | ref_shape = first_img.shape 411 | images.append(first_img) 412 | print(f"First image shape: {ref_shape}") 413 | 414 | # Load remaining images and check sizes 415 | for f in files[1:]: 416 | img_path = os.path.join(self.input_path, f) 417 | print(f"Loading: {f}") 418 | img = np.array(Image.open(img_path)) 419 | if img.shape != ref_shape: 420 | raise ValueError(f"Image {f} has different dimensions from the first image") 421 | images.append(img) 422 | 423 | stack = np.stack(images) 424 | print(f"Final stack shape: {stack.shape}") 425 | return stack 426 | 427 | except Exception as e: 428 | print(f"Error in load_images: {str(e)}") 429 | raise 430 | 431 | 432 | def show_centered(self, parent): 433 | parent_geo = parent.geometry() 434 | self.move(parent_geo.center() - self.rect().center()) 435 | self.show() 436 | QApplication.processEvents() # Ensure window displays properly 437 | 438 | -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/snake_game.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import random 3 | from PyQt5.QtWidgets import QApplication, QWidget, QDesktopWidget, QMessageBox 4 | from PyQt5.QtGui import QPainter, QColor 5 | from PyQt5.QtCore import Qt, QTimer 6 | 7 | class SnakeGame(QWidget): 8 | def __init__(self): 9 | super().__init__() 10 | self.initUI() 11 | 12 | def initUI(self): 13 | self.setWindowTitle('Secret Snake Game') 14 | self.setFixedSize(600, 400) # Increased size 15 | self.center() 16 | 17 | self.snake = [(300, 200), (290, 200), (280, 200)] 18 | self.direction = 'RIGHT' 19 | self.food = self.place_food() 20 | self.score = 0 21 | 22 | self.timer = QTimer(self) 23 | self.timer.timeout.connect(self.update_game) 24 | self.timer.start(100) 25 | 26 | self.setFocusPolicy(Qt.StrongFocus) 27 | self.show() 28 | 29 | def center(self): 30 | qr = self.frameGeometry() 31 | cp = QDesktopWidget().availableGeometry().center() 32 | qr.moveCenter(cp) 33 | self.move(qr.topLeft()) 34 | 35 | def paintEvent(self, event): 36 | painter = QPainter(self) 37 | painter.setRenderHint(QPainter.Antialiasing) 38 | 39 | # Draw snake 40 | painter.setBrush(QColor(0, 255, 0)) 41 | for segment in self.snake: 42 | 
painter.drawRect(segment[0], segment[1], 10, 10) 43 | 44 | # Draw food 45 | painter.setBrush(QColor(255, 0, 0)) 46 | painter.drawRect(self.food[0], self.food[1], 10, 10) 47 | 48 | # Draw score 49 | painter.setPen(QColor(0, 0, 0)) 50 | painter.drawText(10, 20, f"Score: {self.score}") 51 | 52 | def keyPressEvent(self, event): 53 | key = event.key() 54 | 55 | if key == Qt.Key_Left and self.direction != 'RIGHT': 56 | self.direction = 'LEFT' 57 | elif key == Qt.Key_Right and self.direction != 'LEFT': 58 | self.direction = 'RIGHT' 59 | elif key == Qt.Key_Up and self.direction != 'DOWN': 60 | self.direction = 'UP' 61 | elif key == Qt.Key_Down and self.direction != 'UP': 62 | self.direction = 'DOWN' 63 | elif key == Qt.Key_Escape: 64 | self.close() 65 | 66 | def update_game(self): 67 | head = self.snake[0] 68 | 69 | if self.direction == 'LEFT': 70 | new_head = (head[0] - 10, head[1]) 71 | elif self.direction == 'RIGHT': 72 | new_head = (head[0] + 10, head[1]) 73 | elif self.direction == 'UP': 74 | new_head = (head[0], head[1] - 10) 75 | else: # DOWN 76 | new_head = (head[0], head[1] + 10) 77 | 78 | # Check if snake hit the edge 79 | if (new_head[0] < 0 or new_head[0] >= 600 or 80 | new_head[1] < 0 or new_head[1] >= 400): 81 | self.game_over() 82 | return 83 | 84 | self.snake.insert(0, new_head) 85 | 86 | if new_head == self.food: 87 | self.score += 1 88 | self.food = self.place_food() 89 | else: 90 | self.snake.pop() 91 | 92 | if new_head in self.snake[1:]: 93 | self.game_over() 94 | return 95 | 96 | self.update() 97 | 98 | def place_food(self): 99 | while True: 100 | x = random.randint(0, 59) * 10 101 | y = random.randint(0, 39) * 10 102 | if (x, y) not in self.snake: 103 | return (x, y) 104 | 105 | def game_over(self): 106 | self.timer.stop() 107 | QMessageBox.information(self, "Game Over", f"Your score: {self.score}") 108 | self.close() 109 | 110 | if __name__ == '__main__': 111 | app = QApplication(sys.argv) 112 | ex = SnakeGame() 113 | sys.exit(app.exec_()) -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/soft_dark_stylesheet.py: -------------------------------------------------------------------------------- 1 | # soft_dark_stylesheet.py 2 | 3 | soft_dark_stylesheet = """ 4 | QWidget { 5 | background-color: #2F2F2F; 6 | color: #E0E0E0; 7 | font-family: Arial, sans-serif; 8 | } 9 | 10 | QMainWindow { 11 | background-color: #2A2A2A; 12 | } 13 | 14 | QPushButton { 15 | background-color: #4A4A4A; 16 | border: 1px solid #5E5E5E; 17 | padding: 5px 10px; 18 | border-radius: 3px; 19 | color: #E0E0E0; 20 | } 21 | 22 | QPushButton:hover { 23 | background-color: #545454; 24 | } 25 | 26 | QPushButton:pressed { 27 | background-color: #404040; 28 | } 29 | 30 | QPushButton:checked { 31 | background-color: #606060; 32 | border: 2px solid #808080; 33 | color: #FFFFFF; 34 | } 35 | 36 | QListWidget, QTreeWidget { 37 | background-color: #3A3A3A; 38 | border: 1px solid #4A4A4A; 39 | border-radius: 3px; 40 | color: #E0E0E0; 41 | } 42 | 43 | QListWidget::item, QTreeWidget::item { 44 | color: #E0E0E0; 45 | } 46 | 47 | QListWidget::item:selected, QTreeWidget::item:selected { 48 | background-color: #4A4A4A; 49 | color: #FFFFFF; /* Make selected items a bit brighter */ 50 | } 51 | 52 | QLabel { 53 | color: #E0E0E0; 54 | } 55 | 56 | QLabel.section-header { 57 | font-weight: bold; 58 | font-size: 14px; 59 | padding: 5px 0; 60 | color: #FFFFFF; /* Bright white color for better visibility in dark mode */ 61 | } 62 | 63 | QLineEdit, QTextEdit, 
QPlainTextEdit { 64 | background-color: #3A3A3A; 65 | border: 1px solid #4A4A4A; 66 | color: #E0E0E0; 67 | padding: 2px; 68 | border-radius: 3px; 69 | } 70 | 71 | QSlider::groove:horizontal { 72 | background: #4A4A4A; 73 | height: 8px; 74 | border-radius: 4px; 75 | } 76 | 77 | QSlider::handle:horizontal { 78 | background: #6A6A6A; 79 | width: 18px; 80 | margin-top: -5px; 81 | margin-bottom: -5px; 82 | border-radius: 9px; 83 | } 84 | 85 | QSlider::handle:horizontal:hover { 86 | background: #7A7A7A; 87 | } 88 | 89 | QScrollBar:vertical, QScrollBar:horizontal { 90 | background-color: #3A3A3A; 91 | width: 12px; 92 | height: 12px; 93 | } 94 | 95 | QScrollBar::handle:vertical, QScrollBar::handle:horizontal { 96 | background-color: #5A5A5A; 97 | border-radius: 6px; 98 | min-height: 20px; 99 | } 100 | 101 | QScrollBar::handle:vertical:hover, QScrollBar::handle:horizontal:hover { 102 | background-color: #6A6A6A; 103 | } 104 | 105 | QScrollBar::add-line, QScrollBar::sub-line { 106 | background: none; 107 | } 108 | 109 | QMenuBar { 110 | background-color: #2F2F2F; 111 | } 112 | 113 | QMenuBar::item { 114 | padding: 5px 10px; 115 | background-color: transparent; 116 | } 117 | 118 | QMenuBar::item:selected { 119 | background-color: #3A3A3A; 120 | } 121 | 122 | QMenu { 123 | background-color: #2F2F2F; 124 | border: 1px solid #3A3A3A; 125 | } 126 | 127 | QMenu::item { 128 | padding: 5px 20px 5px 20px; 129 | } 130 | 131 | QMenu::item:selected { 132 | background-color: #3A3A3A; 133 | } 134 | 135 | QToolTip { 136 | background-color: #2F2F2F; 137 | color: #E0E0E0; 138 | border: 1px solid #3A3A3A; 139 | } 140 | 141 | QStatusBar { 142 | background-color: #2A2A2A; 143 | color: #B0B0B0; 144 | } 145 | 146 | QListWidget::item { 147 | color: none; 148 | } 149 | """ -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/stack_interpolator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, 4 | QLabel, QComboBox, QMessageBox, QProgressDialog, QRadioButton, 5 | QButtonGroup, QGroupBox, QDoubleSpinBox, QApplication) 6 | from PyQt5.QtCore import Qt 7 | from scipy.interpolate import RegularGridInterpolator 8 | from skimage import io 9 | import tifffile 10 | 11 | class StackInterpolator(QDialog): 12 | def __init__(self, parent=None): 13 | super().__init__(parent) 14 | self.setWindowTitle("Stack Interpolator") 15 | self.setGeometry(100, 100, 600, 400) 16 | self.setWindowFlags(self.windowFlags() | Qt.Window) 17 | self.setWindowModality(Qt.ApplicationModal) # Added window modality 18 | 19 | # Initialize variables 20 | self.input_path = "" 21 | self.output_directory = "" 22 | 23 | self.initUI() 24 | 25 | def initUI(self): 26 | layout = QVBoxLayout() 27 | layout.setSpacing(10) # Add consistent spacing 28 | 29 | # Input selection 30 | input_group = QGroupBox("Input Selection") 31 | input_layout = QVBoxLayout() 32 | 33 | # Radio buttons for input type 34 | self.dir_radio = QRadioButton("Directory of Image Files") 35 | self.stack_radio = QRadioButton("TIFF Stack") 36 | 37 | input_group_buttons = QButtonGroup(self) 38 | input_group_buttons.addButton(self.dir_radio) 39 | input_group_buttons.addButton(self.stack_radio) 40 | 41 | input_layout.addWidget(self.dir_radio) 42 | input_layout.addWidget(self.stack_radio) 43 | self.dir_radio.setChecked(True) 44 | 45 | input_group.setLayout(input_layout) 46 | 
layout.addWidget(input_group) 47 | 48 | # Interpolation method 49 | method_group = QGroupBox("Interpolation Settings") 50 | method_layout = QVBoxLayout() 51 | 52 | method_combo_layout = QHBoxLayout() 53 | method_combo_layout.addWidget(QLabel("Method:")) 54 | self.method_combo = QComboBox() 55 | self.method_combo.addItems([ 56 | "linear", 57 | "nearest", 58 | "slinear", 59 | "cubic", 60 | "quintic", 61 | "pchip" 62 | ]) 63 | method_combo_layout.addWidget(self.method_combo) 64 | method_layout.addLayout(method_combo_layout) 65 | 66 | method_group.setLayout(method_layout) 67 | layout.addWidget(method_group) 68 | 69 | # Original dimensions group 70 | orig_group = QGroupBox("Original Dimensions") 71 | orig_layout = QVBoxLayout() 72 | 73 | orig_xy_layout = QHBoxLayout() 74 | orig_xy_layout.addWidget(QLabel("XY Pixel Size:")) 75 | self.orig_xy_size = QDoubleSpinBox() 76 | self.orig_xy_size.setRange(0.001, 1000.0) 77 | self.orig_xy_size.setValue(1.0) 78 | self.orig_xy_size.setDecimals(3) 79 | orig_xy_layout.addWidget(self.orig_xy_size) 80 | 81 | orig_z_layout = QHBoxLayout() 82 | orig_z_layout.addWidget(QLabel("Z Spacing:")) 83 | self.orig_z_size = QDoubleSpinBox() 84 | self.orig_z_size.setRange(0.001, 1000.0) 85 | self.orig_z_size.setValue(1.0) 86 | self.orig_z_size.setDecimals(3) 87 | orig_z_layout.addWidget(self.orig_z_size) 88 | 89 | orig_layout.addLayout(orig_xy_layout) 90 | orig_layout.addLayout(orig_z_layout) 91 | orig_group.setLayout(orig_layout) 92 | layout.addWidget(orig_group) 93 | 94 | # New dimensions group 95 | new_group = QGroupBox("New Dimensions") 96 | new_layout = QVBoxLayout() 97 | 98 | new_xy_layout = QHBoxLayout() 99 | new_xy_layout.addWidget(QLabel("XY Pixel Size:")) 100 | self.new_xy_size = QDoubleSpinBox() 101 | self.new_xy_size.setRange(0.001, 1000.0) 102 | self.new_xy_size.setValue(1.0) 103 | self.new_xy_size.setDecimals(3) 104 | new_xy_layout.addWidget(self.new_xy_size) 105 | 106 | new_z_layout = QHBoxLayout() 107 | new_z_layout.addWidget(QLabel("Z Spacing:")) 108 | self.new_z_size = QDoubleSpinBox() 109 | self.new_z_size.setRange(0.001, 1000.0) 110 | self.new_z_size.setValue(1.0) 111 | self.new_z_size.setDecimals(3) 112 | new_z_layout.addWidget(self.new_z_size) 113 | 114 | new_layout.addLayout(new_xy_layout) 115 | new_layout.addLayout(new_z_layout) 116 | new_group.setLayout(new_layout) 117 | layout.addWidget(new_group) 118 | 119 | # Units selector 120 | unit_group = QGroupBox("Unit Settings") 121 | unit_layout = QHBoxLayout() 122 | unit_layout.addWidget(QLabel("Unit:")) 123 | self.size_unit = QComboBox() 124 | self.size_unit.addItems(["nm", "µm", "mm"]) 125 | self.size_unit.setCurrentText("µm") 126 | unit_layout.addWidget(self.size_unit) 127 | unit_group.setLayout(unit_layout) 128 | layout.addWidget(unit_group) 129 | 130 | # Input/Output buttons 131 | button_group = QGroupBox("File Selection") 132 | button_layout = QVBoxLayout() 133 | 134 | # Input selection 135 | input_file_layout = QHBoxLayout() 136 | self.input_label = QLabel("No input selected") 137 | self.select_input_btn = QPushButton("Select Input") 138 | self.select_input_btn.clicked.connect(self.select_input) 139 | input_file_layout.addWidget(self.select_input_btn) 140 | input_file_layout.addWidget(self.input_label) 141 | button_layout.addLayout(input_file_layout) 142 | 143 | # Output selection 144 | output_file_layout = QHBoxLayout() 145 | self.output_label = QLabel("No output directory selected") 146 | self.select_output_btn = QPushButton("Select Output Directory") 147 | 
self.select_output_btn.clicked.connect(self.select_output) 148 | output_file_layout.addWidget(self.select_output_btn) 149 | output_file_layout.addWidget(self.output_label) 150 | button_layout.addLayout(output_file_layout) 151 | 152 | button_group.setLayout(button_layout) 153 | layout.addWidget(button_group) 154 | 155 | # Interpolate button 156 | self.interpolate_btn = QPushButton("Interpolate") 157 | self.interpolate_btn.clicked.connect(self.interpolate_stack) 158 | layout.addWidget(self.interpolate_btn) 159 | 160 | self.setLayout(layout) 161 | 162 | def select_input(self): 163 | try: 164 | if self.dir_radio.isChecked(): 165 | path = QFileDialog.getExistingDirectory( 166 | self, 167 | "Select Directory with Images", 168 | "", 169 | QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks 170 | ) 171 | else: 172 | path, _ = QFileDialog.getOpenFileName( 173 | self, 174 | "Select TIFF Stack", 175 | "", 176 | "TIFF Files (*.tif *.tiff)", 177 | options=QFileDialog.Options() 178 | ) 179 | 180 | if path: 181 | self.input_path = path 182 | self.input_label.setText(f"Selected: {os.path.basename(path)}") 183 | self.input_label.setToolTip(path) 184 | QApplication.processEvents() 185 | 186 | except Exception as e: 187 | QMessageBox.critical(self, "Error", f"Error selecting input: {str(e)}") 188 | 189 | def select_output(self): 190 | try: 191 | directory = QFileDialog.getExistingDirectory( 192 | self, 193 | "Select Output Directory", 194 | "", 195 | QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks 196 | ) 197 | 198 | if directory: 199 | self.output_directory = directory 200 | self.output_label.setText(f"Selected: {os.path.basename(directory)}") 201 | self.output_label.setToolTip(directory) 202 | QApplication.processEvents() 203 | 204 | except Exception as e: 205 | QMessageBox.critical(self, "Error", f"Error selecting output directory: {str(e)}") 206 | 207 | def load_images(self): 208 | try: 209 | progress = QProgressDialog("Loading images...", "Cancel", 0, 100, self) 210 | progress.setWindowModality(Qt.WindowModal) 211 | progress.show() 212 | QApplication.processEvents() 213 | 214 | if self.stack_radio.isChecked(): 215 | progress.setLabelText("Loading TIFF stack...") 216 | progress.setValue(20) 217 | QApplication.processEvents() 218 | 219 | # Load stack preserving original dtype 220 | stack = io.imread(self.input_path) 221 | print(f"Loaded stack dtype: {stack.dtype}") 222 | print(f"Value range: [{stack.min()}, {stack.max()}]") 223 | 224 | progress.setValue(90) 225 | QApplication.processEvents() 226 | return stack 227 | else: 228 | valid_extensions = ('.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff') 229 | files = sorted([f for f in os.listdir(self.input_path) 230 | if f.lower().endswith(valid_extensions)]) 231 | 232 | if not files: 233 | raise ValueError("No valid image files found in directory") 234 | 235 | progress.setMaximum(len(files)) 236 | 237 | # Load first image to get dimensions and dtype 238 | first_img = io.imread(os.path.join(self.input_path, files[0])) 239 | stack = np.zeros((len(files), *first_img.shape), dtype=first_img.dtype) 240 | stack[0] = first_img 241 | 242 | print(f"Created stack with dtype: {stack.dtype}") 243 | print(f"First image range: [{first_img.min()}, {first_img.max()}]") 244 | 245 | # Load remaining images 246 | for i, fname in enumerate(files[1:], 1): 247 | progress.setValue(i) 248 | progress.setLabelText(f"Loading image {i+1}/{len(files)}") 249 | QApplication.processEvents() 250 | 251 | if progress.wasCanceled(): 252 | raise InterruptedError("Loading 
cancelled by user") 253 | 254 | img = io.imread(os.path.join(self.input_path, fname)) 255 | if img.shape != first_img.shape: 256 | raise ValueError(f"Image {fname} has different dimensions from the first image") 257 | if img.dtype != first_img.dtype: 258 | raise ValueError(f"Image {fname} has different bit depth from the first image") 259 | stack[i] = img 260 | 261 | return stack 262 | 263 | except Exception as e: 264 | raise ValueError(f"Error loading images: {str(e)}") 265 | finally: 266 | progress.close() 267 | QApplication.processEvents() 268 | 269 | def interpolate_stack(self): 270 | if not self.input_path or not self.output_directory: 271 | QMessageBox.warning(self, "Missing Paths", "Please select both input and output paths") 272 | return 273 | 274 | try: 275 | # Create progress dialog 276 | progress = QProgressDialog("Processing...", "Cancel", 0, 100, self) 277 | progress.setWindowModality(Qt.WindowModal) 278 | progress.setWindowTitle("Interpolation Progress") 279 | progress.setMinimumDuration(0) 280 | progress.setMinimumWidth(400) 281 | progress.show() 282 | QApplication.processEvents() 283 | 284 | # Load images 285 | progress.setLabelText("Loading images...") 286 | progress.setValue(10) 287 | QApplication.processEvents() 288 | 289 | input_stack = self.load_images() 290 | original_dtype = input_stack.dtype 291 | type_range = np.iinfo(original_dtype) if np.issubdtype(original_dtype, np.integer) else None 292 | 293 | print(f"Original data type: {original_dtype}") 294 | print(f"Original shape: {input_stack.shape}") 295 | print(f"Original range: {input_stack.min()} - {input_stack.max()}") 296 | 297 | # Normalize input data to float64 for interpolation 298 | input_stack_normalized = input_stack.astype(np.float64) 299 | if type_range is not None: 300 | input_stack_normalized = input_stack_normalized / type_range.max 301 | 302 | progress.setLabelText("Calculating dimensions...") 303 | progress.setValue(20) 304 | QApplication.processEvents() 305 | 306 | # Calculate dimensions and coordinates 307 | z_old = np.arange(input_stack.shape[0]) * self.orig_z_size.value() 308 | y_old = np.arange(input_stack.shape[1]) * self.orig_xy_size.value() 309 | x_old = np.arange(input_stack.shape[2]) * self.orig_xy_size.value() 310 | 311 | z_new = np.arange(z_old[0], z_old[-1] + self.new_z_size.value(), self.new_z_size.value()) 312 | y_new = np.arange(0, input_stack.shape[1] * self.orig_xy_size.value(), self.new_xy_size.value()) 313 | x_new = np.arange(0, input_stack.shape[2] * self.orig_xy_size.value(), self.new_xy_size.value()) 314 | 315 | y_new = y_new[y_new < y_old[-1] + self.new_xy_size.value()] 316 | x_new = x_new[x_new < x_old[-1] + self.new_xy_size.value()] 317 | 318 | new_shape = (len(z_new), len(y_new), len(x_new)) 319 | print(f"New dimensions will be: {new_shape}") 320 | 321 | # Initialize output array 322 | interpolated_data = np.zeros(new_shape, dtype=np.float64) 323 | 324 | method = self.method_combo.currentText() 325 | 326 | # For higher-order methods, use a hybrid approach 327 | if method in ['cubic', 'quintic', 'pchip']: 328 | progress.setLabelText("Using hybrid interpolation approach...") 329 | progress.setValue(30) 330 | QApplication.processEvents() 331 | 332 | from scipy.interpolate import interp1d 333 | 334 | # Process each XY point 335 | total_points = input_stack.shape[1] * input_stack.shape[2] 336 | points_processed = 0 337 | 338 | temp_stack = np.zeros((len(z_new), input_stack.shape[1], input_stack.shape[2]), dtype=np.float64) 339 | 340 | for y in range(input_stack.shape[1]): 341 | 
for x in range(input_stack.shape[2]): 342 | if progress.wasCanceled(): 343 | return 344 | 345 | points_processed += 1 346 | if points_processed % 1000 == 0: 347 | progress_val = 30 + (points_processed / total_points * 30) 348 | progress.setValue(int(progress_val)) 349 | progress.setLabelText(f"Interpolating Z dimension: {points_processed}/{total_points} points") 350 | QApplication.processEvents() 351 | 352 | z_profile = input_stack_normalized[:, y, x] 353 | f = interp1d(z_old, z_profile, kind=method, bounds_error=False, fill_value='extrapolate') 354 | temp_stack[:, y, x] = f(z_new) 355 | 356 | progress.setLabelText("Interpolating XY planes...") 357 | progress.setValue(60) 358 | QApplication.processEvents() 359 | 360 | for z in range(len(z_new)): 361 | if progress.wasCanceled(): 362 | return 363 | 364 | progress.setValue(60 + int((z / len(z_new)) * 30)) 365 | progress.setLabelText(f"Processing XY plane {z+1}/{len(z_new)}") 366 | QApplication.processEvents() 367 | 368 | interpolator = RegularGridInterpolator( 369 | (y_old, x_old), 370 | temp_stack[z], 371 | method='linear', 372 | bounds_error=False, 373 | fill_value=0 374 | ) 375 | 376 | yy, xx = np.meshgrid(y_new, x_new, indexing='ij') 377 | pts = np.stack([yy.ravel(), xx.ravel()], axis=-1) 378 | 379 | interpolated_data[z] = interpolator(pts).reshape(len(y_new), len(x_new)) 380 | 381 | del temp_stack 382 | 383 | else: # For linear and nearest neighbor 384 | progress.setLabelText("Creating interpolator...") 385 | progress.setValue(30) 386 | QApplication.processEvents() 387 | 388 | interpolator = RegularGridInterpolator( 389 | (z_old, y_old, x_old), 390 | input_stack_normalized, 391 | method=method, 392 | bounds_error=False, 393 | fill_value=0 394 | ) 395 | 396 | slices_per_batch = max(1, len(z_new) // 20) 397 | total_batches = (len(z_new) + slices_per_batch - 1) // slices_per_batch 398 | 399 | for batch_idx in range(total_batches): 400 | if progress.wasCanceled(): 401 | return 402 | 403 | start_idx = batch_idx * slices_per_batch 404 | end_idx = min((batch_idx + 1) * slices_per_batch, len(z_new)) 405 | 406 | progress.setLabelText(f"Interpolating batch {batch_idx + 1}/{total_batches}") 407 | progress_value = int(40 + (batch_idx/total_batches)*40) 408 | progress.setValue(progress_value) 409 | QApplication.processEvents() 410 | 411 | zz, yy, xx = np.meshgrid( 412 | z_new[start_idx:end_idx], 413 | y_new, 414 | x_new, 415 | indexing='ij' 416 | ) 417 | 418 | pts = np.stack([zz.ravel(), yy.ravel(), xx.ravel()], axis=-1) 419 | 420 | interpolated_data[start_idx:end_idx] = interpolator(pts).reshape( 421 | end_idx - start_idx, 422 | len(y_new), 423 | len(x_new) 424 | ) 425 | 426 | # Convert back to original dtype 427 | progress.setLabelText("Converting to original bit depth...") 428 | progress.setValue(90) 429 | QApplication.processEvents() 430 | 431 | if np.issubdtype(original_dtype, np.integer): 432 | # Scale back to original range 433 | interpolated_data = np.clip(interpolated_data, 0, 1) 434 | interpolated_data = (interpolated_data * type_range.max).astype(original_dtype) 435 | else: 436 | interpolated_data = interpolated_data.astype(original_dtype) 437 | 438 | print(f"Final dtype: {interpolated_data.dtype}") 439 | print(f"Final range: [{interpolated_data.min()}, {interpolated_data.max()}]") 440 | 441 | # Save output 442 | progress.setLabelText("Saving interpolated stack...") 443 | progress.setValue(95) 444 | QApplication.processEvents() 445 | 446 | if self.stack_radio.isChecked(): 447 | output_name = 
os.path.splitext(os.path.basename(self.input_path))[0] 448 | else: 449 | output_name = "interpolated_stack" 450 | 451 | output_path = os.path.join(self.output_directory, f"{output_name}_interpolated.tif") 452 | 453 | # Convert sizes to micrometers for metadata 454 | unit = self.size_unit.currentText() 455 | xy_size = self.new_xy_size.value() 456 | z_size = self.new_z_size.value() 457 | 458 | if unit == "nm": 459 | xy_size /= 1000 460 | z_size /= 1000 461 | elif unit == "mm": 462 | xy_size *= 1000 463 | z_size *= 1000 464 | 465 | # Save with metadata 466 | tifffile.imwrite( 467 | output_path, 468 | interpolated_data, 469 | imagej=True, 470 | metadata={ 471 | 'axes': 'ZYX', 472 | 'spacing': z_size, 473 | 'unit': 'um', 474 | 'finterval': xy_size 475 | }, 476 | resolution=(1.0/xy_size, 1.0/xy_size) 477 | ) 478 | 479 | progress.setValue(100) 480 | QApplication.processEvents() 481 | 482 | QMessageBox.information( 483 | self, 484 | "Success", 485 | f"Interpolation completed successfully!\n" 486 | f"Output saved to:\n{output_path}\n" 487 | f"New dimensions: {interpolated_data.shape}\n" 488 | f"Bit depth: {interpolated_data.dtype}\n" 489 | f"XY Pixel size: {self.new_xy_size.value()} {unit}\n" 490 | f"Z Spacing: {self.new_z_size.value()} {unit}" 491 | ) 492 | 493 | except Exception as e: 494 | QMessageBox.critical(self, "Error", str(e)) 495 | print(f"Error occurred: {str(e)}") 496 | import traceback 497 | traceback.print_exc() 498 | finally: 499 | progress.close() 500 | QApplication.processEvents() 501 | 502 | 503 | def show_centered(self, parent): 504 | parent_geo = parent.geometry() 505 | self.move(parent_geo.center() - self.rect().center()) 506 | self.show() 507 | QApplication.processEvents() # Ensure UI updates 508 | 509 | 510 | # Helper function to create the dialog 511 | def show_stack_interpolator(parent): 512 | dialog = StackInterpolator(parent) 513 | dialog.show_centered(parent) 514 | return dialog -------------------------------------------------------------------------------- /src/digitalsreeni_image_annotator/stack_to_slices.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton, 4 | QFileDialog, QLabel, QMessageBox, QComboBox, QGridLayout, QWidget, 5 | QProgressDialog, QApplication) 6 | from PyQt5.QtCore import Qt 7 | from tifffile import TiffFile 8 | from czifile import CziFile 9 | from PIL import Image 10 | 11 | class DimensionDialog(QDialog): 12 | def __init__(self, shape, file_name, parent=None): 13 | super().__init__(parent) 14 | self.setWindowTitle("Assign Dimensions") 15 | self.shape = shape 16 | self.initUI(file_name) 17 | 18 | def initUI(self, file_name): 19 | layout = QVBoxLayout() 20 | 21 | file_name_label = QLabel(f"File: {file_name}") 22 | file_name_label.setWordWrap(True) 23 | layout.addWidget(file_name_label) 24 | 25 | dim_widget = QWidget() 26 | dim_layout = QGridLayout(dim_widget) 27 | self.combos = [] 28 | dimensions = ['T', 'Z', 'C', 'S', 'H', 'W'] 29 | for i, dim in enumerate(self.shape): 30 | dim_layout.addWidget(QLabel(f"Dimension {i} (size {dim}):"), i, 0) 31 | combo = QComboBox() 32 | combo.addItems(dimensions) 33 | dim_layout.addWidget(combo, i, 1) 34 | self.combos.append(combo) 35 | layout.addWidget(dim_widget) 36 | 37 | self.button = QPushButton("OK") 38 | self.button.clicked.connect(self.accept) 39 | layout.addWidget(self.button) 40 | 41 | self.setLayout(layout) 42 | 43 | def get_dimensions(self): 44 | return 
[combo.currentText() for combo in self.combos] 45 | 46 | 47 | class StackToSlicesDialog(QDialog): 48 | def __init__(self, parent=None): 49 | super().__init__(parent) 50 | self.setWindowTitle("Stack to Slices") 51 | self.setGeometry(100, 100, 400, 200) 52 | self.setWindowFlags(self.windowFlags() | Qt.Window) 53 | self.setWindowModality(Qt.ApplicationModal) 54 | self.dimensions = None 55 | self.initUI() 56 | 57 | def initUI(self): 58 | layout = QVBoxLayout() 59 | 60 | self.file_label = QLabel("No file selected") 61 | layout.addWidget(self.file_label) 62 | 63 | select_button = QPushButton("Select Stack File") 64 | select_button.clicked.connect(self.select_file) 65 | layout.addWidget(select_button) 66 | 67 | self.convert_button = QPushButton("Convert to Slices") 68 | self.convert_button.clicked.connect(self.convert_to_slices) 69 | self.convert_button.setEnabled(False) 70 | layout.addWidget(self.convert_button) 71 | 72 | self.setLayout(layout) 73 | 74 | def select_file(self): 75 | self.file_name, _ = QFileDialog.getOpenFileName(self, "Select Stack File", "", "Image Files (*.tif *.tiff *.czi)") 76 | if self.file_name: 77 | self.file_label.setText(f"Selected file: {os.path.basename(self.file_name)}") 78 | QApplication.processEvents() 79 | self.process_file() 80 | 81 | def process_file(self): 82 | if self.file_name.lower().endswith(('.tif', '.tiff')): 83 | self.process_tiff() 84 | elif self.file_name.lower().endswith('.czi'): 85 | self.process_czi() 86 | 87 | def process_tiff(self): 88 | with TiffFile(self.file_name) as tif: 89 | image_array = tif.asarray() 90 | 91 | self.get_dimensions(image_array.shape) 92 | 93 | def process_czi(self): 94 | with CziFile(self.file_name) as czi: 95 | image_array = czi.asarray() 96 | 97 | self.get_dimensions(image_array.shape) 98 | 99 | def get_dimensions(self, shape): 100 | dialog = DimensionDialog(shape, os.path.basename(self.file_name), self) 101 | dialog.setWindowModality(Qt.ApplicationModal) 102 | if dialog.exec_(): 103 | self.dimensions = dialog.get_dimensions() 104 | self.convert_button.setEnabled(True) 105 | else: 106 | self.dimensions = None 107 | self.convert_button.setEnabled(False) 108 | QApplication.processEvents() 109 | 110 | def convert_to_slices(self): 111 | if not hasattr(self, 'file_name') or not self.dimensions: 112 | QMessageBox.warning(self, "Invalid Input", "Please select a file and assign dimensions first.") 113 | return 114 | 115 | output_dir = QFileDialog.getExistingDirectory(self, "Select Output Directory") 116 | if not output_dir: 117 | return 118 | 119 | if self.file_name.lower().endswith(('.tif', '.tiff')): 120 | with TiffFile(self.file_name) as tif: 121 | image_array = tif.asarray() 122 | elif self.file_name.lower().endswith('.czi'): 123 | with CziFile(self.file_name) as czi: 124 | image_array = czi.asarray() 125 | 126 | self.save_slices(image_array, output_dir) 127 | 128 | def save_slices(self, image_array, output_dir): 129 | base_name = os.path.splitext(os.path.basename(self.file_name))[0] 130 | 131 | slice_indices = [i for i, dim in enumerate(self.dimensions) if dim not in ['H', 'W']] 132 | 133 | total_slices = np.prod([image_array.shape[i] for i in slice_indices]) 134 | 135 | progress = QProgressDialog("Saving slices...", "Cancel", 0, total_slices, self) 136 | progress.setWindowModality(Qt.WindowModal) 137 | progress.setWindowTitle("Progress") 138 | progress.setMinimumDuration(0) 139 | progress.setValue(0) 140 | progress.show() 141 | 142 | try: 143 | for idx, _ in enumerate(np.ndindex(tuple(image_array.shape[i] for i in 
slice_indices))):
144 |                 if progress.wasCanceled():
145 |                     break
146 | 
147 |                 full_idx = [slice(None)] * len(self.dimensions)
148 |                 for i, val in zip(slice_indices, _):
149 |                     full_idx[i] = val
150 | 
151 |                 slice_array = image_array[tuple(full_idx)]
152 | 
153 |                 if slice_array.ndim > 2:
154 |                     slice_array = slice_array.squeeze()
155 | 
156 |                 if slice_array.dtype == np.uint16:
157 |                     mode = 'I;16'
158 |                 elif slice_array.dtype == np.uint8:
159 |                     mode = 'L'
160 |                 else:
161 |                     value_range = slice_array.max() - slice_array.min()  # avoid divide-by-zero on constant slices
162 |                     slice_array = ((slice_array - slice_array.min()) / (value_range or 1) * 65535).astype(np.uint16)
163 |                     mode = 'I;16'
164 |                 slice_name = f"{base_name}_{'_'.join([f'{self.dimensions[i]}{val+1}' for i, val in zip(slice_indices, _)])}.png"
165 |                 img = Image.fromarray(slice_array, mode=mode)
166 |                 img.save(os.path.join(output_dir, slice_name))
167 | 
168 |                 progress.setValue(idx + 1)
169 |                 QApplication.processEvents()
170 | 
171 |             if progress.wasCanceled():
172 |                 QMessageBox.warning(self, "Conversion Interrupted", "The conversion process was interrupted.")
173 |             else:
174 |                 QMessageBox.information(self, "Conversion Complete", f"All slices have been saved to {output_dir}")
175 | 
176 |         finally:
177 |             progress.close()
178 | 
179 |     def show_centered(self, parent):
180 |         parent_geo = parent.geometry()
181 |         self.move(parent_geo.center() - self.rect().center())
182 |         self.show()
183 | 
184 | def show_stack_to_slices(parent):
185 |     dialog = StackToSlicesDialog(parent)
186 |     dialog.show_centered(parent)
187 |     return dialog
--------------------------------------------------------------------------------
/src/digitalsreeni_image_annotator/utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the Image Annotator application.
3 | 
4 | This module contains helper functions used across the application.
5 | 
6 | @DigitalSreeni
7 | Dr. Sreenivas Bhattiprolu
8 | """
9 | 
10 | import numpy as np
11 | 
12 | def calculate_area(annotation):
13 |     if "segmentation" in annotation:
14 |         # Polygon area via the shoelace formula on a flat [x1, y1, x2, y2, ...] list
15 |         x, y = annotation["segmentation"][0::2], annotation["segmentation"][1::2]
16 |         return 0.5 * abs(sum(x[i] * y[i+1] - x[i+1] * y[i] for i in range(-1, len(x)-1)))
17 |     elif "bbox" in annotation:
18 |         # Rectangle area
19 |         x, y, w, h = annotation["bbox"]
20 |         return w * h
21 |     return 0
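# Worked example of the shoelace computation above (illustrative values): for a
# 2-wide, 3-tall rectangle given as the flat list [0,0, 2,0, 2,3, 0,3], the
# slices yield x = [0, 2, 2, 0] and y = [0, 0, 3, 3], and the signed terms are
# (0*0 - 0*3) + (0*0 - 2*0) + (2*3 - 2*0) + (2*3 - 0*3) = 0 + 0 + 6 + 6 = 12,
# so the area is 0.5 * |12| = 6, as expected.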
22 | 
23 | def calculate_bbox(segmentation):
24 |     x_coordinates, y_coordinates = segmentation[0::2], segmentation[1::2]
25 |     x_min, y_min = min(x_coordinates), min(y_coordinates)
26 |     x_max, y_max = max(x_coordinates), max(y_coordinates)
27 |     width, height = x_max - x_min, y_max - y_min
28 |     return [x_min, y_min, width, height]
29 | 
30 | def normalize_image(image_array):
31 |     """Normalize image array to 8-bit range."""
32 |     if image_array.dtype != np.uint8:
33 |         value_range = image_array.max() - image_array.min()  # guard against constant images
34 |         image_array = ((image_array - image_array.min()) / (value_range or 1) * 255).astype(np.uint8)
35 |     return image_array
36 | 
--------------------------------------------------------------------------------
/src/digitalsreeni_image_annotator/yolo_trainer.py:
--------------------------------------------------------------------------------
1 | import os
2 | from collections import deque
3 | from pathlib import Path
4 | 
5 | import numpy as np
6 | import yaml
7 | from ultralytics import YOLO
8 | from PyQt5.QtWidgets import (QDialog, QVBoxLayout, QHBoxLayout, QPushButton,
9 |                              QLineEdit, QLabel, QFileDialog, QMessageBox,
10 |                              QDialogButtonBox, QTextEdit)
11 | from PyQt5.QtCore import Qt, pyqtSignal, QObject
12 | 
13 | from .export_formats import export_yolo_v5plus
14 | 
15 | 
16 | 
17 | 
18 | class TrainingInfoDialog(QDialog):
19 |     stop_signal = pyqtSignal()
20 | 
21 |     def __init__(self, parent=None):
22 |         super().__init__(parent)
23 |         self.setWindowTitle("Training Progress")
24 |         self.setModal(False)
25 |         self.layout = QVBoxLayout(self)
26 | 
27 |         self.info_text = QTextEdit(self)
28 |         self.info_text.setReadOnly(True)
29 |         self.layout.addWidget(self.info_text)
30 | 
31 |         self.stop_button = QPushButton("Stop Training", self)
32 |         self.stop_button.clicked.connect(self.stop_training)
33 |         self.layout.addWidget(self.stop_button)
34 | 
35 |         self.close_button = QPushButton("Close", self)
36 |         self.close_button.clicked.connect(self.hide)
37 |         self.layout.addWidget(self.close_button)
38 | 
39 |         self.setMinimumSize(400, 300)
40 | 
41 |     def update_info(self, text):
42 |         self.info_text.append(text)
43 |         self.info_text.verticalScrollBar().setValue(self.info_text.verticalScrollBar().maximum())
44 | 
45 |     def stop_training(self):
46 |         self.stop_signal.emit()
47 |         self.stop_button.setEnabled(False)
48 |         self.stop_button.setText("Stopping...")
49 | 
50 |     def closeEvent(self, event):
51 |         event.ignore()
52 |         self.hide()
53 | 
54 | class LoadPredictionModelDialog(QDialog):
55 |     def __init__(self, parent=None):
56 |         super().__init__(parent)
57 |         self.setWindowTitle("Load Prediction Model and YAML")
58 |         self.model_path = ""
59 |         self.yaml_path = ""
60 | 
61 |         layout = QVBoxLayout(self)
62 | 
63 |         # Model file selection
64 |         model_layout = QHBoxLayout()
65 |         self.model_edit = QLineEdit()
66 |         model_button = QPushButton("Browse")
67 |         model_button.clicked.connect(self.browse_model)
68 |         model_layout.addWidget(QLabel("Model File:"))
69 |         model_layout.addWidget(self.model_edit)
70 | 
model_layout.addWidget(model_button) 71 | layout.addLayout(model_layout) 72 | 73 | # YAML file selection 74 | yaml_layout = QHBoxLayout() 75 | self.yaml_edit = QLineEdit() 76 | yaml_button = QPushButton("Browse") 77 | yaml_button.clicked.connect(self.browse_yaml) 78 | yaml_layout.addWidget(QLabel("YAML File:")) 79 | yaml_layout.addWidget(self.yaml_edit) 80 | yaml_layout.addWidget(yaml_button) 81 | layout.addLayout(yaml_layout) 82 | 83 | # OK and Cancel buttons 84 | self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel) 85 | self.button_box.accepted.connect(self.accept) 86 | self.button_box.rejected.connect(self.reject) 87 | layout.addWidget(self.button_box) 88 | 89 | def browse_model(self): 90 | file_name, _ = QFileDialog.getOpenFileName(self, "Select YOLO Model", "", "YOLO Model (*.pt)") 91 | if file_name: 92 | self.model_path = file_name 93 | self.model_edit.setText(file_name) 94 | 95 | def browse_yaml(self): 96 | file_name, _ = QFileDialog.getOpenFileName(self, "Select YAML File", "", "YAML Files (*.yaml *.yml)") 97 | if file_name: 98 | self.yaml_path = file_name 99 | self.yaml_edit.setText(file_name) 100 | 101 | class YOLOTrainer(QObject): 102 | progress_signal = pyqtSignal(str) 103 | 104 | def __init__(self, project_dir, main_window): 105 | super().__init__() 106 | self.project_dir = project_dir 107 | self.main_window = main_window 108 | self.model = None 109 | self.dataset_path = os.path.join(project_dir, "yolo_dataset") 110 | self.model_path = os.path.join(project_dir, "yolo_model") 111 | self.yaml_path = None 112 | self.yaml_data = None 113 | self.epoch_info = deque(maxlen=10) 114 | self.progress_callback = None 115 | self.total_epochs = None 116 | self.conf_threshold = 0.25 117 | self.stop_training = False 118 | self.class_names = None 119 | 120 | def load_model(self, model_path=None): 121 | if model_path is None: 122 | model_path, _ = QFileDialog.getOpenFileName(self.main_window, "Select YOLO Model", "", "YOLO Model (*.pt)") 123 | if model_path: 124 | try: 125 | self.model = YOLO(model_path) 126 | return True 127 | except Exception as e: 128 | QMessageBox.critical(self.main_window, "Error Loading Model", f"Could not load the model. 
Error: {str(e)}") 129 | return False 130 | 131 | def prepare_dataset(self): 132 | output_dir, yaml_path = export_yolo_v5plus( 133 | self.main_window.all_annotations, 134 | self.main_window.class_mapping, 135 | self.main_window.image_paths, 136 | self.main_window.slices, 137 | self.main_window.image_slices, 138 | self.dataset_path 139 | ) 140 | 141 | yaml_path = Path(yaml_path) 142 | with yaml_path.open('r') as f: 143 | yaml_content = yaml.safe_load(f) 144 | 145 | # Update paths for new YOLO v5+ structure 146 | yaml_content['train'] = 'images/train' # Changed from train/images 147 | yaml_content['val'] = 'images/val' # Changed from train/images 148 | yaml_content['test'] = '../test/images' 149 | 150 | with yaml_path.open('w') as f: 151 | yaml.dump(yaml_content, f, default_flow_style=False) 152 | 153 | self.yaml_path = str(yaml_path) 154 | return self.yaml_path 155 | 156 | def load_yaml(self, yaml_path=None): 157 | if yaml_path is None: 158 | yaml_path, _ = QFileDialog.getOpenFileName(self.main_window, "Select YOLO Dataset YAML", "", "YAML Files (*.yaml *.yml)") 159 | if yaml_path and os.path.exists(yaml_path): 160 | with open(yaml_path, 'r') as f: 161 | try: 162 | yaml_data = yaml.safe_load(f) 163 | print(f"Loaded YAML contents: {yaml_data}") 164 | 165 | # Ensure paths are relative 166 | for key in ['train', 'val', 'test']: 167 | if key in yaml_data and os.path.isabs(yaml_data[key]): 168 | yaml_data[key] = os.path.relpath(yaml_data[key], start=os.path.dirname(yaml_path)) 169 | 170 | print(f"Updated YAML contents: {yaml_data}") 171 | 172 | # Save the updated YAML data 173 | self.yaml_data = yaml_data 174 | self.yaml_path = yaml_path 175 | 176 | # Write the updated YAML back to the file 177 | with open(yaml_path, 'w') as f: 178 | yaml.dump(yaml_data, f, default_flow_style=False) 179 | 180 | return True 181 | except yaml.YAMLError as e: 182 | QMessageBox.critical(self.main_window, "Error Loading YAML", f"Invalid YAML file. Error: {str(e)}") 183 | return False 184 | 185 | def on_train_epoch_end(self, trainer): 186 | epoch = trainer.epoch + 1 # Add 1 to start from 1 instead of 0 187 | total_epochs = trainer.epochs 188 | loss = trainer.loss.item() 189 | progress_text = f"Epoch {epoch}/{total_epochs}, Loss: {loss:.4f}" 190 | 191 | # Only emit the signal, don't call the callback directly 192 | self.progress_signal.emit(progress_text) 193 | 194 | if self.stop_training: 195 | trainer.model.stop = True 196 | self.stop_training = False 197 | return False 198 | return True 199 | 200 | def train_model(self, epochs=100, imgsz=640): 201 | if self.model is None: 202 | raise ValueError("No model loaded. Please load a model first.") 203 | if self.yaml_path is None or not Path(self.yaml_path).exists(): 204 | raise FileNotFoundError("Dataset YAML not found. 
Please prepare or load a dataset first.") 205 | 206 | self.stop_training = False 207 | self.total_epochs = epochs 208 | self.epoch_info.clear() 209 | 210 | # Add the callback 211 | self.model.add_callback("on_train_epoch_end", self.on_train_epoch_end) 212 | 213 | try: 214 | yaml_path = Path(self.yaml_path) 215 | yaml_dir = yaml_path.parent 216 | 217 | print(f"Training with YAML: {yaml_path}") 218 | print(f"YAML directory: {yaml_dir}") 219 | 220 | with yaml_path.open('r') as f: 221 | yaml_content = yaml.safe_load(f) 222 | print(f"YAML content: {yaml_content}") 223 | 224 | # For now, use train as val since we don't have separate validation set 225 | train_dir = str(yaml_dir / 'images' / 'train') 226 | 227 | # Update YAML content with correct paths 228 | yaml_content['train'] = train_dir 229 | yaml_content['val'] = train_dir # Use same directory for validation 230 | 231 | # Create the val directory structure if it doesn't exist 232 | val_img_dir = yaml_dir / 'images' / 'val' 233 | val_label_dir = yaml_dir / 'labels' / 'val' 234 | val_img_dir.mkdir(parents=True, exist_ok=True) 235 | val_label_dir.mkdir(parents=True, exist_ok=True) 236 | 237 | # Write updated YAML with adjusted paths 238 | temp_yaml_path = yaml_dir / 'temp_train.yaml' 239 | with temp_yaml_path.open('w') as f: 240 | yaml.dump(yaml_content, f, default_flow_style=False) 241 | 242 | print(f"Training with updated YAML: {temp_yaml_path}") 243 | print(f"Updated YAML content: {yaml_content}") 244 | 245 | results = self.model.train(data=str(temp_yaml_path), epochs=epochs, imgsz=imgsz) 246 | return results 247 | finally: 248 | # Clear the callback 249 | self.model.callbacks["on_train_epoch_end"] = [] 250 | # Remove temporary YAML file 251 | if 'temp_yaml_path' in locals(): 252 | temp_yaml_path.unlink(missing_ok=True) 253 | 254 | def verify_dataset_structure(self): 255 | yaml_path = Path(self.yaml_path) 256 | yaml_dir = yaml_path.parent 257 | 258 | with yaml_path.open('r') as f: 259 | yaml_content = yaml.safe_load(f) 260 | 261 | # Use paths from YAML content 262 | train_images_dir = yaml_dir / yaml_content.get('train', 'images/train') 263 | val_images_dir = yaml_dir / yaml_content.get('val', 'images/val') 264 | train_labels_dir = yaml_dir / 'labels' / 'train' # Labels directory corresponds to images 265 | val_labels_dir = yaml_dir / 'labels' / 'val' # Labels directory corresponds to images 266 | 267 | # Check both train and val directories 268 | missing_dirs = [] 269 | if not train_images_dir.exists(): 270 | missing_dirs.append(f"Training images directory: {train_images_dir}") 271 | if not train_labels_dir.exists(): 272 | missing_dirs.append(f"Training labels directory: {train_labels_dir}") 273 | if not val_images_dir.exists(): 274 | missing_dirs.append(f"Validation images directory: {val_images_dir}") 275 | if not val_labels_dir.exists(): 276 | missing_dirs.append(f"Validation labels directory: {val_labels_dir}") 277 | 278 | if missing_dirs: 279 | raise FileNotFoundError(f"The following directories were not found:\n" + "\n".join(missing_dirs)) 280 | 281 | print(f"Dataset structure verified:") 282 | print(f"Train images: {train_images_dir}") 283 | print(f"Train labels: {train_labels_dir}") 284 | print(f"Val images: {val_images_dir}") 285 | print(f"Val labels: {val_labels_dir}") 286 | 287 | def check_ultralytics_settings(self): 288 | settings_path = Path.home() / ".config" / "Ultralytics" / "settings.yaml" 289 | if settings_path.exists(): 290 | with settings_path.open('r') as f: 291 | settings = yaml.safe_load(f) 292 | print(f"Ultralytics 
settings: {settings}") 293 | else: 294 | print("Ultralytics settings file not found.") 295 | 296 | def stop_training_signal(self): 297 | self.stop_training = True 298 | self.progress_signal.emit("Stopping training...") 299 | 300 | def set_progress_callback(self, callback): 301 | self.progress_callback = callback 302 | 303 | 304 | def stop_training_callback(self, trainer): 305 | if getattr(self, 'stop_training', False): 306 | trainer.model.stop = True 307 | self.stop_training = False 308 | 309 | 310 | 311 | def on_epoch_end(self, trainer): 312 | # Get current epoch 313 | epoch = trainer.epoch if hasattr(trainer, 'epoch') else trainer.current_epoch 314 | 315 | # Get total epochs 316 | total_epochs = self.total_epochs # Use the value we set in train_model 317 | 318 | # Get loss 319 | if hasattr(trainer, 'metrics') and 'train/box_loss' in trainer.metrics: 320 | loss = trainer.metrics['train/box_loss'] 321 | elif hasattr(trainer, 'loss'): 322 | loss = trainer.loss 323 | else: 324 | loss = 0 # Default value if loss can't be found 325 | 326 | # Ensure loss is a number 327 | loss = float(loss) 328 | 329 | info = f"Epoch {epoch}/{total_epochs}, Loss: {loss:.4f}" 330 | self.epoch_info.append(info) 331 | 332 | display_text = f"Current Progress:\n" + "\n".join(self.epoch_info) 333 | if self.progress_callback: 334 | self.progress_callback(display_text) 335 | 336 | 337 | def save_model(self): 338 | if self.model is None: 339 | raise ValueError("No model to save. Please train a model first.") 340 | save_path, _ = QFileDialog.getSaveFileName(self.main_window, "Save YOLO Model", "", "YOLO Model (*.pt)") 341 | if save_path: 342 | self.model.export(save_path) 343 | return True 344 | return False 345 | 346 | def load_prediction_model(self, model_path, yaml_path): 347 | try: 348 | self.model = YOLO(model_path) 349 | with open(yaml_path, 'r') as f: 350 | self.prediction_yaml = yaml.safe_load(f) 351 | 352 | if 'names' not in self.prediction_yaml: 353 | raise ValueError("The YAML file does not contain a 'names' section for class names.") 354 | 355 | self.class_names = self.prediction_yaml['names'] 356 | print(f"Loaded class names: {self.class_names}") 357 | 358 | # Verify that the number of classes in the YAML matches the model 359 | if len(self.class_names) != len(self.model.names): 360 | mismatch_message = (f"Warning: Number of classes in YAML ({len(self.class_names)}) " 361 | f"does not match the model ({len(self.model.names)}). " 362 | "This may cause issues during prediction.") 363 | print(mismatch_message) 364 | return True, mismatch_message 365 | 366 | return True, None 367 | except Exception as e: 368 | error_message = f"Error loading model or YAML: {str(e)}" 369 | print(error_message) 370 | return False, error_message 371 | 372 | def predict(self, input_data): 373 | if self.model is None: 374 | raise ValueError("No model loaded. Please load a model first.") 375 | if isinstance(input_data, str): 376 | # It's a file path 377 | results = self.model(input_data, task='segment', conf=self.conf_threshold, save=False, show=False) 378 | elif isinstance(input_data, np.ndarray): 379 | # It's a numpy array 380 | results = self.model(input_data, task='segment', conf=self.conf_threshold, save=False, show=False) 381 | else: 382 | raise ValueError("Invalid input type. 
Expected file path or numpy array.")
383 | 
384 |         # Both values come from the original image: ultralytics stores its shape in orig_shape and the array itself in orig_img
385 |         input_size = results[0].orig_shape
386 |         original_size = results[0].orig_img.shape[:2]
387 |         return results, input_size, original_size
388 | 
389 |     def set_conf_threshold(self, conf):
390 |         self.conf_threshold = conf
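# Minimal usage sketch for prediction (illustrative only — `best.pt`,
# `dataset.yaml` and `image.png` are hypothetical paths, and `window` is the
# application's main window instance):
#
#     trainer = YOLOTrainer(project_dir="/path/to/project", main_window=window)
#     ok, msg = trainer.load_prediction_model("best.pt", "dataset.yaml")
#     if ok:
#         trainer.set_conf_threshold(0.5)
#         results, input_size, original_size = trainer.predict("image.png")
--------------------------------------------------------------------------------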