├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Docs ├── GIMP_StableDiffusion_Usage.md ├── figs │ ├── OpenVINO_installation.png │ ├── OpenVINO_installation_archives.png │ ├── VC_runtime_close.png │ ├── VC_runtime_intallation.png │ ├── VC_runtime_processing.png │ ├── add.png │ ├── download_SD1.5.png │ ├── gimp_create_image.png │ ├── gimp_execute_SD.png │ ├── gimp_installation_click_continue.png │ ├── gimp_installation_finish.png │ ├── gimp_installation_install.png │ ├── gimp_installation_install_for_all.png │ ├── gimp_installation_processing.png │ ├── gimp_installation_select_english.png │ ├── gimp_launch.png │ ├── gimp_load_model.png │ ├── gimp_ok.png │ ├── gimp_sd_ui.png │ ├── git_installation_close.png │ ├── git_installation_setup_1.png │ ├── git_installation_setup_2.png │ ├── git_installation_setup_3.png │ ├── model_downloding_SD1.5.png │ ├── python_installation_close.png │ ├── python_installation_processing.png │ ├── python_installation_setup.png │ ├── sd3_med_directory.png │ ├── sd3_med_turbo_gimp.png │ ├── stable-diffusion_3.png │ ├── stable-diffusion_model1.png │ ├── stable-diffusion_model2.png │ ├── stable-diffusion_model3.png │ ├── stable-diffusion_model3_1.png │ ├── stable-diffusion_model4.png │ ├── standalone1.png │ └── standalone2.png ├── linux_install_guide.md ├── stable-diffusion-v3.md └── user_guide_for_windows_users.md ├── LICENSE.md ├── MANIFEST.in ├── README.md ├── gifs ├── controlnet-openpose.png ├── semantic-segmentation.webp ├── stable-diffusion.png ├── stable-diffusion.webp ├── stable-diffusion1.png ├── stable-diffusion2.png └── super-res.webp ├── gimp-screenshot.PNG ├── gimpopenvino ├── __init__.py ├── install_utils.py └── plugins │ ├── __init__.py │ ├── openvino_utils │ ├── __init__.py │ ├── images │ │ ├── __init__.py │ │ ├── error_icon.png │ │ └── plugin_logo.png │ ├── model_management_window.py │ ├── plugin_utils.py │ └── tools │ │ ├── __init__.py │ │ ├── complete_install.py │ │ ├── model_management_server.py │ │ ├── model_manager.py │ │ 
├── openvino_common │ │ ├── __init__.py │ │ ├── adapters │ │ │ ├── __init__.py │ │ │ ├── model_adapter.py │ │ │ ├── openvino_adapter.py │ │ │ └── utils.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── image_model.py │ │ │ ├── model.py │ │ │ ├── types.py │ │ │ └── utils.py │ │ ├── models_ov │ │ │ ├── __init__.py │ │ │ ├── controlnet_canny_edge.py │ │ │ ├── controlnet_cannyedge_advanced.py │ │ │ ├── controlnet_openpose.py │ │ │ ├── controlnet_openpose_advanced.py │ │ │ ├── controlnet_scribble.py │ │ │ ├── segmentation.py │ │ │ ├── stable_diffusion_3.py │ │ │ ├── stable_diffusion_engine.py │ │ │ ├── stable_diffusion_engine_genai.py │ │ │ ├── stable_diffusion_engine_inpainting.py │ │ │ ├── stable_diffusion_engine_inpainting_advanced.py │ │ │ └── stable_diffusion_engine_inpainting_genai.py │ │ ├── performance_metrics.py │ │ ├── pipelines │ │ │ ├── __init__.py │ │ │ └── async_pipeline.py │ │ ├── semseg_run_ov.py │ │ ├── superes_run_ov.py │ │ └── tokens_bert.py │ │ ├── semseg_ov.py │ │ ├── stable_diffusion_ov_server.py │ │ ├── superresolution_ov.py │ │ ├── test_client.py │ │ └── tools_utils.py │ ├── semseg_ov │ ├── __init__.py │ └── semseg_ov.py │ ├── stable_diffusion_ov │ ├── __init__.py │ └── stable_diffusion_ov.py │ └── superresolution_ov │ ├── __init__.py │ └── superresolution_ov.py ├── install.bat ├── install.ps1 ├── install.sh ├── model_setup.py ├── requirements.txt ├── sampleinput ├── car.jpg ├── haze.png └── img.png ├── screenshot.png ├── security.md ├── setup.py ├── testscases ├── StableDiffusion │ ├── README.md │ └── stable_diffusion_engine_tc.py ├── output │ ├── interpolateframes │ │ ├── img0.png │ │ ├── img1.png │ │ ├── img10.png │ │ ├── img11.png │ │ ├── img12.png │ │ ├── img13.png │ │ ├── img14.png │ │ ├── img15.png │ │ ├── img16.png │ │ ├── img2.png │ │ ├── img3.png │ │ ├── img4.png │ │ ├── img5.png │ │ ├── img6.png │ │ ├── img7.png │ │ ├── img8.png │ │ └── img9.png │ ├── tmp-canny.jpg │ ├── tmp-deblur.jpg │ ├── tmp-deepcolor.jpg │ ├── tmp-dehaze.jpg │ ├── 
tmp-denoise.jpg │ ├── tmp-depth.png │ ├── tmp-edge.jpg │ ├── tmp-enlighten.jpg │ ├── tmp-inpaint.png │ ├── tmp-kmeans.jpg │ ├── tmp-matting.png │ ├── tmp-object-detect.png │ ├── tmp-parseface.png │ ├── tmp-semseg.png │ └── tmp-super.png └── sampleinput │ ├── alpha.png │ ├── blur.jpg │ ├── car.jpg │ ├── chocolates.jpg │ ├── examples │ ├── celeba │ │ ├── images │ │ │ ├── celeba_01.png │ │ │ ├── celeba_02.png │ │ │ ├── celeba_03.png │ │ │ ├── celeba_04.png │ │ │ └── celeba_05.png │ │ └── masks │ │ │ ├── celeba_01.png │ │ │ ├── celeba_02.png │ │ │ ├── celeba_03.png │ │ │ ├── celeba_04.png │ │ │ └── celeba_05.png │ ├── places2 │ │ ├── images │ │ │ ├── places2_01.png │ │ │ ├── places2_02.png │ │ │ ├── places2_03.png │ │ │ ├── places2_04.png │ │ │ ├── places2_05.png │ │ │ └── places2_06.png │ │ └── masks │ │ │ ├── places2_01.png │ │ │ ├── places2_02.png │ │ │ ├── places2_03.png │ │ │ ├── places2_04.png │ │ │ ├── places2_05.png │ │ │ └── places2_06.png │ └── psv │ │ ├── images │ │ ├── psv_01.png │ │ ├── psv_02.png │ │ ├── psv_03.png │ │ ├── psv_04.png │ │ └── psv_05.png │ │ └── masks │ │ ├── psv_01.png │ │ ├── psv_02.png │ │ ├── psv_03.png │ │ ├── psv_04.png │ │ └── psv_05.png │ ├── face.png │ ├── haze.png │ ├── im1.png │ ├── im2.png │ ├── img.png │ └── portrait.jpg ├── thirdparty ├── LICENSE.md └── LICENSE_MODEL.md └── weights ├── semseg-ov ├── deeplabv3.bin ├── deeplabv3.mapping ├── deeplabv3.xml ├── semantic-segmentation-adas-0001.bin └── semantic-segmentation-adas-0001.xml └── superresolution-ov ├── realesrgan.bin ├── realesrgan.mapping ├── realesrgan.xml ├── single-image-super-resolution-1032.bin ├── single-image-super-resolution-1032.xml ├── single-image-super-resolution-1033.bin └── single-image-super-resolution-1033.xml /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in 
our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | CommunityCodeOfConduct AT intel DOT com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. 
No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ### License 4 | 5 | openvino-ai-plugins-gimp is licensed under the terms in [Apache 2.0] https://github.com/intel/openvino-ai-plugins-gimp/LICENSE.md. By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. 6 | 7 | ### Sign your work 8 | 9 | Please use the sign-off line at the end of the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify 10 | the below (from [developercertificate.org](http://developercertificate.org/)): 11 | 12 | ``` 13 | Developer Certificate of Origin 14 | Version 1.1 15 | 16 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 17 | 660 York Street, Suite 102, 18 | San Francisco, CA 94110 USA 19 | 20 | Everyone is permitted to copy and distribute verbatim copies of this 21 | license document, but changing it is not allowed. 
22 | 23 | Developer's Certificate of Origin 1.1 24 | 25 | By making a contribution to this project, I certify that: 26 | 27 | (a) The contribution was created in whole or in part by me and I 28 | have the right to submit it under the open source license 29 | indicated in the file; or 30 | 31 | (b) The contribution is based upon previous work that, to the best 32 | of my knowledge, is covered under an appropriate open source 33 | license and I have the right under that license to submit that 34 | work with modifications, whether created in whole or in part 35 | by me, under the same open source license (unless I am 36 | permitted to submit under a different license), as indicated 37 | in the file; or 38 | 39 | (c) The contribution was provided directly to me by some other 40 | person who certified (a), (b) or (c) and I have not modified 41 | it. 42 | 43 | (d) I understand and agree that this project and the contribution 44 | are public and that a record of the contribution (including all 45 | personal information I submit with it, including my sign-off) is 46 | maintained indefinitely and may be redistributed consistent with 47 | this project or the open source license(s) involved. 48 | ``` 49 | 50 | Then you just add a line to every git commit message: 51 | 52 | Signed-off-by: Joe Smith 53 | 54 | Use your real name (sorry, no pseudonyms or anonymous contributions.) 55 | 56 | If you set your `user.name` and `user.email` git configs, you can sign your 57 | commit automatically with `git commit -s`. 58 | -------------------------------------------------------------------------------- /Docs/GIMP_StableDiffusion_Usage.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Using Stable Diffusion in GIMP 4 | 5 | After installing GIMP and the GIMP AI plugins with OpenVINO™, you can now start GIMP application from Start Menu on Windows, or from the Application Launcher in Ubuntu. 
6 | 7 | >Notes: This section runs `SD-1.5 Square (512x512)` as an example. 8 | 9 | ## Download & Install Stable Diffusion Models 10 | - Once GIMP application is started, Click "__Layer__" \> "__OpenVINO-AI-Plugins__" \> "__Stable diffusion__". 11 | - Once Stable Diffusion Plugin UI opens up, click on Model Button on the top left corner as shown in the below picture: 12 | 13 | ![](figs/stable-diffusion_model1.png) 14 | - This now opens the Stable Diffusion Model Management window from where you can install the required models by clicking on "Install" button next to the model name. 15 | 16 | ![](figs/stable-diffusion_model2.png) 17 | 18 | - Download Stable Diffusion 1.5 Square as an example. 19 | 20 | ![](figs/stable-diffusion_model3.png) 21 | 22 | - You can select multiple models at once, and the Model Manager will queue them for installation. 23 | 24 | ![](figs/stable-diffusion_model3_1.png) 25 | 26 | - Once the installation process is completed, you will see the "Install" button is updated to "Installed" and you will find the models in the drop-down list 27 | 28 | ![](figs/stable-diffusion_model4.png) 29 | 30 | >**Notes:** 31 | > - The downloaded models include INT8, INT8A16, and FP16 precision 32 | > - Weights are saved at `C:\Users\\openvino-ai-plugins-gimp\weights`. 33 | 34 | 35 | 36 | ## Execute Stable-Diffusion – Stable Diffusion 1.5 INT8A16 37 | 38 | Please follow below steps to execute Stable-Diffusion - Stable Diffusion 1.5 INT8A16. For other features, please refer to [OpenVINO™ Image Generator Plugin with Stable Diffusion](https://github.com/intel/openvino-ai-plugins-gimp/tree/main?tab=readme-ov-file#openvino-image-generator-plugin-with-stable-diffusion) section. 39 | 40 | - Follow the previous section to launch GIMP 41 | 42 | ![](figs/gimp_launch.png) 43 | 44 | - Click "__Layer__" \> "__OpenVINO-AI-Plugins__" \> "__Stable diffusion__". 
45 | 46 | ![](figs/gimp_execute_SD.png) 47 | 48 | - Change the selected fields to set "Stable Diffusion" configuration and choose the desired "Power Mode" you want, then click "Load Models" to load the models into the target devices based on your power mode selection. 49 | 50 | ![](figs/gimp_load_model.png) 51 | 52 | > **Notes:** This step takes some time. 53 | 54 | - Finally, you can optionally enter any text or change the parameters in the selected field, then click "Generate" to generate an image. 55 | 56 | ![](figs/gimp_sd_ui.png) 57 | 58 | >**Notes:** 59 | > - Only Intel® devices are supported (CPU, NPU, integrated GPU, Intel® discrete GPUs) 60 | > - Power Mode is now enabled - Users can select between the following options depending on their use case: 61 | > - Best Performance 62 | > - Best Power Efficiency 63 | > - Balanced 64 | > - If you wish to generate more images in a single run, please modify the Number of Images section. 65 | 66 | -------------------------------------------------------------------------------- /Docs/figs/OpenVINO_installation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/OpenVINO_installation.png -------------------------------------------------------------------------------- /Docs/figs/OpenVINO_installation_archives.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/OpenVINO_installation_archives.png -------------------------------------------------------------------------------- /Docs/figs/VC_runtime_close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/VC_runtime_close.png 
-------------------------------------------------------------------------------- /Docs/figs/VC_runtime_intallation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/VC_runtime_intallation.png -------------------------------------------------------------------------------- /Docs/figs/VC_runtime_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/VC_runtime_processing.png -------------------------------------------------------------------------------- /Docs/figs/add.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/add.png -------------------------------------------------------------------------------- /Docs/figs/download_SD1.5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/download_SD1.5.png -------------------------------------------------------------------------------- /Docs/figs/gimp_create_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_create_image.png -------------------------------------------------------------------------------- /Docs/figs/gimp_execute_SD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_execute_SD.png 
-------------------------------------------------------------------------------- /Docs/figs/gimp_installation_click_continue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_click_continue.png -------------------------------------------------------------------------------- /Docs/figs/gimp_installation_finish.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_finish.png -------------------------------------------------------------------------------- /Docs/figs/gimp_installation_install.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_install.png -------------------------------------------------------------------------------- /Docs/figs/gimp_installation_install_for_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_install_for_all.png -------------------------------------------------------------------------------- /Docs/figs/gimp_installation_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_processing.png -------------------------------------------------------------------------------- /Docs/figs/gimp_installation_select_english.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_installation_select_english.png -------------------------------------------------------------------------------- /Docs/figs/gimp_launch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_launch.png -------------------------------------------------------------------------------- /Docs/figs/gimp_load_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_load_model.png -------------------------------------------------------------------------------- /Docs/figs/gimp_ok.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_ok.png -------------------------------------------------------------------------------- /Docs/figs/gimp_sd_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/gimp_sd_ui.png -------------------------------------------------------------------------------- /Docs/figs/git_installation_close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/git_installation_close.png -------------------------------------------------------------------------------- /Docs/figs/git_installation_setup_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/git_installation_setup_1.png -------------------------------------------------------------------------------- /Docs/figs/git_installation_setup_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/git_installation_setup_2.png -------------------------------------------------------------------------------- /Docs/figs/git_installation_setup_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/git_installation_setup_3.png -------------------------------------------------------------------------------- /Docs/figs/model_downloding_SD1.5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/model_downloding_SD1.5.png -------------------------------------------------------------------------------- /Docs/figs/python_installation_close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/python_installation_close.png -------------------------------------------------------------------------------- /Docs/figs/python_installation_processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/python_installation_processing.png -------------------------------------------------------------------------------- 
/Docs/figs/python_installation_setup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/python_installation_setup.png -------------------------------------------------------------------------------- /Docs/figs/sd3_med_directory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/sd3_med_directory.png -------------------------------------------------------------------------------- /Docs/figs/sd3_med_turbo_gimp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/sd3_med_turbo_gimp.png -------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_3.png -------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_model1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_model1.png -------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_model2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_model2.png 
-------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_model3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_model3.png -------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_model3_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_model3_1.png -------------------------------------------------------------------------------- /Docs/figs/stable-diffusion_model4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/stable-diffusion_model4.png -------------------------------------------------------------------------------- /Docs/figs/standalone1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/standalone1.png -------------------------------------------------------------------------------- /Docs/figs/standalone2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/Docs/figs/standalone2.png -------------------------------------------------------------------------------- /Docs/linux_install_guide.md: -------------------------------------------------------------------------------- 1 | There are two methods for installing the plugins in Linux: 2 | 3 | * Install the plugins from the Snap Store 4 | * 
Build and install GIMP and the plugins manually 5 | 6 | # Snap Installation 7 | 8 | For Linux distributions supporting snaps (e.g. Ubuntu), the plugins can be installed along with the GIMP snap with these commands: 9 | 10 | ``` 11 | sudo snap install gimp --channel preview/stable 12 | sudo snap install intel-npu-driver --beta # for NPU support 13 | sudo snap install openvino-toolkit-2404 --beta 14 | sudo snap install openvino-ai-plugins-gimp --beta 15 | ``` 16 | 17 | More details can be found [here](https://github.com/snapcrafters/gimp/tree/preview?tab=readme-ov-file#openvino-ai-plugins). 18 | 19 | # Building and installing the plugins manually 20 | 21 | ## Build GIMP 22 | 1. Create a directory where you will download and build all of the sources for GIMP 23 | ```sh 24 | cd $HOME 25 | mkdir ./GIMP 26 | cd ./GIMP 27 | ``` 28 | 2. Install Dependencies 29 | ```sh 30 | sudo apt update 31 | sudo apt install -y cmake libgflags-dev patchelf python3-pip gettext git git-lfs build-essential meson ninja-build autoconf libcairo2-dev libxt-dev libgdk-pixbuf-2.0-dev libgexiv2-dev libgtk-3-dev libmypaint-dev mypaint-brushes libbz2-dev libatk1.0-dev libgirepository1.0-dev libx11-xcb-dev libwmf-dev libxcb-glx0-dev libxcb-dri2-0-dev libxxf86vm-dev valgrind libappstream-glib-dev libpugixml-dev libxmu-dev libpoppler-glib-dev xsltproc librsvg2-dev libopencv-dev libgirepository-2.0-dev python3-venv 32 | ``` 33 | 3. Clone, build, and install babl 34 | ```sh 35 | git clone https://gitlab.gnome.org/GNOME/babl 36 | cd babl 37 | git checkout tags/BABL_0_1_114 38 | meson _build 39 | ninja -C _build 40 | sudo ninja -C _build install 41 | cd .. 42 | ``` 43 | 4. Clone, build, and install gegl 44 | ```sh 45 | git clone https://gitlab.gnome.org/GNOME/gegl 46 | cd gegl 47 | git checkout tags/GEGL_0_4_62 48 | meson _build 49 | ninja -C _build 50 | sudo ninja -C _build install 51 | cd .. 52 | ``` 53 | 54 | 5. 
Clone, build, and install Gimp 55 | ```sh 56 | git clone https://gitlab.gnome.org/GNOME/gimp 57 | cd gimp 58 | git checkout tags/GIMP_3_0_4 59 | git submodule update --init 60 | export GI_TYPELIB_PATH=/usr/lib/x86_64-linux-gnu/girepository-1.0:/usr/local/lib/x86_64-linux-gnu/girepository-1.0 61 | sudo ldconfig 62 | meson _build 63 | ninja -C _build 64 | sudo ninja -C _build install 65 | cd .. 66 | ``` 67 | ## Install Plugins 68 | 1. Clone this repo 69 | ```sh 70 | cd $HOME/GIMP 71 | git clone https://github.com/intel/openvino-ai-plugins-gimp.git 72 | ``` 73 | 74 | 2. Run install script, and download models. The following steps will create the virtual environment "gimpenv3" and install all required packages. 75 | ```sh 76 | chmod +x openvino-ai-plugins-gimp/install.sh 77 | ./openvino-ai-plugins-gimp/install.sh 78 | ``` 79 | 80 | ## Verify Installation 81 | Start GIMP, ensuring to setup the environment variables correctly, and you should see 'OpenVINO-AI-Plugins' show up in 'Layer' menu. 82 | ```sh 83 | export GI_TYPELIB_PATH=/usr/lib/x86_64-linux-gnu/girepository-1.0:/usr/local/lib/x86_64-linux-gnu/girepository-1.0 84 | export LD_LIBRARY_PATH=/usr/local/lib/x86_64-linux-gnu 85 | gimp-3 86 | ``` 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /Docs/stable-diffusion-v3.md: -------------------------------------------------------------------------------- 1 | # Image generation with Stable Diffusion 3.0 Medium & Stable Diffusion 3.5 Medium Turbo 2 | 3 | ## Stable Diffusion 3.0 Medium 4 | 5 | Stable Diffusion 3 Medium is a Multimodal Diffusion Transformer (MMDiT) text-to-image model that features greatly improved performance in image quality, typography, complex prompt understanding, and resource-efficiency. 
6 | 7 | 8 | More details about model can be found in [model card](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), [research paper](https://stability.ai/news/stable-diffusion-3-research-paper) and [Stability.AI blog post](https://stability.ai/news/stable-diffusion-3-medium). 9 | 10 | ## Stable Diffusion 3.5 Turbo 11 | 12 | TensorArt Stable Diffusion 3.5 Medium Turbo (SD3.5M Turbo) is a high-performance text-to-image model distilled from StabilityAI's stable-diffusion-3.5-medium. This model emphasizes stability and efficiency, making it suitable for a wide range of art styles and creative expression scenarios. 13 | More details about model can be found in [model card](https://huggingface.co/tensorart/stable-diffusion-3.5-medium-turbo) 14 | 15 | 16 | 17 | ## Enabling SD3 Medium in GIMP 18 | ### Converting the Models Using optimum-cli 19 | 20 | ``` 21 | powershell -Command "Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/openvinotoolkit/openvino.genai/refs/heads/master/samples/deployment-requirements.txt' -OutFile deployment-requirements.txt" 22 | powershell -Command "Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/openvinotoolkit/openvino.genai/refs/heads/master/samples/export-requirements.txt' -OutFile export-requirements.txt" 23 | powershell -Command "Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/openvinotoolkit/openvino.genai/refs/heads/master/samples/requirements.txt' -OutFile requirements.txt" 24 | ``` 25 | 26 | Create & activate a python virtual env: 27 | ``` 28 | python -m venv sd_env 29 | sd_env\Scripts\activate 30 | ``` 31 | Install pip packages: 32 | ``` 33 | pip install -r requirements.txt 34 | ``` 35 | 36 | Note: For Stable Diffusion 3, you may need to log into Hugging Face using a token that has access to SD3 gated repo: 37 | ``` 38 | huggingface-cli login --token 39 | ``` 40 | 41 | ### Stable Diffusion 3 Medium Diffusers (FP16): 42 | 43 | ``` 44 | optimum-cli export openvino --model 
stabilityai/stable-diffusion-3-medium-diffusers --task stable-diffusion --weight-format fp16 %userprofile%\openvino-ai-plugins-gimp\weights\stable-diffusion-ov\stable-diffusion-3.0-medium\square_diffusers 45 | ``` 46 | 47 | ### Stable Diffusion 3.5 Medium Turbo (FP16): 48 | 49 | ``` 50 | optimum-cli export openvino --model tensorart/stable-diffusion-3.5-medium-turbo --task stable-diffusion --weight-format fp16 %userprofile%\openvino-ai-plugins-gimp\weights\stable-diffusion-ov\stable-diffusion-3.5-medium\square_turbo 51 | ``` 52 | 53 | 54 | ### Model Installation 55 | 56 | After the above models are created, create a file called `install_info.json` inside the square_turbo & square_diffusers directory. This will ensure that the GIMP plugin will recognize this as a valid model. It should contain the following text: 57 | ``` 58 | { 59 | "hf_repo_id": "none", 60 | "hf_commit_id": "none" 61 | } 62 | ``` 63 | 64 | Verify that the copied files and directory structure looks as follows: 65 | 66 | ![](figs/sd3_med_directory.png) 67 | 68 | ### Running with GIMP 69 | After completing model installation steps, SD3.5 Medium - Diffuser & Turbo will now be available in the Stable Diffusion UI. 70 | Note that for SD3.5 Medium Turbo - Select Guidance Scale between 0.0 - 1.0, as anything greater than 1.0 will result in a failure. Also, with the Turbo version one can generate valid images in as few as 4 iterations. 71 | See the screenshot below: 72 | ![](figs/sd3_med_turbo_gimp.png) 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /Docs/user_guide_for_windows_users.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # User guide for Windows Users 4 | 5 | ## Install GIMP AI plugins with OpenVINO™ 6 | 7 | >__Notes:__ 8 | >This set of plugins only supports Intel® devices. Non-Intel GPUs and NPUs are not supported. 9 | >NPU is supported on all Intel® Core™ Ultra series processors. 
10 | 11 | ### Pre-requisite for execution on Intel® NPU 12 | 13 | - Hardware: 14 | - Intel® Core™ Ultra platform 15 | - 16GB system memory as minimum requirement 16 | - internet connection is required for installation 17 | - Driver: 18 | - Intel® NPU driver: Use the most recent driver you have available. 19 | - Software and Package: 20 | - git 21 | - python 3.9-3.12 22 | - Note: This document will use python 3.9.13 as an example. 23 | - VC runtime 24 | - [GIMP 3.0.4](https://download.gimp.org/gimp/v3.0/windows/gimp-3.0.4-setup.exe) 25 | - [GIMP AI plugins with OpenVINO™ Backend](https://github.com/intel/openvino-ai-plugins-gimp) from Github. 26 | 27 | 28 | ### Install Python 29 | 30 | >__Notes:__ Use Python `3.9.13` as an example. 31 | 32 | Please download the prebuilt Windows x64 package from [link](https://www.python.org/ftp/python/3.9.13/python-3.9.13-amd64.exe). After downloading, please following below steps to install the Python package. 33 | 34 | - Check "Add Python 3.9 to PATH", then click "Install Now" 35 | 36 | ![](figs/python_installation_setup.png) 37 | ![](figs/python_installation_processing.png) 38 | 39 | 40 | - Click "Close" 41 | 42 | ![](figs/python_installation_close.png) 43 | 44 | 45 | ### Install Git 46 | 47 | >__Notes:__ Use Git `2.43.0` as an example. 48 | 49 | Please download the prebuilt Windows x64 package from [link](https://github.com/git-for-windows/git/releases/download/v2.43.0.windows.1/Git-2.43.0-64-bit.exe). After downloading, please following below steps to install the Git. 
50 | 51 | - Click "Next", then click "Install" 52 | 53 | ![](figs/git_installation_setup_1.png) 54 | 55 | ![](figs/git_installation_setup_2.png) 56 | 57 | ![](figs/git_installation_setup_3.png) 58 | 59 | - Check out "View Release Notes", then click "Close" 60 | 61 | ![](figs/git_installation_close.png) 62 | 63 | 64 | ### Install Microsoft Visual C++ Redistributable 65 | 66 | Please download the latest Visual C++ Redistributable package from MSFT [site](https://aka.ms/vs/17/release/vc_redist.x64.exe). Then, install this package. 67 | 68 | ![](figs/VC_runtime_intallation.png) 69 | 70 | ![](figs/VC_runtime_processing.png) 71 | 72 | ![](figs/VC_runtime_close.png) 73 | 74 | 75 | ### Install GIMP 3.0.4 76 | 77 | Please download [GIMP 3.0.4](https://download.gimp.org/gimp/v3.0/windows/gimp-3.0.4-setup.exe) and follow below steps to install GIMP. 78 | 79 | - Click "Install for all users (recommended)" 80 | 81 | ![](figs/gimp_installation_install_for_all.png) 82 | 83 | - Select English and click "OK" 84 | 85 | ![](figs/gimp_installation_select_english.png) 86 | 87 | - Click "Continue" 88 | 89 | ![](figs/gimp_installation_click_continue.png) 90 | 91 | - Click "Install" 92 | 93 | ![](figs/gimp_installation_install.png) 94 | ![](figs/gimp_installation_processing.png) 95 | 96 | - Click "Finish" to complete the installation of GIMP 97 | 98 | ![](figs/gimp_installation_finish.png) 99 | 100 | 101 | 102 | ### Install GIMP AI plugins 103 | - Open command prompt and download the latest release from Github by git with below instruction. 104 | 105 | ```sh 106 | cd C:\Users\Public\ 107 | mkdir GIMP 108 | cd GIMP 109 | git clone https://github.com/intel/openvino-ai-plugins-gimp.git 110 | ``` 111 | > __Notes__: 112 | > This is an example that will create a `GIMP` folder in `C:\Users\Public\`, and then download the package to `C:\Users\Public\GIMP`, you still can define where to place this package by yourself. 
- In the same command prompt used in the previous steps, run the command below to install it.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include gimpopenvino/plugins/openvino_utils/images/plugin_logo.png 2 | include gimpopenvino/plugins/openvino_utils/images/error_icon.png 3 | include gimpopenvino/plugins/openvino_utils/tools/gimp_openvino_config.json 4 | 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenVINO™ AI Plugins for GIMP [![Mentioned in Awesome OpenVINO](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/openvinotoolkit/awesome-openvino) 2 | 3 |
Dedicated for GIMP 3, Python 3 and OpenVINO™.
:star: :star: :star: :star: are welcome.
4 | 5 | ## Current list of plugins: 6 | 1. Super-Resolution 7 | 2. Semantic-Segmentation 8 | 3. Stable-Diffusion 9 | * SD 1.5 10 | * SD 1.5 LCM 11 | * SD 1.5 Inpainting 12 | * SD 1.5 Controlnet-OpenPose 13 | * SD 1.5 Controlnet-CannyEdge 14 | * SD 1.5 Controlnet-Scribble 15 | * SD 3.0 Medium 16 | * SD 3.5 Medium Turbo 17 | * SDXL (Turbo, Base) 18 | 19 | 20 | # Objectives 21 | 1. Provides a set of OpenVINO™ based plugins that add AI features to GIMP. 22 | 2. Serve as a reference code for how to make use of OpenVino in GIMP application for inferencing on Intel® Devices. 23 | 3. Add AI to routine image editing workflows. 24 | 25 | # Contribution 26 | Welcome people interested in contribution! 27 | Please raise a PR for any new features, modifications, or bug fixes. 28 | 29 | # Use with GIMP 30 | ![gimp-screenshot](gimp-screenshot.PNG) 31 | 32 | ## Installation Steps 33 | ### Windows 34 | For detailed steps and tips please refer [User guide for Windows](./Docs/user_guide_for_windows_users.md). 35 | 36 | ### Linux 37 | This plugin is tested on Ubuntu 24.04. Building GIMP from source is recommended. Flatpak is not supported.
38 | For detailed steps and tips please refer to [Linux Installation Guide](./Docs/linux_install_guide.md). 39 | 40 | # OpenVINO™ Image Generator Plugin with Stable Diffusion 41 | #### Power Mode is enabled for int8 SD models and systems that has a "NPU" only 42 | #### A. Prompt to Image 43 | 1. You can start with a blank canvas or create/choose a layer 44 | 2. Select Stable Diffusion from the drop down list in layers -> OpenVINO-AI-Plugins 45 | 3. Choose the desired Model and Power Mode from the drop down list. 46 | 4. Click on "Load Models" to compile & load the model on the device. Wait for it to complete. Please note that you need to perform this step only if you change the model or device or both. For any subsequent runs just click "Run Inference" 47 | 5. Enter prompt and other parameters 48 | 6. Click on “Generate”. Wait for the total inference steps to get completed. 49 | 50 | #### B. Image to Image 51 | 1. Create or choose a layer or open an image 52 | 2. Follow steps 2,3,4,5 from section A. 53 | 3. Select "Use Initial Image" 54 | 4. By default the opened image in canvas will be used as initial image to the model. You can also select a different image by browsing from files. 55 | 5. Click on “Generate”. Wait for the total inference steps to get completed. 56 | 57 | #### C. Stable-Diffusion-1.5 Inpainting - Make sure to download and convert the model during install process. 58 | 1. Choose a layer or Open an image of size 512x512. (Currently works best with this resolution) 59 | 2. Use "Free select tool" to select the area in your image that you wish to change. 60 | 3. Right click on your image and click on "Add layer mask". Then choose "Selection" in "Initialize layer Mask to". This should create a mask with your selection. 61 | 4. Follow steps 2,3,4,5 from section A. Please note that you will only see "sd_1.5_Inpainting" in model options if you added a mask layer to your image. 62 | 5. Click on “Generate”. 
Wait for the total inference steps to get completed. 63 | 64 | #### D. Stable-Diffusion-1.5 Controlnet-Openpose - Make sure to download and convert the model during install process. 65 | 1. Open an image with some pose that you want to see in new image. 66 | 2. Select Stable Diffusion from the drop down list in layers -> OpenVINO-AI-Plugins 67 | 3. Choose the controlnet_openpose model and device from the drop down list. 68 | 4. Make sure to select -- "Use Initial Image" option from the GUI. If not selected then it will fail. 69 | 5. Follow steps 4,5 from section A. 70 | 6. Click on “Generate”. Wait for the total inference steps to get completed. 71 | 72 | ![](gifs/controlnet-openpose.png) 73 | 74 | 75 | #### E. Stable-Diffusion-1.5 Controlnet-CannyEdge - Make sure to download and convert the model during install process. 76 | 1. Open an image that you want to use for generating the new image. 77 | 2. Select Stable Diffusion from the drop down list in layers -> OpenVINO-AI-Plugins 78 | 3. Choose the controlnet_canny model and device from the drop down list. 79 | 4. Make sure to select -- "Use Initial Image" option from the GUI. If not selected then it will fail. 80 | 5. Follow steps 4,5 from section A. 81 | 6. Click on “Generate”. Wait for the total inference steps to get completed. 82 | 83 | 84 | #### F. Stable-Diffusion-1.5 Controlnet-Scribble - Make sure to download and convert the model during install process. 85 | 1. Open an image that you want to use for generating the new image. 86 | 2. Select Stable Diffusion from the drop down list in layers -> OpenVINO-AI-Plugins 87 | 3. Choose the controlnet_scribble model and device from the drop down list. 88 | 4. Make sure to select -- "Use Initial Image" option from the GUI. If not selected then it will fail. 89 | 5. Follow steps 4,5 from section A. 90 | 6. Click on “Generate”. Wait for the total inference steps to get completed. 91 | 92 | #### G. 
Stable-Diffusion-3.0 Medium & Stable-Diffusion-3.5 Medium Turbo 93 | Please see [Stable Diffusion 3 User Guilde](./Docs/stable-diffusion-v3.md) for details 94 | _With Advanced Setting and Power Mode_ 95 | For SD3.5 Medium Turbo - Select Guidance Scale between 0.0 - 1.0, as anything greater than 1.0 will result in a failure. Also, with the Turbo version one can generate valid images in as few as 4 iterations.
96 | ⚠️ **Disclaimer** 97 | The very first time you do "Load Models", it may take a few minutes. Subsequent runs will be much faster once the model is cached. 98 | 99 | #### H. SDXL 100 | For SDXL Turbo - Please make sure to Select Guidance Scale between 0.0 - 1.0. Also, for number of inference steps use between 2-5 for best result.
101 | ⚠️ **Disclaimer** 102 | The very first time you do "Load Models", it may take a few minutes. Subsequent runs will be much faster once the model is cached. 103 | 104 | ![](gifs/stable-diffusion2.png) 105 | 106 | _Note that the screenshots below are based on the previous UI_ 107 | 108 | ### OpenVINO™ Semantic Segmentation Plugin 109 | ![](gifs/semantic-segmentation.webp) 110 | 111 | ### OpenVINO™ Super Resolution Plugin 112 | ![](gifs/super-res.webp) 113 | 114 | 115 | # Acknowledgements 116 | * Plugin architecture inspired from GIMP-ML - https://github.com/kritiksoman/GIMP-ML/tree/GIMP3-ML 117 | * Stable Diffusion Engine - https://github.com/bes-dev/stable_diffusion.openvino 118 | 119 | # License 120 | Apache 2.0 121 | 122 | # Disclaimer 123 | Stable Diffusion’s data model is governed by the Creative ML Open Rail M license, which is not an open source license. 124 | https://github.com/CompVis/stable-diffusion. Users are responsible for their own assessment whether their proposed use of the project code and model would be governed by and permissible under this license. 
125 | -------------------------------------------------------------------------------- /gifs/controlnet-openpose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/controlnet-openpose.png -------------------------------------------------------------------------------- /gifs/semantic-segmentation.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/semantic-segmentation.webp -------------------------------------------------------------------------------- /gifs/stable-diffusion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/stable-diffusion.png -------------------------------------------------------------------------------- /gifs/stable-diffusion.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/stable-diffusion.webp -------------------------------------------------------------------------------- /gifs/stable-diffusion1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/stable-diffusion1.png -------------------------------------------------------------------------------- /gifs/stable-diffusion2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gifs/stable-diffusion2.png 
#!/usr/bin/env python3
# Copyright (C) 2022-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

"""
Script to create and configure gimp_openvino_config.json
"""
import os
import re
import sys
import json
import uuid
import shutil
import logging
import platform
import subprocess
from pathlib import Path
from enum import Enum

import gimpopenvino
import openvino as ov
from gimpopenvino.plugins.openvino_utils.tools.tools_utils import base_model_dir, config_path_dir


# Enum for NPU Arch - values are the substrings reported in the device's
# DEVICE_ARCHITECTURE property.
class NPUArchitecture(Enum):
    ARCH_3700 = "3700"  # Keem Bay
    ARCH_3720 = "3720"  # Meteor Lake and Arrow Lake
    ARCH_4000 = "4000"  # Lunar Lake
    ARCH_NONE = "0000"  # No NPU
    ARCH_NEXT = "FFFF"  # Next Lake


def install_base_models(base_model_dir, repo_weights_dir):
    """
    Copy each model folder from repo_weights_dir into base_model_dir,
    skipping any model that is already present at the destination.

    Args:
        base_model_dir (str): Destination directory for model folders.
        repo_weights_dir (str): Source weights directory in the repo checkout.
    """
    for entry in os.scandir(repo_weights_dir):
        # Only directories are models; stray files previously made
        # shutil.copytree raise NotADirectoryError.
        if not entry.is_dir():
            continue
        model_name = entry.name
        model_path = os.path.join(base_model_dir, model_name)
        if not os.path.isdir(model_path):
            print(f"Copying {model_name} to {base_model_dir}")
            shutil.copytree(Path(entry), model_path)
    print("Setup done for base models.")


def filter_supported_devices(core):
    """
    Return the OpenVINO device names this plugin supports.

    A device is kept only if it is an Intel device, is not an NPU that
    reports zero int8 GOPS (architecture < 3720), and does not report an
    all-zero DEVICE_UUID (a non-unique or invalid device).

    Args:
        core (ov.Core): The OpenVINO core instance.

    Returns:
        list[str]: Names of supported devices.
    """
    ZERO_UUID = uuid.UUID("00000000-0000-0000-0000-000000000000")
    valid_devices = []

    for device in core.get_available_devices():
        full_name = core.get_property(device, "FULL_DEVICE_NAME")
        supported_props = core.get_property(device, "SUPPORTED_PROPERTIES")

        # Skip if not Intel
        if "Intel" not in full_name:
            continue

        # skip device if it is NPU < 3720 (those report 0 GOPS for int8)
        if "AI Boost" in full_name and "AUTO_DETECT" in core.get_property(device, "DEVICE_ARCHITECTURE"):
            if core.get_property(device, "DEVICE_GOPS")[ov.Type.i8] == 0:
                continue

        # Skip if device has a zero UUID (indicates a non-unique or invalid device)
        if "DEVICE_UUID" in supported_props:
            dev_uuid = uuid.UUID(core.get_property(device, "DEVICE_UUID"))
            if dev_uuid == ZERO_UUID:
                continue

        valid_devices.append(device)

    return valid_devices


def get_npu_architecture(core):
    """
    Retrieves the NPU architecture using the OpenVINO core.

    Args:
        core (ov.Core): The OpenVINO core instance.

    Returns:
        NPUArchitecture: The detected architecture, or ARCH_NONE if no NPU
        is present or detection fails.
    """
    try:
        available_devices = core.get_available_devices()
        if 'NPU' in available_devices:
            architecture = core.get_property('NPU', 'DEVICE_ARCHITECTURE')
            for arch in NPUArchitecture:
                if arch.value in architecture:
                    return arch
            # Architecture string not in the enum: non-zero int8 GOPS means
            # a device newer than anything listed; zero GOPS means pre-3720.
            if core.get_property("NPU", "DEVICE_GOPS")[ov.Type.i8] > 0:
                return NPUArchitecture.ARCH_NEXT
            else:
                return NPUArchitecture.ARCH_3700
        return NPUArchitecture.ARCH_NONE

    except Exception as e:
        # BUG FIX: the original called logging.error without importing the
        # logging module, turning any failure here into a NameError.
        logging.error(f"Error retrieving NPU architecture: {str(e)}")
        return NPUArchitecture.ARCH_NONE


def get_plugin_version(file_dir=None):
    """
    Retrieves the plugin version via git tags if available, ensuring
    the command is run from the directory where this Python file resides.

    Args:
        file_dir (str | None): Directory in which to run `git describe`;
            defaults to the current working directory.

    Returns:
        str: PEP 440 version from git, or "0.0.0" if git is unavailable.

    Why use git describe for this? Because it generates a human-readable
    string to identify a particular commit in a Git repository, using the
    closest (most recent) annotated tag reachable from that commit.
    Typically, it looks like:
        <tag>[-<commits-since-tag>-g<short-hash>]

    For example, if your commit is exactly tagged 1.0.0, running
    git describe might simply return 1.0.0. If there have been 10
    commits since the v1.0.0 tag, git describe might return something like:
        1.0.0-10-g3ab12ef
    where:

    1.0.0 is the closest tag in the commit history.
    10 is how many commits you are ahead of that tag.
    g3ab12ef is the abbreviated hash of the current commit.

    we can then turn this into a PEP 440 compliant string
    """
    try:
        raw_version = subprocess.check_output(
            ["git", "describe", "--tags"],
            cwd=file_dir,
            encoding="utf-8"
        ).strip()

        # Normalize the git version to PEP 440
        match = re.match(r"v?(\d+\.\d+\.\d+)(?:-(\d+)-g[0-9a-f]+)?", raw_version)

        if match:
            version, dev_count = match.groups()
            if dev_count:
                return f"{version}.dev{dev_count}"  # PEP 440 dev version
            return version
        else:
            raise ValueError(f"Invalid version format: {raw_version}")
    except Exception as e:
        print(f"Error obtaining version: {e}")
        return "0.0.0"  # Fallback version


def complete_install(repo_weights_dir=None):
    """
    Create the plugin's install/weights directories, detect supported
    OpenVINO devices, and write gimp_openvino_config.json.

    Args:
        repo_weights_dir (str | None): Optional repo checkout holding base
            model weights; when given, models are copied into the weights
            directory and the plugin version is derived from its git tags.
    """
    install_location = base_model_dir

    # Create the install directory if it doesn't exist
    os.makedirs(install_location, exist_ok=True)

    # Determine Python executable path
    python_path = sys.executable

    # Create the weights directory if it doesn't exist
    weight_path = os.path.join(install_location, "weights")
    os.makedirs(weight_path, exist_ok=True)

    plugin_version = "Unknown"
    # Install base models from a repo and get the plugin version number.
    if repo_weights_dir:
        install_base_models(weight_path, repo_weights_dir)
        plugin_version = get_plugin_version(repo_weights_dir)

    # Determine plugin location (where gimpopenvino is installed)
    plugin_loc = os.path.dirname(gimpopenvino.__file__)

    # Filter supported devices using OpenVINO runtime
    core = ov.Core()
    supported_devices = filter_supported_devices(core)
    npu_arch = get_npu_architecture(core)

    # Build the JSON config data
    py_dict = {
        "python_path": python_path,
        "weight_path": weight_path,
        "supported_devices": supported_devices,
        "plugin_version": plugin_version,
        "npu_architecture_version": npu_arch.value,
    }

    # Write config data to gimp_openvino_config.json
    govconfig_path = os.path.join(config_path_dir, "gimp_openvino_config.json")
    with open(govconfig_path, "w+") as file:
        json.dump(py_dict, file)

    # For Linux, add executable permissions to plugin scripts
    if platform.system() == "Linux":
        scripts = [
            "plugins/superresolution_ov/superresolution_ov.py",
            "plugins/stable_diffusion_ov/stable_diffusion_ov.py",
            "plugins/semseg_ov/semseg_ov.py",
        ]
        for script in scripts:
            script_path = os.path.join(plugin_loc, script)
            subprocess.call(["chmod", "+x", script_path])
# Copyright(C) 2022-2023 Intel Corporation
# SPDX - License - Identifier: Apache - 2.0

import gi
gi.require_version("Gimp", "3.0")
gi.require_version("GimpUi", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import Gimp, GimpUi, GObject, GLib, Gio, Gtk
import gettext

_ = gettext.gettext


def show_dialog(message, title, icon="logo", image_paths=None):
    """
    Show a modal GIMP dialog containing an icon image and a message label.

    Args:
        message (str): Text shown beside the icon (run through gettext).
        title (str): Dialog window title (run through gettext).
        icon (str): Key into image_paths selecting which image to show.
        image_paths (dict | None): Maps icon keys to image file paths.
    """
    use_header_bar = Gtk.Settings.get_default().get_property("gtk-dialogs-use-header")
    dialog = GimpUi.Dialog(use_header_bar=use_header_bar, title=_(title))
    # Add buttons
    dialog.add_button("_Cancel", Gtk.ResponseType.CANCEL)
    dialog.add_button("_OK", Gtk.ResponseType.APPLY)
    vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, homogeneous=False, spacing=10)
    dialog.get_content_area().add(vbox)
    vbox.show()

    # Create grid to set all the properties inside.
    grid = Gtk.Grid()
    grid.set_column_homogeneous(False)
    grid.set_border_width(10)
    grid.set_column_spacing(10)
    grid.set_row_spacing(10)
    vbox.add(grid)
    grid.show()

    # Show Logo.
    # BUG FIX: the original unconditionally evaluated image_paths[icon],
    # which raised TypeError when image_paths was left at its None default
    # (or KeyError for an unknown icon). The dialog now simply omits the
    # image in that case.
    if image_paths and icon in image_paths:
        logo = Gtk.Image.new_from_file(image_paths[icon])
        grid.attach(logo, 0, 0, 1, 1)
        logo.show()

    # Show message
    label = Gtk.Label(label=_(message))
    grid.attach(label, 1, 0, 1, 1)
    label.show()

    dialog.show()
    dialog.run()
    # BUG FIX: destroy the dialog after run() returns; the original leaked
    # the window, leaving it on screen after the user clicked a button.
    dialog.destroy()
    return


def save_image(image, drawable, file_path):
    """
    Export `image` as a PNG file via GIMP's 'file-png-export' PDB procedure.

    Args:
        image (Gimp.Image): Image to export.
        drawable: Unused; kept in the signature for call-site compatibility.
        file_path (str): Destination path of the PNG file.
    """
    interlace, compression = 0, 2
    pdb_proc = Gimp.get_pdb().lookup_procedure('file-png-export')
    pdb_config = pdb_proc.create_config()
    pdb_config.set_property('run-mode', Gimp.RunMode.NONINTERACTIVE)
    pdb_config.set_property('image', image)
    pdb_config.set_property('file', Gio.File.new_for_path(file_path))
    pdb_config.set_property('options', None)
    pdb_config.set_property('interlaced', interlace)
    pdb_config.set_property('compression', compression)
    # write all PNG chunks except oFFs(ets)
    pdb_config.set_property('bkgd', True)
    pdb_config.set_property('offs', False)
    pdb_config.set_property('phys', True)
    pdb_config.set_property('time', True)
    pdb_config.set_property('save-transparent', True)
    pdb_proc.run(pdb_config)


def N_(message):
    # gettext no-op marker: returns the message unchanged so translation
    # tooling can collect it without translating at runtime.
    return message
import os
import json
import sys
import socket
import ast
import traceback
import logging as log
from pathlib import Path
import psutil
import threading

sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "openvino_common")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "openvino_utils", "tools")])

from gimpopenvino.plugins.openvino_utils.tools.tools_utils import get_weight_path
from model_manager import ModelManager

HOST = "127.0.0.1"  # Standard loopback interface address (localhost)
PORT = 65434  # Port to listen on (stable_diffusion_ov_server uses port 65432 & 65433)


def _send_str(conn, text):
    # Send `text` utf-8 encoded, then block until the client acks
    # (the wire protocol alternates payload / ack on every message).
    conn.sendall(bytes(text, 'utf-8'))
    conn.recv(1024)  # <- wait for ack


def _handle_get_all_model_details(model_manager, conn):
    # Send the details and state of all supported models, ack-stepped.
    installed_models, installable_model_details = model_manager.get_all_model_details()

    # Send the list of installed models (count first, then name/id pairs).
    _send_str(conn, str(len(installed_models)))
    for model in installed_models:
        for detail in ["name", "id"]:
            _send_str(conn, model[detail])

    # Send the installable model details (count first, then 4 fields each).
    _send_str(conn, str(len(installable_model_details)))
    for model in installable_model_details:
        for detail in ["name", "description", "id", "install_status"]:
            _send_str(conn, model[detail])


def _handle_install_model(model_manager, conn, ack):
    # Kick off installation of one model on a background thread.
    conn.sendall(ack)  # send ack

    # get model id.
    # TODO: Need a timeout here.
    model_id = conn.recv(1024).decode()

    if model_id not in model_manager.model_install_status:
        # Register the status entry here (instead of in
        # model_manager.install_model) so it is guaranteed to exist before
        # the ack goes out — otherwise the GIMP UI could poll for status
        # before the install thread spawns (race condition).
        model_manager.model_install_status[model_id] = {"status": "Preparing to install..", "percent": 0.0}

        # Run the install on another thread so the server can keep
        # servicing other requests while the install is taking place.
        install_thread = threading.Thread(target=model_manager.install_model, args=(model_id,))
        install_thread.start()
    else:
        print(model_id, "is already currently installing!")

    conn.sendall(ack)  # send ack


def _handle_install_status(model_manager, conn, ack):
    # Report (status, percent) for a model that is being installed.
    conn.sendall(ack)  # send ack

    # Copy so the number of entries can't change while we're in this routine.
    model_install_status = model_manager.model_install_status.copy()

    # Get the model-id that we are interested in.
    model_id = conn.recv(1024).decode()

    if model_id in model_install_status:
        details = model_install_status[model_id]
        status = details["status"]
        perc = details["percent"]
    else:
        # the model_id is not found in the installer map... set status to
        # "done" / 100.0.  TODO: What about failure cases?
        status = "done"
        perc = 100.0

    # first, send the status; then, send the percent (each ack-stepped).
    _send_str(conn, status)
    _send_str(conn, str(perc))


def _handle_error_details(model_manager, conn, ack):
    # Send (summary, details) describing a failed install of one model.
    conn.sendall(ack)  # send ack

    # Get the model-id that we are interested in.
    model_id = conn.recv(1024).decode()

    summary, details = model_manager.get_error_details(model_id)

    _send_str(conn, summary)
    _send_str(conn, details)


def _handle_install_cancel(model_manager, conn, ack):
    # Cancel an in-progress model install.
    conn.sendall(ack)  # send ack

    # Get the model-id that we are interested in.
    data = conn.recv(1024)

    # send ack (the protocol echoes the model-id bytes back as the ack)
    conn.sendall(data)

    model_manager.cancel_install(data.decode())


# This function is run on a dedicated thread when a new connection is established.
def run_connection_routine(model_manager, conn):
    """
    Service one client connection until the peer closes it.

    Each received message is a command string; per-command helpers above
    implement the ack-stepped request/response exchanges.
    """
    with conn:
        while True:
            data = conn.recv(1024)

            if not data:
                break

            # Decode once instead of re-decoding for every comparison.
            command = data.decode()

            if command == "kill":
                os._exit(0)
            if command == "ping":
                conn.sendall(data)
                continue

            # request to get the details and state of all supported models
            if command == "get_all_model_details":
                _handle_get_all_model_details(model_manager, conn)
                continue

            # request to install a model.
            if command == "install_model":
                _handle_install_model(model_manager, conn, data)
                continue

            # request to get the status of a model that is getting installed.
            if command == "install_status":
                _handle_install_status(model_manager, conn, data)
                continue

            if command == "error_details":
                _handle_error_details(model_manager, conn, data)
                continue

            if command == "install_cancel":
                _handle_install_cancel(model_manager, conn, data)
                continue

            print("Warning! Unsupported command sent: ", command)


def run():
    """Bind the model-management socket and serve connections forever."""
    weight_path = get_weight_path()

    # Move to a temporary working directory in a known place.
    # This is where we'll be downloading stuff to, etc.
    tmp_working_dir = os.path.join(weight_path, '..', 'mms_tmp')

    # Create it if needed (makedirs with exist_ok avoids the
    # check-then-create race the original os.mkdir pattern had).
    os.makedirs(tmp_working_dir, exist_ok=True)

    # go there.
    os.chdir(tmp_working_dir)

    model_manager = ModelManager(weight_path)

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        while True:
            conn, addr = s.accept()

            # Run this connection on a dedicated thread. This allows
            # multiple connections to be present at once.
            conn_thread = threading.Thread(target=run_connection_routine, args=(model_manager, conn))
            conn_thread.start()


def start():
    """
    Start the server thread, then exit the whole process as soon as the
    parent GIMP process goes away (or wait on the server thread if no
    GIMP process is found).
    """
    run_thread = threading.Thread(target=run, args=())
    run_thread.start()

    gimp_proc = None
    for proc in psutil.process_iter():
        try:
            if "gimp" in proc.name():
                gimp_proc = proc
                break
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Processes can vanish or be unreadable mid-iteration;
            # the original would have crashed here.
            continue

    if gimp_proc:
        # BUG FIX: wait on gimp_proc (the process we actually found), not
        # the leftover loop variable `proc` from the iteration above.
        psutil.wait_procs([gimp_proc])
        os._exit(0)

    run_thread.join()


if __name__ == "__main__":
    start()
15 | """ 16 | 17 | 18 | from .openvino_adapter import create_core, OpenvinoAdapter 19 | 20 | from .utils import Layout 21 | 22 | 23 | __all__ = [ 24 | 'create_core', 25 | 'Layout', 26 | 'OpenvinoAdapter', 27 | 28 | 29 | ] 30 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/openvino_common/adapters/model_adapter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2021-2024 Intel Corporation 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | """ 16 | 17 | import abc 18 | from dataclasses import dataclass, field 19 | from typing import Dict, List, Set 20 | 21 | 22 | @dataclass 23 | class Metadata: 24 | names: Set[str] = field(default_factory=set) 25 | shape: List[int] = field(default_factory=list) 26 | layout: str = '' 27 | precision: str = '' 28 | type: str = '' 29 | meta: Dict = field(default_factory=dict) 30 | 31 | 32 | class ModelAdapter(metaclass=abc.ABCMeta): 33 | ''' 34 | An abstract Model Adapter with the following interface: 35 | 36 | - Reading the model from disk or other place 37 | - Loading the model to the device 38 | - Accessing the information about inputs/outputs 39 | - The model reshaping 40 | - Synchronous model inference 41 | - Asynchronous model inference 42 | ''' 43 | precisions = ('FP32', 'I32', 'FP16', 'I16', 'I8', 'U8') 44 | 45 | @abc.abstractmethod 46 | def __init__(self): 47 | ''' 48 | An abstract Model Adapter constructor. 49 | Reads the model from disk or other place. 50 | ''' 51 | 52 | @abc.abstractmethod 53 | def load_model(self): 54 | ''' 55 | Loads the model on the device. 56 | ''' 57 | 58 | @abc.abstractmethod 59 | def get_input_layers(self): 60 | ''' 61 | Gets the names of model inputs and for each one creates the Metadata structure, 62 | which contains the information about the input shape, layout, precision 63 | in OpenVINO format, meta (optional) 64 | 65 | Returns: 66 | - the dict containing Metadata for all inputs 67 | ''' 68 | 69 | @abc.abstractmethod 70 | def get_output_layers(self): 71 | ''' 72 | Gets the names of model outputs and for each one creates the Metadata structure, 73 | which contains the information about the output shape, layout, precision 74 | in OpenVINO format, meta (optional) 75 | 76 | Returns: 77 | - the dict containing Metadata for all outputs 78 | ''' 79 | 80 | @abc.abstractmethod 81 | def reshape_model(self, new_shape): 82 | ''' 83 | Reshapes the model inputs to fit the new input shape. 
84 | 85 | Args: 86 | - new_shape (dict): the dictionary with inputs names as keys and 87 | list of new shape as values in the following format: 88 | { 89 | 'input_layer_name_1': [1, 128, 128, 3], 90 | 'input_layer_name_2': [1, 128, 128, 3], 91 | ... 92 | } 93 | ''' 94 | 95 | @abc.abstractmethod 96 | def infer_sync(self, dict_data): 97 | ''' 98 | Performs the synchronous model inference. The infer is a blocking method. 99 | 100 | Args: 101 | - dict_data: it's submitted to the model for inference and has the following format: 102 | { 103 | 'input_layer_name_1': data_1, 104 | 'input_layer_name_2': data_2, 105 | ... 106 | } 107 | 108 | Returns: 109 | - raw result (dict) - model raw output in the following format: 110 | { 111 | 'output_layer_name_1': raw_result_1, 112 | 'output_layer_name_2': raw_result_2, 113 | ... 114 | } 115 | ''' 116 | 117 | @abc.abstractmethod 118 | def infer_async(self, dict_data, callback_fn, callback_data): 119 | ''' 120 | Performs the asynchronous model inference and sets 121 | the callback for inference completion. Also, it should 122 | define get_raw_result() function, which handles the result 123 | of inference from the model. 124 | 125 | Args: 126 | - dict_data: it's submitted to the model for inference and has the following format: 127 | { 128 | 'input_layer_name_1': data_1, 129 | 'input_layer_name_2': data_2, 130 | ... 131 | } 132 | - callback_fn: the callback function, which is defined outside the adapter 133 | - callback_data: the data for callback, that will be taken after the model inference is ended 134 | ''' 135 | 136 | @abc.abstractmethod 137 | def is_ready(self): 138 | ''' 139 | In case of asynchronous execution checks if one can submit input data 140 | to the model for inference, or all infer requests are busy. 
141 | 142 | Returns: 143 | - the boolean flag whether the input data can be 144 | submitted to the model for inference or not 145 | ''' 146 | 147 | @abc.abstractmethod 148 | def await_all(self): 149 | ''' 150 | In case of asynchronous execution waits the completion of all 151 | busy infer requests. 152 | ''' 153 | 154 | @abc.abstractmethod 155 | def await_any(self): 156 | ''' 157 | In case of asynchronous execution waits the completion of any 158 | busy infer request until it becomes available for the data submission. 159 | ''' 160 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/openvino_common/adapters/openvino_adapter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2021-2024 Intel Corporation 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | """ 16 | 17 | import logging as log 18 | from pathlib import Path 19 | 20 | try: 21 | from openvino import AsyncInferQueue, Core, PartialShape, layout_helpers, get_version, Dimension 22 | openvino_absent = False 23 | except ImportError: 24 | openvino_absent = True 25 | 26 | from .model_adapter import ModelAdapter, Metadata 27 | from .utils import Layout 28 | from pipelines import parse_devices 29 | 30 | 31 | def create_core(): 32 | if openvino_absent: 33 | raise ImportError('The OpenVINO package is not installed') 34 | 35 | #log.info('OpenVINO Runtime') 36 | #log.info('\tbuild: {}'.format(get_version())) 37 | return Core() 38 | 39 | 40 | class OpenvinoAdapter(ModelAdapter): 41 | ''' 42 | Works with OpenVINO model 43 | ''' 44 | def __init__(self, core, model_path, weights_path=None, model_parameters = {}, device='CPU', plugin_config=None, max_num_requests=0): 45 | self.core = core 46 | self.model_path = model_path 47 | self.device = device 48 | self.plugin_config = plugin_config 49 | self.max_num_requests = max_num_requests 50 | self.model_parameters = model_parameters 51 | self.model_parameters['input_layouts'] = Layout.parse_layouts(self.model_parameters.get('input_layouts', None)) 52 | 53 | if isinstance(model_path, (str, Path)): 54 | if Path(model_path).suffix == ".onnx" and weights_path: 55 | log.warning('For model in ONNX format should set only "model_path" parameter.' 
56 | 'The "weights_path" will be omitted') 57 | 58 | self.model_from_buffer = isinstance(model_path, bytes) and isinstance(weights_path, bytes) 59 | # log.info('Reading model {}'.format('from buffer' if self.model_from_buffer else model_path)) 60 | weights = weights_path if self.model_from_buffer else '' 61 | self.model = core.read_model(model_path, weights) 62 | 63 | def load_model(self): 64 | self.compiled_model = self.core.compile_model(self.model, self.device, self.plugin_config) 65 | self.async_queue = AsyncInferQueue(self.compiled_model, self.max_num_requests) 66 | if self.max_num_requests == 0: 67 | # +1 to use it as a buffer of the pipeline 68 | self.async_queue = AsyncInferQueue(self.compiled_model, len(self.async_queue) + 1) 69 | 70 | #log.info('The model {} is loaded to {}'.format("from buffer" if self.model_from_buffer else self.model_path, self.device)) 71 | #self.log_runtime_settings() 72 | 73 | def log_runtime_settings(self): 74 | devices = set(parse_devices(self.device)) 75 | if 'AUTO' not in devices: 76 | for device in devices: 77 | try: 78 | nstreams = self.compiled_model.get_property(device + '_THROUGHPUT_STREAMS') 79 | log.info('\tDevice: {}'.format(device)) 80 | log.info('\t\tNumber of streams: {}'.format(nstreams)) 81 | if device == 'CPU': 82 | nthreads = self.compiled_model.get_property('CPU_THREADS_NUM') 83 | log.info('\t\tNumber of threads: {}'.format(nthreads if int(nthreads) else 'AUTO')) 84 | except RuntimeError: 85 | pass 86 | log.info('\tNumber of model infer requests: {}'.format(len(self.async_queue))) 87 | 88 | def get_input_layers(self): 89 | inputs = {} 90 | for input in self.model.inputs: 91 | input_shape = get_input_shape(input) 92 | input_layout = self.get_layout_for_input(input, input_shape) 93 | inputs[input.get_any_name()] = Metadata(input.get_names(), input_shape, input_layout, input.get_element_type().get_type_name()) 94 | inputs = self._get_meta_from_ngraph(inputs) 95 | return inputs 96 | 97 | def 
get_layout_for_input(self, input, shape=None) -> str: 98 | input_layout = '' 99 | if self.model_parameters['input_layouts']: 100 | input_layout = Layout.from_user_layouts(input.get_names(), self.model_parameters['input_layouts']) 101 | if not input_layout: 102 | if not layout_helpers.get_layout(input).empty: 103 | input_layout = Layout.from_openvino(input) 104 | else: 105 | input_layout = Layout.from_shape(shape if shape is not None else input.shape) 106 | return input_layout 107 | 108 | def get_output_layers(self): 109 | outputs = {} 110 | for output in self.model.outputs: 111 | output_shape = output.partial_shape.get_min_shape() if self.model.is_dynamic() else output.shape 112 | outputs[output.get_any_name()] = Metadata(output.get_names(), list(output_shape), precision=output.get_element_type().get_type_name()) 113 | outputs = self._get_meta_from_ngraph(outputs) 114 | return outputs 115 | 116 | def reshape_model(self, new_shape): 117 | new_shape = {name: PartialShape( 118 | [Dimension(dim) if not isinstance(dim, tuple) else Dimension(dim[0], dim[1]) 119 | for dim in shape]) for name, shape in new_shape.items()} 120 | self.model.reshape(new_shape) 121 | 122 | def get_raw_result(self, request): 123 | return {key: request.get_tensor(key).data for key in self.get_output_layers()} 124 | 125 | def copy_raw_result(self, request): 126 | return {key: request.get_tensor(key).data.copy() for key in self.get_output_layers()} 127 | 128 | def infer_sync(self, dict_data): 129 | self.infer_request = self.async_queue[self.async_queue.get_idle_request_id()] 130 | self.infer_request.infer(dict_data) 131 | return self.get_raw_result(self.infer_request) 132 | 133 | def infer_async(self, dict_data, callback_data) -> None: 134 | self.async_queue.start_async(dict_data, (self.copy_raw_result, callback_data)) 135 | 136 | def set_callback(self, callback_fn): 137 | self.async_queue.set_callback(callback_fn) 138 | 139 | def is_ready(self) -> bool: 140 | return self.async_queue.is_ready() 141 
    def await_all(self) -> None:
        """Block until every queued async request has completed."""
        self.async_queue.wait_all()

    def await_any(self) -> None:
        """Block until at least one request in the pool becomes idle."""
        self.async_queue.get_idle_request_id()

    def _get_meta_from_ngraph(self, layers_info):
        # Enrich the layer Metadata with node attributes and op type taken
        # from the graph's ordered operations (matched by friendly name).
        for node in self.model.get_ordered_ops():
            layer_name = node.get_friendly_name()
            if layer_name not in layers_info.keys():
                continue
            layers_info[layer_name].meta = node.get_attributes()
            layers_info[layer_name].type = node.get_type_name()
        return layers_info

    def operations_by_type(self, operation_type):
        """Return Metadata for every graph op whose type name equals operation_type."""
        layers_info = {}
        for node in self.model.get_ordered_ops():
            if node.get_type_name() == operation_type:
                layer_name = node.get_friendly_name()
                layers_info[layer_name] = Metadata(type=node.get_type_name(), meta=node.get_attributes())
        return layers_info


def get_input_shape(input_tensor):
    """Return the shape of a model input.

    Static shapes come back as a plain list of ints. Dynamic shapes are parsed
    from the textual form of the partial shape (e.g. "[1,3,?,1..224]"):
    '?' becomes -1, ranged dims become (min, max) tuples; the result is a list
    when any ranged dim is present, otherwise a tuple of ints.
    """
    def string_to_tuple(string, casting_type=int):
        # "(1, 3)" / "1,3" -> (1, 3); empty items are dropped.
        processed = string.replace(' ', '').replace('(', '').replace(')', '').split(',')
        processed = filter(lambda x: x, processed)
        return tuple(map(casting_type, processed)) if casting_type else tuple(processed)
    if not input_tensor.partial_shape.is_dynamic:
        return list(input_tensor.shape)
    ps = str(input_tensor.partial_shape)
    if ps[0] == '[' and ps[-1] == ']':
        ps = ps[1:-1]
    preprocessed = ps.replace('{', '(').replace('}', ')').replace('?', '-1')
    preprocessed = preprocessed.replace('(', '').replace(')', '')
    if '..' in preprocessed:
        # Ranged dims like "1..224" become (min, max) tuples.
        shape_list = []
        for dim in preprocessed.split(','):
            if '..' in dim:
                shape_list.append(string_to_tuple(dim.replace('..', ',')))
            else:
                shape_list.append(int(dim))
        return shape_list
    return string_to_tuple(preprocessed)
class Layout:
    """Thin holder/namespace for tensor layout strings such as 'NCHW'."""

    def __init__(self, layout='') -> None:
        self.layout = layout

    @staticmethod
    def from_shape(shape):
        '''
        Create Layout from given shape
        '''
        rank = len(shape)
        if rank == 2:
            return 'NC'
        if rank == 3:
            # A small (1..4) leading dim is assumed to be channels.
            return 'CHW' if shape[0] in range(1, 5) else 'HWC'
        if rank == 4:
            return 'NCHW' if shape[1] in range(1, 5) else 'NHWC'

        raise RuntimeError("Get layout from shape method doesn't support {}D shape".format(rank))

    @staticmethod
    def from_openvino(input):
        '''
        Create Layout from openvino input
        '''
        raw = layout_helpers.get_layout(input).to_string()
        return raw.strip('[]').replace(',', '')

    @staticmethod
    def from_user_layouts(input_names: set, user_layouts: dict):
        '''
        Create Layout for input based on user info
        '''
        for name in input_names:
            if name in user_layouts:
                return user_layouts[name]
        # '' is the catch-all key applied to every input.
        return user_layouts.get('', '')

    @staticmethod
    def parse_layouts(layout_string: str) -> Optional[dict]:
        '''
        Parse layout parameter in format "input0:NCHW,input1:NC" or "NCHW" (applied to all inputs)
        '''
        if not layout_string:
            return None
        # A bare layout with no ':' applies to all inputs (stored under '').
        remainder = layout_string if ':' in layout_string else ':' + layout_string
        sep = remainder.rfind(':')
        layouts = {}
        while sep != -1:
            comma = remainder.rfind(',')
            layouts[remainder[comma + 1:sep]] = remainder[sep + 1:]
            remainder = remainder[:comma + 1]
            if not remainder or not remainder.endswith(','):
                break
            remainder = remainder[:-1]
            sep = remainder.rfind(':')
        if remainder:
            raise ValueError("Can't parse input layout string: " + layout_string)
        return layouts
class ImageModel(Model):
    '''An abstract wrapper for an image-based model

    The ImageModel has 1 or more inputs with images - 4D tensors with NHWC or NCHW layout.
    It may support additional inputs - 2D tensors.

    The ImageModel implements basic preprocessing for an image provided as model input
    (resize -> normalize -> relayout); see `preprocess`. The `postprocess` method must
    be implemented in a specific inherited wrapper.

    Attributes:
        image_blob_names (List[str]): names of all image-like inputs (4D tensors)
        image_info_blob_names (List[str]): names of all secondary inputs (2D tensors)
        image_blob_name (str): name of the first image input
        nchw_layout (bool): a flag whether the model input layer has NCHW layout
        resize_type (str): the type for image resizing (see `RESIZE_TYPE` for info)
        resize (function): resizing function corresponding to the `resize_type`
        input_transform (InputTransform): instance of the `InputTransform` for image normalization
    '''

    def __init__(self, model_adapter, configuration=None, preload=False):
        '''Image model constructor

        It extends the `Model` constructor.

        Args:
            model_adapter (ModelAdapter): allows working with the specified executor
            configuration (dict, optional): it contains values for parameters accepted by specific
              wrapper (`confidence_threshold`, `labels` etc.) which are set as data attributes
            preload (bool, optional): a flag whether the model is loaded to device while
              initialization. If `preload=False`, the model must be loaded via `load` method before inference

        Raises:
            WrapperError: if the wrapper failed to define appropriate inputs for images
        '''
        super().__init__(model_adapter, configuration, preload)
        self.image_blob_names, self.image_info_blob_names = self._get_inputs()
        self.image_blob_name = self.image_blob_names[0]

        self.nchw_layout = self.inputs[self.image_blob_name].layout == 'NCHW'
        input_shape = self.inputs[self.image_blob_name].shape
        if self.nchw_layout:
            self.n, self.c, self.h, self.w = input_shape
        else:
            self.n, self.h, self.w, self.c = input_shape
        self.resize = RESIZE_TYPES[self.resize_type]
        self.input_transform = InputTransform(
            self.reverse_input_channels, self.mean_values, self.scale_values)

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'mean_values': ListValue(
                default_value=None,
                description='Normalization values, which will be subtracted from image channels for image-input layer during preprocessing'
            ),
            'scale_values': ListValue(
                default_value=None,
                description='Normalization values, which will divide the image channels for image-input layer'
            ),
            'reverse_input_channels': BooleanValue(default_value=False, description='Reverse the channel order'),
            'resize_type': StringValue(
                default_value='standard', choices=tuple(RESIZE_TYPES.keys()),
                description="Type of input image resizing"
            ),
        })
        return parameters

    def _get_inputs(self):
        '''Defines the model inputs for images and additional info.

        Raises:
            WrapperError: if the wrapper failed to define appropriate inputs for images

        Returns:
            - list of inputs names for images
            - list of inputs names for additional info
        '''
        image_blobs, info_blobs = [], []
        for name, metadata in self.inputs.items():
            rank = len(metadata.shape)
            if rank == 4:
                image_blobs.append(name)
            elif rank == 2:
                info_blobs.append(name)
            else:
                self.raise_error('Failed to identify the input for ImageModel: only 2D and 4D input layer supported')
        if not image_blobs:
            self.raise_error('Failed to identify the input for the image: no 4D input layer found')
        return image_blobs, info_blobs

    def preprocess(self, inputs):
        '''Data preprocess method

        Performs basic preprocessing of a single image:
        - Resizes the image to fit the model input size via the defined resize type
        - Normalizes the image: subtracts means, divides by scales, switch channels BGR-RGB
        - Changes the image layout according to the model input layout

        Also keeps the size of the original image and the resized one as
        `original_shape` and `resized_shape` in the metadata dictionary.

        Note:
            It supports only models with single image input. If the model has more image inputs
            or has additional supported inputs, `preprocess` should be overloaded in a specific wrapper.

        Args:
            inputs (ndarray): a single image as 3D array in HWC layout

        Returns:
            - the preprocessed image in the format {'input_layer_name': preprocessed_image}
            - the input metadata, which might be used in the `postprocess` method
        '''
        image = inputs
        meta = {'original_shape': image.shape}
        resized = self.resize(image, (self.w, self.h))
        meta['resized_shape'] = resized.shape
        if self.resize_type == 'fit_to_window':
            # Aspect-preserving resize may leave the frame smaller than the
            # model input; pad it out to the exact input size.
            resized = pad_image(resized, (self.w, self.h))
            meta['padded_shape'] = resized.shape
        resized = self.input_transform(resized)
        return {self.image_blob_name: self._change_layout(resized)}, meta

    def _change_layout(self, image):
        '''Changes the input image layout to fit the layout of the model input layer.

        Args:
            image (ndarray): a single image as 3D array in HWC layout

        Returns:
            - the image with layout aligned with the model layout (batch dim added)
        '''
        if not self.nchw_layout:
            return image.reshape((1, self.h, self.w, self.c))
        # HWC -> CHW, then prepend the batch dimension.
        return image.transpose((2, 0, 1)).reshape((1, self.c, self.h, self.w))
class ConfigurableValueError(ValueError):
    """ValueError with an optional 'prefix: message' formatting; text kept in .message."""
    def __init__(self, message, prefix=None):
        self.message = f'{prefix}: {message}' if prefix else message
        super().__init__(self.message)


class BaseValue:
    """Base descriptor for a configurable model parameter (description + default)."""
    def __init__(self, description="No description available", default_value=None) -> None:
        self.default_value = default_value
        self.description = description

    def update_default_value(self, default_value):
        self.default_value = default_value

    def validate(self, value):
        """Return a list of ConfigurableValueError; empty when the value is acceptable."""
        return []

    def get_value(self, value):
        # Returns the value (or the default when value is None) only when it
        # validates; implicitly returns None otherwise — callers rely on that.
        errors = self.validate(value)
        if len(errors) == 0:
            return value if value is not None else self.default_value

    def build_error():
        # NOTE(review): declared without 'self' in the original; unused placeholder.
        pass

    def __str__(self) -> str:
        info = self.description
        if self.default_value:
            info += f"\nThe default value is '{self.default_value}'"
        return info


class NumericalValue(BaseValue):
    """Numeric parameter with optional type, choices and [min, max] constraints."""
    def __init__(self, value_type=float, choices=(), min=None, max=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.choices = choices
        self.min = min
        self.max = max
        self.value_type = value_type

    def validate(self, value):
        errors = super().validate(value)
        # NOTE(review): falsy values (0, 0.0, None) skip all checks; kept as-is
        # since callers may use a falsy value to mean "unset".
        if not value:
            return errors
        if not isinstance(value, self.value_type):
            errors.append(ConfigurableValueError(f'Incorrect value type {type(value)}: should be {self.value_type}'))
            return errors
        if len(self.choices):
            if value not in self.choices:
                errors.append(ConfigurableValueError(f'Incorrect value {value}: out of allowable list - {self.choices}'))
        if self.min is not None and value < self.min:
            errors.append(ConfigurableValueError(f'Incorrect value {value}: less than minimum allowable {self.min}'))
        if self.max is not None and value > self.max:
            # Bug fix: the message previously interpolated self.min here.
            errors.append(ConfigurableValueError(f'Incorrect value {value}: bigger than maximum allowable {self.max}'))
        return errors

    def __str__(self) -> str:
        info = super().__str__()
        info += f"\nAppropriate type is {self.value_type}"
        if self.choices:
            info += f"\nAppropriate values are {self.choices}"
        return info


class StringValue(BaseValue):
    """String parameter, optionally restricted to a set of choices."""
    def __init__(self, choices=(), **kwargs):
        super().__init__(**kwargs)
        self.choices = choices
        for choice in self.choices:
            if not isinstance(choice, str):
                raise ValueError("Incorrect option in choice list - {}.".format(choice))

    def validate(self, value):
        errors = super().validate(value)
        if not value:
            return errors
        if not isinstance(value, str):
            errors.append(ConfigurableValueError(f'Incorrect value type {type(value)}: should be "str"'))
        if len(self.choices) > 0 and value not in self.choices:
            errors.append(ConfigurableValueError(f'Incorrect value {value}: out of allowable list - {self.choices}'))
        return errors

    def __str__(self) -> str:
        info = super().__str__()
        info += "\nAppropriate type is str"
        if self.choices:
            info += f"\nAppropriate values are {self.choices}"

        return info


class BooleanValue(BaseValue):
    """Boolean parameter (note: False is falsy and therefore never validated)."""
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

    def validate(self, value):
        errors = super().validate(value)
        if not value:
            return errors
        if not isinstance(value, bool):
            errors.append(ConfigurableValueError(f'Incorrect value type - {type(value)}: should be "bool"'))
        return errors


class ListValue(BaseValue):
    """List/tuple parameter; element type may be a plain type or another BaseValue."""
    def __init__(self, value_type=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.value_type = value_type

    def validate(self, value):
        errors = super().validate(value)
        if not value:
            return errors
        if not isinstance(value, (tuple, list)):
            errors.append(ConfigurableValueError(f'Incorrect value type - {type(value)}: should be list or tuple'))
        if self.value_type:
            if isinstance(self.value_type, BaseValue):
                # Delegate per-element validation to the nested descriptor.
                for i, element in enumerate(value):
                    temp_errors = self.value_type.validate(element)
                    if len(temp_errors) > 0:
                        errors.extend([ConfigurableValueError(f'Incorrect #{i} element of the list'), *temp_errors])
            else:
                for i, element in enumerate(value):
                    if not isinstance(element, self.value_type):
                        errors.append(ConfigurableValueError(f'Incorrect #{i} element type - {type(element)}: should be {self.value_type}'))
        return errors


class DictValue(BaseValue):
    """Dictionary parameter (no per-key validation)."""
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

    def validate(self, value):
        errors = super().validate(value)
        if not value:
            return errors
        if not isinstance(value, dict):
            errors.append(ConfigurableValueError(f'Incorrect value type - {type(value)}: should be "dict"'))
        return errors
class Detection:
    """Axis-aligned detection box with a confidence score and class id."""
    def __init__(self, xmin, ymin, xmax, ymax, score, id):
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.score = score
        self.id = id

    def bottom_left_point(self):
        # Returns (xmin, ymin). NOTE(review): in image coordinates (y grows
        # downward) this is the top-left corner; name kept for compatibility.
        return self.xmin, self.ymin

    def top_right_point(self):
        # Returns (xmax, ymax) — bottom-right in image coordinates.
        return self.xmax, self.ymax

    def get_coords(self):
        return self.xmin, self.ymin, self.xmax, self.ymax


def clip_detections(detections, size):
    """Clamp detection coordinates (in place) to an image of shape (height, width)."""
    for detection in detections:
        detection.xmin = max(int(detection.xmin), 0)
        detection.ymin = max(int(detection.ymin), 0)
        detection.xmax = min(int(detection.xmax), size[1])
        detection.ymax = min(int(detection.ymax), size[0])
    return detections


class DetectionWithLandmarks(Detection):
    """Detection that additionally carries landmarks as (x, y) pairs."""
    def __init__(self, xmin, ymin, xmax, ymax, score, id, landmarks_x, landmarks_y):
        super().__init__(xmin, ymin, xmax, ymax, score, id)
        self.landmarks = []
        for x, y in zip(landmarks_x, landmarks_y):
            self.landmarks.append((x, y))


class OutputTransform:
    """Rescales frames and coordinates so output fits within output_resolution."""
    def __init__(self, input_size, output_resolution):
        self.output_resolution = output_resolution
        if self.output_resolution:
            self.new_resolution = self.compute_resolution(input_size)

    def compute_resolution(self, input_size):
        # input_size is (height, width); reversed to (width, height) for cv2.
        self.input_size = input_size
        size = self.input_size[::-1]
        self.scale_factor = min(self.output_resolution[0] / size[0],
                                self.output_resolution[1] / size[1])
        return self.scale(size)

    def resize(self, image):
        """Resize a frame to the computed resolution, recomputing if frame size changed."""
        if not self.output_resolution:
            return image
        curr_size = image.shape[:2]
        if curr_size != self.input_size:
            self.new_resolution = self.compute_resolution(curr_size)
        if self.scale_factor == 1:
            return image
        return cv2.resize(image, self.new_resolution)

    def scale(self, inputs):
        """Scale coordinates/sizes by the current factor (no-op when factor is 1)."""
        if not self.output_resolution or self.scale_factor == 1:
            return inputs
        return (np.array(inputs) * self.scale_factor).astype(np.int32)


class InputTransform:
    """Optional BGR->RGB swap plus mean/scale normalization of input frames."""
    def __init__(self, reverse_input_channels=False, mean_values=None, scale_values=None):
        self.reverse_input_channels = reverse_input_channels
        # Fast path: skip all work when no transform was requested.
        self.is_trivial = not (reverse_input_channels or mean_values or scale_values)
        self.means = np.array(mean_values, dtype=np.float32) if mean_values else np.array([0., 0., 0.])
        self.std_scales = np.array(scale_values, dtype=np.float32) if scale_values else np.array([1., 1., 1.])

    def __call__(self, inputs):
        if self.is_trivial:
            return inputs
        if self.reverse_input_channels:
            inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB)
        return (inputs - self.means) / self.std_scales


def load_labels(label_file):
    """Read one label per line from a text file."""
    with open(label_file, 'r') as f:
        labels_map = [x.strip() for x in f]
    return labels_map


def resize_image(image, size, keep_aspect_ratio=False, interpolation=cv2.INTER_LINEAR):
    """Resize to size=(width, height); with keep_aspect_ratio, fit inside size instead."""
    if not keep_aspect_ratio:
        resized_frame = cv2.resize(image, size, interpolation=interpolation)
    else:
        h, w = image.shape[:2]
        scale = min(size[1] / h, size[0] / w)
        resized_frame = cv2.resize(image, None, fx=scale, fy=scale, interpolation=interpolation)
    return resized_frame


def resize_image_with_aspect(image, size, interpolation=cv2.INTER_LINEAR):
    """Aspect-preserving resize that fits the image inside size=(width, height)."""
    return resize_image(image, size, keep_aspect_ratio=True, interpolation=interpolation)


def pad_image(image, size):
    """Zero-pad the image on the bottom/right up to size=(width, height)."""
    h, w = image.shape[:2]
    if h != size[1] or w != size[0]:
        image = np.pad(image, ((0, size[1] - h), (0, size[0] - w), (0, 0)),
                       mode='constant', constant_values=0)
    return image


def resize_image_letterbox(image, size, interpolation=cv2.INTER_LINEAR):
    """Aspect-preserving resize centered in a size=(width, height) canvas with zero borders."""
    ih, iw = image.shape[0:2]
    w, h = size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    image = cv2.resize(image, (nw, nh), interpolation=interpolation)
    dx = (w - nw) // 2
    dy = (h - nh) // 2
    # Any odd leftover pixel goes to the bottom/right edge.
    resized_image = np.pad(image, ((dy, dy + (h - nh) % 2), (dx, dx + (w - nw) % 2), (0, 0)),
                           mode='constant', constant_values=0)
    return resized_image


def crop_resize(image, size):
    """Center-crop to the target aspect ratio, then resize to the target size."""
    desired_aspect_ratio = size[1] / size[0]  # width / height
    # NOTE(review): the comment above implies size=(height, width) while the
    # other helpers treat size as (width, height) — confirm against callers.
    if desired_aspect_ratio == 1:
        if (image.shape[0] > image.shape[1]):
            offset = (image.shape[0] - image.shape[1]) // 2
            cropped_frame = image[offset:image.shape[1] + offset]
        else:
            offset = (image.shape[1] - image.shape[0]) // 2
            cropped_frame = image[:, offset:image.shape[0] + offset]
    elif desired_aspect_ratio < 1:
        new_width = math.floor(image.shape[0] * desired_aspect_ratio)
        offset = (image.shape[1] - new_width) // 2
        cropped_frame = image[:, offset:new_width + offset]
    elif desired_aspect_ratio > 1:
        new_height = math.floor(image.shape[1] / desired_aspect_ratio)
        offset = (image.shape[0] - new_height) // 2
        cropped_frame = image[offset:new_height + offset]

    return cv2.resize(cropped_frame, size)


# Map of resize strategy name -> implementation (see ImageModel.resize_type).
RESIZE_TYPES = {
    'crop' : crop_resize,
    'standard': resize_image,
    'fit_to_window': resize_image_with_aspect,
    'fit_to_window_letterbox': resize_image_letterbox,
}


# Map of interpolation name -> OpenCV interpolation flag.
INTERPOLATION_TYPES = {
    'LINEAR': cv2.INTER_LINEAR,
    'CUBIC': cv2.INTER_CUBIC,
    'NEAREST': cv2.INTER_NEAREST,
    'AREA': cv2.INTER_AREA,
}
def nms(x1, y1, x2, y2, scores, thresh, include_boundaries=False, keep_top_k=None):
    """Greedy non-maximum suppression over axis-aligned boxes.

    Boxes are given as parallel coordinate arrays; returns the indices of the
    boxes kept, in descending score order. With include_boundaries, box edges
    count as one pixel of extent.
    """
    border = 1 if include_boundaries else 0
    areas = (x2 - x1 + border) * (y2 - y1 + border)
    # Candidate indices, best score first.
    ranked = scores.argsort()[::-1]

    if keep_top_k:
        ranked = ranked[:keep_top_k]

    selected = []
    while ranked.size > 0:
        best, rest = ranked[0], ranked[1:]
        selected.append(best)

        # Intersection of the best box with every remaining box.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + border) * np.maximum(0.0, iy2 - iy1 + border)

        union = areas[best] + areas[rest] - inter
        # Guard against zero-area unions (degenerate boxes): IoU defaults to 0.
        iou = np.divide(inter, union, out=np.zeros_like(inter, dtype=float), where=union != 0)

        # Keep only boxes that do not overlap the chosen one too much.
        ranked = rest[iou <= thresh]

    return selected


def softmax(logits, axis=None, keepdims=False):
    """Numerically stable softmax: shift by the global max before exponentiating."""
    shifted = np.exp(logits - np.max(logits))
    return shifted / np.sum(shifted, axis=axis, keepdims=keepdims)
class SegmentationModel(ImageModel):
    """Wrapper for semantic-segmentation networks with one image input and one output."""
    __model__ = 'Segmentation'

    def __init__(self, model_adapter, configuration=None, preload=False):
        super().__init__(model_adapter, configuration, preload)
        self._check_io_number(1, 1)
        if self.path_to_labels:
            # A labels file, when provided, overrides any 'labels' parameter value.
            self.labels = load_labels(self.path_to_labels)

        self.output_blob_name = self._get_outputs()

    def _get_outputs(self):
        """Return the output layer name; records the channel count in self.out_channels.

        out_channels is 0 for 3D outputs (already per-pixel class indices) and
        the channel dimension for 4D outputs (per-class score maps).
        """
        layer_name = next(iter(self.outputs))
        layer_shape = self.outputs[layer_name].shape

        if len(layer_shape) == 3:
            self.out_channels = 0
        elif len(layer_shape) == 4:
            self.out_channels = layer_shape[1]
        else:
            self.raise_error("Unexpected output layer shape {}. Only 4D and 3D output layers are supported".format(layer_shape))

        return layer_name

    @classmethod
    def parameters(cls):
        parameters = super().parameters()
        parameters.update({
            'labels': ListValue(description="List of class labels"),
            'path_to_labels': StringValue(description="Path to file with labels. Overrides the labels, if they sets via 'labels' parameter")
        })

        return parameters

    def postprocess(self, outputs, meta):
        """Convert the raw output to a uint8 class-index map at the original image size."""
        predictions = outputs[self.output_blob_name].squeeze()
        input_image_height = meta['original_shape'][0]
        input_image_width = meta['original_shape'][1]

        if self.out_channels < 2:  # assume the output is already ArgMax'ed
            result = predictions.astype(np.uint8)
        else:
            # Per-class score maps: pick the best class for each pixel.
            result = np.argmax(predictions, axis=0).astype(np.uint8)

        # Nearest-neighbor keeps class indices intact when upscaling.
        result = cv2.resize(result, (input_image_width, input_image_height), 0, 0, interpolation=cv2.INTER_NEAREST)
        return result


class SalientObjectDetectionModel(SegmentationModel):
    """Segmentation variant producing a per-pixel saliency map in [0, 1]."""
    __model__ = 'Salient_Object_Detection'

    def postprocess(self, outputs, meta):
        input_image_height = meta['original_shape'][0]
        input_image_width = meta['original_shape'][1]
        result = outputs[self.output_blob_name].squeeze()
        # Sigmoid: map raw logits to [0, 1] saliency scores.
        result = 1/(1 + np.exp(-result))
        result = cv2.resize(result, (input_image_width, input_image_height), 0, 0, interpolation=cv2.INTER_NEAREST)
        return result
"sdxl_turbo_square": 23 | self.pipe.reshape(1, 512, 512, 0) 24 | elif model_name == "sd_3.5_med_turbo_square": 25 | self.pipe.reshape(1, 512, 512, 0.5) 26 | elif model_name == "sdxl_base_1.0_square": 27 | self.pipe.reshape(1, 1024, 1024, self.pipe.get_generation_config().guidance_scale) 28 | else: 29 | self.pipe.reshape(1, 512, 512, self.pipe.get_generation_config().guidance_scale) 30 | self.pipe.compile(device[0], device[1], device[2], config=properties) 31 | 32 | def __call__( 33 | self, 34 | prompt, 35 | negative_prompt=None, 36 | num_inference_steps = 32, 37 | guidance_scale = 7.5, 38 | seed: Optional[int] = None, 39 | callback = None, 40 | callback_userdata = None 41 | ): 42 | print(f"Running Stable Diffusion with prompt: {prompt}") 43 | 44 | def callback_genai(step, num_inference_steps, latent): 45 | if callback: 46 | callback(step, callback_userdata) 47 | return False 48 | 49 | if negative_prompt is None: 50 | image_tensor = self.pipe.generate(prompt,num_inference_steps=num_inference_steps,guidance_scale=guidance_scale,rng_seed=seed,callback=callback_genai) 51 | else: 52 | image_tensor = self.pipe.generate(prompt,num_inference_steps=num_inference_steps,guidance_scale=guidance_scale,negative_prompt=negative_prompt,rng_seed=seed,callback=callback_genai) 53 | 54 | return Image.fromarray(image_tensor.data[0]) 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/openvino_common/models_ov/stable_diffusion_engine_inpainting_genai.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright(C) 2022-2023 Intel Corporation 3 | SPDX - License - Identifier: Apache - 2.0 4 | 5 | """ 6 | #from .model import Model 7 | 8 | 9 | import openvino 10 | import openvino_genai 11 | import numpy as np 12 | 13 | from PIL import Image 14 | 15 | 16 | 17 | 18 | class StableDiffusionEngineInpaintingGenai: 19 | def __init__(self, 
model: str, device: str = "GPU"): 20 | 21 | self.device = device 22 | self.pipe = openvino_genai.InpaintingPipeline(model, device) 23 | 24 | def read_image(self,path: str) -> openvino.Tensor: 25 | 26 | pic = Image.open(path).convert("RGB") 27 | width, height = pic.size 28 | print(f"Image loaded: {path} (Width: {width}, Height: {height})") 29 | 30 | # Convert to numpy array (H, W, C) → (1, C, H, W) for OpenVINO 31 | image_data = np.array(pic.getdata()).reshape(1, pic.size[1], pic.size[0], 3).astype(np.uint8) 32 | 33 | return openvino.Tensor(image_data) 34 | 35 | 36 | 37 | def __call__( 38 | self, 39 | prompt, 40 | image_path: str = None, 41 | mask_path: str = None, 42 | negative_prompt=None, 43 | scheduler=None, 44 | strength = 0.5, 45 | num_inference_steps = 32, 46 | guidance_scale = 7.5, 47 | callback = None, 48 | callback_userdata = None 49 | ): 50 | width = 768 51 | height = 432 52 | 53 | image = self.read_image(image_path) 54 | mask_image = self.read_image(mask_path) 55 | 56 | 57 | print(f"Running inpainting with prompt: {prompt}") 58 | 59 | 60 | def callback_genai(step, num_inference_steps, latent): 61 | #print(f"Image generation step: {step} / {num_inference_steps}") 62 | if callback: 63 | callback(step, callback_userdata) 64 | return False 65 | 66 | 67 | if (image.shape[1] == image.shape[2]): 68 | 69 | 70 | image_tensor = self.pipe.generate(prompt, image, mask_image,num_inference_steps=num_inference_steps,negative_prompt=negative_prompt,callback=callback_genai) 71 | else: 72 | image_tensor = self.pipe.generate(prompt, image, mask_image, width=width, height=height,num_inference_steps=num_inference_steps,negative_prompt=negative_prompt,callback=callback_genai) 73 | 74 | 75 | 76 | 77 | return Image.fromarray(image_tensor.data[0]) 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/openvino_common/performance_metrics.py: 
def put_highlighted_text(frame, message, position, font_face, font_scale, color, thickness):
    """Draw `message` on `frame` with a white outline behind it for readability."""
    cv2.putText(frame, message, position, font_face, font_scale, (255, 255, 255), thickness + 1)  # white border
    cv2.putText(frame, message, position, font_face, font_scale, color, thickness)


class Statistic:
    """Accumulator for latency, elapsed time and frame count over some window."""

    def __init__(self):
        self.latency = 0.0    # summed per-frame latency, seconds
        self.period = 0.0     # wall-clock span covered, seconds
        self.frame_count = 0  # number of frames folded in

    def combine(self, other):
        """Fold another Statistic's totals into this one."""
        self.latency += other.latency
        self.period += other.period
        self.frame_count += other.frame_count


class PerformanceMetrics:
    """Tracks moving-window and total latency/FPS statistics for a processing loop."""

    def __init__(self, time_window=1.0):
        # 'time_window' defines the length of the timespan over which the
        # 'current fps' value is calculated.
        self.time_window_size = time_window
        self.last_moving_statistic = Statistic()
        self.current_moving_statistic = Statistic()
        self.total_statistic = Statistic()
        self.last_update_time = None

    def update(self, last_request_start_time, frame=None):
        """Record one completed frame that started at `last_request_start_time`.

        If `frame` is given, the current latency/FPS are painted onto it.
        """
        current_time = perf_counter()

        if self.last_update_time is None:
            self.last_update_time = last_request_start_time

        self.current_moving_statistic.latency += current_time - last_request_start_time
        self.current_moving_statistic.period = current_time - self.last_update_time
        self.current_moving_statistic.frame_count += 1

        # Roll the moving window into the totals once it exceeds the configured span.
        if current_time - self.last_update_time > self.time_window_size:
            self.last_moving_statistic = self.current_moving_statistic
            self.total_statistic.combine(self.last_moving_statistic)
            self.current_moving_statistic = Statistic()
            self.last_update_time = current_time

        if frame is not None:
            self.paint_metrics(frame)

    def paint_metrics(self, frame, position=(15, 30), font_scale=0.75, color=(200, 10, 10), thickness=2):
        """Draw the last window's latency and FPS stats over `frame`."""
        current_latency, current_fps = self.get_last()
        if current_latency is not None:
            put_highlighted_text(frame, "Latency: {:.1f} ms".format(current_latency * 1e3),
                                 position, cv2.FONT_HERSHEY_COMPLEX, font_scale, color, thickness)
        if current_fps is not None:
            put_highlighted_text(frame, "FPS: {:.1f}".format(current_fps),
                                 (position[0], position[1]+30), cv2.FONT_HERSHEY_COMPLEX, font_scale, color, thickness)

    def get_last(self):
        """(latency_seconds, fps) for the last completed window; None where undefined."""
        return (self.last_moving_statistic.latency / self.last_moving_statistic.frame_count
                if self.last_moving_statistic.frame_count != 0
                else None,
                self.last_moving_statistic.frame_count / self.last_moving_statistic.period
                if self.last_moving_statistic.period != 0.0
                else None)

    def get_total(self):
        """(latency_seconds, fps) aggregated over everything seen so far; None where undefined."""
        frame_count = self.total_statistic.frame_count + self.current_moving_statistic.frame_count
        return (((self.total_statistic.latency + self.current_moving_statistic.latency) / frame_count)
                if frame_count != 0
                else None,
                (frame_count / (self.total_statistic.period + self.current_moving_statistic.period))
                if frame_count != 0
                else None)

    def get_latency(self):
        """Total average latency in milliseconds, or None before any frame was recorded.

        Bug fix: the previous version multiplied unconditionally and raised
        TypeError (None * 1e3) when called before the first update().
        """
        total_latency = self.get_total()[0]
        return None if total_latency is None else total_latency * 1e3

    def log_total(self):
        """Log the aggregated latency and FPS via the module logger."""
        total_latency, total_fps = self.get_total()
        log.info('Metrics report:')
        log.info("\tLatency: {:.1f} ms".format(total_latency * 1e3) if total_latency is not None else "\tLatency: N/A")
        log.info("\tFPS: {:.1f}".format(total_fps) if total_fps is not None else "\tFPS: N/A")
15 | """ 16 | 17 | from time import perf_counter 18 | from typing import Dict, Set 19 | 20 | from performance_metrics import PerformanceMetrics 21 | 22 | 23 | def parse_devices(device_string): 24 | colon_position = device_string.find(':') 25 | if colon_position != -1: 26 | device_type = device_string[:colon_position] 27 | if device_type == 'HETERO' or device_type == 'MULTI': 28 | comma_separated_devices = device_string[colon_position + 1:] 29 | devices = comma_separated_devices.split(',') 30 | for device in devices: 31 | parenthesis_position = device.find(':') 32 | if parenthesis_position != -1: 33 | device = device[:parenthesis_position] 34 | return devices 35 | return (device_string,) 36 | 37 | 38 | def parse_value_per_device(devices: Set[str], values_string: str)-> Dict[str, int]: 39 | """Format: :,: or just """ 40 | values_string_upper = values_string.upper() 41 | result = {} 42 | device_value_strings = values_string_upper.split(',') 43 | for device_value_string in device_value_strings: 44 | device_value_list = device_value_string.split(':') 45 | if len(device_value_list) == 2: 46 | if device_value_list[0] in devices: 47 | result[device_value_list[0]] = int(device_value_list[1]) 48 | elif len(device_value_list) == 1 and device_value_list[0] != '': 49 | for device in devices: 50 | result[device] = int(device_value_list[0]) 51 | elif device_value_list[0] != '': 52 | raise RuntimeError(f'Unknown string format: {values_string}') 53 | return result 54 | 55 | 56 | def get_user_config(flags_d: str, flags_nstreams: str, flags_nthreads: int)-> Dict[str, str]: 57 | from openvino import Core, properties 58 | config = {} 59 | 60 | devices = set(parse_devices(flags_d)) 61 | 62 | 63 | device_nstreams = parse_value_per_device(devices, flags_nstreams) 64 | core = Core() 65 | for device in devices: 66 | supported_properties = core.get_property(device, properties.supported_properties()) 67 | if device == 'CPU': # CPU supports a few special performance-oriented keys 68 | # limit 
class AsyncPipeline:
    """Drives a model's asynchronous inference: submit inputs, collect results,
    and keep per-stage (preprocess / inference / postprocess) performance metrics.
    """

    def __init__(self, model):
        self.model = model
        self.model.load()

        # Finished requests, keyed by the caller-supplied id.
        self.completed_results = {}
        # Exceptions raised inside the adapter callback are collected here so
        # the main thread can re-raise them.
        self.callback_exceptions = []
        self.model.model_adapter.set_callback(self.callback)

        self.preprocess_metrics = PerformanceMetrics()
        self.inference_metrics = PerformanceMetrics()
        self.postprocess_metrics = PerformanceMetrics()

    def callback(self, request, callback_args):
        """Adapter completion hook: store the finished request's raw result under its id."""
        try:
            get_result_fn, (id, meta, preprocessing_meta, start_time) = callback_args
            self.completed_results[id] = (get_result_fn(request), meta, preprocessing_meta, start_time)
        except Exception as e:
            self.callback_exceptions.append(e)

    def submit_data(self, inputs, id, meta={}):
        """Preprocess `inputs` and launch an async inference tagged with `id`."""
        preprocessing_start_time = perf_counter()
        inputs, preprocessing_meta = self.model.preprocess(inputs)
        self.preprocess_metrics.update(preprocessing_start_time)

        infer_start_time = perf_counter()
        self.model.infer_async(inputs, (id, meta, preprocessing_meta, infer_start_time))

    def get_raw_result(self, id):
        """Pop and return the raw result tuple for `id`, or None if not finished yet."""
        return self.completed_results.pop(id, None)

    def get_result(self, id):
        """Return (postprocessed_result, merged_meta) for `id`, or None if still pending."""
        raw = self.get_raw_result(id)
        if not raw:
            return None
        raw_result, meta, preprocess_meta, infer_start_time = raw
        self.inference_metrics.update(infer_start_time)

        postprocessing_start_time = perf_counter()
        processed = self.model.postprocess(raw_result, preprocess_meta), {**meta, **preprocess_meta}
        self.postprocess_metrics.update(postprocessing_start_time)
        return processed

    def is_ready(self):
        """True if the model can accept another async request."""
        return self.model.is_ready()

    def await_all(self):
        """Block until every submitted request completes."""
        self.model.await_all()

    def await_any(self):
        """Block until at least one in-flight request completes."""
        self.model.await_any()
16 | """ 17 | 18 | import logging 19 | import sys 20 | 21 | from time import perf_counter 22 | 23 | import cv2 24 | import numpy as np 25 | from openvino import AsyncInferQueue, Core, PartialShape, layout_helpers, get_version, Dimension 26 | from adapters import create_core, OpenvinoAdapter 27 | 28 | 29 | from models_ov.segmentation import SegmentationModel 30 | 31 | from pipelines import get_user_config, AsyncPipeline 32 | 33 | from performance_metrics import PerformanceMetrics 34 | from models import OutputTransform 35 | 36 | 37 | logging.basicConfig(format='[ %(levelname)s ] %(message)s', level=logging.INFO, stream=sys.stdout) 38 | log = logging.getLogger() 39 | 40 | 41 | class SegmentationVisualizer: 42 | pascal_voc_palette = [ 43 | (0, 0, 0), 44 | (128, 0, 0), 45 | (0, 128, 0), 46 | (128, 128, 0), 47 | (0, 0, 128), 48 | (128, 0, 128), 49 | (0, 128, 128), 50 | (128, 128, 128), 51 | (64, 0, 0), 52 | (192, 0, 0), 53 | (64, 128, 0), 54 | (192, 128, 0), 55 | (64, 0, 128), 56 | (192, 0, 128), 57 | (64, 128, 128), 58 | (255, 0, 0), 59 | (0, 64, 0), 60 | (128, 64, 0), 61 | (0, 192, 0), 62 | (128, 192, 0), 63 | (0, 64, 128) 64 | ] 65 | 66 | def __init__(self, colors_path=None): 67 | if colors_path: 68 | self.color_palette = self.get_palette_from_file(colors_path) 69 | else: 70 | self.color_palette = self.pascal_voc_palette 71 | self.color_map = self.create_color_map() 72 | 73 | def get_palette_from_file(self, colors_path): 74 | with open(colors_path, 'r') as file: 75 | colors = [] 76 | for line in file.readlines(): 77 | values = line[line.index('(')+1:line.index(')')].split(',') 78 | colors.append([int(v.strip()) for v in values]) 79 | return colors 80 | 81 | def create_color_map(self): 82 | classes = np.array(self.color_palette, dtype=np.uint8)[:, ::-1] # RGB to BGR 83 | color_map = np.zeros((256, 1, 3), dtype=np.uint8) 84 | classes_num = len(classes) 85 | color_map[:classes_num, 0, :] = classes 86 | color_map[classes_num:, 0, :] = np.random.uniform(0, 255, 
size=(256-classes_num, 3)) 87 | return color_map 88 | 89 | def apply_color_map(self, input): 90 | input_3d = cv2.merge([input, input, input]) 91 | return cv2.LUT(input_3d, self.color_map) 92 | 93 | 94 | class SaliencyMapVisualizer: 95 | def apply_color_map(self, input): 96 | saliency_map = (input * 255.0).astype(np.uint8) 97 | saliency_map = cv2.merge([saliency_map, saliency_map, saliency_map]) 98 | return saliency_map 99 | 100 | 101 | def render_segmentation(frame, masks, visualiser, only_masks=False): 102 | output = visualiser.apply_color_map(masks) 103 | 104 | return output 105 | 106 | 107 | def run(frame, model_path, device): 108 | plugin_config = get_user_config(device, '', None) 109 | model_adapter = OpenvinoAdapter(create_core(), model_path, device=device, plugin_config=plugin_config,max_num_requests=1, model_parameters={}) 110 | model = SegmentationModel.create_model('segmentation', model_adapter, None) 111 | visualizer = SegmentationVisualizer(None) 112 | #model.log_layers_info() 113 | 114 | #model, visualizer = get_model(ie, model_path) 115 | pipeline = AsyncPipeline(model) 116 | 117 | if pipeline.is_ready(): 118 | start_time = perf_counter() 119 | pipeline.submit_data(frame, 0, {'frame': frame, 'start_time': start_time}) 120 | else: 121 | # Wait for empty request 122 | pipeline.await_any() 123 | 124 | if pipeline.callback_exceptions: 125 | raise pipeline.callback_exceptions[0] 126 | 127 | pipeline.await_all() 128 | # Process all completed requests 129 | results = pipeline.get_result(0) 130 | 131 | while results is None: 132 | results = pipeline.get_result(0) 133 | 134 | if results: 135 | objects, frame_meta = results 136 | frame = frame_meta['frame'] 137 | start_time = frame_meta['start_time'] 138 | frame = render_segmentation(frame, objects, visualizer) 139 | 140 | 141 | return frame 142 | 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- 
def convert_result_to_image(result, model_name) -> np.ndarray:
    """
    Convert a raw superresolution network result of floating point numbers to
    an image with integer values from 0-255.  Values outside this range are
    clipped to 0 and 255.

    :param result: a single superresolution network result in N,C,H,W shape
    :param model_name: name of the model to determine processing steps
    :return: resulting H,W,C uint8 image as np.ndarray
    """
    if "edsr" in model_name:
        # EDSR path: drop the batch dim and move channels last.  No 255
        # rescale here — assumes the output is already on the 0-255 scale
        # (matches the original behavior; confirm against the model).
        image = result[0].transpose(1, 2, 0)
    else:
        # Other models emit values that need rescaling to the 0-255 range.
        image = result.squeeze(0).transpose(1, 2, 0)
        image *= 255
    return np.clip(image, 0, 255).astype(np.uint8)
48 | 49 | :param image: input image as np.ndarray 50 | :param model_path: path to the model file 51 | :param device: device to run the inference on 52 | :param model_name: name of the model to determine processing steps 53 | :return: super resolution image as np.ndarray 54 | """ 55 | try: 56 | if model_name == "edsr": 57 | h, w = image.shape 58 | else: 59 | h, w, _ = image.shape 60 | 61 | core = ov.Core() 62 | 63 | if "esrgan" in model_name and "gpu" not in device.lower(): 64 | core.set_property({'CACHE_DIR': os.path.join(os.path.dirname(model_path), 'cache')}) 65 | 66 | model = core.read_model(model=model_path) 67 | 68 | if "esrgan" in model_name or "edsr" in model_name: 69 | original_image_key = model.inputs.pop() 70 | else: 71 | original_image_key, bicubic_image_key = model.inputs 72 | input_height, _ = list(original_image_key.shape)[2:] 73 | target_height, _ = list(bicubic_image_key.shape)[2:] 74 | upsample_factor = int(target_height / input_height) 75 | 76 | shapes = {} 77 | for input_layer in model.inputs: 78 | layer_name = input_layer.names.pop() 79 | if layer_name in ["0", "input.1", "x.1"]: 80 | shapes[input_layer] = input_layer.partial_shape 81 | shapes[input_layer][2] = h 82 | shapes[input_layer][3] = w 83 | elif layer_name == "1": 84 | shapes[input_layer] = input_layer.partial_shape 85 | shapes[input_layer][2] = upsample_factor * h 86 | shapes[input_layer][3] = upsample_factor * w 87 | 88 | model.reshape(shapes) 89 | compiled_model = core.compile_model(model=model, device_name=device) 90 | 91 | if "esrgan" in model_name or "edsr" in model_name: 92 | original_image_key = compiled_model.inputs.pop() 93 | else: 94 | original_image_key, bicubic_image_key = compiled_model.inputs 95 | 96 | output_key = compiled_model.output(0) 97 | 98 | if "edsr" in model_name: 99 | image = np.expand_dims(image, axis=-1) 100 | 101 | input_image_original = np.expand_dims(image.transpose(2, 0, 1), axis=0) 102 | if "esrgan" in model_name: 103 | input_image_original = 
input_image_original / 255.0 104 | 105 | inputs = {original_image_key.any_name: input_image_original} 106 | 107 | if not ("esrgan" in model_name or "edsr" in model_name): 108 | bicubic_image = cv2.resize(src=image, dsize=(w * upsample_factor, h * upsample_factor), interpolation=cv2.INTER_CUBIC) 109 | input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0) 110 | inputs[bicubic_image_key.any_name] = input_image_bicubic 111 | 112 | 113 | result = compiled_model(inputs)[output_key] 114 | result_image = convert_result_to_image(result, model_name) 115 | return result_image 116 | 117 | except Exception as e: 118 | log.error(f"Error during model run: {e}") 119 | return None 120 | 121 | if __name__ == "__main__": 122 | img = cv2.imread(r"D:\git\\GIMP-OV\sampleinput\haze.png") 123 | if img is not None: 124 | result_image = run(img, r"C:\GIMP-OV\weights\superresolution-ov\realesrgan.xml", "NPU", "esrgan") 125 | if result_image is not None: 126 | cv2.imwrite("esrgan_ov.png", result_image) 127 | else: 128 | log.error("Failed to generate the super resolution image.") 129 | else: 130 | log.error("Failed to read the input image.") 131 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/openvino_common/tokens_bert.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2020 Intel Corporation 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | """ 16 | 17 | import unicodedata 18 | import string 19 | 20 | # load vocabulary file for encoding 21 | def load_vocab_file(vocab_file_name): 22 | with open(vocab_file_name, "r", encoding="utf-8") as r: 23 | return {t.rstrip("\n"): i for i, t in enumerate(r.readlines())} 24 | 25 | 26 | # split word by vocab items and get tok codes 27 | # iteratively return codes 28 | def encode_by_voc(w, vocab): 29 | # remove mark and control chars 30 | def clean_word(w): 31 | wo = "" # accumulator for output word 32 | for c in unicodedata.normalize("NFD", w): 33 | c_cat = unicodedata.category(c) 34 | # remove mark nonspacing code and controls 35 | if c_cat != "Mn" and c_cat[0] != "C": 36 | wo += c 37 | return wo 38 | 39 | w = clean_word(w) 40 | 41 | res = [] 42 | for s0, e0 in split_to_words(w): 43 | s, e = s0, e0 44 | tokens = [] 45 | while e > s: 46 | subword = w[s:e] if s == s0 else "##" + w[s:e] 47 | if subword in vocab: 48 | tokens.append(vocab[subword]) 49 | s, e = e, e0 50 | else: 51 | e -= 1 52 | if s < e0: 53 | tokens = [vocab['[UNK]']] 54 | res.extend(tokens) 55 | return res 56 | 57 | #split big text into words by spaces 58 | #iteratively return words 59 | def split_to_words(text): 60 | prev_is_sep = True # mark initial prev as space to start word from 0 char 61 | for i, c in enumerate(text + " "): 62 | is_punc = (c in string.punctuation or unicodedata.category(c)[0] == "P") 63 | cur_is_sep = (c.isspace() or is_punc) 64 | if prev_is_sep != cur_is_sep: 65 | if prev_is_sep: 66 | start = i 67 | else: 68 | yield start, i 69 | del start 70 | if is_punc: 71 | yield i, i+1 72 | prev_is_sep = cur_is_sep 73 | 74 | # get big text and return list of token id and start-end positions for each id in original texts 75 | def text_to_tokens(text, vocab): 76 | tokens_id = [] 77 | tokens_se = [] 78 | for s, e in split_to_words(text): 79 | for tok in encode_by_voc(text[s:e], 
def get_seg(input_image, model_name="deeplabv3", device="CPU", weight_path=None):
    """Run semantic segmentation on `input_image` with the chosen OpenVINO model.

    Parameters:
        input_image: image array to segment.
        model_name: "deeplabv3" selects its own IR; any other value falls back
            to the ADAS segmentation model.
        device: OpenVINO device string (e.g. "CPU", "GPU").
        weight_path: root of the model weights; resolved via get_weight_path()
            when not supplied.

    Returns:
        Whatever the underlying `run` pipeline produces (the rendered frame).
    """
    if weight_path is None:
        weight_path = get_weight_path()

    # deeplabv3 has a dedicated IR file; everything else uses the ADAS model.
    model_file = "deeplabv3.xml" if model_name == "deeplabv3" else "semantic-segmentation-adas-0001.xml"
    return run(
        input_image,
        os.path.join(weight_path, "semseg-ov", model_file),
        device,
    )
def get_sr(img, s, model_name="sr_1033", weight_path=None, device="CPU"):
    """Upscale `img` with the selected superresolution model.

    The result is resized by `s` divided by the model's native upscale factor
    (esrgan: 4x, edsr: 2x, default model: 3x), so `s` is the effective scale.

    Parameters:
        img: input image.
        s: requested overall scale factor.
        model_name: "esrgan", "edsr", or anything else for the default 1033 model.
        weight_path: weights root; resolved via get_weight_path() when None.
        device: OpenVINO device string.
    """
    if weight_path is None:
        weight_path = get_weight_path()

    model_dir = os.path.join(weight_path, "superresolution-ov")

    if model_name == "esrgan":
        # Run inference on a worker thread; native upscale factor is 4x.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(
                run, img,
                os.path.join(model_dir, "realesrgan.xml"),
                device,
                model_name
            )
            out = future.result()
        out = cv2.resize(out, (0, 0), fx=s / 4, fy=s / 4)
    elif model_name == "edsr":
        # EDSR works on single channels: process B, G, R separately, then re-merge.
        outputs = [
            run(channel, os.path.join(model_dir, "edsr.xml"), device, model_name)
            for channel in cv2.split(np.array(img))
        ]
        # NOTE(review): the second argument to cv2.merge is the optional `dst`
        # parameter; passing the literal 3 here looks suspicious — confirm.
        out = cv2.merge([outputs[0], outputs[1], outputs[2]], 3)
        out = cv2.resize(out, (0, 0), fx=s / 2, fy=s / 2)
    else:
        # Default single-image superresolution model; 3x native factor.
        out = run(img, os.path.join(model_dir, "single-image-super-resolution-1033.xml"), device, model_name)
        out = cv2.resize(out, (0, 0), fx=s / 3, fy=s / 3)
    return out
"gimp_openvino_run.json"), "w") as file: 77 | json.dump({"inference_status": "failed"}, file) 78 | with open(os.path.join(weight_path, "..", "error_log.txt"), "w") as file: 79 | traceback.print_exception("DEBUG THE ERROR", file=file) 80 | # Uncoment below lines to debug 81 | #e_type, e_val, e_tb = sys.exc_info() 82 | #traceback.print_exception(e_type, e_val, e_tb, file=file) 83 | 84 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/test_client.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright(C) 2022-2023 Intel Corporation 3 | # SPDX - License - Identifier: Apache - 2.0 4 | 5 | import socket 6 | 7 | HOST = "127.0.0.1" # The server's hostname or IP address 8 | PORT = 65432 # The port used by the server 9 | 10 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 11 | s.connect((HOST, PORT)) 12 | s.sendall(b"Hello, GIMP") 13 | data = s.recv(1024) 14 | 15 | print(f"Received {data!r}") -------------------------------------------------------------------------------- /gimpopenvino/plugins/openvino_utils/tools/tools_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright(C) 2022-2023 Intel Corporation 3 | # SPDX - License - Identifier: Apache - 2.0 4 | import os 5 | import json 6 | 7 | base_model_dir = ( 8 | os.path.join(os.environ.get("GIMP_OPENVINO_MODELS_PATH")) 9 | if os.environ.get("GIMP_OPENVINO_MODELS_PATH") is not None 10 | else os.path.join(os.path.expanduser("~"), "openvino-ai-plugins-gimp") 11 | ) 12 | 13 | config_path_dir = ( 14 | os.path.join(os.environ.get("GIMP_OPENVINO_CONFIG_PATH")) 15 | if os.environ.get("GIMP_OPENVINO_CONFIG_PATH") is not None 16 | else os.path.join(os.path.dirname(__file__)) 17 | ) 18 | 19 | def get_weight_path(): 20 | config_path = config_path_dir 21 | #data={} 22 | with 
class SDOptionCache:
    """Persists Stable Diffusion UI options to a JSON file, overlaying them on defaults."""

    def __init__(self, config_path):
        """
        Initialize the option cache and load any previously saved options.

        Parameters:
            config_path (str): path of the JSON file used to persist options.
        """
        # Canonical set of option keys and their defaults; set() rejects
        # anything outside this schema.
        self.default_options = {
            "prompt": "",
            "negative_prompt": "",
            "num_images": 1,
            "num_infer_steps": 20,
            "num_infer_steps_turbo": 5,
            "guidance_scale": 7.5,
            "guidance_scale_turbo": 0.5,
            "model_name": "",
            "advanced_setting": False,
            "power_mode": "best power efficiency",
            "initial_image": None,
            "strength": 0.8,
            "seed": "",
            "inference_status": "success",
            "src_height": 512,
            "src_width": 512,
            "show_console": True,
        }
        self.cache_path = config_path
        self.options = dict(self.default_options)
        self.load()

    def load(self):
        """Overlay persisted options from the JSON file onto the defaults, if the file exists."""
        try:
            if os.path.exists(self.cache_path):
                with open(self.cache_path, "r") as cache_file:
                    self.options.update(json.load(cache_file))
        except (FileNotFoundError, json.JSONDecodeError) as e:
            print(f"Error loading {self.cache_path}: {e}. Using default options.")

    def get(self, key, default=None):
        """Return the value stored under `key`, or `default` when absent."""
        return self.options.get(key, default)

    def set(self, key, value):
        """Set `key` to `value`; raises KeyError for keys outside the default schema."""
        if key not in self.default_options:
            raise KeyError(f"'{key}' is not a valid option key.")
        self.options[key] = value

    def update(self, updates):
        """Merge a dict of key-value pairs into the options; raises ValueError otherwise."""
        if not isinstance(updates, dict):
            raise ValueError("Updates must be a dictionary.")
        self.options.update(updates)

    def save(self):
        """Write the current options to the JSON file; I/O errors are reported, not raised."""
        try:
            with open(self.cache_path, "w") as cache_file:
                json.dump(self.options, cache_file, indent=4)
        except IOError as e:
            print(f"Error writing to {self.cache_path}: {e}")
8 | """ 9 | import gi 10 | gi.require_version("Gimp", "3.0") 11 | gi.require_version("GimpUi", "3.0") 12 | gi.require_version("Gtk", "3.0") 13 | from gi.repository import Gimp, GimpUi, GObject, GLib, Gio, Gtk 14 | import gettext 15 | import subprocess 16 | #import pickle 17 | import json 18 | import os 19 | import sys 20 | sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","openvino_utils")]) 21 | from plugin_utils import * 22 | from tools.tools_utils import base_model_dir, config_path_dir 23 | 24 | _ = gettext.gettext 25 | image_paths = { 26 | "logo": os.path.join( 27 | os.path.dirname(os.path.realpath(__file__)), "..", "openvino_utils", "images", "plugin_logo.png" 28 | ), 29 | "error": os.path.join( 30 | os.path.dirname(os.path.realpath(__file__)), "..", "openvino_utils", "images", "error_icon.png" 31 | ), 32 | } 33 | 34 | 35 | class StringEnum: 36 | """ 37 | Helper class for when you want to use strings as keys of an enum. The values would be 38 | user facing strings that might undergo translation. 39 | 40 | The constructor accepts an even amount of arguments. Each pair of arguments 41 | is a key/value pair. 
42 | """ 43 | 44 | def __init__(self, *args): 45 | self.keys = [] 46 | self.values = [] 47 | 48 | for i in range(len(args) // 2): 49 | self.keys.append(args[i * 2]) 50 | self.values.append(args[i * 2 + 1]) 51 | 52 | def get_tree_model(self): 53 | """Get a tree model that can be used in GTK widgets.""" 54 | tree_model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING) 55 | for i in range(len(self.keys)): 56 | tree_model.append([self.keys[i], self.values[i]]) 57 | return tree_model 58 | 59 | 60 | class DeviceEnum: 61 | def __init__(self, supported_devices): 62 | self.keys = [] 63 | self.values = [] 64 | for i in supported_devices: 65 | 66 | self.keys.append(i) 67 | self.values.append(i) 68 | 69 | def get_tree_model(self): 70 | """Get a tree model that can be used in GTK widgets.""" 71 | tree_model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING) 72 | for i in range(len(self.keys)): 73 | tree_model.append([self.keys[i], self.values[i]]) 74 | return tree_model 75 | 76 | model_name_enum = StringEnum( 77 | "deeplabv3", 78 | _("deeplabv3"), 79 | "sseg-adas-0001", 80 | _("sseg-adas-0001"), 81 | ) 82 | 83 | 84 | def semseg(procedure, image, drawable, device_name, model_name, progress_bar, config_path_output): 85 | # Save inference parameters and layers 86 | weight_path = config_path_output["weight_path"] 87 | python_path = config_path_output["python_path"] 88 | plugin_path = config_path_output["plugin_path"] 89 | 90 | Gimp.context_push() 91 | image.undo_group_start() 92 | 93 | save_image(image, drawable, os.path.join(weight_path, "..", "cache.png")) 94 | 95 | with open(os.path.join(weight_path, "..", "gimp_openvino_run.json"), "w") as file: 96 | json.dump({"device_name": device_name,"model_name": model_name, "inference_status": "started"}, file) 97 | 98 | # Run inference and load as layer 99 | if sys.platform == 'win32': 100 | creationflags = subprocess.CREATE_NO_WINDOW 101 | else: 102 | creationflags = 0 # N/A on linux 103 | 104 | 
subprocess.call([python_path, plugin_path], 105 | creationflags=creationflags, 106 | stdout=subprocess.PIPE, 107 | stderr=subprocess.PIPE, 108 | text=True) 109 | #data_output = subprocess.call([python_path, plugin_path, device_name, model_name]) 110 | with open(os.path.join(weight_path, "..", "gimp_openvino_run.json"), "r") as file: 111 | data_output = json.load(file) 112 | image.undo_group_end() 113 | Gimp.context_pop() 114 | if data_output["inference_status"] == "success": 115 | result = Gimp.file_load( 116 | Gimp.RunMode.NONINTERACTIVE, 117 | Gio.file_new_for_path(os.path.join(weight_path, "..", "cache.png")), 118 | ) 119 | result_layer = result.get_layers()[0] 120 | copy = Gimp.Layer.new_from_drawable(result_layer, image) 121 | copy.set_name("Semantic Segmentation") 122 | copy.set_mode(Gimp.LayerMode.NORMAL_LEGACY) # DIFFERENCE_LEGACY 123 | copy.set_opacity(50) 124 | image.insert_layer(copy, None, -1) 125 | 126 | # Remove temporary layers that were saved 127 | my_dir = os.path.join(weight_path, "..") 128 | for f_name in os.listdir(my_dir): 129 | if f_name.startswith("cache"): 130 | os.remove(os.path.join(my_dir, f_name)) 131 | 132 | return procedure.new_return_values(Gimp.PDBStatusType.SUCCESS, GLib.Error()) 133 | else: 134 | show_dialog( 135 | "Inference not successful. 
See error_log.txt in GIMP-OpenVINO folder.", 136 | "Error !", 137 | "error", 138 | image_paths 139 | ) 140 | return procedure.new_return_values(Gimp.PDBStatusType.SUCCESS, GLib.Error()) 141 | 142 | 143 | def run(procedure, run_mode, image, layer, config, data): 144 | device_name = config.get_property("device_name") # this is sketchy 145 | model_name = config.get_property("model_name") 146 | 147 | if run_mode == Gimp.RunMode.INTERACTIVE: 148 | # Get all paths 149 | with open(os.path.join(config_path_dir, "gimp_openvino_config.json"), "r") as file: 150 | config_path_output = json.load(file) 151 | 152 | plugin_version = config_path_output["plugin_version"] 153 | 154 | config_path_output["plugin_path"] = os.path.join( 155 | os.path.dirname(os.path.realpath(__file__)), 156 | "..", 157 | "openvino_utils", 158 | "tools", 159 | "semseg_ov.py") 160 | 161 | device_name_enum = DeviceEnum(config_path_output["supported_devices"]) 162 | 163 | config = procedure.create_config() 164 | 165 | GimpUi.init("semseg_ov.py") 166 | use_header_bar = Gtk.Settings.get_default().get_property( 167 | "gtk-dialogs-use-header" 168 | ) 169 | 170 | title_bar_label = "Semantic Segmentation : "+ plugin_version 171 | dialog = GimpUi.Dialog( 172 | use_header_bar=use_header_bar, title=_(title_bar_label) 173 | ) 174 | dialog.add_button("_Cancel", Gtk.ResponseType.CANCEL) 175 | dialog.add_button("_Help", Gtk.ResponseType.APPLY) 176 | dialog.add_button("_Generate", Gtk.ResponseType.OK) 177 | 178 | vbox = Gtk.Box( 179 | orientation=Gtk.Orientation.VERTICAL, homogeneous=False, spacing=10 180 | ) 181 | dialog.get_content_area().add(vbox) 182 | vbox.show() 183 | 184 | # Create grid to set all the properties inside. 
185 | grid = Gtk.Grid() 186 | grid.set_column_homogeneous(False) 187 | grid.set_border_width(10) 188 | grid.set_column_spacing(10) 189 | grid.set_row_spacing(10) 190 | vbox.add(grid) 191 | grid.show() 192 | 193 | # Model Name parameter 194 | label = Gtk.Label.new_with_mnemonic(_("_Model Name")) 195 | grid.attach(label, 0, 1, 1, 1) 196 | label.show() 197 | combo = GimpUi.prop_string_combo_box_new( 198 | config, "model_name", model_name_enum.get_tree_model(), 0, 1 199 | ) 200 | grid.attach(combo, 1, 1, 1, 1) 201 | combo.show() 202 | 203 | # Device Name parameter 204 | label = Gtk.Label.new_with_mnemonic(_("_Device Name")) 205 | grid.attach(label, 2, 1, 1, 1) 206 | label.show() 207 | combo = GimpUi.prop_string_combo_box_new( 208 | config, "device_name", device_name_enum.get_tree_model(), 0, 1 209 | ) 210 | grid.attach(combo, 3, 1, 1, 1) 211 | combo.show() 212 | 213 | # Show Logo 214 | logo = Gtk.Image.new_from_file(image_paths["logo"]) 215 | # grid.attach(logo, 0, 0, 1, 1) 216 | vbox.pack_start(logo, False, False, 1) 217 | logo.show() 218 | 219 | progress_bar = Gtk.ProgressBar() 220 | vbox.add(progress_bar) 221 | progress_bar.show() 222 | 223 | # Wait for user to click 224 | dialog.show() 225 | while True: 226 | response = dialog.run() 227 | if response == Gtk.ResponseType.OK: 228 | device_name = config.get_property("device_name") 229 | model_name = config.get_property("model_name") 230 | 231 | result = semseg( 232 | procedure, image, layer, device_name, model_name, progress_bar, config_path_output 233 | ) 234 | return result 235 | elif response == Gtk.ResponseType.APPLY: 236 | url = "https://github.com/intel/openvino-ai-plugins-gimp.git/README.md" 237 | Gio.app_info_launch_default_for_uri(url, None) 238 | continue 239 | else: 240 | dialog.destroy() 241 | return procedure.new_return_values( 242 | Gimp.PDBStatusType.CANCEL, GLib.Error() 243 | ) 244 | 245 | 246 | class SemSeg(Gimp.PlugIn): 247 | ## GimpPlugIn virtual methods ## 248 | def do_query_procedures(self): 249 | 
return ["semseg-ov"] 250 | 251 | def do_set_i18n(self, procname): 252 | return True, 'gimp30-python', None 253 | 254 | def do_create_procedure(self, name): 255 | procedure = None 256 | if name == "semseg-ov": 257 | procedure = Gimp.ImageProcedure.new(self, name, 258 | Gimp.PDBProcType.PLUGIN, 259 | run, None) 260 | procedure.set_image_types("*") 261 | procedure.set_documentation( 262 | N_("Performs semantic segmentation of the current layer."), 263 | globals()[ 264 | "__doc__" 265 | ], # This includes the docstring, on the top of the file 266 | name, 267 | ) 268 | procedure.set_menu_label(N_("Semantic Segmentation")) 269 | procedure.set_attribution("Arisha Kumar", "OpenVINO-AI-Plugins", "2022") 270 | procedure.add_menu_path("/Layer/OpenVINO-AI-Plugins/") 271 | procedure.add_string_argument("device_name", _("Devce Name"), 272 | "Device Name: 'CPU', 'GPU'", 273 | "CPU", 274 | GObject.ParamFlags.READWRITE) 275 | procedure.add_string_argument("model_name", _("Model Name"), 276 | "Model Name: 'deeplabv3', 'sseg-adas-0001'", 277 | "deeplabv3", 278 | GObject.ParamFlags.READWRITE) 279 | 280 | return procedure 281 | 282 | 283 | Gimp.main(SemSeg.__gtype__, sys.argv) 284 | -------------------------------------------------------------------------------- /gimpopenvino/plugins/stable_diffusion_ov/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gimpopenvino/plugins/stable_diffusion_ov/__init__.py -------------------------------------------------------------------------------- /gimpopenvino/plugins/superresolution_ov/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/gimpopenvino/plugins/superresolution_ov/__init__.py 
-------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | set MODEL_SETUP=0 3 | if "%1" NEQ "" ( 4 | if /I "%1"=="-i" ( 5 | set MODEL_SETUP=1 6 | ) else ( 7 | if /I "%1"=="--install_models" ( 8 | set MODEL_SETUP=1 9 | ) else ( 10 | echo Invalid option: %1 11 | echo Use -i or --install_models to run model setup. 12 | exit /b 13 | ) 14 | ) 15 | ) 16 | 17 | REM Get the directory of the currently executing script 18 | set script_dir=%~dp0 19 | 20 | REM Remove the trailing backslash 21 | set script_dir=%script_dir:~0,-1% 22 | 23 | REM Get the current working directory 24 | set current_dir=%cd% 25 | 26 | REM Compare the directories 27 | if /i "%script_dir%"=="%current_dir%" ( 28 | REM If they are the same, move up one directory 29 | cd .. 30 | ) else ( 31 | echo. 32 | ) 33 | 34 | echo **** openvino-ai-plugins-gimp Setup started **** 35 | 36 | REM Create a virtual environment 37 | python -m venv gimpenv3 38 | 39 | call "gimpenv3\Scripts\activate" 40 | 41 | REM Install required packages 42 | pip install wmi 43 | pip install -r "%~dp0\requirements.txt" | find /V "already satisfied" 44 | pip install "%~dp0\." 45 | 46 | REM post install steps: 47 | python -c "from gimpopenvino import install_utils; install_utils.complete_install(repo_weights_dir=r'%script_dir%\weights')" 48 | 49 | echo **** openvino-ai-plugins-gimp Setup Ended **** 50 | call deactivate 51 | rem cls 52 | echo. 53 | REM copy to gimp plugin dir 54 | echo Installing plugin in "%appdata%\GIMP\3.0\plug-ins" 55 | for /d %%d in (openvino_utils semseg_ov stable_diffusion_ov superresolution_ov ) do ( robocopy "gimpenv3\Lib\site-packages\gimpopenvino\plugins\%%d" "%appdata%\GIMP\3.0\plug-ins\%%d" /mir /NFL /NDL /NJH /NJS /nc /ns /np ) 56 | 57 | echo *** openvino-ai-plugins-gimp Installed *** 58 | echo. 
59 | 60 | if %MODEL_SETUP% EQU 1 ( 61 | echo **** OpenVINO MODEL SETUP STARTED **** 62 | gimpenv3\Scripts\python.exe "%~dp0\model_setup.py" 63 | ) 64 | 65 | REM return to the directory where we started. 66 | cd %current_dir% 67 | exit /b 68 | -------------------------------------------------------------------------------- /install.ps1: -------------------------------------------------------------------------------- 1 | # Get the directory of the currently executing script 2 | $script_dir = Split-Path -Parent $MyInvocation.MyCommand.Definition 3 | 4 | # Grab all of the argumemts 5 | $arguments = $args -join " " 6 | 7 | # Call install.bat with the passed arguments 8 | $process = Start-Process "cmd.exe" -ArgumentList "/c", "$script_dir\install.bat $arguments" -Wait -NoNewWindow -PassThru 9 | 10 | # Capture the exit code of the batch script 11 | $exitCode = $process.ExitCode 12 | 13 | # Return the exit code 14 | exit $exitCode 15 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | MODEL_SETUP=0 3 | 4 | # Check the first argument ($1) 5 | if [[ -n "$1" ]]; then 6 | if [[ "$1" == "-i" || "$1" == "--install_models" ]]; then 7 | MODEL_SETUP=1 8 | else 9 | echo "Invalid option: $1" 10 | echo "Use -i or --install_models to run model setup." 11 | exit 1 12 | fi 13 | fi 14 | 15 | # Get the directory of the currently executing script 16 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 17 | 18 | # Get the current working directory 19 | current_dir="$(pwd)" 20 | 21 | # Compare the directories 22 | if [[ "$script_dir" == "$current_dir" ]]; then 23 | # If they are the same, move up one directory 24 | cd .. 
25 | fi 26 | 27 | echo "**** openvino-ai-plugins-gimp Setup started ****" 28 | 29 | # Create a virtual environment 30 | python3 -m venv gimpenv3 31 | 32 | # Activate the virtual environment 33 | source gimpenv3/bin/activate 34 | 35 | # Upgrade pip and install required packages 36 | pip3 install -r "$script_dir/requirements.txt" | grep -v "already satisfied" 37 | pip3 install "$script_dir/." 38 | 39 | # post installation steps 40 | python3 -c "from gimpopenvino import install_utils; install_utils.complete_install(repo_weights_dir=r'${script_dir}/weights')" 41 | 42 | echo "**** openvino-ai-plugins-gimp Setup Ended ****" 43 | # Deactivate the virtual environment 44 | deactivate 45 | 46 | # Copy to GIMP plugin dir 47 | echo "Installing plugin in $HOME/.config/GIMP/3.0/plug-ins" 48 | for d in openvino_utils semseg_ov stable_diffusion_ov superresolution_ov; do 49 | mkdir -p "$HOME/.config/GIMP/3.0/plug-ins/$d" 50 | rsync -a gimpenv3/lib/python*/site-packages/gimpopenvino/plugins/"$d" "$HOME/.config/GIMP/3.0/plug-ins/." 51 | done 52 | echo "*** openvino-ai-plugins-gimp Installed ***" 53 | 54 | # If MODEL_SETUP was set, run the model setup 55 | if [[ "$MODEL_SETUP" -eq 1 ]]; then 56 | echo "**** OpenVINO MODEL SETUP STARTED ****" 57 | gimpenv3/bin/python3 "$script_dir/model_setup.py" 58 | fi 59 | 60 | cd $current_dir 61 | 62 | exit 0 63 | -------------------------------------------------------------------------------- /model_setup.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import traceback 4 | import shutil 5 | from pathlib import Path 6 | 7 | from gimpopenvino.plugins.openvino_utils.tools.model_manager import ModelManager 8 | 9 | from gimpopenvino.install_utils import base_model_dir 10 | 11 | def main(): 12 | try: 13 | weight_path = os.path.join(base_model_dir,"weights") 14 | 15 | #Move to a temporary working directory in a known place. 16 | # This is where we'll be downloading stuff to, etc. 
17 | tmp_working_dir=os.path.join(weight_path, '..', 'mms_tmp') 18 | 19 | #if this dir doesn't exist, create it. 20 | if not os.path.isdir(tmp_working_dir): 21 | os.mkdir(tmp_working_dir) 22 | 23 | # go there. 24 | os.chdir(tmp_working_dir) 25 | 26 | model_manager = ModelManager(weight_path) 27 | 28 | # we want to display progress bars in the running terminal. 29 | model_manager.show_hf_download_tqdm = True 30 | 31 | while True: 32 | installed_models, installable_model_details = model_manager.get_all_model_details() 33 | 34 | user_choice_to_model_id = {} 35 | for i in range(0, len(installable_model_details)): 36 | install_details = installable_model_details[i] 37 | user_choice_to_model_id[str(i + 1)] = install_details 38 | 39 | print("=========Choose Stable Diffusion models to download=========") 40 | for user_choice_val, install_details in user_choice_to_model_id.items(): 41 | install_detail_full_name = install_details["name"] 42 | install_status = install_details["install_status"] 43 | if install_status == "installed": 44 | install_status = "(Installed)" 45 | else: 46 | install_status = "" 47 | print(f"{user_choice_val} - {install_detail_full_name} {install_status}") 48 | 49 | print("0 - Exit Stable Diffusion Model setup") 50 | choice = input("Enter the number for the model you want to download.\nSpecify multiple options using spaces: ") 51 | 52 | choices = choice.split(" ") 53 | 54 | for ch in choices: 55 | if ch == "0": 56 | print("Exiting Model setup...") 57 | return 58 | 59 | if ch in user_choice_to_model_id: 60 | install_details = user_choice_to_model_id[ch] 61 | model_manager.install_model(install_details["id"]) 62 | else: 63 | print(f"Invalid choice: {ch.strip()}") 64 | except Exception as e: 65 | traceback.print_exc() 66 | return 67 | 68 | if __name__ == "__main__": 69 | main() 70 | -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | accelerate==0.33.0 2 | aiohappyeyeballs==2.6.1 3 | aiohttp==3.11.18 4 | aiosignal==1.3.2 5 | altair==5.5.0 6 | attrs==25.3.0 7 | beautifulsoup4==4.13.4 8 | blinker==1.9.0 9 | cachetools==5.5.2 10 | certifi==2025.4.26 11 | charset-normalizer==3.4.2 12 | click==8.2.0 13 | colorama==0.4.6 14 | contourpy==1.3.2 15 | controlnet-aux==0.0.9 16 | cycler==0.12.1 17 | datasets==3.6.0 18 | diffusers==0.33.1 19 | dill==0.3.8 20 | einops==0.8.0 21 | filelock==3.18.0 22 | fire==0.6.0 23 | fonttools==4.58.0 24 | frozenlist==1.6.0 25 | fsspec==2025.3.0 26 | ftfy==6.2.3 27 | gdown==5.2.0 28 | gitdb==4.0.12 29 | GitPython==3.1.44 30 | huggingface-hub==0.31.4 31 | idna==3.10 32 | imageio==2.37.0 33 | importlib_metadata==8.7.0 34 | intel-openmp==2021.4.0 35 | Jinja2==3.1.6 36 | jsonschema==4.23.0 37 | jsonschema-specifications==2025.4.1 38 | kiwisolver==1.4.8 39 | lazy_loader==0.4 40 | markdown-it-py==3.0.0 41 | MarkupSafe==3.0.2 42 | matplotlib==3.10.3 43 | mdurl==0.1.2 44 | mkl==2021.4.0 45 | mpmath==1.3.0 46 | multidict==6.4.4 47 | multiprocess==0.70.16 48 | narwhals==1.40.0 49 | networkx==3.4.2 50 | numpy==1.26.4 51 | opencv-python==4.11.0.86 52 | opencv-python-headless==4.11.0.86 53 | openvino==2025.1.0 54 | openvino-genai==2025.1.0.0 55 | openvino-telemetry==2025.1.0 56 | openvino-tokenizers==2025.1.0.0 57 | optimum==1.25.3 58 | optimum-intel @ git+https://github.com/huggingface/optimum-intel.git@dba7dced0145b539bb0563e5d5741d00daeb8025 59 | packaging==24.2 60 | pandas==2.2.3 61 | peft==0.12.0 62 | pillow==10.4.0 63 | propcache==0.3.1 64 | protobuf==5.27.3 65 | psutil==7.0.0 66 | pyarrow==20.0.0 67 | pydeck==0.9.1 68 | Pygments==2.19.1 69 | pyparsing==3.2.3 70 | PySocks==1.7.1 71 | python-dateutil==2.9.0.post0 72 | pytz==2025.2 73 | PyYAML==6.0.2 74 | referencing==0.36.2 75 | regex==2024.11.6 76 | requests==2.32.3 77 | rich==13.9.4 78 | rpds-py==0.25.0 79 | safetensors==0.4.4 80 | 
scikit-image==0.25.2 81 | scipy==1.15.3 82 | sentencepiece==0.2.0 83 | setuptools==80.7.1 84 | six==1.17.0 85 | smmap==5.0.2 86 | soupsieve==2.7 87 | streamlit==1.37.1 88 | sympy==1.14.0 89 | tbb==2021.13.1 90 | tenacity==8.5.0 91 | termcolor==3.1.0 92 | tifffile==2025.5.10 93 | timm==0.4.5 94 | tokenizers==0.21.1 95 | toml==0.10.2 96 | torch==2.7.0 97 | torchvision==0.22.0 98 | tornado==6.5 99 | tqdm==4.66.5 100 | transformers==4.51.3 101 | typing_extensions==4.13.2 102 | tzdata==2025.2 103 | urllib3==2.4.0 104 | watchdog==4.0.2 105 | wcwidth==0.2.13 106 | xxhash==3.5.0 107 | yarl==1.20.0 108 | zipp==3.21.0 109 | 110 | -------------------------------------------------------------------------------- /sampleinput/car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/sampleinput/car.jpg -------------------------------------------------------------------------------- /sampleinput/haze.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/sampleinput/haze.png -------------------------------------------------------------------------------- /sampleinput/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/sampleinput/img.png -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/screenshot.png -------------------------------------------------------------------------------- /security.md: 
-------------------------------------------------------------------------------- 1 | # Security Policy 2 | Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation. 3 | 4 | ## Reporting a Vulnerability 5 | Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html). 6 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | import os 3 | from pathlib import Path 4 | import re 5 | import subprocess 6 | 7 | this_dir = Path(__file__).resolve().parent 8 | weights_dir = this_dir.joinpath("weights") 9 | readme = this_dir.joinpath("README.md") 10 | 11 | with open(readme, "r", encoding="utf8") as fh: 12 | long_description = fh.read() 13 | 14 | 15 | def get_plugin_version(file_dir=None): 16 | """ 17 | Retrieves the plugin version via git tags if available, ensuring 18 | the command is run from the directory where this Python file resides. 19 | 20 | Returns: 21 | str: Plugin version from git or "0.0.0dev0" if git is unavailable. 22 | 23 | Why use git describe for this? Because generates a human-readable string to 24 | identify a particular commit in a Git repository, using the closest (most recent) 25 | annotated tag reachable from that commit. Typically, it looks like: 26 | [--g] 27 | 28 | For example, if your commit is exactly tagged 1.0.0, running 29 | git describe might simply return 1.0.0. If there have been 10 30 | commits since the v1.0.0 tag, git describe might return something like: 31 | 1.0.0-10-g3ab12ef 32 | where: 33 | 34 | 1.0.0 is the closest tag in the commit history. 35 | 10 is how many commits you are ahead of that tag. 
36 | g3ab12ef is the abbreviated hash of the current commit. 37 | 38 | we can then turn this into a PEP440 compliant string 39 | """ 40 | try: 41 | raw_version = subprocess.check_output( 42 | ["git", "describe", "--tags"], 43 | cwd=file_dir, 44 | encoding="utf-8" 45 | ).strip() 46 | 47 | # Normalize the git version to PEP 440 48 | match = re.match(r"v?(\d+\.\d+\.\d+)(?:-(\d+)-g[0-9a-f]+)?", raw_version) 49 | 50 | if match: 51 | version, dev_count = match.groups() 52 | if dev_count: 53 | return f"{version}.dev{dev_count}" # PEP 440 dev version 54 | return version 55 | else: 56 | raise ValueError(f"Invalid version format: {raw_version}") 57 | except Exception as e: 58 | print(f"Error obtaining version: {e}") 59 | return "0.0.0" # Fallback version 60 | 61 | plugin_version = get_plugin_version(this_dir) 62 | 63 | setup( 64 | name="gimpopenvino", # Required 65 | version=plugin_version, # Required 66 | description="OpenVINO™ AI Plugins for GIMP", # Optional 67 | long_description=long_description, # Optional 68 | long_description_content_type="text/markdown", # Optional (see note above) 69 | url="http://github.com/intel/openvino-ai-plugins-gimp", # Optional 70 | author="Arisha Kumar", # Optional 71 | author_email="", # Optional 72 | 73 | classifiers=[ 74 | "Intended Audience :: Developers", 75 | "Topic :: Software Development :: Build Tools", 76 | "Programming Language :: Python :: 3.7", # Specify supported versions 77 | "Programming Language :: Python :: 3.8", 78 | "Programming Language :: Python :: 3.9", 79 | "Programming Language :: Python :: 3.10", 80 | "Programming Language :: Python :: 3.11", 81 | "Programming Language :: Python :: 3.12", 82 | "License :: OSI Approved :: MIT License", 83 | ], 84 | keywords="openvino gimp ai plugins", 85 | packages=find_packages(), 86 | python_requires=">=3.7", # Update Python requirement 87 | include_package_data=True, 88 | install_requires=[ 89 | "numpy", 90 | "scipy", 91 | "gdown", 92 | "requests", 93 | "opencv-python>=4.8.1.78", 
94 | "scikit-image", 95 | "timm==0.4.5", 96 | "transformers>=4.37.0", 97 | "diffusers", 98 | "controlnet-aux>=0.0.6", 99 | "openvino", 100 | "psutil", 101 | "matplotlib" 102 | ], 103 | ) 104 | 105 | -------------------------------------------------------------------------------- /testscases/StableDiffusion/README.md: -------------------------------------------------------------------------------- 1 | # Running Stable Diffusion without Installing GIMP 2 | 3 | For these instructions, we assume that you have cloned this repo inside `%userprofile%\GIMP` directory and that the virtual environment for GIMP is in `%userprofile%\GIMP\gimpenv3`. 4 | ## Install and Model Setup 5 | 6 | The following commands can be run in a new command window. 7 | 1. Navigate to %userprofile%\GIMP 8 | ``` 9 | cd %userprofile%\GIMP 10 | ``` 11 | 12 | 2. Run install.bat from %userprofile%\GIMP directory. 13 | 14 | ``` 15 | openvino-ai-plugins-gimp\install.bat --install_models 16 | ``` 17 | 3. As install.bat completes, you will be asked to download the models with a few prompts. There are now two options: 18 | 19 | Download the models for the first time, follow A. 20 | 21 | Recompile already downloaded models, follow B. 22 | 23 | A. Downloading for the first time
24 | Download "1", Stable Diffusion 1.5 Square, for instance.
25 | 26 | ![](../../Docs/figs/standalone2.png) 27 | 28 | B. Already installed but needs recompiling.
29 | If you have already downloaded the models, it will show as Installed. If you need to recompile, just choose it again.
30 | 31 | ![](../../Docs/figs/standalone1.png) 32 | 33 | 4. Activate the environment. 34 | ``` 35 | gimpenv3\Scripts\activate 36 | ``` 37 | ## Running the Stable Diffusion Commandline 38 | 39 | These commands should be run in the same window where you have activated the environment.
40 | ### Basic Options 41 | Run the python stable diffusion engine with the -h option to see the possible options. 42 | ``` 43 | python openvino-ai-plugins-gimp\testscases\StableDiffusion\stable_diffusion_engine_tc.py -h 44 | ``` 45 | You should see the following output: 46 | ![image](https://github.com/intel-sandbox/GIMP-ML-OV/assets/22227580/3ebb70d7-9e01-4494-8c55-89fa8f2c27d5) 47 | 48 | ### Example 1: Defaults 49 | Default options for INT8 on NPU 50 | ``` 51 | python openvino-ai-plugins-gimp\testscases\StableDiffusion\stable_diffusion_engine_tc.py -m sd_1.5_square_int8 -pm "best power efficiency" 52 | ``` 53 | ![image](https://github.com/intel-sandbox/GIMP-ML-OV/assets/22227580/72f730e5-775a-43d5-b197-4aa5cc235531) 54 | 55 | In the output above, you should see the iterations per second. This is an instantaneous measurement, and not an average. It gives you a general idea of the speed of Unet, but the more important number is the Image generation time (red arrow at the bottom). 56 | 57 | ### Example 2: Overriding the devices 58 | Command line options allow you to override the devices in config.json without having to modify the config. For example, moving the VAE to GPU in the power efficiency mode: 59 | ``` 60 | python openvino-ai-plugins-gimp\testscases\StableDiffusion\stable_diffusion_engine_tc.py -m sd_1.5_square_int8 -pm "best power efficiency" -vd GPU 61 | ``` 62 | ![image](https://github.com/intel-sandbox/GIMP-ML-OV/assets/22227580/b053f7e7-9403-4c5e-84ff-6a1aa55f1c22) 63 | 64 | ### Example 3: Generating multiple images. 65 | In general, the first inference is slower. The recommendation is to generate multiple images in a row. The script will output the average image generation time.
66 | ``` 67 | python openvino-ai-plugins-gimp\testscases\StableDiffusion\stable_diffusion_engine_tc.py -m sd_1.5_square_int8 -pm "best power efficiency" -vd GPU -n 5 68 | ``` 69 | ![image](https://github.com/intel-sandbox/GIMP-ML-OV/assets/22227580/a1128323-c327-4c03-9f10-49676fbda811) 70 | 71 | ### Example 4: Saving images to check accuracy 72 | Use the -si option to save images in the directory that the script is run from. 73 | ``` 74 | python openvino-ai-plugins-gimp\testscases\StableDiffusion\stable_diffusion_engine_tc.py -m sd_1.5_square_int8 -pm "best power efficiency" -vd GPU -n 5 -si 75 | ``` 76 | ![image](https://github.com/intel-sandbox/GIMP-ML-OV/assets/22227580/6ce2140c-2fff-4364-825f-816672acd14e) 77 | 78 | 79 | -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img0.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img1.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img10.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img11.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img11.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img12.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img13.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img14.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img15.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img16.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img16.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img2.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img3.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img4.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img5.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img6.png 
-------------------------------------------------------------------------------- /testscases/output/interpolateframes/img7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img7.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img8.png -------------------------------------------------------------------------------- /testscases/output/interpolateframes/img9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/interpolateframes/img9.png -------------------------------------------------------------------------------- /testscases/output/tmp-canny.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-canny.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-deblur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-deblur.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-deepcolor.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-deepcolor.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-dehaze.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-dehaze.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-denoise.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-denoise.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-depth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-depth.png -------------------------------------------------------------------------------- /testscases/output/tmp-edge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-edge.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-enlighten.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-enlighten.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-inpaint.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-inpaint.png -------------------------------------------------------------------------------- /testscases/output/tmp-kmeans.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-kmeans.jpg -------------------------------------------------------------------------------- /testscases/output/tmp-matting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-matting.png -------------------------------------------------------------------------------- /testscases/output/tmp-object-detect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-object-detect.png -------------------------------------------------------------------------------- /testscases/output/tmp-parseface.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-parseface.png -------------------------------------------------------------------------------- /testscases/output/tmp-semseg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-semseg.png 
-------------------------------------------------------------------------------- /testscases/output/tmp-super.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/output/tmp-super.png -------------------------------------------------------------------------------- /testscases/sampleinput/alpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/alpha.png -------------------------------------------------------------------------------- /testscases/sampleinput/blur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/blur.jpg -------------------------------------------------------------------------------- /testscases/sampleinput/car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/car.jpg -------------------------------------------------------------------------------- /testscases/sampleinput/chocolates.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/chocolates.jpg -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/images/celeba_01.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/images/celeba_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/images/celeba_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/images/celeba_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/images/celeba_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/images/celeba_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/images/celeba_04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/images/celeba_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/images/celeba_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/images/celeba_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/masks/celeba_01.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/masks/celeba_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/masks/celeba_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/masks/celeba_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/masks/celeba_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/masks/celeba_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/masks/celeba_04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/masks/celeba_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/celeba/masks/celeba_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/celeba/masks/celeba_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_01.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/images/places2_06.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/images/places2_06.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_05.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/places2/masks/places2_06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/places2/masks/places2_06.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/images/psv_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/images/psv_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/images/psv_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/images/psv_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/images/psv_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/images/psv_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/images/psv_04.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/images/psv_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/images/psv_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/images/psv_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/masks/psv_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/masks/psv_01.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/masks/psv_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/masks/psv_02.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/masks/psv_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/masks/psv_03.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/masks/psv_04.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/masks/psv_04.png -------------------------------------------------------------------------------- /testscases/sampleinput/examples/psv/masks/psv_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/examples/psv/masks/psv_05.png -------------------------------------------------------------------------------- /testscases/sampleinput/face.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/face.png -------------------------------------------------------------------------------- /testscases/sampleinput/haze.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/haze.png -------------------------------------------------------------------------------- /testscases/sampleinput/im1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/im1.png -------------------------------------------------------------------------------- /testscases/sampleinput/im2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/im2.png -------------------------------------------------------------------------------- /testscases/sampleinput/img.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/img.png -------------------------------------------------------------------------------- /testscases/sampleinput/portrait.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/testscases/sampleinput/portrait.jpg -------------------------------------------------------------------------------- /weights/semseg-ov/deeplabv3.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/weights/semseg-ov/deeplabv3.bin -------------------------------------------------------------------------------- /weights/semseg-ov/semantic-segmentation-adas-0001.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/weights/semseg-ov/semantic-segmentation-adas-0001.bin -------------------------------------------------------------------------------- /weights/superresolution-ov/realesrgan.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/weights/superresolution-ov/realesrgan.bin -------------------------------------------------------------------------------- /weights/superresolution-ov/single-image-super-resolution-1032.bin: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/weights/superresolution-ov/single-image-super-resolution-1032.bin -------------------------------------------------------------------------------- /weights/superresolution-ov/single-image-super-resolution-1033.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/intel/openvino-ai-plugins-gimp/a0369b4d7100eef1b0a9e8d4d0c6388bbd4bc25f/weights/superresolution-ov/single-image-super-resolution-1033.bin --------------------------------------------------------------------------------