├── .devcontainer
│   ├── Dockerfile
│   ├── devcontainer.env
│   ├── devcontainer.json
│   └── postCreateCommand.sh
├── .dockerignore
├── .editorconfig
├── .gitattributes
├── .github
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .gitmodules
├── .vscode
│   └── launch.json
├── LICENSE
├── README.md
├── app.py
├── demo_resources
│   └── images
│       └── llava_interactive_logo.png
├── lama_predict.py
├── lama_server.py
├── llava_interactive.py
├── ngrok.yml
├── pyproject.toml
├── requirements.txt
├── run_demo.sh
├── setup.sh
└── tox.ini
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/devcontainers/base:ubuntu-20.04
2 |
3 | SHELL [ "bash", "-c" ]
4 |
5 | # update apt and install packages
6 | RUN apt update && \
7 | apt install -yq \
8 | ffmpeg \
9 | dkms \
10 | build-essential
11 |
12 | # add user tools
13 | RUN sudo apt install -yq \
14 | jq \
15 | jp \
16 | tree \
17 | tldr
18 |
19 | # add git-lfs and install
20 | RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash && \
21 | sudo apt-get install -yq git-lfs && \
22 | git lfs install
23 |
24 | ############################################
25 | # Setup user
26 | ############################################
27 |
28 | USER vscode
29 |
30 | # install azcopy, a tool to copy to/from blob storage
31 | # for more info: https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-blobs-upload#upload-a-file
32 | RUN cd /tmp && \
33 | wget https://azcopyvnext.azureedge.net/release20230123/azcopy_linux_amd64_10.17.0.tar.gz && \
34 | tar xvf azcopy_linux_amd64_10.17.0.tar.gz && \
35 | mkdir -p ~/.local/bin && \
36 | mv azcopy_linux_amd64_10.17.0/azcopy ~/.local/bin && \
37 | chmod +x ~/.local/bin/azcopy && \
38 | rm -rf azcopy_linux_amd64*
39 |
40 | # Setup conda
41 | RUN cd /tmp && \
42 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
43 | bash ./Miniconda3-latest-Linux-x86_64.sh -b && \
44 | rm ./Miniconda3-latest-Linux-x86_64.sh
45 |
46 | # Install dotnet
47 | RUN cd /tmp && \
48 | wget https://dot.net/v1/dotnet-install.sh && \
49 | chmod +x dotnet-install.sh && \
50 | ./dotnet-install.sh --channel 7.0 && \
51 | ./dotnet-install.sh --channel 3.1 && \
52 | rm ./dotnet-install.sh
53 |
54 | # Install ngrok
55 | RUN curl -s https://ngrok-agent.s3.amazonaws.com/ngrok.asc | \
56 | sudo tee /etc/apt/trusted.gpg.d/ngrok.asc >/dev/null && \
57 | echo "deb https://ngrok-agent.s3.amazonaws.com buster main" | \
58 | sudo tee /etc/apt/sources.list.d/ngrok.list && \
59 |     sudo apt update && sudo apt install -yq ngrok
60 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.env:
--------------------------------------------------------------------------------
1 | # Efficient AI
2 | EFFICIENT_AI_SUBSCRIPTION_NAME="Efficient AI"
3 | EFFICIENT_AI_SUBSCRIPTION_ID=332431bf-68bf-46f9-ab8b-c7bfe2197219
4 |
5 | RESOURCE_GROUP_NAME=llava-interative
6 | CONTENT_MODERATOR_NAME=llava-int-contentmoderator
7 | CONTENT_SAFETY_NAME=llava-int-contentsafety
8 |
9 | KEYVAULT_RESOURCE_NAME=llava-interactive-kv
10 | GUARDLIST_SECRET_NAME=guardlist-key
11 | NGROK_SECRET_NAME=ngrok-authtoken-dlg
12 |
13 | PYTHONNET_RUNTIME=coreclr
14 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLaVA - Interactive Demo",
3 | "build": {
4 | "dockerfile": "Dockerfile",
5 | "context": "..",
6 | "args": {}
7 | },
8 | "features": {
9 | "ghcr.io/devcontainers/features/docker-in-docker:2": {},
10 | "ghcr.io/devcontainers/features/azure-cli:1": {},
11 | "ghcr.io/azure/azure-dev/azd:0": {},
12 | "ghcr.io/devcontainers/features/powershell:1": {},
13 | "ghcr.io/devcontainers/features/common-utils:2": {},
14 | "ghcr.io/devcontainers-contrib/features/zsh-plugins:0": {
15 | // "omzPlugins": "https://github.com/zsh-users/zsh-autosuggestions",
16 | },
17 | },
18 | // "forwardPorts": [],
19 | "postCreateCommand": "bash ./.devcontainer/postCreateCommand.sh",
20 | "customizations": {
21 | "vscode": {
22 | "settings": {
23 | "python.analysis.autoImportCompletions": true,
24 | "python.analysis.autoImportUserSymbols": true,
25 | "python.defaultInterpreterPath": "~/miniconda3/envs/llava_int/bin/python",
26 | "python.formatting.provider": "black",
27 | "python.linting.enabled": true,
28 | "python.linting.flake8Enabled": true,
29 | "isort.check": true,
30 | "dev.containers.copyGitConfig": true,
31 | "terminal.integrated.defaultProfile.linux": "zsh",
32 | "terminal.integrated.profiles.linux": {
33 | "zsh": {
34 | "path": "/usr/bin/zsh"
35 | },
36 | },
37 | "[python]": {
38 | }
39 | },
40 | "extensions": [
41 | "aaron-bond.better-comments",
42 | "eamodio.gitlens",
43 | "EditorConfig.EditorConfig",
44 | "foxundermoon.shell-format",
45 | "GitHub.copilot-chat",
46 | "GitHub.copilot",
47 | "lehoanganh298.json-lines-viewer",
48 | "mhutchie.git-graph",
49 | "ms-azuretools.vscode-docker",
50 | "ms-dotnettools.dotnet-interactive-vscode",
51 | "ms-python.black-formatter",
52 | "ms-python.flake8",
53 | "ms-python.isort",
54 | "ms-python.python",
55 | "ms-python.vscode-pylance",
56 | "njpwerner.autodocstring",
57 | "redhat.vscode-yaml",
58 | "stkb.rewrap",
59 | "yzhang.markdown-all-in-one",
60 | ]
61 | }
62 | },
63 | "mounts": [],
64 | "runArgs": [
65 | "--gpus",
66 | "all",
67 | // "--ipc",
68 | // "host",
69 | "--ulimit",
70 | "memlock=-1",
71 | "--env-file",
72 | ".devcontainer/devcontainer.env"
73 | ],
74 | }
75 |
--------------------------------------------------------------------------------
/.devcontainer/postCreateCommand.sh:
--------------------------------------------------------------------------------
1 | git config --global safe.directory '*'
2 | git config --global core.editor "code --wait"
3 | git config --global pager.branch false
4 |
5 | # Set AZCOPY concurrency to auto
6 | echo "export AZCOPY_CONCURRENCY_VALUE=AUTO" >> ~/.zshrc
7 | echo "export AZCOPY_CONCURRENCY_VALUE=AUTO" >> ~/.bashrc
8 |
9 | # Add dotnet to PATH
10 | echo 'export PATH="$PATH:$HOME/.dotnet"' >> ~/.zshrc
11 | echo 'export PATH="$PATH:$HOME/.dotnet"' >> ~/.bashrc
12 |
13 | # Activate conda by default
14 | echo "source /home/vscode/miniconda3/bin/activate" >> ~/.zshrc
15 | echo "source /home/vscode/miniconda3/bin/activate" >> ~/.bashrc
16 |
17 | # Use llava_int environment by default
18 | echo "conda activate llava_int" >> ~/.zshrc
19 | echo "conda activate llava_int" >> ~/.bashrc
20 |
21 | # Activate conda on current shell
22 | source /home/vscode/miniconda3/bin/activate
23 |
24 | # Create and activate llava_int environment
25 | conda create -n llava_int -c conda-forge -c pytorch python=3.10.8 pytorch=2.0.1 -y
26 | conda activate llava_int
27 |
28 | # Install Nvidia Cuda Compiler
29 | conda install -y -c nvidia cuda-compiler
30 |
31 | pip install -r requirements.txt
32 |
33 | source setup.sh
34 |
35 | echo "postCreateCommand.sh COMPLETE!"
36 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | LLaVA/llava-v1.5-13b/*
2 | *.pt
3 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | # Unix-style newlines with a newline ending every file
4 | [*]
5 | end_of_line = lf
6 | insert_final_newline = true
7 | trim_trailing_whitespace = true
8 | charset = utf-8
9 |
10 | # 4 space indentation
11 | [*.{py,json}]
12 | indent_style = space
13 | indent_size = 4
14 |
15 | # 2 space indentation
16 | [*.{md,sh,yaml,yml}]
17 | indent_style = space
18 | indent_size = 2
19 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # https://git-scm.com/docs/gitattributes
2 |
3 | # Set the default behavior, in case people don't have core.autocrlf set.
4 | # https://git-scm.com/docs/gitattributes#_end_of_line_conversion
5 | * text=auto
6 |
7 | # common python attributes, taken from https://github.com/alexkaratarakis/gitattributes/blob/710900479a2bedeec7003d381719521ffbb18bf8/Python.gitattributes
8 | # Source files
9 | # ============
10 | *.pxd text diff=python
11 | *.py text diff=python
12 | *.py3 text diff=python
13 | *.pyw text diff=python
14 | *.pyx text diff=python
15 | *.pyz text diff=python
16 | *.pyi text diff=python
17 |
18 | # Binary files
19 | # ============
20 | *.db binary
21 | *.p binary
22 | *.pkl binary
23 | *.pickle binary
24 | *.pyc binary export-ignore
25 | *.pyo binary export-ignore
26 | *.pyd binary
27 |
28 | # Jupyter notebook
29 | *.ipynb text eol=lf
30 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Issue
2 |
3 |
4 |
5 | # Solution
6 |
7 |
8 |
9 | ## Video or Screenshots
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__
3 | *.pyc
4 | *.egg-info
5 | dist
6 |
7 | # Log
8 | *.log
9 | *.log.*
10 | *.json
11 | *.jsonl
12 |
13 | # Data
14 | !**/alpaca-data-conversation.json
15 | *.png
16 | *.jpg
17 |
18 | # Editor
19 | .idea
20 | *.swp
21 |
22 | # Other
23 | .DS_Store
24 | wandb
25 | output
26 |
27 | checkpoints
28 | ckpts*
29 | *.pt
30 |
31 | .ipynb_checkpoints
32 | *.ipynb
33 |
34 | # Github
35 | !.github/*
36 |
37 | # VScode
38 | !.vscode/*
39 |
40 | # Dev Container
41 | !.devcontainer/*
42 |
43 | # Environment Variables
44 | .env*
45 |
46 | # Demo Resources
47 | !demo_resources/**/*
48 |
49 | # Ignore No Hangup Process Output
50 | nohup.out
51 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | ignore = all
2 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "name": "Python: llava_interactive",
9 | "type": "python",
10 | "request": "launch",
11 | "module": "llava_interactive",
12 | "justMyCode": true,
13 | "args": [
14 | "--moderate",
15 | "input_text_guardlist",
16 | "input_text_aics",
17 | "input_text_aics_jailbreak",
18 | "input_image_aics",
19 | // "input_openai",
20 | "output_text_guardlist",
21 | "output_text_aics",
22 | "gligen_input_text_guardlist",
23 | "gligen_input_text_aics",
24 | "gligen_output_image_aics",
25 | ],
26 | "envFile": "${workspaceFolder}/.env",
27 | "env": {
28 | "LLAVA_INTERACTIVE_HOME": ".",
29 | "LOGLEVEL": "DEBUG",
30 | "CUDA_VISIBLE_DEVICES": "2,3",
31 | },
32 | },
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # 🌋 LLaVA-Interactive
3 |
4 | *An All-in-One Demo for Image Chat, Segmentation and Generation/Editing.*
5 |
6 | [[Project Page](https://llava-vl.github.io/llava-interactive/)] [Demo] [[Paper](https://arxiv.org/abs/2311.00571)]
7 |
8 | > ⚠️ As of Jun 10, 2024, the live demo (playground) website has been disabled.
9 |
10 |
11 |
12 |
13 |
14 |
15 | # Install
16 |
17 | Installing this project requires CUDA 11.7 or above. Follow the steps below:
18 |
19 | ```bash
20 | git clone https://github.com/LLaVA-VL/LLaVA-Interactive-Demo.git
21 | conda create -n llava_int -c conda-forge -c pytorch python=3.10.8 pytorch=2.0.1 -y
22 | conda activate llava_int
23 | cd LLaVA-Interactive-Demo
24 | pip install -r requirements.txt
25 | source setup.sh
26 | ```
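
After setup, you can quickly confirm that PyTorch sees your GPU (an optional sanity check, assuming the `llava_int` environment is active):

```bash
python -c "import torch; print(torch.version.cuda, torch.cuda.is_available())"
```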
27 |
28 | # Run the demo
29 |
30 | To run the demo, simply run the shell script.
31 |
32 | ```bash
33 | ./run_demo.sh
34 | ```
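
The demo requires a GPU. To pin it to specific devices, set `CUDA_VISIBLE_DEVICES` before launching (an optional example; adjust the device ids for your machine):

```bash
CUDA_VISIBLE_DEVICES=0 ./run_demo.sh
```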
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | # Citation
43 |
44 | If you find LLaVA-Interactive useful for your research and applications, please cite using this BibTeX:
45 | ```bibtex
46 | @article{chen2023llava_interactive,
47 | author = {Chen, Wei-Ge and Spiridonova, Irina and Yang, Jianwei and Gao, Jianfeng and Li, Chunyuan},
48 | title = {LLaVA-Interactive: An All-in-One Demo for Image Chat, Segmentation, Generation and Editing},
49 | publisher = {arXiv:2311.00571},
50 | year = {2023}
51 | }
52 | ```
53 |
54 | # Related Projects
55 |
56 | - [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA)
57 | - [SEEM: Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once)
58 | - [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://github.com/gligen/GLIGEN)
59 |
60 | # Acknowledgement
61 |
62 | - [LaMa](https://github.com/advimman/lama): The tool we use to fill in background holes left by removed objects.
63 |
64 | # Terms of use
65 |
66 | By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
67 |
68 | # License
69 |
70 | This project, including LLaVA and SEEM, is licensed under the Apache License. See the [LICENSE](LICENSE) file for more details. The GLIGEN project is licensed under the MIT License.
71 |
72 | The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
73 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 | import io
4 | import os
5 | import sys
6 |
7 | import cv2
8 | import gradio as gr
9 | import numpy as np
10 | import requests
11 | from functools import partial
12 | from PIL import Image, ImageOps
13 |
14 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'GLIGEN/demo'))
15 | import GLIGEN.demo.app as GLIGEN
16 |
17 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'SEEM/demo_code'))
18 | import SEEM.demo_code.app as SEEM  # GLIGEN.demo.app must be imported before this; otherwise it hits a protobuf error
19 |
20 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'LLaVA'))
21 | import LLaVA.llava.serve.gradio_web_server as LLAVA
22 |
23 |
24 | class ImageMask(gr.components.Image):
25 | """
26 |     Sets: source="upload", tool="sketch"
27 | """
28 |
29 | is_template = True
30 |
31 | def __init__(self, **kwargs):
32 | super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
33 |
34 | def preprocess(self, x):
35 | if isinstance(x, str):
36 | x = {'image': x, 'mask': x}
37 | elif isinstance(x, dict):
38 | if x['mask'] is None and x['image'] is None:
39 |                 pass  # nothing to normalize
40 | elif x['image'] is None:
41 | x['image'] = str(x['mask'])
42 | elif x['mask'] is None:
43 | x['mask'] = str(
44 | x['image']
45 |                 )  # not sure why the mask is None sometimes; this prevents preprocess() from crashing
46 | elif x is not None:
47 | assert False, 'Unexpected type {0} in ImageMask preprocess()'.format(type(x))
48 |
49 | return super().preprocess(x)
50 |
51 |
52 | css = """
53 | #compose_btn {
54 | --tw-border-opacity: 1;
55 | border-color: rgb(255 216 180 / var(--tw-border-opacity));
56 | --tw-gradient-from: rgb(255 216 180 / .7);
57 | --tw-gradient-to: rgb(255 216 180 / 0);
58 | --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
59 | --tw-gradient-to: rgb(255 176 102 / .8);
60 | --tw-text-opacity: 1;
61 | color: rgb(238 116 0 / var(--tw-text-opacity));
62 | }
63 | """
64 |
65 |
66 | def get_bounding_box(img):
67 | # Get the indices of all non-zero pixels
68 |     if not np.any(img):  # protect against an empty img
69 | return None
70 | non_zero_indices = np.nonzero(img)
71 |
72 | # Get the minimum and maximum indices for each axis
73 | min_x = np.min(non_zero_indices[1])
74 | max_x = np.max(non_zero_indices[1])
75 | min_y = np.min(non_zero_indices[0])
76 | max_y = np.max(non_zero_indices[0])
77 |
78 | # Return the bounding box as a tuple of (min_x, min_y, max_x, max_y)
79 | return (min_x, min_y, max_x, max_y)
80 |
81 |
82 | def composite_all_layers(base, objects): # debugging use only
83 | img = base.copy()
84 | for obj in objects:
85 | for i in range(obj['img'].shape[0]):
86 | for j in range(obj['img'].shape[1]):
87 | if obj['img'][i, j, 3] != 0:
88 | img[i, j] = obj['img'][i, j]
89 | return img
90 |
91 |
92 | def changed_objects_handler(mask_dilate_slider, state, evt: gr.SelectData):
93 | state['move_no'] += 1
94 |
95 | pos_x, pos_y = evt.index # obj moved out of scene is signaled by (10000, 10000)
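    # the image's alpha channel carries 255 - object_id (see get_segments), so invert the clicked value to recover the id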
96 | obj_id = 255 - evt.value
97 | print(f"obj {obj_id} moved by {pos_x}, {pos_y}")
98 |
99 | img = state['base_layer']
100 | for obj in state['changed_objects']:
101 | if obj['id'] == obj_id:
102 | img = obj['img']
103 | state['changed_objects'].remove(obj)
104 | break
105 |
106 | new_img = np.zeros_like(img)
107 | bbox = None
108 | for i in range(img.shape[0]):
109 | for j in range(img.shape[1]):
110 | if img[i, j, 3] == obj_id:
111 | new_i = i + pos_y
112 | new_j = j + pos_x
113 | if new_i >= 0 and new_i < img.shape[0] and new_j >= 0 and new_j < img.shape[1]:
114 | new_img[new_i, new_j] = img[i, j]
115 | img[i, j] = 0
116 |
117 | bbox = get_bounding_box(new_img) # returns None if obj moved out of scene
118 | print("bbox: ", bbox)
119 | state['changed_objects'].append({'id': obj_id, 'img': new_img, 'text': state['segment_info'][obj_id], 'box': bbox})
120 |
121 | # Enable for debugging only. See if the composited image is correct.
122 | # composed_img_updated = composite_all_layers(state['base_layer'], state['changed_objects'])
123 |     # filename = f"composited_image_{state['move_no']}.png"
124 | # cv2.imwrite(filename, composed_img_updated[:, :, 0:3])
125 |
126 | return mask_dilate_slider, state['base_layer_masked'], state
127 |
128 |
129 | def get_base_layer_mask(state):
130 | changed_obj_id = []
131 | for obj in state['changed_objects']:
132 | changed_obj_id.append(obj['id'])
133 |
134 | # union of mask of all objects
135 | img = state['orignal_segmented']
136 | mask = np.zeros(img.shape[:2], dtype=np.uint8)
137 | for i in range(img.shape[0]):
138 | for j in range(img.shape[1]):
139 | if img[i, j, 3] in changed_obj_id:
140 | mask[i, j] = 255
141 | state['base_layer_mask'] = mask
142 |
143 | mask_image = Image.fromarray(mask)
144 | if mask_image.mode != "L":
145 | mask_image = mask_image.convert("L")
146 | mask_image = ImageOps.invert(mask_image)
147 | # mask_image.save("mask_image.png")
148 |
149 | img = state['orignal_segmented']
150 | orig_image = Image.fromarray(img[:, :, :3])
151 |     # orig_image.save("orig_image.png")
152 | transparent = Image.new(orig_image.mode, orig_image.size, (0, 0, 0, 0))
153 | masked_image = Image.composite(orig_image, transparent, mask_image)
154 | # masked_image.save("get_masked_background_image.png")
155 |
156 | return masked_image, state
157 |
158 |
159 | def get_inpainted_background(state, mask_dilate_slider):
160 | # Define the URL of the REST API endpoint
161 | url = "http://localhost:9171/api/v2/image"
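    # this endpoint is served by lama_server.py, which is assumed to be running locally (started separately, e.g. by run_demo.sh)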
162 |
163 | img = state['orignal_segmented']
164 |     if not isinstance(img, Image.Image):
165 | img = Image.fromarray(img)
166 | # Create a BytesIO object and save the image there
167 | buffer = io.BytesIO()
168 | img.save(buffer, format="PNG")
169 | # Get the bytes value from the buffer
170 | img_bytes = buffer.getvalue()
171 |
172 | encoded_string = base64.b64encode(img_bytes).decode("utf-8")
173 |
174 | if mask_dilate_slider != 0:
175 | mask = state['base_layer_mask_enlarged']
176 | else:
177 | mask = state['base_layer_mask']
178 |     if not isinstance(mask, Image.Image):
179 | mask = Image.fromarray(mask)
180 |
181 | # mask has background as 1, lama needs object to be 1
182 | if mask.mode != "L":
183 | mask = mask.convert("L")
184 | mask = ImageOps.invert(mask)
185 |
186 | # Create a BytesIO object and save the image there
187 | buffer = io.BytesIO()
188 | mask.save(buffer, format="PNG")
189 | # Get the bytes value from the buffer
190 | mask_bytes = buffer.getvalue()
191 |
192 | encoded_string_mask = base64.b64encode(mask_bytes).decode("utf-8")
193 |
194 | # Create a POST request to the endpoint
195 | headers = {"Content-Type": "application/json"}
196 | data = {"image": encoded_string, "mask": encoded_string_mask}
197 | response = requests.post(url, headers=headers, json=data)
198 |
199 | # Check the status code of the response
200 | if response.status_code == 200:
201 | # The request was successful
202 | print("Image received successfully")
203 | image_data = response.content
204 | # Create a io.BytesIO object from the image data
205 | dataBytesIO = io.BytesIO(image_data)
206 | # Open the image using Image.open()
207 | image = Image.open(dataBytesIO)
208 | # image.save("lama_returned_image.png")
209 |
210 |     else:
211 |         # The request failed; raise so the caller never sees an undefined `image`
212 |         print("Error: HTTP status code {}".format(response.status_code))
213 |         print(response.text)
214 |         raise gr.Error("Inpainting request failed with HTTP status {}".format(response.status_code))
214 |
215 | return image
216 |
217 |
218 | def get_enlarged_masked_background(state, mask_dilate_slider):
219 | mask = state['base_layer_mask']
220 |
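    # dilate the union mask so inpainting also covers fringe pixels around the segmentation boundary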
221 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (mask_dilate_slider, mask_dilate_slider))
222 | mask_dilated = cv2.dilate(mask, kernel)
223 |
224 | # mask the original
225 | mask_image = Image.fromarray(mask_dilated)
226 | if mask_image.mode != "L":
227 | mask_image = mask_image.convert("L")
228 | mask_image = ImageOps.invert(mask_image)
229 | state['base_layer_mask_enlarged'] = mask_image
230 | # mask_image.save("enlarged_mask_image.png")
231 |
232 | img = state['orignal_segmented']
233 | orig_image = Image.fromarray(img[:, :, :3])
234 | transparent = Image.new(orig_image.mode, orig_image.size, (0, 0, 0, 0))
235 | masked_image = Image.composite(orig_image, transparent, mask_image)
236 | # masked_image.save("enlarged_masked_background_image.png")
237 |
238 | return masked_image, state
239 |
240 |
241 | def get_base_layer_inpainted(state, mask_dilate_slider):
242 | masked_img, state = get_enlarged_masked_background(state, mask_dilate_slider)
243 | inpainted_img = get_inpainted_background(state, mask_dilate_slider)
244 | state['base_layer_inpainted'] = np.array(inpainted_img)
245 | return masked_img, inpainted_img, state
246 |
247 |
248 | def log_image_and_mask(img, mask): # for debugging use only
249 | counter = 0
250 | for filename in os.listdir('.'):
251 | if filename.startswith('img_') and filename.endswith('.png'):
252 | try:
253 | num = int(filename[4:-4])
254 | if num > counter:
255 | counter = num
256 | except ValueError:
257 | pass
258 | counter += 1
259 | cv2.imwrite(f"img_{counter}.png", img)
260 | cv2.imwrite(f"img_{counter}_mask.png", mask.astype(np.uint8) * 255)
261 |
262 |
263 | def get_segments(img, task, reftxt, mask_dilate_slider, state):
264 | assert isinstance(state, dict)
265 | state['orignal_segmented'] = None
266 | state['base_layer'] = None
267 | state['base_layer_masked'] = None
268 | state['base_layer_mask'] = None
269 | state['base_layer_mask_enlarged'] = None
270 | state['base_layer_inpainted'] = None
271 | state['segment_info'] = None
272 | state['seg_boxes'] = {}
273 | state['changed_objects'] = []
274 | state['move_no'] = 0
275 |
276 | print("Calling SEEM_app.inference")
277 |
278 |     pil_image = Image.fromarray(img['image']) if isinstance(img['image'], np.ndarray) else img['image']
279 |     pil_mask = Image.fromarray(img['mask']) if isinstance(img['mask'], np.ndarray) else img['mask']
280 |     img = {'image': pil_image, 'mask': pil_mask}  # avoid NameError when the inputs are already PIL images
283 | img_ret, seg_info = SEEM.inference(img, task, reftxt=reftxt)
284 |     # SEEM doesn't always respect the input img dimensions
285 | tgt_size = (img['image'].width, img['image'].height)
286 | img_ret = img_ret.resize(tgt_size, resample=Image.Resampling.NEAREST)
287 | state['orignal_segmented'] = np.array(img_ret).copy()
288 | state['base_layer'] = np.array(img_ret)
289 | state['segment_info'] = seg_info
290 | img_ret_array = np.array(img_ret)
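    # invert the alpha channel: a pixel belonging to object obj_id then carries alpha 255 - obj_id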
291 | img_ret_array[:, :, 3] = 255 - img_ret_array[:, :, 3]
292 |     # NOTE: if written out as a PNG, the pixel values get messed up; same reason the client-side colors look weird.
293 | # cv2.imwrite(f"get_segments_img_ret.bmp", img_ret_array)
294 |
295 |     for obj_id, label in seg_info.items():
296 | obj_img = img_ret_array[:, :, 3] == 255 - obj_id
297 | # cv2.imwrite(f"img_{obj_id}.png", obj_img.astype(np.uint8) * 255)
298 | # log_image_and_mask(np.array(img['image']), obj_img)
299 | bbox = get_bounding_box(obj_img)
300 |         print(f"obj_id={obj_id}, label={label}, bbox={bbox}")
301 | state['seg_boxes'][obj_id] = bbox
302 |
303 | # add a special event, obj stays at the original spot
304 | data = {}
305 | data["index"] = (0, 0)
306 | data["value"] = 254 # ==> 1, the only object allowed for now
307 | data["selected"] = True
308 | evt = gr.SelectData(None, data)
309 | mask_dilate_slider, _, state = changed_objects_handler(mask_dilate_slider, state, evt)
310 |
311 | state['base_layer_masked'], state = get_base_layer_mask(state)
312 |     if mask_dilate_slider != 0:
313 |         enlarged_masked_background, state = get_enlarged_masked_background(state, mask_dilate_slider)
314 |     else:
315 |         enlarged_masked_background = state['base_layer_masked']  # keep the return value defined when no dilation is requested
314 | state['base_layer_inpainted'] = np.array(get_inpainted_background(state, mask_dilate_slider))
315 |
316 | return Image.fromarray(img_ret_array), enlarged_masked_background, state['base_layer_inpainted'], state
317 |
318 |
319 | def get_generated(grounding_text, fix_seed, rand_seed, state):
320 |     if 'base_layer_inpainted' not in state:
321 | raise gr.Error('The segmentation step must be completed first before generating a new image')
322 |
323 | inpainted_background_img = state['base_layer_inpainted']
324 | assert inpainted_background_img is not None, 'base layer should be inpainted after segment'
325 |
326 | state['boxes'] = []
327 | for items in state['changed_objects']:
328 | if items['box'] is not None:
329 | state['boxes'].append(items['box'])
330 |
331 | if len(state['boxes']) == 0:
332 | if len(grounding_text) != 0:
333 | grounding_text = []
334 | print("No grounding box found. Grounding text will be ignored.")
335 |             return inpainted_background_img.copy(), state  # match the (image, state) outputs expected by compose_btn.click
336 |
337 | print('Calling GLIGEN_app.generate')
338 | print('grounding_text: ', grounding_text)
339 | print(state['boxes'], len(state['boxes']))
340 | assert len(state['boxes']) == 1, 'Only handle one segmented object at a time'
341 |     if len(grounding_text) == 0:  # typically the user forgot to drag the object and didn't provide grounding text
342 |         raise gr.Error('Please provide grounding text for the identified object')
343 | out_gen_1, _, _, _, state = GLIGEN.generate(
344 | task='Grounded Inpainting',
345 | language_instruction='',
346 | grounding_texts=grounding_text,
347 | sketch_pad=inpainted_background_img,
348 | alpha_sample=0.3,
349 | guidance_scale=7.5,
350 | batch_size=1,
351 | fix_seed=fix_seed,
352 | rand_seed=rand_seed,
353 | use_actual_mask=False,
354 | append_grounding=True,
355 | style_cond_image=None,
356 | inpainting_image=inpainted_background_img,
357 | inpainting_mask=None,
358 | state=state,
359 | )
360 |
361 | return out_gen_1['value'], state
362 |
363 |
364 | def get_generated_full(
365 | task,
366 | language_instruction,
367 | grounding_instruction,
368 | sketch_pad,
369 | alpha_sample,
370 | guidance_scale,
371 | batch_size,
372 | fix_seed,
373 | rand_seed,
374 | use_actual_mask,
375 | append_grounding,
376 | style_cond_image,
377 | state,
378 | ):
379 | out_gen_1, _, _, _, state = GLIGEN.generate(
380 | task,
381 | language_instruction,
382 | grounding_instruction,
383 | sketch_pad,
384 | alpha_sample,
385 | guidance_scale,
386 | batch_size,
387 | fix_seed,
388 | rand_seed,
389 | use_actual_mask,
390 | append_grounding,
391 | style_cond_image,
392 | state,
393 | )
394 | return out_gen_1['value'], state
395 |
396 |
397 | def gligen_change_task(state):
398 | if state['working_image'] is not None:
399 | task = "Grounded Inpainting"
400 | else:
401 | task = "Grounded Generation"
402 | return task
403 |
404 |
405 | def clear_sketch_pad_mask(sketch_pad_image):
406 | sketch_pad = ImageMask.update(value=sketch_pad_image, visible=True)
407 | return sketch_pad
408 |
409 |
410 | def save_shared_state(img, state):
411 | if isinstance(img, dict) and 'image' in img:
412 | state['working_image'] = img['image']
413 | else:
414 | state['working_image'] = img
415 | return state
416 |
417 |
418 | def load_shared_state(state, task=None):
419 | if task == "Grounded Generation":
420 | return None
421 | else:
422 | return state['working_image']
423 |
424 |
425 | def update_shared_state(state, task):
426 | if task == "Grounded Generation":
427 | state['working_image'] = None
428 | return state
429 |
430 |
431 | def update_sketch_pad_trigger(sketch_pad_trigger, task):
432 | if task == "Grounded Generation":
433 | sketch_pad_trigger = sketch_pad_trigger + 1
434 | return sketch_pad_trigger
435 |
436 |
437 | def clear_grounding_info(state):
438 | state['boxes'] = []
439 | state['masks'] = []
440 | return state, ''
441 |
442 |
443 | def switch_to_generate():
444 | task = "Grounded Generation"
445 | return (
446 | task,
447 | gr.Image.update(visible=True),
448 | gr.Textbox.update(visible=True),
449 | gr.Textbox.update(visible=True),
450 | gr.Button.update(visible=True),
451 | gr.Button.update(visible=True),
452 | gr.Accordion.update(visible=True),
453 | )
454 |
455 |
456 | def switch_to_inpaint():
457 | task = "Grounded Inpainting"
458 | return (
459 | task,
460 | gr.Image.update(visible=True),
461 | gr.Textbox.update(visible=False),
462 | gr.Textbox.update(visible=True),
463 | gr.Button.update(visible=True),
464 | gr.Button.update(visible=True),
465 | gr.Accordion.update(visible=True),
466 | )
467 |
468 |
469 | def switch_to_compose():
470 | task = "Compose"
471 | return (
472 | task,
473 | gr.Image.update(visible=False),
474 | gr.Textbox.update(visible=False),
475 | gr.Textbox.update(visible=False),
476 | gr.Button.update(visible=False),
477 | gr.Button.update(visible=False),
478 | gr.Accordion.update(visible=False),
479 | )
480 |
481 |
482 | def copy_to_llava_input(img):
483 |     print('working image changed')
484 |     if not isinstance(img, Image.Image):
485 | img = Image.fromarray(img)
486 | return img
487 |
488 |
489 | title_markdown = """
490 | # LLaVA Interactive
491 | """
492 |
493 |
494 | def build_demo():
495 | demo = gr.Blocks(title="LLaVA Interactive", css=css + GLIGEN.css)
496 | with demo:
497 | compose_state = gr.State(
498 | {
499 | 'boxes': [],
500 | 'move_no': 0,
501 | 'base_layer': None,
502 | 'segment_info': None,
503 | 'seg_boxes': {},
504 | 'changed_objects': [],
505 | }
506 | )
507 | llava_state = gr.State()
508 | shared_state = gr.State({'working_image': None})
509 | gligen_state = gr.State({'draw_box': True})
510 |
511 | gr.Markdown('')
512 |         gr.Markdown(title_markdown)
513 | gr.Markdown('')
514 |
515 | gr.Markdown(
516 | '**Experience interactive multimodal chatting and image manipulation. Select a tab for your task and follow the instructions. Switch tasks anytime and ask questions in the chat window.**'
517 | )
518 |
519 | with gr.Row(visible=False):
520 | working_image = gr.Image(
521 | label="Working Image", type="numpy", elem_id="working_image", visible=False, interactive=False
522 | ) # hidden image to save current working image
523 | # for gligen
524 | sketch_pad_trigger = gr.Number(value=0, visible=False)
525 | sketch_pad_resize_trigger = gr.Number(value=0, visible=False)
526 | init_white_trigger = gr.Number(value=0, visible=False)
527 | image_scale = gr.Number(value=0, elem_id="image_scale", visible=False)
528 | task = gr.Radio(
529 | choices=["Grounded Generation", 'Grounded Inpainting', 'Compose'],
530 | type="value",
531 | value="Grounded Inpainting",
532 | label="Task",
533 | visible=False,
534 | )
535 |
536 | with gr.Row(equal_height=False):
537 | with gr.Column():
538 | with gr.Row():
539 | sketch_pad = ImageMask(
540 | label="Sketch Pad",
541 | type="numpy",
542 | shape=(512, 512),
543 | width=384,
544 | elem_id="img2img_image",
545 | brush_radius=20.0,
546 | visible=True,
547 | )
548 |
549 | compose_tab = gr.Tab("Remove or Change Objects")
550 | with compose_tab:
551 | gr.Markdown(
552 |                         "Segment an object by drawing a stroke on it or by entering referring text, then press the Segment button. Drag the highlighted object to move it, or drag it out of the frame to remove it. To replace a removed object with a new one, enter grounding text and press the Generate button repeatedly until you like the image."
553 | )
554 | with gr.Row().style(equal_height=False):
555 | with gr.Column():
556 | with gr.Group():
557 | with gr.Column():
558 | with gr.Row():
559 | segment_task = gr.Radio(
560 | ["Stroke", "Text"], value="Stroke", label='Choose segmentation method'
561 | )
562 | segment_text = gr.Textbox(label="Enter referring text")
563 | segment_btn = gr.Button("Segment", elem_id="segment-btn")
564 |
565 | with gr.Group():
566 | segmented_img = gr.Image(label="Move or delete object", tool="compose", height=256)
567 |
568 | with gr.Group():
569 | with gr.Column():
570 | grounding_text_box = gr.Textbox(
571 | label="Enter grounding text for generating a new image"
572 | )
573 | with gr.Row():
574 | compose_clear_btn = gr.Button("Clear", elem_id="compose_clear_btn")
575 | compose_btn = gr.Button("Generate", elem_id="compose_btn")
576 |
577 | with gr.Accordion("Advanced Options", open=False):
578 | with gr.Row():
579 | masked_background_img = gr.Image(
580 | label="Background", type='pil', interactive=False, height=256
581 | )
582 | inpainted_background_img = gr.Image(
583 | label="Inpainted Background", type='pil', interactive=False, height=256
584 | )
585 | mask_dilate_slider = gr.Slider(
586 | minimum=0.0,
587 | maximum=100,
588 | value=50,
589 | step=2,
590 | interactive=True,
591 | label="Mask dilation",
592 | visible=True,
593 | scale=20,
594 | )
595 | with gr.Row(visible=False):
596 | compose_fix_seed = gr.Checkbox(value=False, label="Fixed seed", visible=False)
597 | compose_rand_seed = gr.Slider(
598 | minimum=0, maximum=1000, step=1, value=0, label="Seed", visible=False
599 | )
600 |
601 | gligen_inpaint = gr.Tab("Inpaint New Objects")
602 | with gligen_inpaint:
603 | gr.Markdown(
604 | "Add a new object to the image by drawing its bounding box and giving an instruction. Press the “generate” button repeatedly until you like the image. Press “clear” to accept the image and start over with another object."
605 | )
606 |
607 | gligen = gr.Tab("Generate New Image")
608 | with gligen:
609 | gr.Markdown(
610 | "Generate a new image by giving a language instruction below. Draw a bounding box and give an instruction for any specific objects that need to be grounded in certain places. Hit the “generate” button repeatedly until you get the image you want."
611 | )
612 |
613 | with gr.Group(visible=False):
614 | language_instruction = gr.Textbox(
615 | label="Language instruction", elem_id='language_instruction', visible=False
616 | )
617 | grounding_instruction = gr.Textbox(
618 | label="Grounding instruction (Separated by semicolon)",
619 | elem_id='grounding_instruction',
620 | visible=False,
621 | )
622 | with gr.Row():
623 | gligen_clear_btn = gr.Button(value='Clear', visible=False)
624 | gligen_gen_btn = gr.Button(value='Generate', elem_id="generate-btn", visible=False)
625 |
626 | with gr.Group():
627 | out_imagebox = gr.Image(type="pil", label="Parsed Sketch Pad", height=256, visible=False)
628 |
629 | gligen_adv_options = gr.Accordion("Advanced Options", open=False, visible=False)
630 | with gligen_adv_options:
631 | with gr.Column():
632 | alpha_sample = gr.Slider(
633 | minimum=0, maximum=1.0, step=0.1, value=0.3, label="Scheduled Sampling (τ)"
634 | )
635 | guidance_scale = gr.Slider(minimum=0, maximum=50, step=0.5, value=7.5, label="Guidance Scale")
636 |
637 | with gr.Row(visible=False):
638 | batch_size = gr.Slider(
639 | minimum=1, maximum=4, step=1, value=1, label="Number of Samples", visible=False
640 | )
641 | append_grounding = gr.Checkbox(
642 | value=True, label="Append grounding instructions to the caption", visible=False
643 | )
644 | use_actual_mask = gr.Checkbox(value=False, label="Use actual mask for inpainting", visible=False)
645 | fix_seed = gr.Checkbox(value=False, label="Fixed seed", visible=False)
646 | rand_seed = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Seed", visible=False)
647 | use_style_cond = gr.Checkbox(value=False, label="Enable Style Condition", visible=False)
648 | style_cond_image = gr.Image(type="pil", label="Style Condition", visible=False, interactive=False)
649 |
650 | controller = GLIGEN.Controller()
651 | sketch_pad.edit(
652 | GLIGEN.draw,
653 | inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, gligen_state],
654 | outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, gligen_state],
655 | queue=False,
656 | )
657 | llava_image = gr.Image(label='sketch_pad_image', type='pil', visible=False, interactive=False)
658 | working_image.change(copy_to_llava_input, [working_image], [llava_image])
659 | sketch_pad.upload(save_shared_state, inputs=[sketch_pad, shared_state], outputs=shared_state).then(
660 | load_shared_state, [shared_state], working_image
661 | )
662 | grounding_instruction.change(
663 | GLIGEN.draw,
664 | inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, gligen_state],
665 | outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, gligen_state],
666 | queue=False,
667 | )
668 | gligen_clear_btn.click(
669 | GLIGEN.clear,
670 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
671 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
672 | queue=False,
673 | ).then(clear_grounding_info, gligen_state, [gligen_state, grounding_instruction]).then(
674 | load_shared_state, [shared_state], sketch_pad
675 | ).then(
676 | update_sketch_pad_trigger, [sketch_pad_trigger, task], sketch_pad_trigger
677 | )
678 | task.change(
679 | partial(GLIGEN.clear, switch_task=True),
680 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
681 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
682 | queue=False,
683 | ).then(load_shared_state, [shared_state, task], sketch_pad).then(
684 | update_sketch_pad_trigger, [sketch_pad_trigger, task], sketch_pad_trigger
685 | ).then(
686 | clear_grounding_info, gligen_state, [gligen_state, grounding_instruction]
687 | )
688 | sketch_pad_trigger.change(
689 | controller.init_white,
690 | inputs=[init_white_trigger],
691 | outputs=[sketch_pad, image_scale, init_white_trigger],
692 | queue=False,
693 | )
694 | sketch_pad_resize_trigger.change(
695 | controller.resize_masked, inputs=[gligen_state], outputs=[sketch_pad, gligen_state], queue=False
696 | )
697 |
698 | gligen_gen_btn.click(
699 | get_generated_full,
700 | inputs=[
701 | task,
702 | language_instruction,
703 | grounding_instruction,
704 | sketch_pad,
705 | alpha_sample,
706 | guidance_scale,
707 | batch_size,
708 | fix_seed,
709 | rand_seed,
710 | use_actual_mask,
711 | append_grounding,
712 | style_cond_image,
713 | gligen_state,
714 | ],
715 | outputs=[sketch_pad, gligen_state],
716 | queue=True,
717 | ).then(save_shared_state, [sketch_pad, shared_state], shared_state).then(
718 | load_shared_state, [shared_state], working_image
719 | )
720 |
721 | sketch_pad_resize_trigger.change(
722 | None, None, sketch_pad_resize_trigger, _js=GLIGEN.rescale_js, queue=False
723 | )
724 | init_white_trigger.change(None, None, init_white_trigger, _js=GLIGEN.rescale_js, queue=False)
725 | use_style_cond.change(
726 | lambda cond: gr.Image.update(visible=cond), use_style_cond, style_cond_image, queue=False
727 | )
728 | task.change(
729 | controller.switch_task_hide_cond,
730 | inputs=task,
731 | outputs=[use_style_cond, style_cond_image, alpha_sample, use_actual_mask],
732 | queue=False,
733 | )
734 |
735 | with gr.Column():
736 | gr.Markdown("Chat with the latest image on the left at any time by entering your text below.")
737 | llava_chatbot = gr.Chatbot(elem_id="chatbot", label="LLaVA Chatbot", height=750)
738 | with gr.Column(scale=8):
739 | llava_textbox = gr.Textbox(
740 | show_label=False, placeholder="Enter text and press ENTER", container=False
741 | )
742 | with gr.Column(scale=1, min_width=60):
743 | llava_submit_btn = gr.Button(value="Submit", visible=False)
744 |
745 | with gr.Row(visible=False):
746 | upvote_btn = gr.Button(value="👍 Upvote", interactive=False, visible=False)
747 | downvote_btn = gr.Button(value="👎 Downvote", interactive=False, visible=False)
748 | flag_btn = gr.Button(value="⚠️ Flag", interactive=False, visible=False)
749 | regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False, visible=False)
750 | llava_clear_btn = gr.Button(value="🗑️ Clear history", interactive=False, visible=False)
751 | with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
752 | temperature = gr.Slider(
753 | minimum=0.0,
754 | maximum=1.0,
755 | value=0.2,
756 | step=0.1,
757 | interactive=True,
758 | label="Temperature",
759 | visible=True,
760 | )
761 | top_p = gr.Slider(
762 | minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P", visible=True
763 | )
764 | max_output_tokens = gr.Slider(
765 | minimum=0,
766 | maximum=1024,
767 | value=512,
768 | step=64,
769 | interactive=True,
770 | label="Max output tokens",
771 | visible=True,
772 | )
773 |
774 | segment_btn.click(
775 | get_segments,
776 | inputs=[sketch_pad, segment_task, segment_text, mask_dilate_slider, compose_state],
777 | outputs=[segmented_img, masked_background_img, inpainted_background_img, compose_state],
778 | queue=True,
779 | )
780 | segmented_img.select(
781 | changed_objects_handler,
782 | [mask_dilate_slider, compose_state],
783 | [mask_dilate_slider, masked_background_img, compose_state],
784 | )
785 | mask_dilate_slider.release(
786 | get_base_layer_inpainted,
787 | inputs=[compose_state, mask_dilate_slider],
788 | outputs=[masked_background_img, inpainted_background_img, compose_state],
789 | )
790 | compose_btn.click(
791 | get_generated,
792 | [grounding_text_box, compose_fix_seed, compose_rand_seed, compose_state],
793 | [sketch_pad, compose_state],
794 | queue=True,
795 | ).then(save_shared_state, [sketch_pad, shared_state], shared_state).then(
796 | load_shared_state, [shared_state], working_image
797 | )
798 | compose_clear_btn.click(load_shared_state, [shared_state], sketch_pad)
799 |
800 | image_process_mode = gr.Radio(
801 | ["Crop", "Resize", "Pad"], value="Crop", label="Preprocess for non-square image", visible=False
802 | )
803 | models = LLAVA.get_model_list(args)
804 | model_selector = gr.Dropdown(
805 | choices=models,
806 | value=models[0] if len(models) > 0 else "",
807 | interactive=True,
808 | show_label=False,
809 | container=False,
810 | visible=False,
811 | )
812 |
813 | btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, llava_clear_btn]
814 | upvote_btn.click(
815 | LLAVA.upvote_last_response,
816 | [llava_state, model_selector],
817 | [llava_textbox, upvote_btn, downvote_btn, flag_btn],
818 | )
819 | downvote_btn.click(
820 | LLAVA.downvote_last_response,
821 | [llava_state, model_selector],
822 | [llava_textbox, upvote_btn, downvote_btn, flag_btn],
823 | )
824 | flag_btn.click(
825 | LLAVA.flag_last_response, [llava_state, model_selector], [llava_textbox, upvote_btn, downvote_btn, flag_btn]
826 | )
827 | regenerate_btn.click(
828 | LLAVA.regenerate,
829 | [llava_state, image_process_mode],
830 | [llava_state, llava_chatbot, llava_textbox, sketch_pad] + btn_list,
831 | ).then(
832 | LLAVA.http_bot,
833 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
834 | [llava_state, llava_chatbot] + btn_list,
835 | )
836 | llava_clear_btn.click(
837 | LLAVA.clear_history, None, [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list
838 | )
839 |
840 | llava_textbox.submit(
841 | LLAVA.add_text,
842 | [llava_state, llava_textbox, llava_image, image_process_mode],
843 | [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list,
844 | ).then(
845 | LLAVA.http_bot,
846 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
847 | [llava_state, llava_chatbot] + btn_list,
848 | )
849 | llava_submit_btn.click(
850 | LLAVA.add_text,
851 | [llava_state, llava_textbox, llava_image, image_process_mode],
852 | [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list,
853 | ).then(
854 | LLAVA.http_bot,
855 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
856 | [llava_state, llava_chatbot] + btn_list,
857 | )
858 |
859 | if args.model_list_mode == "once":
860 | raise ValueError(f"Unsupported model list mode: {args.model_list_mode}")
861 | elif args.model_list_mode == "reload":
862 | print('disable for debugging')
863 | demo.load(LLAVA.load_demo_refresh_model_list, inputs=None, outputs=[llava_state, model_selector]).then(
864 | switch_to_compose,
865 | [],
866 | [
867 | task,
868 | out_imagebox,
869 | language_instruction,
870 | grounding_instruction,
871 | gligen_clear_btn,
872 | gligen_gen_btn,
873 | gligen_adv_options,
874 | ], # first tab show doesn't need any
875 | ).then(
876 | GLIGEN.clear,
877 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
878 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
879 | queue=False,
880 | )
881 |
882 | else:
883 | raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
884 |
885 | gligen.select(
886 | switch_to_generate,
887 | inputs=[],
888 | outputs=[
889 | task,
890 | out_imagebox,
891 | language_instruction,
892 | grounding_instruction,
893 | gligen_clear_btn,
894 | gligen_gen_btn,
895 | gligen_adv_options,
896 | ],
897 | )
898 | gligen_inpaint.select(
899 | switch_to_inpaint,
900 | inputs=[],
901 | outputs=[
902 | task,
903 | out_imagebox,
904 | language_instruction,
905 | grounding_instruction,
906 | gligen_clear_btn,
907 | gligen_gen_btn,
908 | gligen_adv_options,
909 | ],
910 | queue=False,
911 | )
912 |
913 | compose_tab.select(
914 | switch_to_compose,
915 | [],
916 | [
917 | task,
918 | out_imagebox,
919 | language_instruction,
920 | grounding_instruction,
921 | gligen_clear_btn,
922 | gligen_gen_btn,
923 | gligen_adv_options,
924 | ],
925 | )
926 |
927 | return demo
928 |
929 |
930 | if __name__ == "__main__":
931 | parser = argparse.ArgumentParser()
932 | parser.add_argument("--host", type=str, default="0.0.0.0")
933 | parser.add_argument("--port", type=int)
934 | parser.add_argument("--controller-url", type=str, default="http://localhost:10000")
935 | parser.add_argument("--concurrency-count", type=int, default=8)
936 | parser.add_argument("--model-list-mode", type=str, default="reload", choices=["once", "reload"])
937 | parser.add_argument("--share", action="store_true")
938 | parser.add_argument("--moderate", action="store_true")
939 | parser.add_argument("--embed", action="store_true")
940 | args = parser.parse_args()
941 | LLAVA.set_args(args)
942 |
943 | demo = build_demo()
944 | demo.queue(concurrency_count=1, api_open=False)
945 | demo.launch()
946 |
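947 | # NOTE: --host, --port, --share, and --concurrency-count are parsed above but
948 | # not forwarded to Gradio here. A sketch of honoring them, if that is the
949 | # intent (not part of the original demo):
950 | #
951 | #   demo.queue(concurrency_count=args.concurrency_count, api_open=False)
952 | #   demo.launch(server_name=args.host, server_port=args.port, share=args.share)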
--------------------------------------------------------------------------------
/demo_resources/images/llava_interactive_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LLaVA-VL/LLaVA-Interactive-Demo/5dc12a1c69fedb4185d250c26aff0036b78f11fd/demo_resources/images/llava_interactive_logo.png
--------------------------------------------------------------------------------
/lama_predict.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Example command:
4 | # ./bin/predict.py \
5 | # model.path= \
6 | # indir= \
7 | # outdir=
8 |
9 | import logging
10 | import os
11 | import sys
12 | import traceback
13 |
14 | from saicinpainting.evaluation.utils import move_to_device
15 | from saicinpainting.evaluation.refinement import refine_predict
16 |
17 | os.environ['OMP_NUM_THREADS'] = '1'
18 | os.environ['OPENBLAS_NUM_THREADS'] = '1'
19 | os.environ['MKL_NUM_THREADS'] = '1'
20 | os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
21 | os.environ['NUMEXPR_NUM_THREADS'] = '1'
22 |
23 | import cv2
24 | import hydra
25 | import numpy as np
26 | import torch
27 | import tqdm
28 | import yaml
29 | from omegaconf import DictConfig, OmegaConf
30 | from torch.utils.data._utils.collate import default_collate
31 |
32 | from saicinpainting.training.data.datasets import make_default_val_dataset
33 | from saicinpainting.training.trainers import load_checkpoint
34 | from saicinpainting.utils import register_debug_signal_handlers
35 |
36 | LOGGER = logging.getLogger(__name__)
37 |
38 |
39 | # @hydra.main(config_path='../configs/prediction', config_name='web_server.yaml')
40 | def main(predict_config: DictConfig):
41 | try:
42 | # register_debug_signal_handlers() # kill -10 will result in traceback dumped into log
43 |
44 | device = torch.device(predict_config.device)
45 |
46 | train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
47 | with open(train_config_path, 'r') as f:
48 | train_config = OmegaConf.create(yaml.safe_load(f))
49 |
50 | train_config.training_model.predict_only = True
51 | train_config.visualizer.kind = 'noop'
52 |
53 | out_ext = predict_config.get('out_ext', '.png')
54 |
55 | checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint)
56 | model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu')
57 | model.freeze()
58 | if not predict_config.get('refine', False):
59 | model.to(device)
60 |
61 | if not predict_config.indir.endswith('/'):
62 | predict_config.indir += '/'
63 |
64 | dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)
65 | for img_i in tqdm.trange(len(dataset)):
66 | mask_fname = dataset.mask_filenames[img_i]
67 | cur_out_fname = os.path.join(
68 | predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir) :])[0] + out_ext
69 | )
70 | os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)
71 | batch = default_collate([dataset[img_i]])
72 | if predict_config.get('refine', False):
73 | assert 'unpad_to_size' in batch, "Unpadded size is required for the refinement"
74 |                 # image unpadding is taken care of in the refiner, so that the
75 |                 # output image is the same size as the input image
76 | cur_res = refine_predict(batch, model, **predict_config.refiner)
77 | cur_res = cur_res[0].permute(1, 2, 0).detach().cpu().numpy()
78 | else:
79 | with torch.no_grad():
80 | batch = move_to_device(batch, device)
81 | batch['mask'] = (batch['mask'] > 0) * 1
82 | batch = model(batch)
83 | cur_res = batch[predict_config.out_key][0].permute(1, 2, 0).detach().cpu().numpy()
84 | unpad_to_size = batch.get('unpad_to_size', None)
85 | if unpad_to_size is not None:
86 | orig_height, orig_width = unpad_to_size
87 | cur_res = cur_res[:orig_height, :orig_width]
88 |
89 | cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8')
90 | cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR)
91 | cv2.imwrite(cur_out_fname, cur_res)
92 |
93 | except KeyboardInterrupt:
94 | LOGGER.warning('Interrupted by user')
95 | except Exception as ex:
96 | LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
97 | sys.exit(1)
98 |
99 |
100 | # if __name__ == '__main__':
101 | # main()
102 |
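103 | # A sketch of driving main() programmatically with an OmegaConf config, the
104 | # way lama_server.py does (the paths below are placeholders):
105 | #
106 | #   import yaml
107 | #   from omegaconf import OmegaConf
108 | #
109 | #   with open('configs/prediction/default.yaml') as f:
110 | #       cfg = OmegaConf.create(yaml.safe_load(f))
111 | #   cfg.model.path = '/path/to/big-lama'
112 | #   cfg.indir = '/path/to/input_images'
113 | #   cfg.outdir = '/path/to/output_images'
114 | #   cfg.refine = False
115 | #   main(cfg)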
--------------------------------------------------------------------------------
/lama_server.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, send_file, request
2 | import base64
3 | from PIL import Image
4 | import io
5 |
6 | from lama_predict import main as lama_predict
7 |
8 | import os
9 | import yaml
10 | from omegaconf import OmegaConf
11 |
12 | cwd = os.getcwd()
13 | print(cwd)
14 |
15 | config_path = os.path.join(cwd, "configs/prediction/default.yaml")
16 | with open(config_path, 'r') as f:
17 | config = OmegaConf.create(yaml.safe_load(f))
18 |
19 | config.model.path = os.path.join(cwd, "big-lama")
20 | config.indir = os.path.join(cwd, "web_server_input")
21 | config.outdir = os.path.join(cwd, "web_server_output")
22 | config.refine = False
23 |
24 | app = Flask(__name__)
25 |
26 |
27 | @app.route("/api/v2/image", methods=["GET", "POST"])
28 | def echo_image():
29 | # Get the image data from the request body
30 | json_dict = request.get_json()
31 | print(type(json_dict))
32 | # Get the value of the "image" key, which is the base64 encoded image data
33 | base64_image_data = json_dict["image"]
34 | # print(base64_image_data[0:500])
35 |
36 | image_bytes = base64.b64decode(base64_image_data)
37 | image_stream = io.BytesIO(image_bytes)
38 | image = Image.open(image_stream)
39 | print(image.format_description)
40 | if not os.path.exists("web_server_input"):
41 | os.makedirs("web_server_input")
42 | image.save("web_server_input/server.png")
43 |
44 | base64_mask_data = json_dict["mask"]
45 | image_bytes = base64.b64decode(base64_mask_data)
46 | image_stream = io.BytesIO(image_bytes)
47 | mask = Image.open(image_stream)
48 | print(mask.format_description)
49 | print(mask.format)
50 | print(mask.size)
51 | print(mask.mode)
52 | if mask.mode != "L":
53 | mask = mask.convert("L")
54 | if not os.path.exists("web_server_input"):
55 | os.makedirs("web_server_input")
56 | mask.save("web_server_input/server_mask.png")
57 |
58 | # Apply the mask to the image
59 | # Create a new transparent image with the same size and mode as the image
60 | transparent = Image.new(image.mode, image.size, (0, 0, 0, 0))
61 | # Composite the image and the transparent image using the mask
62 | masked_image = Image.composite(image, transparent, mask)
63 | masked_image.save("server_masked_image.png")
64 |
65 | # Convert the masked image to bytes and create a new stream
66 | masked_image_stream = io.BytesIO()
67 | masked_image.save(masked_image_stream, format='PNG')
68 | masked_image_stream.seek(0)
69 |
70 | lama_predict(config)
71 |
72 | with open("web_server_output/server_mask.png", "rb") as image_file:
73 | image_bytes = image_file.read()
74 | image_inpainted_stream = io.BytesIO(image_bytes)
75 | print(image.format_description)
76 | image_inpainted_stream.seek(0)
77 |
78 | return send_file(image_inpainted_stream, mimetype="image/png")
79 |
80 |
81 | if __name__ == "__main__":
82 | app.run(debug=True, port=9171)
83 |
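84 | # A minimal client sketch (file names are placeholders): the endpoint expects
85 | # base64-encoded "image" and "mask" PNGs in the JSON body and streams back the
86 | # inpainted image as a PNG.
87 | #
88 | #   import base64, requests
89 | #
90 | #   def b64(path):
91 | #       with open(path, 'rb') as f:
92 | #           return base64.b64encode(f.read()).decode('utf-8')
93 | #
94 | #   resp = requests.post('http://localhost:9171/api/v2/image',
95 | #                        json={'image': b64('input.png'), 'mask': b64('mask.png')})
96 | #   with open('inpainted.png', 'wb') as f:
97 | #       f.write(resp.content)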
--------------------------------------------------------------------------------
/llava_interactive.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 | import io
4 | import os
5 | import sys
6 |
7 | import cv2
8 | import gradio as gr
9 | import numpy as np
10 | import requests
11 | from functools import partial
12 | from PIL import Image, ImageOps
13 |
14 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'GLIGEN/demo'))
15 | import GLIGEN.demo.app as GLIGEN
16 |
17 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'SEEM/demo_code'))
18 | import SEEM.demo_code.app as SEEM  # GLIGEN.demo.app must be imported before this one; otherwise it hits a protobuf error
19 |
20 | sys.path.append(os.path.join(os.environ['LLAVA_INTERACTIVE_HOME'], 'LLaVA'))
21 | import LLaVA.llava.serve.gradio_web_server as LLAVA
22 | from LLaVA.llava.utils import (
23 | ModerationOptions,
24 | does_image_violate_azure_content_safety,
25 | does_text_violate_azure_content_safety,
26 | violates_guardlist_moderation,
27 | )
28 |
29 |
30 | class ImageMask(gr.components.Image):
31 | """
32 |     Sets: source="upload", tool="sketch"
33 | """
34 |
35 | is_template = True
36 |
37 | def __init__(self, **kwargs):
38 | super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
39 |
40 | def preprocess(self, x):
41 | if isinstance(x, str):
42 | x = {'image': x, 'mask': x}
43 | elif isinstance(x, dict):
44 | if x['mask'] is None and x['image'] is None:
45 |                 pass  # nothing to normalize
46 | elif x['image'] is None:
47 | x['image'] = str(x['mask'])
48 | elif x['mask'] is None:
49 | x['mask'] = str(
50 | x['image']
51 |                 )  # not sure why image/mask is None sometimes; this prevents preprocess() from crashing
52 | elif x is not None:
53 | assert False, 'Unexpected type {0} in ImageMask preprocess()'.format(type(x))
54 |
55 | return super().preprocess(x)
56 |
57 |
58 | css = """
59 | #compose_btn {
60 | --tw-border-opacity: 1;
61 | border-color: rgb(255 216 180 / var(--tw-border-opacity));
62 | --tw-gradient-from: rgb(255 216 180 / .7);
63 | --tw-gradient-to: rgb(255 216 180 / 0);
64 | --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
65 | --tw-gradient-to: rgb(255 176 102 / .8);
66 | --tw-text-opacity: 1;
67 | color: rgb(238 116 0 / var(--tw-text-opacity));
68 | }
69 | """
70 |
71 |
72 | def get_bounding_box(img):
73 | # Get the indices of all non-zero pixels
74 |     if not np.any(img):  # protect against an empty img
75 | return None
76 | non_zero_indices = np.nonzero(img)
77 |
78 | # Get the minimum and maximum indices for each axis
79 | min_x = np.min(non_zero_indices[1])
80 | max_x = np.max(non_zero_indices[1])
81 | min_y = np.min(non_zero_indices[0])
82 | max_y = np.max(non_zero_indices[0])
83 |
84 | # Return the bounding box as a tuple of (min_x, min_y, max_x, max_y)
85 | return (min_x, min_y, max_x, max_y)
86 |
87 |
88 | def composite_all_layers(base, objects): # debugging use only
89 | img = base.copy()
90 | for obj in objects:
91 | for i in range(obj['img'].shape[0]):
92 | for j in range(obj['img'].shape[1]):
93 | if obj['img'][i, j, 3] != 0:
94 | img[i, j] = obj['img'][i, j]
95 | return img
96 |
97 |
98 | def changed_objects_handler(mask_dilate_slider, state, evt: gr.SelectData):
99 | state['move_no'] += 1
100 |
101 | pos_x, pos_y = evt.index # obj moved out of scene is signaled by (10000, 10000)
102 | obj_id = 255 - evt.value
103 | print(f"obj {obj_id} moved by {pos_x}, {pos_y}")
104 |
105 | img = state['base_layer']
106 | for obj in state['changed_objects']:
107 | if obj['id'] == obj_id:
108 | img = obj['img']
109 | state['changed_objects'].remove(obj)
110 | break
111 |
112 | new_img = np.zeros_like(img)
113 | bbox = None
114 | for i in range(img.shape[0]):
115 | for j in range(img.shape[1]):
116 | if img[i, j, 3] == obj_id:
117 | new_i = i + pos_y
118 | new_j = j + pos_x
119 | if new_i >= 0 and new_i < img.shape[0] and new_j >= 0 and new_j < img.shape[1]:
120 | new_img[new_i, new_j] = img[i, j]
121 | img[i, j] = 0
122 |
123 | bbox = get_bounding_box(new_img) # returns None if obj moved out of scene
124 | print("bbox: ", bbox)
125 | state['changed_objects'].append({'id': obj_id, 'img': new_img, 'text': state['segment_info'][obj_id], 'box': bbox})
126 |
127 | # Enable for debugging only. See if the composited image is correct.
128 | # composed_img_updated = composite_all_layers(state['base_layer'], state['changed_objects'])
129 | # filename = str(f"composited_imge_{state['move_no']}") + ".png"
130 | # cv2.imwrite(filename, composed_img_updated[:, :, 0:3])
131 |
132 | return mask_dilate_slider, state['base_layer_masked'], state
133 |
134 |
135 | def get_base_layer_mask(state):
136 | changed_obj_id = []
137 | for obj in state['changed_objects']:
138 | changed_obj_id.append(obj['id'])
139 |
140 | # union of mask of all objects
141 |     img = state['original_segmented']
142 | mask = np.zeros(img.shape[:2], dtype=np.uint8)
143 | for i in range(img.shape[0]):
144 | for j in range(img.shape[1]):
145 | if img[i, j, 3] in changed_obj_id:
146 | mask[i, j] = 255
147 | state['base_layer_mask'] = mask
148 |
149 | mask_image = Image.fromarray(mask)
150 | if mask_image.mode != "L":
151 | mask_image = mask_image.convert("L")
152 | mask_image = ImageOps.invert(mask_image)
153 | # mask_image.save("mask_image.png")
154 |
155 |     img = state['original_segmented']
156 | orig_image = Image.fromarray(img[:, :, :3])
157 | orig_image.save("orig_image.png")
158 | transparent = Image.new(orig_image.mode, orig_image.size, (0, 0, 0, 0))
159 | masked_image = Image.composite(orig_image, transparent, mask_image)
160 | # masked_image.save("get_masked_background_image.png")
161 |
162 | return masked_image, state
163 |
164 |
165 | def get_inpainted_background(state, mask_dilate_slider):
166 | # Define the URL of the REST API endpoint
167 | url = "http://localhost:9171/api/v2/image"
168 |
169 |     img = state['original_segmented']
170 |     if not isinstance(img, Image.Image):
171 | img = Image.fromarray(img)
172 | # Create a BytesIO object and save the image there
173 | buffer = io.BytesIO()
174 | img.save(buffer, format="PNG")
175 | # Get the bytes value from the buffer
176 | img_bytes = buffer.getvalue()
177 |
178 | encoded_string = base64.b64encode(img_bytes).decode("utf-8")
179 |
180 | if mask_dilate_slider != 0:
181 | mask = state['base_layer_mask_enlarged']
182 | else:
183 | mask = state['base_layer_mask']
184 |     if not isinstance(mask, Image.Image):
185 | mask = Image.fromarray(mask)
186 |
187 | # mask has background as 1, lama needs object to be 1
188 | if mask.mode != "L":
189 | mask = mask.convert("L")
190 | mask = ImageOps.invert(mask)
191 |
192 | # Create a BytesIO object and save the image there
193 | buffer = io.BytesIO()
194 | mask.save(buffer, format="PNG")
195 | # Get the bytes value from the buffer
196 | mask_bytes = buffer.getvalue()
197 |
198 | encoded_string_mask = base64.b64encode(mask_bytes).decode("utf-8")
199 |
200 | # Create a POST request to the endpoint
201 | headers = {"Content-Type": "application/json"}
202 | data = {"image": encoded_string, "mask": encoded_string_mask}
203 | response = requests.post(url, headers=headers, json=data)
204 |
205 | # Check the status code of the response
206 | if response.status_code == 200:
207 | # The request was successful
208 | print("Image received successfully")
209 | image_data = response.content
210 | # Create a io.BytesIO object from the image data
211 | dataBytesIO = io.BytesIO(image_data)
212 | # Open the image using Image.open()
213 | image = Image.open(dataBytesIO)
214 | # image.save("lama_returned_image.png")
215 |
216 | else:
217 | # The request failed
218 | print("Error: HTTP status code {}".format(response.status_code))
219 | print(response.text)
220 |
221 | return image
222 |
223 |
224 | def get_enlarged_masked_background(state, mask_dilate_slider):
225 | mask = state['base_layer_mask']
226 |
227 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (mask_dilate_slider, mask_dilate_slider))
228 | mask_dilated = cv2.dilate(mask, kernel)
229 |
230 | # mask the original
231 | mask_image = Image.fromarray(mask_dilated)
232 | if mask_image.mode != "L":
233 | mask_image = mask_image.convert("L")
234 | mask_image = ImageOps.invert(mask_image)
235 | state['base_layer_mask_enlarged'] = mask_image
236 | # mask_image.save("enlarged_mask_image.png")
237 |
238 |     img = state['original_segmented']
239 | orig_image = Image.fromarray(img[:, :, :3])
240 | transparent = Image.new(orig_image.mode, orig_image.size, (0, 0, 0, 0))
241 | masked_image = Image.composite(orig_image, transparent, mask_image)
242 | # masked_image.save("enlarged_masked_background_image.png")
243 |
244 | return masked_image, state
245 |
246 |
247 | def get_base_layer_inpainted(state, mask_dilate_slider):
248 | masked_img, state = get_enlarged_masked_background(state, mask_dilate_slider)
249 | inpainted_img = get_inpainted_background(state, mask_dilate_slider)
250 | state['base_layer_inpainted'] = np.array(inpainted_img)
251 | return masked_img, inpainted_img, state
252 |
253 |
254 | def log_image_and_mask(img, mask): # for debugging use only
255 | counter = 0
256 | for filename in os.listdir('.'):
257 | if filename.startswith('img_') and filename.endswith('.png'):
258 | try:
259 | num = int(filename[4:-4])
260 | if num > counter:
261 | counter = num
262 | except ValueError:
263 | pass
264 | counter += 1
265 | cv2.imwrite(f"img_{counter}.png", img)
266 | cv2.imwrite(f"img_{counter}_mask.png", mask.astype(np.uint8) * 255)
267 |
268 |
269 | def get_segments(img, task, reftxt, mask_dilate_slider, state):
270 | assert isinstance(state, dict)
271 |     state['original_segmented'] = None
272 | state['base_layer'] = None
273 | state['base_layer_masked'] = None
274 | state['base_layer_mask'] = None
275 | state['base_layer_mask_enlarged'] = None
276 | state['base_layer_inpainted'] = None
277 | state['segment_info'] = None
278 | state['seg_boxes'] = {}
279 | state['changed_objects'] = []
280 | state['move_no'] = 0
281 |
282 | print("Calling SEEM_app.inference")
283 |
284 | if isinstance(img['image'], np.ndarray):
285 | pil_image = Image.fromarray(img['image'])
286 | if isinstance(img['mask'], np.ndarray):
287 | pil_mask = Image.fromarray(img['mask'])
288 | img = {'image': pil_image, 'mask': pil_mask}
289 | img_ret, seg_info = SEEM.inference(img, task, reftxt=reftxt)
290 |     # SEEM doesn't always respect the input img dimensions
291 | tgt_size = (img['image'].width, img['image'].height)
292 | img_ret = img_ret.resize(tgt_size, resample=Image.Resampling.NEAREST)
293 |     state['original_segmented'] = np.array(img_ret).copy()
294 | state['base_layer'] = np.array(img_ret)
295 | state['segment_info'] = seg_info
296 | img_ret_array = np.array(img_ret)
297 | img_ret_array[:, :, 3] = 255 - img_ret_array[:, :, 3]
298 |     # NOTE: if written out as a PNG, the pixel values get messed up; same reason the client-side colors look weird.
299 | # cv2.imwrite(f"get_segments_img_ret.bmp", img_ret_array)
300 |
301 |     for obj_id, label in seg_info.items():
302 | obj_img = img_ret_array[:, :, 3] == 255 - obj_id
303 | # cv2.imwrite(f"img_{obj_id}.png", obj_img.astype(np.uint8) * 255)
304 | # log_image_and_mask(np.array(img['image']), obj_img)
305 | bbox = get_bounding_box(obj_img)
306 |         print(f"obj_id={obj_id}, label={label}, bbox={bbox}")
307 | state['seg_boxes'][obj_id] = bbox
308 |
309 | # add a special event, obj stays at the original spot
310 | data = {}
311 | data["index"] = (0, 0)
312 | data["value"] = 254 # ==> 1, the only object allowed for now
313 | data["selected"] = True
314 | evt = gr.SelectData(None, data)
315 | mask_dilate_slider, _, state = changed_objects_handler(mask_dilate_slider, state, evt)
316 |
317 | state['base_layer_masked'], state = get_base_layer_mask(state)
318 |     enlarged_masked_background = state['base_layer_masked']  # fallback; avoids a NameError when dilation is 0
319 |     if mask_dilate_slider != 0:
320 |         enlarged_masked_background, state = get_enlarged_masked_background(state, mask_dilate_slider)
321 |     state['base_layer_inpainted'] = np.array(get_inpainted_background(state, mask_dilate_slider))
322 | return Image.fromarray(img_ret_array), enlarged_masked_background, state['base_layer_inpainted'], state
323 |
324 |
325 | def get_generated(grounding_text, fix_seed, rand_seed, state):
326 |     if 'base_layer_inpainted' not in state:
327 | raise gr.Error('The segmentation step must be completed first before generating a new image')
328 |
329 | inpainted_background_img = state['base_layer_inpainted']
330 | assert inpainted_background_img is not None, 'base layer should be inpainted after segment'
331 |
332 | state['boxes'] = []
333 | for items in state['changed_objects']:
334 | if items['box'] is not None:
335 | state['boxes'].append(items['box'])
336 |
337 | if len(state['boxes']) == 0:
338 | if len(grounding_text) != 0:
339 | grounding_text = []
340 | print("No grounding box found. Grounding text will be ignored.")
341 |         return inpainted_background_img.copy(), state
342 |
343 | print('Calling GLIGEN_app.generate')
344 | print('grounding_text: ', grounding_text)
345 | print(state['boxes'], len(state['boxes']))
346 | assert len(state['boxes']) == 1, 'Only handle one segmented object at a time'
347 |     if len(grounding_text) == 0:  # most likely the user forgot to drag the object and gave no grounding text
348 |         raise gr.Error('Please provide grounding text matching the identified object')
349 |
350 | if len(args.moderate) > 0:
351 | does_text_violate_policy = False
352 |
353 | if not does_text_violate_policy and (
354 | ModerationOptions.ALL.value in args.moderate
355 | or ModerationOptions.GLIGEN_INPUT_TEXT_GUARDLIST.value in args.moderate
356 | ):
357 | does_text_violate_policy |= violates_guardlist_moderation(grounding_text)
358 | if not does_text_violate_policy and (
359 | ModerationOptions.ALL.value in args.moderate
360 | or ModerationOptions.GLIGEN_INPUT_TEXT_AICS.value in args.moderate
361 | ):
362 | does_text_violate_policy |= does_text_violate_azure_content_safety(grounding_text)
363 |
364 | if does_text_violate_policy:
365 | return inpainted_background_img.copy(), state
366 |
367 | out_gen_1, _, _, _, state = GLIGEN.generate(
368 | task='Grounded Inpainting',
369 | language_instruction='',
370 | grounding_texts=grounding_text,
371 | sketch_pad=inpainted_background_img,
372 | alpha_sample=0.3,
373 | guidance_scale=7.5,
374 | batch_size=1,
375 | fix_seed=fix_seed,
376 | rand_seed=rand_seed,
377 | use_actual_mask=False,
378 | append_grounding=True,
379 | style_cond_image=None,
380 | inpainting_image=inpainted_background_img,
381 | inpainting_mask=None,
382 | state=state,
383 | )
384 |
385 | image = out_gen_1['value']
386 |
387 | if len(args.moderate) > 0:
388 | does_image_violate_policy = False
389 |
390 | if not does_image_violate_policy and (
391 | ModerationOptions.ALL.value in args.moderate
392 | or ModerationOptions.GLIGEN_OUTPUT_IMAGE_AICS.value in args.moderate
393 | ):
394 | does_image_violate_policy |= does_image_violate_azure_content_safety(image)
395 |
396 | if does_image_violate_policy:
397 | return inpainted_background_img.copy(), state
398 |
399 | return image, state
400 |
401 |
402 | def get_generated_full(
403 | task,
404 | language_instruction,
405 | grounding_instruction,
406 | sketch_pad,
407 | alpha_sample,
408 | guidance_scale,
409 | batch_size,
410 | fix_seed,
411 | rand_seed,
412 | use_actual_mask,
413 | append_grounding,
414 | style_cond_image,
415 | state,
416 | ):
417 | out_gen_1, _, _, _, state = GLIGEN.generate(
418 | task,
419 | language_instruction,
420 | grounding_instruction,
421 | sketch_pad,
422 | alpha_sample,
423 | guidance_scale,
424 | batch_size,
425 | fix_seed,
426 | rand_seed,
427 | use_actual_mask,
428 | append_grounding,
429 | style_cond_image,
430 | state,
431 | )
432 | return out_gen_1['value'], state
433 |
434 |
435 | def gligen_change_task(state):
436 | if state['working_image'] is not None:
437 | task = "Grounded Inpainting"
438 | else:
439 | task = "Grounded Generation"
440 | return task
441 |
442 |
443 | def clear_sketch_pad_mask(sketch_pad_image):
444 | sketch_pad = ImageMask.update(value=sketch_pad_image, visible=True)
445 | return sketch_pad
446 |
447 |
448 | def save_shared_state(img, state):
449 | if isinstance(img, dict) and 'image' in img:
450 | state['working_image'] = img['image']
451 | else:
452 | state['working_image'] = img
453 | return state
454 |
455 |
456 | def load_shared_state(state, task=None):
457 | if task == "Grounded Generation":
458 | return None
459 | else:
460 | return state['working_image']
461 |
462 |
463 | def update_shared_state(state, task):
464 | if task == "Grounded Generation":
465 | state['working_image'] = None
466 | return state
467 |
468 |
469 | def update_sketch_pad_trigger(sketch_pad_trigger, task):
470 | if task == "Grounded Generation":
471 | sketch_pad_trigger = sketch_pad_trigger + 1
472 | return sketch_pad_trigger
473 |
474 |
475 | def clear_grounding_info(state):
476 | state['boxes'] = []
477 | state['masks'] = []
478 | return state, ''
479 |
480 |
481 | def switch_to_generate():
482 | task = "Grounded Generation"
483 | return (
484 | task,
485 | gr.Image.update(visible=True),
486 | gr.Textbox.update(visible=True),
487 | gr.Textbox.update(visible=True),
488 | gr.Button.update(visible=True),
489 | gr.Button.update(visible=True),
490 | gr.Accordion.update(visible=True),
491 | )
492 |
493 |
494 | def switch_to_inpaint():
495 | task = "Grounded Inpainting"
496 | return (
497 | task,
498 | gr.Image.update(visible=True),
499 | gr.Textbox.update(visible=False),
500 | gr.Textbox.update(visible=True),
501 | gr.Button.update(visible=True),
502 | gr.Button.update(visible=True),
503 | gr.Accordion.update(visible=True),
504 | )
505 |
506 |
507 | def switch_to_compose():
508 | task = "Compose"
509 | return (
510 | task,
511 | gr.Image.update(visible=False),
512 | gr.Textbox.update(visible=False),
513 | gr.Textbox.update(visible=False),
514 | gr.Button.update(visible=False),
515 | gr.Button.update(visible=False),
516 | gr.Accordion.update(visible=False),
517 | )
518 |
519 |
520 | def copy_to_llava_input(img):
521 | print('WORKING IMAGE CHANGED!!!!')
522 |     if not isinstance(img, Image.Image):
523 | img = Image.fromarray(img)
524 | return img
525 |
526 |
527 | title_markdown = """
528 | # 🌋 LLaVA-Interactive
529 |
530 | ### [Demo Walkthrough Video](https://youtu.be/r01w9NerNtA) - [Project Page](https://llava-vl.github.io/llava-interactive/) - [Code](https://github.com/LLaVA-VL/LLaVA-Interactive-Demo) | Models: [LLaVA](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md) + [GLIGEN](https://github.com/gligen/GLIGEN) + [SEEM](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once) + [LaMa](https://github.com/advimman/lama) | 📚 [LLaVA-Interactive](https://arxiv.org/abs/2311.00571) - [LLaVA-v1.5](https://arxiv.org/abs/2310.03744)
531 | """
532 |
533 | tos_markdown = """
534 | ### Terms of use
535 |
536 | By using this service, users are required to agree to the following terms:
537 | The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
538 | Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
539 | For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
540 | """
541 |
542 |
543 | license_markdown = """
544 | ### License
545 |
546 | This project, including LLaVA and SEEM, is licensed under the Apache License; see the LICENSE file for more details. The GLIGEN project is licensed under the MIT License.
547 |
548 | The service is a research preview intended for non-commercial use only, subject to the model License of LLaMA, Terms of Use of the data generated by OpenAI, and Privacy Practices of ShareGPT. Please contact us if you find any potential violation.
549 | """
550 |
551 |
552 | def build_demo():
553 | demo = gr.Blocks(title="🌋 LLaVA-Interactive", css=css + GLIGEN.css)
554 | with demo:
555 | compose_state = gr.State(
556 | {
557 | 'boxes': [],
558 | 'move_no': 0,
559 | 'base_layer': None,
560 | 'segment_info': None,
561 | 'seg_boxes': {},
562 | 'changed_objects': [],
563 | }
564 | )
565 | llava_state = gr.State()
566 | shared_state = gr.State({'working_image': None})
567 | gligen_state = gr.State({'draw_box': True})
568 |
569 | gr.Markdown(title_markdown)
570 |
571 | gr.Markdown(
572 | '**Experience interactive multimodal chatting and image manipulation. Select a tab for your task and follow the instructions. Switch tasks anytime and ask questions in the chat window.**'
573 | )
574 |
575 | with gr.Row(visible=False):
576 | working_image = gr.Image(
577 | label="Working Image", type="numpy", elem_id="working_image", visible=False, interactive=False
578 | ) # hidden image to save current working image
579 | # for gligen
580 | sketch_pad_trigger = gr.Number(value=0, visible=False)
581 | sketch_pad_resize_trigger = gr.Number(value=0, visible=False)
582 | init_white_trigger = gr.Number(value=0, visible=False)
583 | image_scale = gr.Number(value=0, elem_id="image_scale", visible=False)
584 | task = gr.Radio(
585 | choices=["Grounded Generation", 'Grounded Inpainting', 'Compose'],
586 | type="value",
587 | value="Grounded Inpainting",
588 | label="Task",
589 | visible=False,
590 | )
591 |
592 | with gr.Row(equal_height=False):
593 | with gr.Column():
594 | with gr.Row():
595 | sketch_pad = ImageMask(
596 | label="Sketch Pad",
597 | type="numpy",
598 | shape=(512, 512),
599 | width=384,
600 | elem_id="img2img_image",
601 | brush_radius=20.0,
602 | visible=True,
603 | )
604 |
605 | compose_tab = gr.Tab("Remove or Change Objects")
606 | with compose_tab:
607 | gr.Markdown(
608 |                     "Segment an object by drawing a stroke or entering referring text, then press the Segment button. Drag the highlighted object to move it; to remove it, drag it out of the frame. To replace a removed object with a new one, enter grounding text and press the Generate button repeatedly until you like the image."
609 | )
610 | with gr.Row().style(equal_height=False):
611 | with gr.Column():
612 | with gr.Group():
613 | with gr.Column():
614 | with gr.Row():
615 | segment_task = gr.Radio(
616 | ["Stroke", "Text"], value="Stroke", label='Choose segmentation method'
617 | )
618 | segment_text = gr.Textbox(label="Enter referring text")
619 | segment_btn = gr.Button("Segment", elem_id="segment-btn")
620 |
621 | with gr.Group():
622 | segmented_img = gr.Image(label="Move or delete object", tool="compose", height=256)
623 |
624 | with gr.Group():
625 | with gr.Column():
626 | grounding_text_box = gr.Textbox(
627 | label="Enter grounding text for generating a new image"
628 | )
629 | with gr.Row():
630 | compose_clear_btn = gr.Button("Clear", elem_id="compose_clear_btn")
631 | compose_btn = gr.Button("Generate", elem_id="compose_btn")
632 |
633 | with gr.Accordion("Advanced Options", open=False):
634 | with gr.Row():
635 | masked_background_img = gr.Image(
636 | label="Background", type='pil', interactive=False, height=256
637 | )
638 | inpainted_background_img = gr.Image(
639 | label="Inpainted Background", type='pil', interactive=False, height=256
640 | )
641 | mask_dilate_slider = gr.Slider(
642 | minimum=0.0,
643 | maximum=100,
644 | value=50,
645 | step=2,
646 | interactive=True,
647 | label="Mask dilation",
648 | visible=True,
649 | scale=20,
650 | )
651 | with gr.Row(visible=False):
652 | compose_fix_seed = gr.Checkbox(value=False, label="Fixed seed", visible=False)
653 | compose_rand_seed = gr.Slider(
654 | minimum=0, maximum=1000, step=1, value=0, label="Seed", visible=False
655 | )
656 |
657 | gligen_inpaint = gr.Tab("Inpaint New Objects")
658 | with gligen_inpaint:
659 | gr.Markdown(
660 | "Add a new object to the image by drawing its bounding box and giving an instruction. Press the “generate” button repeatedly until you like the image. Press “clear” to accept the image and start over with another object."
661 | )
662 |
663 | gligen = gr.Tab("Generate New Image")
664 | with gligen:
665 | gr.Markdown(
666 | "Generate a new image by giving a language instruction below. Draw a bounding box and give an instruction for any specific objects that need to be grounded in certain places. Hit the “generate” button repeatedly until you get the image you want."
667 | )
668 |
669 | with gr.Group(visible=False):
670 | language_instruction = gr.Textbox(
671 | label="Language instruction", elem_id='language_instruction', visible=False
672 | )
673 | grounding_instruction = gr.Textbox(
674 | label="Grounding instruction (Separated by semicolon)",
675 | elem_id='grounding_instruction',
676 | visible=False,
677 | )
678 | with gr.Row():
679 | gligen_clear_btn = gr.Button(value='Clear', visible=False)
680 | gligen_gen_btn = gr.Button(value='Generate', elem_id="generate-btn", visible=False)
681 |
682 | with gr.Group():
683 | out_imagebox = gr.Image(type="pil", label="Parsed Sketch Pad", height=256, visible=False)
684 |
685 | gligen_adv_options = gr.Accordion("Advanced Options", open=False, visible=False)
686 | with gligen_adv_options:
687 | with gr.Column():
688 | alpha_sample = gr.Slider(
689 | minimum=0, maximum=1.0, step=0.1, value=0.3, label="Scheduled Sampling (τ)"
690 | )
691 | guidance_scale = gr.Slider(minimum=0, maximum=50, step=0.5, value=7.5, label="Guidance Scale")
692 |
693 | with gr.Row(visible=False):
694 | batch_size = gr.Slider(
695 | minimum=1, maximum=4, step=1, value=1, label="Number of Samples", visible=False
696 | )
697 | append_grounding = gr.Checkbox(
698 | value=True, label="Append grounding instructions to the caption", visible=False
699 | )
700 | use_actual_mask = gr.Checkbox(value=False, label="Use actual mask for inpainting", visible=False)
701 | fix_seed = gr.Checkbox(value=False, label="Fixed seed", visible=False)
702 | rand_seed = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Seed", visible=False)
703 | use_style_cond = gr.Checkbox(value=False, label="Enable Style Condition", visible=False)
704 | style_cond_image = gr.Image(type="pil", label="Style Condition", visible=False, interactive=False)
705 |
706 | controller = GLIGEN.Controller()
707 | sketch_pad.edit(
708 | GLIGEN.draw,
709 | inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, gligen_state],
710 | outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, gligen_state],
711 | queue=False,
712 | )
713 | llava_image = gr.Image(label='sketch_pad_image', type='pil', visible=False, interactive=False)
714 | working_image.change(copy_to_llava_input, [working_image], [llava_image])
715 | sketch_pad.upload(save_shared_state, inputs=[sketch_pad, shared_state], outputs=shared_state).then(
716 | load_shared_state, [shared_state], working_image
717 | )
718 | grounding_instruction.change(
719 | GLIGEN.draw,
720 | inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, gligen_state],
721 | outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, gligen_state],
722 | queue=False,
723 | )
724 | gligen_clear_btn.click(
725 | GLIGEN.clear,
726 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
727 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
728 | queue=False,
729 | ).then(clear_grounding_info, gligen_state, [gligen_state, grounding_instruction]).then(
730 | load_shared_state, [shared_state], sketch_pad
731 | ).then(
732 | update_sketch_pad_trigger, [sketch_pad_trigger, task], sketch_pad_trigger
733 | )
734 | task.change(
735 | partial(GLIGEN.clear, switch_task=True),
736 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
737 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
738 | queue=False,
739 | ).then(load_shared_state, [shared_state, task], sketch_pad).then(
740 | update_sketch_pad_trigger, [sketch_pad_trigger, task], sketch_pad_trigger
741 | ).then(
742 | clear_grounding_info, gligen_state, [gligen_state, grounding_instruction]
743 | )
744 | sketch_pad_trigger.change(
745 | controller.init_white,
746 | inputs=[init_white_trigger],
747 | outputs=[sketch_pad, image_scale, init_white_trigger],
748 | queue=False,
749 | )
750 | sketch_pad_resize_trigger.change(
751 | controller.resize_masked, inputs=[gligen_state], outputs=[sketch_pad, gligen_state], queue=False
752 | )
753 |
754 | gligen_gen_btn.click(
755 | get_generated_full,
756 | inputs=[
757 | task,
758 | language_instruction,
759 | grounding_instruction,
760 | sketch_pad,
761 | alpha_sample,
762 | guidance_scale,
763 | batch_size,
764 | fix_seed,
765 | rand_seed,
766 | use_actual_mask,
767 | append_grounding,
768 | style_cond_image,
769 | gligen_state,
770 | ],
771 | outputs=[sketch_pad, gligen_state],
772 | queue=True,
773 | ).then(save_shared_state, [sketch_pad, shared_state], shared_state).then(
774 | load_shared_state, [shared_state], working_image
775 | )
776 |
777 | sketch_pad_resize_trigger.change(
778 | None, None, sketch_pad_resize_trigger, _js=GLIGEN.rescale_js, queue=False
779 | )
780 | init_white_trigger.change(None, None, init_white_trigger, _js=GLIGEN.rescale_js, queue=False)
781 | use_style_cond.change(
782 | lambda cond: gr.Image.update(visible=cond), use_style_cond, style_cond_image, queue=False
783 | )
784 | task.change(
785 | controller.switch_task_hide_cond,
786 | inputs=task,
787 | outputs=[use_style_cond, style_cond_image, alpha_sample, use_actual_mask],
788 | queue=False,
789 | )
790 |
791 | with gr.Column():
792 | llava_chatbot = gr.Chatbot(
793 | elem_id="chatbot",
794 | label="Chat with the latest image on the left at any time by entering your text below.",
795 | height=750,
796 | )
797 | with gr.Column(scale=8):
798 | llava_textbox = gr.Textbox(
799 | show_label=False, placeholder="Enter text and press ENTER", container=False
800 | )
801 | with gr.Column(scale=1, min_width=60):
802 | llava_submit_btn = gr.Button(value="Submit", visible=False)
803 |
804 | with gr.Row(visible=False):
805 | upvote_btn = gr.Button(value="👍 Upvote", interactive=False, visible=False)
806 | downvote_btn = gr.Button(value="👎 Downvote", interactive=False, visible=False)
807 | flag_btn = gr.Button(value="⚠️ Flag", interactive=False, visible=False)
808 | regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False, visible=False)
809 | llava_clear_btn = gr.Button(value="🗑️ Clear history", interactive=False, visible=False)
810 | with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
811 | temperature = gr.Slider(
812 | minimum=0.0,
813 | maximum=1.0,
814 | value=0.2,
815 | step=0.1,
816 | interactive=True,
817 | label="Temperature",
818 | visible=True,
819 | )
820 | top_p = gr.Slider(
821 | minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P", visible=True
822 | )
823 | max_output_tokens = gr.Slider(
824 | minimum=0,
825 | maximum=1024,
826 | value=512,
827 | step=64,
828 | interactive=True,
829 | label="Max output tokens",
830 | visible=True,
831 | )
832 |
833 | segment_btn.click(
834 | get_segments,
835 | inputs=[sketch_pad, segment_task, segment_text, mask_dilate_slider, compose_state],
836 | outputs=[segmented_img, masked_background_img, inpainted_background_img, compose_state],
837 | queue=True,
838 | )
839 | segmented_img.select(
840 | changed_objects_handler,
841 | [mask_dilate_slider, compose_state],
842 | [mask_dilate_slider, masked_background_img, compose_state],
843 | )
844 | mask_dilate_slider.release(
845 | get_base_layer_inpainted,
846 | inputs=[compose_state, mask_dilate_slider],
847 | outputs=[masked_background_img, inpainted_background_img, compose_state],
848 | )
849 | compose_btn.click(
850 | get_generated,
851 | [grounding_text_box, compose_fix_seed, compose_rand_seed, compose_state],
852 | [sketch_pad, compose_state],
853 | queue=True,
854 | ).then(save_shared_state, [sketch_pad, shared_state], shared_state).then(
855 | load_shared_state, [shared_state], working_image
856 | )
857 | compose_clear_btn.click(load_shared_state, [shared_state], sketch_pad)
858 |
859 | image_process_mode = gr.Radio(
860 | ["Crop", "Resize", "Pad"], value="Crop", label="Preprocess for non-square image", visible=False
861 | )
862 | models = LLAVA.get_model_list(args)
863 | model_selector = gr.Dropdown(
864 | choices=models,
865 | value=models[0] if len(models) > 0 else "",
866 | interactive=True,
867 | show_label=False,
868 | container=False,
869 | visible=False,
870 | )
871 |
872 | btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, llava_clear_btn]
873 | upvote_btn.click(
874 | LLAVA.upvote_last_response,
875 | [llava_state, model_selector],
876 | [llava_textbox, upvote_btn, downvote_btn, flag_btn],
877 | )
878 | downvote_btn.click(
879 | LLAVA.downvote_last_response,
880 | [llava_state, model_selector],
881 | [llava_textbox, upvote_btn, downvote_btn, flag_btn],
882 | )
883 | flag_btn.click(
884 | LLAVA.flag_last_response, [llava_state, model_selector], [llava_textbox, upvote_btn, downvote_btn, flag_btn]
885 | )
886 | regenerate_btn.click(
887 | LLAVA.regenerate,
888 | [llava_state, image_process_mode],
889 | [llava_state, llava_chatbot, llava_textbox, sketch_pad] + btn_list,
890 | ).then(
891 | LLAVA.http_bot,
892 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
893 | [llava_state, llava_chatbot] + btn_list,
894 | )
895 | llava_clear_btn.click(
896 | LLAVA.clear_history, None, [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list
897 | )
898 |
899 | llava_textbox.submit(
900 | LLAVA.add_text,
901 | [llava_state, llava_textbox, llava_image, image_process_mode],
902 | [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list,
903 | ).then(
904 | LLAVA.http_bot,
905 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
906 | [llava_state, llava_chatbot] + btn_list,
907 | )
908 | llava_submit_btn.click(
909 | LLAVA.add_text,
910 | [llava_state, llava_textbox, llava_image, image_process_mode],
911 | [llava_state, llava_chatbot, llava_textbox, llava_image] + btn_list,
912 | ).then(
913 | LLAVA.http_bot,
914 | [llava_state, model_selector, temperature, top_p, max_output_tokens],
915 | [llava_state, llava_chatbot] + btn_list,
916 | )
917 |
918 | if args.model_list_mode == "once":
919 | raise ValueError(f"Unsupported model list mode: {args.model_list_mode}")
920 | elif args.model_list_mode == "reload":
921 | print('disable for debugging')
922 | demo.load(LLAVA.load_demo_refresh_model_list, inputs=None, outputs=[llava_state, model_selector]).then(
923 | switch_to_compose,
924 | [],
925 | [
926 | task,
927 | out_imagebox,
928 | language_instruction,
929 | grounding_instruction,
930 | gligen_clear_btn,
931 | gligen_gen_btn,
932 | gligen_adv_options,
933 | ], # first tab show doesn't need any
934 | ).then(
935 | GLIGEN.clear,
936 | inputs=[task, sketch_pad_trigger, batch_size, gligen_state],
937 | outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, gligen_state],
938 | queue=False,
939 | )
940 |
941 | else:
942 | raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
943 |
944 | gligen.select(
945 | switch_to_generate,
946 | inputs=[],
947 | outputs=[
948 | task,
949 | out_imagebox,
950 | language_instruction,
951 | grounding_instruction,
952 | gligen_clear_btn,
953 | gligen_gen_btn,
954 | gligen_adv_options,
955 | ],
956 | )
957 | gligen_inpaint.select(
958 | switch_to_inpaint,
959 | inputs=[],
960 | outputs=[
961 | task,
962 | out_imagebox,
963 | language_instruction,
964 | grounding_instruction,
965 | gligen_clear_btn,
966 | gligen_gen_btn,
967 | gligen_adv_options,
968 | ],
969 | queue=False,
970 | )
971 |
972 | compose_tab.select(
973 | switch_to_compose,
974 | [],
975 | [
976 | task,
977 | out_imagebox,
978 | language_instruction,
979 | grounding_instruction,
980 | gligen_clear_btn,
981 | gligen_gen_btn,
982 | gligen_adv_options,
983 | ],
984 | )
985 |
986 | gr.Markdown(tos_markdown)
987 | gr.Markdown(license_markdown)
988 |
989 | return demo
990 |
991 |
992 | class LowercaseAction(argparse.Action):
993 | def __call__(self, parser, namespace, values, option_string=None):
994 | lowercase_values = [v.lower() for v in values]
995 | setattr(namespace, self.dest, lowercase_values)
996 |
997 |
998 | if __name__ == "__main__":
999 | parser = argparse.ArgumentParser()
1000 | parser.add_argument("--host", type=str, default="0.0.0.0")
1001 | parser.add_argument("--port", type=int)
1002 | parser.add_argument("--controller-url", type=str, default="http://localhost:10000")
1003 | parser.add_argument("--concurrency-count", type=int, default=8)
1004 | parser.add_argument("--model-list-mode", type=str, default="reload", choices=["once", "reload"])
1005 | parser.add_argument("--share", action="store_true")
1006 | parser.add_argument("--moderate", nargs="*", default=[], action=LowercaseAction)
1007 | parser.add_argument("--embed", action="store_true")
1008 | args = parser.parse_args()
1009 | LLAVA.set_args(args)
1010 |
1011 | demo = build_demo()
1012 | demo.queue(concurrency_count=args.concurrency_count, api_open=False)
1013 |
1014 | demo.launch(favicon_path="./demo_resources/images/llava_interactive_logo.png")
1015 |
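1016 | # Example invocations (a sketch, not part of the original script). The
1017 | # LLAVA_INTERACTIVE_HOME variable must point at the checkout that contains
1018 | # the GLIGEN, SEEM, and LLaVA clones (run_demo.sh sets it to "."):
1019 | #
1020 | #   LLAVA_INTERACTIVE_HOME=. python llava_interactive.py
1021 | #   LLAVA_INTERACTIVE_HOME=. python llava_interactive.py --moderate all
1022 | #
1023 | # --moderate takes zero or more options (lowercased by LowercaseAction) that
1024 | # are matched against ModerationOptions values in get_generated(); "all" above
1025 | # assumes ModerationOptions.ALL.value == "all".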
--------------------------------------------------------------------------------
/ngrok.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | tunnels:
3 | llavainteractive:
4 | proto: http
5 | addr: 7860
6 | domain: llavainteractive.ngrok.app
7 |
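8 | # Start this tunnel (after `ngrok config add-authtoken <token>`) with:
9 | #   ngrok start --config ngrok.yml llavainteractive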
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | skip-string-normalization = true
3 | line-length = 120
4 |
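5 | # Black picks up this configuration when run from the repo root, e.g. `black .`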
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | albumentations==1.3.0
2 | accelerate==0.20.3
3 | altair==5.0.1
4 | cityscapesscripts==2.2.2
5 | diffusers==0.11.1
6 | diffdist==0.1
7 | ftfy==6.1.1
8 | fvcore==0.1.5.post20221221
9 | imageio==2.9.0
10 | imageio-ffmpeg==0.4.2
11 | invisible-watermark==0.1.5
12 | json_tricks==3.17.1
13 | kornia==0.6.9
14 | mup==1.0.0
15 | nltk==3.8.1
16 | numpy==1.23.1
17 | numba==0.57.1
18 | openai==0.27.8
19 | omegaconf==2.1.1
20 | opencv-python==4.7.0.72
21 | opencv-python-headless==4.7.0.72
22 | pandas==2.0.3
23 | pip==22.2.2
24 | pillow==9.4.0
25 | pyarrow==12.0.1
26 | pycocotools==2.0.5
27 | pydantic==1.10.9
28 | pyyaml==6.0
29 | protobuf==3.20.3
30 | pytorch-lightning==1.4.2
31 | regex==2023.6.3
32 | scikit-image==0.20.0
33 | scikit-learn==1.2.2
34 | sentencepiece==0.1.99
35 | shapely==2.0.1
36 | scann==1.2.7
37 | streamlit==1.12.1
38 | timm==0.4.12
39 | --find-links https://download.pytorch.org/whl/cu117/torch_stable.html
40 | torch==2.0.1+cu117
41 | --find-links https://download.pytorch.org/whl/cu117/torch_stable.html
42 | torchvision==0.15.2+cu117
43 | test-tube==0.7.5
44 | transformers==4.28.0
45 | vision-datasets==0.2.2
46 | yacs==0.1.8
47 | clip @ git+https://github.com/openai/CLIP.git@a9b1bf5920416aaeaec965c25dd9e8f98c864f16
48 | openai-whisper @ git+https://github.com/openai/whisper.git@248b6cb124225dd263bb9bd32d060b6517e067f8
49 | einops @ git+https://github.com/arogozhnikov/einops.git
50 | detectron2 @ git+https://github.com/maureenzou/detectron2-xyz.git@42121d75e10d9f858f3a91b6a39f5722c02868f0
51 | gradio @ git+https://github.com/wchen-github/gradio.git
52 | azure-ai-contentsafety==1.0.0b1
53 | azure-cognitiveservices-vision-contentmoderator==1.0.0
54 | fire==0.5.0
55 |
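56 | # Install with `pip install -r requirements.txt`; the pinned torch/torchvision
57 | # wheels (+cu117) assume a CUDA 11.7 runtime.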
--------------------------------------------------------------------------------
/run_demo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | pkill --signal 9 -f llava.serve.controller
4 | pkill --signal 9 -f llava.serve.model_worker
5 | pkill --signal 9 -f lama_server
6 | pkill --signal 9 -f llava_interactive
7 |
8 | eval "$(conda shell.bash hook)"
9 |
10 | # Check if --debug is in the command line arguments
11 | if [[ " $* " == *" --debug "* ]]; then
12 | RUN_LLAVA_INT=False
13 | else
14 | RUN_LLAVA_INT=True
15 | fi
16 |
17 | echo "RUN_LLAVA_INT: $RUN_LLAVA_INT"
18 |
19 | (
20 | cd LLaVA
21 | pwd
22 | conda deactivate
23 | conda activate llava
24 |
25 | export HOST_ADDRESS="0.0.0.0"
26 | export CONTROLLER_PORT=10000
27 | export MODEL_WORKER_PORT=40000
28 |
29 | python -m llava.serve.controller \
30 | --host $HOST_ADDRESS \
31 | --port $CONTROLLER_PORT &
32 |
33 | python -m llava.serve.model_worker \
34 | --host $HOST_ADDRESS \
35 | --controller http://localhost:$CONTROLLER_PORT \
36 | --port $MODEL_WORKER_PORT \
37 | --worker http://localhost:$MODEL_WORKER_PORT \
38 | --model-path ./llava-v1.5-13b &
39 | )
40 |
41 | sleep 30
42 |
43 | (
44 | cd lama
45 | pwd
46 | conda deactivate
47 | conda activate lama
48 | export TORCH_HOME=$(pwd)
49 | export PYTHONPATH=$(pwd)
50 | python ../lama_server.py &
51 | )
52 |
53 | sleep 10
54 |
55 | if [ "$RUN_LLAVA_INT" = "True" ]; then
56 | (
57 | pwd
58 | conda deactivate
59 | conda activate llava_int
60 |
61 | export LLAVA_INTERACTIVE_HOME=.
62 | export GRADIO_NO_RELOAD=True
63 |
64 | python llava_interactive.py
65 | )
66 | else
67 | echo "Skipping llava_interactive.py because --debug was given."
68 | fi
69 |
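70 | # Usage:
71 | #   ./run_demo.sh            # start the LLaVA controller/worker, LaMa server, and demo UI
72 | #   ./run_demo.sh --debug    # start only the backend services; skip llava_interactive.py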
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | echo "Cloning dependent repos..."
2 | git clone --single-branch https://github.com/mattmazzola/GLIGEN.git
3 | git clone --single-branch https://github.com/mattmazzola/Segment-Everything-Everywhere-All-At-Once.git SEEM
4 | git clone --single-branch -b make_usable_from_other_demos https://github.com/mattmazzola/LLaVA
5 | git clone --single-branch https://github.com/advimman/lama.git
6 |
7 | echo "Creating environments and downloading pretrained models..."
8 |
9 | cd LLaVA
10 | conda create -n llava python=3.10 -y
11 | conda activate llava
12 | pip install --upgrade pip # enable PEP 660 support
13 | pip install -e .
14 | #download pretrained model
15 | git clone https://huggingface.co/liuhaotian/llava-v1.5-13b
16 | conda deactivate
17 | cd ..
18 |
19 | #setting up lama
20 | cd lama
21 | conda env create --name lama -f conda_env.yml -y
22 | conda activate lama
23 | conda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch -y
24 | pip install torch==1.10.2+cu113 --find-links https://download.pytorch.org/whl/cu113/torch_stable.html
25 | pip install torchvision==0.11.3+cu113 --find-links https://download.pytorch.org/whl/cu113/torch_stable.html
26 | pip install flask
27 | pip install pytorch-lightning
28 | #download pretrained model
29 | git clone https://huggingface.co/smartywu/big-lama download
30 | unzip -n -q download/big-lama.zip
31 |
32 | conda deactivate
33 | cd ..
34 | echo "Done setting up."
35 |
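36 | # NOTE: 'conda activate' needs conda's shell hook in non-interactive shells.
37 | # If the activate calls above fail, initialize the hook first and source the
38 | # script in the current shell:
39 | #   eval "$(conda shell.bash hook)"; source setup.sh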
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | # Flake8 Options: https://flake8.pycqa.org/en/latest/user/options.html#index-of-options
3 | max-line-length = 120
4 | ignore = F541, E501, F841
5 |
6 | exclude = GLIGEN, SEEM, lama, LLaVA
7 |
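8 | # flake8 reads this [flake8] section when run from the repo root, e.g. `flake8 .`;
9 | # the vendored submodules above are excluded from linting.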
--------------------------------------------------------------------------------