├── .github └── workflows │ └── publish.yml ├── .gitignore ├── LICENSE ├── README.md ├── __init__.py ├── example_workflows ├── bark_and_musicgen.json ├── flux.json ├── llama3-405b.json ├── llama3-with-positive-feedback-loop.json ├── simple-garment-try-on.json ├── simple-llama3.json └── simple-llava.json ├── import_schemas.py ├── node.py ├── pyproject.toml ├── requirements.txt ├── schema_to_node.py ├── schemas ├── ai-forever_kandinsky-2.2.json ├── ai-forever_kandinsky-2.json ├── andreasjansson_blip-2.json ├── batouresearch_high-resolution-controlnet-tile.json ├── batouresearch_magic-image-refiner.json ├── batouresearch_magic-style-transfer.json ├── black-forest-labs_flux-1.1-pro.json ├── black-forest-labs_flux-dev.json ├── black-forest-labs_flux-pro.json ├── black-forest-labs_flux-schnell.json ├── bytedance_sdxl-lightning-4step.json ├── cjwbw_hyper-sdxl-1step-t2i.json ├── cjwbw_pixart-sigma.json ├── cjwbw_seamless_communication.json ├── cjwbw_supir.json ├── cuuupid_glm-4v-9b.json ├── cuuupid_idm-vton.json ├── declare-lab_tango.json ├── fofr_consistent-character.json ├── fofr_controlnet-preprocessors.json ├── fofr_face-to-many.json ├── fofr_latent-consistency-model.json ├── fofr_sd3-with-chaos.json ├── fofr_style-transfer.json ├── fofr_ultrapixel.json ├── ideogram-ai_ideogram-v2-turbo.json ├── ideogram-ai_ideogram-v2.json ├── lucataco_hunyuandit-v1.1.json ├── lucataco_llama-3-vision-alpha.json ├── lucataco_magnet.json ├── lucataco_moondream2.json ├── lucataco_paligemma-3b-pt-224.json ├── lucataco_pasd-magnify.json ├── lucataco_qwen-vl-chat.json ├── lucataco_sdxl-clip-interrogator.json ├── lucataco_xtts-v2.json ├── luosiallen_latent-consistency-model.json ├── meta_llama-2-70b-chat.json ├── meta_llama-2-7b-chat.json ├── meta_meta-llama-3-70b-instruct.json ├── meta_meta-llama-3-8b-instruct.json ├── meta_meta-llama-3.1-405b-instruct.json ├── meta_musicgen.json ├── nateraw_audio-super-resolution.json ├── nateraw_musicgen-songstarter-v0.2.json ├── omniedgeio_face-swap.json 
├── pharmapsychotic_clip-interrogator.json ├── philz1337x_clarity-upscaler.json ├── recraft-ai_recraft-v3.json ├── replicate_deepfloyd-if.json ├── salesforce_blip.json ├── smoretalk_rembg-enhance.json ├── snowflake_snowflake-arctic-instruct.json ├── stability-ai_sdxl.json ├── stability-ai_stable-diffusion-3.json ├── stability-ai_stable-diffusion.json ├── suno-ai_bark.json ├── tstramer_material-diffusion.json ├── yorickvp_llava-13b.json ├── yorickvp_llava-v1.6-34b.json ├── yorickvp_llava-v1.6-mistral-7b.json └── zsxkib_realistic-voice-cloning.json └── supported_models.json /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | jobs: 11 | publish-node: 12 | name: Publish Custom Node to registry 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check out code 16 | uses: actions/checkout@v4 17 | - name: Publish Custom Node 18 | uses: Comfy-Org/publish-node-action@main 19 | with: 20 | ## Add your own personal access token to your Github Repository secrets and reference it here. 
21 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | venv 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Replicate 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # comfyui-replicate 2 | 3 | Custom nodes for running [Replicate models](https://replicate.com/explore) in ComfyUI. 
4 | 5 | Take a look at the [example workflows](https://github.com/replicate/comfyui-replicate/tree/main/example_workflows) and the [supported Replicate models](https://github.com/replicate/comfyui-replicate/blob/main/supported_models.json) to get started. 6 | 7 | ![example-screenshot](https://github.com/replicate/comfyui-replicate/assets/319055/0eedb026-de3e-402a-b8fc-0a14c2fd209e) 8 | 9 | ## Set your Replicate API token before running 10 | 11 | Make sure you set your REPLICATE_API_TOKEN in your environment. Get your API tokens here; we recommend creating a new one: 12 | 13 | https://replicate.com/account/api-tokens 14 | 15 | To pass in your API token when running ComfyUI you could do: 16 | 17 | On MacOS or Linux: 18 | 19 | ```sh 20 | export REPLICATE_API_TOKEN="r8_************"; python main.py 21 | ``` 22 | 23 | On Windows: 24 | 25 | ```sh 26 | set REPLICATE_API_TOKEN=r8_************ && python main.py 27 | ``` 28 | 29 | ## Direct installation 30 | 31 | ```sh 32 | cd ComfyUI/custom_nodes 33 | git clone https://github.com/replicate/comfyui-replicate 34 | cd comfyui-replicate 35 | pip install -r requirements.txt 36 | ``` 37 | 38 | ## Supported Replicate models 39 | 40 | View the `supported_models.json` to see which models are packaged by default. 41 | 42 | ## Update Replicate models 43 | 44 | Simply run `./import_schemas.py` to update all model nodes. The latest version of a model is used by default. 45 | 46 | ## Add more models 47 | 48 | Only models that return simple text or image outputs are currently supported. If a model returns audio, video, JSON objects or a combination of outputs, the node will not work as expected. 
49 | 50 | If you want to add more models, you can: 51 | 52 | - add the model to `supported_models.json` (for example, `fofr/consistent-character`) 53 | - run `./import_schemas.py`, this will update all schemas and import your new model 54 | - restart ComfyUI 55 | - use the model in workflow, it’ll have the title ‘Replicate [model author/model name]’ 56 | 57 | ## Roadmap 58 | 59 | Things to investigate and add to this custom node package: 60 | 61 | - support for more types of Replicate model (audio and video first) 62 | - showing logs, prediction status and progress (via tqdm) 63 | 64 | ## Contributing 65 | 66 | If you add models that others would find useful, feel free to raise PRs. 67 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from .node import NODE_CLASS_MAPPINGS 2 | __all__ = ['NODE_CLASS_MAPPINGS'] 3 | -------------------------------------------------------------------------------- /example_workflows/bark_and_musicgen.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 7, 3 | "last_link_id": 2, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "Replicate suno-ai/bark", 8 | "pos": [ 9 | 540, 10 | 264 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 266 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "AUDIO", 22 | "type": "AUDIO", 23 | "links": [ 24 | 1 25 | ], 26 | "shape": 3, 27 | "slot_index": 0 28 | }, 29 | { 30 | "name": "STRING", 31 | "type": "STRING", 32 | "links": null, 33 | "shape": 3 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "Replicate suno-ai/bark" 38 | }, 39 | "widgets_values": [ 40 | "Hello, my name is Suno. And, uh — and I like pizza. 
[laughs] But I also have other interests such as playing tic tac toe.", 41 | "announcer", 42 | "", 43 | 0.7, 44 | 0.7, 45 | false, 46 | false 47 | ] 48 | }, 49 | { 50 | "id": 7, 51 | "type": "PreviewAudio", 52 | "pos": [ 53 | 1457, 54 | 272 55 | ], 56 | "size": { 57 | "0": 315, 58 | "1": 76 59 | }, 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "audio", 66 | "type": "AUDIO", 67 | "link": 2 68 | } 69 | ], 70 | "properties": { 71 | "Node name for S&R": "PreviewAudio" 72 | }, 73 | "widgets_values": [ 74 | null 75 | ] 76 | }, 77 | { 78 | "id": 6, 79 | "type": "Replicate meta/musicgen", 80 | "pos": [ 81 | 1001, 82 | 269 83 | ], 84 | "size": { 85 | "0": 400, 86 | "1": 436 87 | }, 88 | "flags": {}, 89 | "order": 1, 90 | "mode": 0, 91 | "inputs": [ 92 | { 93 | "name": "input_audio", 94 | "type": "AUDIO", 95 | "link": 1 96 | } 97 | ], 98 | "outputs": [ 99 | { 100 | "name": "AUDIO", 101 | "type": "AUDIO", 102 | "links": [ 103 | 2 104 | ], 105 | "shape": 3, 106 | "slot_index": 0 107 | } 108 | ], 109 | "properties": { 110 | "Node name for S&R": "Replicate meta/musicgen" 111 | }, 112 | "widgets_values": [ 113 | "stereo-melody-large", 114 | "rap, rock", 115 | 20, 116 | true, 117 | 0, 118 | 6, 119 | false, 120 | "loudness", 121 | 250, 122 | 0, 123 | 1, 124 | 3, 125 | "wav", 126 | 1607, 127 | "randomize", 128 | false 129 | ] 130 | } 131 | ], 132 | "links": [ 133 | [ 134 | 1, 135 | 5, 136 | 0, 137 | 6, 138 | 0, 139 | "AUDIO" 140 | ], 141 | [ 142 | 2, 143 | 6, 144 | 0, 145 | 7, 146 | 0, 147 | "AUDIO" 148 | ] 149 | ], 150 | "groups": [], 151 | "config": {}, 152 | "extra": { 153 | "ds": { 154 | "scale": 1, 155 | "offset": [ 156 | -123.2666015625, 157 | -48.2666015625 158 | ] 159 | } 160 | }, 161 | "version": 0.4 162 | } -------------------------------------------------------------------------------- /example_workflows/flux.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 21, 3 | 
"last_link_id": 27, 4 | "nodes": [ 5 | { 6 | "id": 17, 7 | "type": "DF_Text_Box", 8 | "pos": [ 9 | -55, 10 | -228 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 200 15 | }, 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "STRING", 22 | "type": "STRING", 23 | "links": [ 24 | 22, 25 | 23, 26 | 24 27 | ], 28 | "shape": 3, 29 | "slot_index": 0 30 | } 31 | ], 32 | "properties": { 33 | "Node name for S&R": "DF_Text_Box" 34 | }, 35 | "widgets_values": [ 36 | "a neon light saying \"flux now in comfyui-replicate\"" 37 | ] 38 | }, 39 | { 40 | "id": 13, 41 | "type": "SaveImage", 42 | "pos": [ 43 | 1023, 44 | -699 45 | ], 46 | "size": { 47 | "0": 315, 48 | "1": 270 49 | }, 50 | "flags": {}, 51 | "order": 4, 52 | "mode": 0, 53 | "inputs": [ 54 | { 55 | "name": "images", 56 | "type": "IMAGE", 57 | "link": 25 58 | } 59 | ], 60 | "properties": {}, 61 | "widgets_values": [ 62 | "flux-schnell" 63 | ] 64 | }, 65 | { 66 | "id": 21, 67 | "type": "Replicate black-forest-labs/flux-schnell", 68 | "pos": [ 69 | 580, 70 | -697 71 | ], 72 | "size": [ 73 | 400, 74 | 268 75 | ], 76 | "flags": {}, 77 | "order": 1, 78 | "mode": 0, 79 | "inputs": [ 80 | { 81 | "name": "prompt", 82 | "type": "STRING", 83 | "link": 22, 84 | "widget": { 85 | "name": "prompt" 86 | } 87 | } 88 | ], 89 | "outputs": [ 90 | { 91 | "name": "IMAGE", 92 | "type": "IMAGE", 93 | "links": [ 94 | 25 95 | ], 96 | "shape": 3, 97 | "slot_index": 0 98 | } 99 | ], 100 | "properties": { 101 | "Node name for S&R": "Replicate black-forest-labs/flux-schnell" 102 | }, 103 | "widgets_values": [ 104 | "", 105 | "3:2", 106 | 1, 107 | 1331, 108 | "randomize", 109 | "webp", 110 | 80, 111 | false, 112 | false 113 | ] 114 | }, 115 | { 116 | "id": 14, 117 | "type": "SaveImage", 118 | "pos": [ 119 | 1027, 120 | -375 121 | ], 122 | "size": { 123 | "0": 315, 124 | "1": 270 125 | }, 126 | "flags": {}, 127 | "order": 5, 128 | "mode": 0, 129 | "inputs": [ 130 | { 131 | "name": "images", 132 | "type": "IMAGE", 
133 | "link": 26 134 | } 135 | ], 136 | "properties": {}, 137 | "widgets_values": [ 138 | "flux-dev" 139 | ] 140 | }, 141 | { 142 | "id": 20, 143 | "type": "Replicate black-forest-labs/flux-dev", 144 | "pos": [ 145 | 579, 146 | -377 147 | ], 148 | "size": [ 149 | 400, 150 | 316 151 | ], 152 | "flags": {}, 153 | "order": 2, 154 | "mode": 0, 155 | "inputs": [ 156 | { 157 | "name": "image", 158 | "type": "IMAGE", 159 | "link": null 160 | }, 161 | { 162 | "name": "prompt", 163 | "type": "STRING", 164 | "link": 23, 165 | "widget": { 166 | "name": "prompt" 167 | } 168 | } 169 | ], 170 | "outputs": [ 171 | { 172 | "name": "IMAGE", 173 | "type": "IMAGE", 174 | "links": [ 175 | 26 176 | ], 177 | "shape": 3, 178 | "slot_index": 0 179 | } 180 | ], 181 | "properties": { 182 | "Node name for S&R": "Replicate black-forest-labs/flux-dev" 183 | }, 184 | "widgets_values": [ 185 | "", 186 | "3:2", 187 | 0.8, 188 | 1, 189 | 3.5, 190 | 1454, 191 | "randomize", 192 | "webp", 193 | 80, 194 | false, 195 | false 196 | ] 197 | }, 198 | { 199 | "id": 15, 200 | "type": "SaveImage", 201 | "pos": [ 202 | 1030, 203 | -7 204 | ], 205 | "size": { 206 | "0": 315, 207 | "1": 270 208 | }, 209 | "flags": {}, 210 | "order": 6, 211 | "mode": 0, 212 | "inputs": [ 213 | { 214 | "name": "images", 215 | "type": "IMAGE", 216 | "link": 27 217 | } 218 | ], 219 | "properties": {}, 220 | "widgets_values": [ 221 | "flux-pro" 222 | ] 223 | }, 224 | { 225 | "id": 19, 226 | "type": "Replicate black-forest-labs/flux-pro", 227 | "pos": [ 228 | 579, 229 | -3 230 | ], 231 | "size": [ 232 | 400, 233 | 268 234 | ], 235 | "flags": {}, 236 | "order": 3, 237 | "mode": 0, 238 | "inputs": [ 239 | { 240 | "name": "prompt", 241 | "type": "STRING", 242 | "link": 24, 243 | "widget": { 244 | "name": "prompt" 245 | } 246 | } 247 | ], 248 | "outputs": [ 249 | { 250 | "name": "IMAGE", 251 | "type": "IMAGE", 252 | "links": [ 253 | 27 254 | ], 255 | "shape": 3, 256 | "slot_index": 0 257 | } 258 | ], 259 | "properties": { 260 | "Node 
name for S&R": "Replicate black-forest-labs/flux-pro" 261 | }, 262 | "widgets_values": [ 263 | "", 264 | "3:2", 265 | 25, 266 | 3, 267 | 2, 268 | 2, 269 | 1262, 270 | "randomize", 271 | false 272 | ] 273 | } 274 | ], 275 | "links": [ 276 | [ 277 | 22, 278 | 17, 279 | 0, 280 | 21, 281 | 0, 282 | "STRING" 283 | ], 284 | [ 285 | 23, 286 | 17, 287 | 0, 288 | 20, 289 | 1, 290 | "STRING" 291 | ], 292 | [ 293 | 24, 294 | 17, 295 | 0, 296 | 19, 297 | 0, 298 | "STRING" 299 | ], 300 | [ 301 | 25, 302 | 21, 303 | 0, 304 | 13, 305 | 0, 306 | "IMAGE" 307 | ], 308 | [ 309 | 26, 310 | 20, 311 | 0, 312 | 14, 313 | 0, 314 | "IMAGE" 315 | ], 316 | [ 317 | 27, 318 | 19, 319 | 0, 320 | 15, 321 | 0, 322 | "IMAGE" 323 | ] 324 | ], 325 | "groups": [], 326 | "config": {}, 327 | "extra": { 328 | "ds": { 329 | "scale": 1, 330 | "offset": [ 331 | 189.2626953125, 332 | 773.8525390625 333 | ] 334 | } 335 | }, 336 | "version": 0.4 337 | } -------------------------------------------------------------------------------- /example_workflows/llama3-405b.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 10, 3 | "last_link_id": 13, 4 | "nodes": [ 5 | { 6 | "id": 5, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 1199, 10 | 389 11 | ], 12 | "size": [ 13 | 400, 14 | 200 15 | ], 16 | "flags": {}, 17 | "order": 6, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 4 24 | }, 25 | { 26 | "name": "text", 27 | "type": "STRING", 28 | "link": 3, 29 | "widget": { 30 | "name": "text" 31 | } 32 | } 33 | ], 34 | "outputs": [ 35 | { 36 | "name": "CONDITIONING", 37 | "type": "CONDITIONING", 38 | "links": [ 39 | 5 40 | ], 41 | "shape": 3, 42 | "slot_index": 0 43 | } 44 | ], 45 | "properties": { 46 | "Node name for S&R": "CLIPTextEncode" 47 | }, 48 | "widgets_values": [ 49 | "" 50 | ] 51 | }, 52 | { 53 | "id": 4, 54 | "type": "CLIPTextEncode", 55 | "pos": [ 56 | 1195, 57 | 649 58 | ], 59 | "size": { 60 | "0": 400, 
61 | "1": 200 62 | }, 63 | "flags": {}, 64 | "order": 3, 65 | "mode": 0, 66 | "inputs": [ 67 | { 68 | "name": "clip", 69 | "type": "CLIP", 70 | "link": 2 71 | } 72 | ], 73 | "outputs": [ 74 | { 75 | "name": "CONDITIONING", 76 | "type": "CONDITIONING", 77 | "links": [ 78 | 6 79 | ], 80 | "shape": 3, 81 | "slot_index": 0 82 | } 83 | ], 84 | "properties": { 85 | "Node name for S&R": "CLIPTextEncode" 86 | }, 87 | "widgets_values": [ 88 | "" 89 | ] 90 | }, 91 | { 92 | "id": 6, 93 | "type": "KSampler", 94 | "pos": [ 95 | 1664, 96 | 404 97 | ], 98 | "size": { 99 | "0": 315, 100 | "1": 262 101 | }, 102 | "flags": {}, 103 | "order": 7, 104 | "mode": 0, 105 | "inputs": [ 106 | { 107 | "name": "model", 108 | "type": "MODEL", 109 | "link": 11 110 | }, 111 | { 112 | "name": "positive", 113 | "type": "CONDITIONING", 114 | "link": 5 115 | }, 116 | { 117 | "name": "negative", 118 | "type": "CONDITIONING", 119 | "link": 6 120 | }, 121 | { 122 | "name": "latent_image", 123 | "type": "LATENT", 124 | "link": 7 125 | } 126 | ], 127 | "outputs": [ 128 | { 129 | "name": "LATENT", 130 | "type": "LATENT", 131 | "links": [ 132 | 8 133 | ], 134 | "shape": 3, 135 | "slot_index": 0 136 | } 137 | ], 138 | "properties": { 139 | "Node name for S&R": "KSampler" 140 | }, 141 | "widgets_values": [ 142 | 690393467025334, 143 | "randomize", 144 | 25, 145 | 7, 146 | "dpmpp_2m_sde_gpu", 147 | "karras", 148 | 1 149 | ] 150 | }, 151 | { 152 | "id": 8, 153 | "type": "VAEDecode", 154 | "pos": [ 155 | 2061, 156 | 406 157 | ], 158 | "size": { 159 | "0": 210, 160 | "1": 46 161 | }, 162 | "flags": {}, 163 | "order": 8, 164 | "mode": 0, 165 | "inputs": [ 166 | { 167 | "name": "samples", 168 | "type": "LATENT", 169 | "link": 8 170 | }, 171 | { 172 | "name": "vae", 173 | "type": "VAE", 174 | "link": 9 175 | } 176 | ], 177 | "outputs": [ 178 | { 179 | "name": "IMAGE", 180 | "type": "IMAGE", 181 | "links": [ 182 | 10 183 | ], 184 | "shape": 3, 185 | "slot_index": 0 186 | } 187 | ], 188 | "properties": { 189 | "Node 
name for S&R": "VAEDecode" 190 | } 191 | }, 192 | { 193 | "id": 3, 194 | "type": "CheckpointLoaderSimple", 195 | "pos": [ 196 | 735, 197 | 527 198 | ], 199 | "size": { 200 | "0": 315, 201 | "1": 98 202 | }, 203 | "flags": {}, 204 | "order": 0, 205 | "mode": 0, 206 | "outputs": [ 207 | { 208 | "name": "MODEL", 209 | "type": "MODEL", 210 | "links": [ 211 | 11 212 | ], 213 | "shape": 3, 214 | "slot_index": 0 215 | }, 216 | { 217 | "name": "CLIP", 218 | "type": "CLIP", 219 | "links": [ 220 | 2, 221 | 4 222 | ], 223 | "shape": 3, 224 | "slot_index": 1 225 | }, 226 | { 227 | "name": "VAE", 228 | "type": "VAE", 229 | "links": [ 230 | 9 231 | ], 232 | "shape": 3, 233 | "slot_index": 2 234 | } 235 | ], 236 | "properties": { 237 | "Node name for S&R": "CheckpointLoaderSimple" 238 | }, 239 | "widgets_values": [ 240 | "proteusV0.5.safetensors" 241 | ] 242 | }, 243 | { 244 | "id": 7, 245 | "type": "EmptyLatentImage", 246 | "pos": [ 247 | 1270, 248 | 219 249 | ], 250 | "size": [ 251 | 315, 252 | 106 253 | ], 254 | "flags": {}, 255 | "order": 4, 256 | "mode": 0, 257 | "inputs": [ 258 | { 259 | "name": "width", 260 | "type": "INT", 261 | "link": 12, 262 | "widget": { 263 | "name": "width" 264 | } 265 | }, 266 | { 267 | "name": "height", 268 | "type": "INT", 269 | "link": 13, 270 | "widget": { 271 | "name": "height" 272 | } 273 | } 274 | ], 275 | "outputs": [ 276 | { 277 | "name": "LATENT", 278 | "type": "LATENT", 279 | "links": [ 280 | 7 281 | ], 282 | "shape": 3, 283 | "slot_index": 0 284 | } 285 | ], 286 | "properties": { 287 | "Node name for S&R": "EmptyLatentImage" 288 | }, 289 | "widgets_values": [ 290 | 1024, 291 | 1024, 292 | 1 293 | ] 294 | }, 295 | { 296 | "id": 10, 297 | "type": "Width and height from aspect ratio 🪴", 298 | "pos": [ 299 | 838, 300 | 183 301 | ], 302 | "size": { 303 | "0": 315, 304 | "1": 126 305 | }, 306 | "flags": {}, 307 | "order": 1, 308 | "mode": 0, 309 | "outputs": [ 310 | { 311 | "name": "width", 312 | "type": "INT", 313 | "links": [ 314 | 12 315 | 
], 316 | "shape": 3, 317 | "slot_index": 0 318 | }, 319 | { 320 | "name": "height", 321 | "type": "INT", 322 | "links": [ 323 | 13 324 | ], 325 | "shape": 3, 326 | "slot_index": 1 327 | } 328 | ], 329 | "properties": { 330 | "Node name for S&R": "Width and height from aspect ratio 🪴" 331 | }, 332 | "widgets_values": [ 333 | "16:9", 334 | 1024, 335 | 8 336 | ] 337 | }, 338 | { 339 | "id": 9, 340 | "type": "SaveImage", 341 | "pos": [ 342 | 157, 343 | 474 344 | ], 345 | "size": [ 346 | 500.09607697684555, 347 | 307.000781640146 348 | ], 349 | "flags": {}, 350 | "order": 9, 351 | "mode": 0, 352 | "inputs": [ 353 | { 354 | "name": "images", 355 | "type": "IMAGE", 356 | "link": 10 357 | } 358 | ], 359 | "properties": {}, 360 | "widgets_values": [ 361 | "ComfyUI" 362 | ] 363 | }, 364 | { 365 | "id": 2, 366 | "type": "ShowText|pysssss", 367 | "pos": [ 368 | 153, 369 | 234 370 | ], 371 | "size": [ 372 | 450.830078125, 373 | 192.2900390625 374 | ], 375 | "flags": {}, 376 | "order": 5, 377 | "mode": 0, 378 | "inputs": [ 379 | { 380 | "name": "text", 381 | "type": "STRING", 382 | "link": 1, 383 | "widget": { 384 | "name": "text" 385 | } 386 | } 387 | ], 388 | "outputs": [ 389 | { 390 | "name": "STRING", 391 | "type": "STRING", 392 | "links": null, 393 | "shape": 6 394 | } 395 | ], 396 | "properties": { 397 | "Node name for S&R": "ShowText|pysssss" 398 | }, 399 | "widgets_values": [ 400 | "", 401 | "A film still of a sci-fi anime movie, two space explorers in a tense, mystical confrontation on a bizarre, abandoned space station, weird, dreamlike, muted metallic colors, soft, ethereal lighting, unusual, organic architecture, anime style, intricate details, dramatic posing." 
402 | ] 403 | }, 404 | { 405 | "id": 1, 406 | "type": "Replicate meta/meta-llama-3.1-405b-instruct", 407 | "pos": [ 408 | -274, 409 | 235 410 | ], 411 | "size": [ 412 | 398.7880859375, 413 | 447.314453125 414 | ], 415 | "flags": {}, 416 | "order": 2, 417 | "mode": 0, 418 | "outputs": [ 419 | { 420 | "name": "STRING", 421 | "type": "STRING", 422 | "links": [ 423 | 1, 424 | 3 425 | ], 426 | "shape": 3, 427 | "slot_index": 0 428 | } 429 | ], 430 | "properties": { 431 | "Node name for S&R": "Replicate meta/meta-llama-3.1-405b-instruct" 432 | }, 433 | "widgets_values": [ 434 | "an awesome scifi anime scene, with some dynamic interaction between two people, don't use neon stuff though, be weird", 435 | "You are a helpful text to image prompt assistant. You enhance prompts using short sentences and comma separated keywords, subject first then style. Return just the prompt. Make sure to keep all details from given prompt (like if it's a photo, etc)\n\nGood prompt example:\nA film still of an anime movie, two space explorers in a tense standoff on a desolate alien planet, surreal, muted color palette, eerie atmospheric lighting, unusual architecture, anime style, detailed textures, dramatic posing.", 436 | 0, 437 | 512, 438 | 0.6, 439 | 0.9, 440 | 50, 441 | 0, 442 | 0, 443 | "", 444 | false 445 | ] 446 | } 447 | ], 448 | "links": [ 449 | [ 450 | 1, 451 | 1, 452 | 0, 453 | 2, 454 | 0, 455 | "STRING" 456 | ], 457 | [ 458 | 2, 459 | 3, 460 | 1, 461 | 4, 462 | 0, 463 | "CLIP" 464 | ], 465 | [ 466 | 3, 467 | 1, 468 | 0, 469 | 5, 470 | 1, 471 | "STRING" 472 | ], 473 | [ 474 | 4, 475 | 3, 476 | 1, 477 | 5, 478 | 0, 479 | "CLIP" 480 | ], 481 | [ 482 | 5, 483 | 5, 484 | 0, 485 | 6, 486 | 1, 487 | "CONDITIONING" 488 | ], 489 | [ 490 | 6, 491 | 4, 492 | 0, 493 | 6, 494 | 2, 495 | "CONDITIONING" 496 | ], 497 | [ 498 | 7, 499 | 7, 500 | 0, 501 | 6, 502 | 3, 503 | "LATENT" 504 | ], 505 | [ 506 | 8, 507 | 6, 508 | 0, 509 | 8, 510 | 0, 511 | "LATENT" 512 | ], 513 | [ 514 | 9, 515 | 3, 516 
| 2, 517 | 8, 518 | 1, 519 | "VAE" 520 | ], 521 | [ 522 | 10, 523 | 8, 524 | 0, 525 | 9, 526 | 0, 527 | "IMAGE" 528 | ], 529 | [ 530 | 11, 531 | 3, 532 | 0, 533 | 6, 534 | 0, 535 | "MODEL" 536 | ], 537 | [ 538 | 12, 539 | 10, 540 | 0, 541 | 7, 542 | 0, 543 | "INT" 544 | ], 545 | [ 546 | 13, 547 | 10, 548 | 1, 549 | 7, 550 | 1, 551 | "INT" 552 | ] 553 | ], 554 | "groups": [], 555 | "config": {}, 556 | "extra": { 557 | "ds": { 558 | "scale": 0.6209213230591553, 559 | "offset": [ 560 | 357.0363126973379, 561 | 28.753132988760044 562 | ] 563 | } 564 | }, 565 | "version": 0.4 566 | } -------------------------------------------------------------------------------- /example_workflows/simple-garment-try-on.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 4, 3 | "last_link_id": 3, 4 | "nodes": [ 5 | { 6 | "id": 3, 7 | "type": "LoadImage", 8 | "pos": [ 9 | 237, 10 | 507 11 | ], 12 | "size": [ 13 | 315, 14 | 314 15 | ], 16 | "flags": {}, 17 | "order": 0, 18 | "mode": 0, 19 | "outputs": [ 20 | { 21 | "name": "IMAGE", 22 | "type": "IMAGE", 23 | "links": [ 24 | 1 25 | ], 26 | "shape": 3, 27 | "slot_index": 0 28 | }, 29 | { 30 | "name": "MASK", 31 | "type": "MASK", 32 | "links": null, 33 | "shape": 3 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "LoadImage" 38 | }, 39 | "widgets_values": [ 40 | "KakaoTalk_Photo_2024-04-04-21-44-45-1.png", 41 | "image" 42 | ] 43 | }, 44 | { 45 | "id": 2, 46 | "type": "LoadImage", 47 | "pos": [ 48 | 240, 49 | 137 50 | ], 51 | "size": [ 52 | 315, 53 | 314 54 | ], 55 | "flags": {}, 56 | "order": 1, 57 | "mode": 0, 58 | "outputs": [ 59 | { 60 | "name": "IMAGE", 61 | "type": "IMAGE", 62 | "links": [ 63 | 2 64 | ], 65 | "shape": 3, 66 | "slot_index": 0 67 | }, 68 | { 69 | "name": "MASK", 70 | "type": "MASK", 71 | "links": null, 72 | "shape": 3 73 | } 74 | ], 75 | "properties": { 76 | "Node name for S&R": "LoadImage" 77 | }, 78 | "widgets_values": [ 79 | "sweater.webp", 80 | 
"image" 81 | ] 82 | }, 83 | { 84 | "id": 1, 85 | "type": "Replicate cuuupid/idm-vton", 86 | "pos": [ 87 | 915, 88 | 300 89 | ], 90 | "size": { 91 | "0": 315, 92 | "1": 290 93 | }, 94 | "flags": {}, 95 | "order": 2, 96 | "mode": 0, 97 | "inputs": [ 98 | { 99 | "name": "garm_img", 100 | "type": "IMAGE", 101 | "link": 2 102 | }, 103 | { 104 | "name": "human_img", 105 | "type": "IMAGE", 106 | "link": 1 107 | }, 108 | { 109 | "name": "mask_img", 110 | "type": "IMAGE", 111 | "link": null 112 | } 113 | ], 114 | "outputs": [ 115 | { 116 | "name": "IMAGE", 117 | "type": "IMAGE", 118 | "links": [ 119 | 3 120 | ], 121 | "shape": 3, 122 | "slot_index": 0 123 | } 124 | ], 125 | "properties": { 126 | "Node name for S&R": "Replicate cuuupid/idm-vton" 127 | }, 128 | "widgets_values": [ 129 | "", 130 | "upper_body", 131 | false, 132 | false, 133 | false, 134 | 30, 135 | 71, 136 | "randomize", 137 | false 138 | ] 139 | }, 140 | { 141 | "id": 4, 142 | "type": "SaveImage", 143 | "pos": [ 144 | 1312, 145 | 296 146 | ], 147 | "size": [ 148 | 368.7568359375, 149 | 308.6533203125 150 | ], 151 | "flags": {}, 152 | "order": 3, 153 | "mode": 0, 154 | "inputs": [ 155 | { 156 | "name": "images", 157 | "type": "IMAGE", 158 | "link": 3 159 | } 160 | ], 161 | "properties": {}, 162 | "widgets_values": [ 163 | "ComfyUI" 164 | ] 165 | } 166 | ], 167 | "links": [ 168 | [ 169 | 1, 170 | 3, 171 | 0, 172 | 1, 173 | 1, 174 | "IMAGE" 175 | ], 176 | [ 177 | 2, 178 | 2, 179 | 0, 180 | 1, 181 | 0, 182 | "IMAGE" 183 | ], 184 | [ 185 | 3, 186 | 1, 187 | 0, 188 | 4, 189 | 0, 190 | "IMAGE" 191 | ] 192 | ], 193 | "groups": [], 194 | "config": {}, 195 | "extra": { 196 | "ds": { 197 | "scale": 1, 198 | "offset": [ 199 | 33.1787109375, 200 | -26.455078125 201 | ] 202 | } 203 | }, 204 | "version": 0.4 205 | } -------------------------------------------------------------------------------- /example_workflows/simple-llama3.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"last_node_id": 12, 3 | "last_link_id": 11, 4 | "nodes": [ 5 | { 6 | "id": 7, 7 | "type": "CLIPTextEncode", 8 | "pos": [ 9 | 413, 10 | 389 11 | ], 12 | "size": { 13 | "0": 425.27801513671875, 14 | "1": 180.6060791015625 15 | }, 16 | "flags": {}, 17 | "order": 3, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "clip", 22 | "type": "CLIP", 23 | "link": 5 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "CONDITIONING", 29 | "type": "CONDITIONING", 30 | "links": [ 31 | 6 32 | ], 33 | "slot_index": 0 34 | } 35 | ], 36 | "properties": { 37 | "Node name for S&R": "CLIPTextEncode" 38 | }, 39 | "widgets_values": [ 40 | "text, watermark" 41 | ] 42 | }, 43 | { 44 | "id": 5, 45 | "type": "EmptyLatentImage", 46 | "pos": [ 47 | 473, 48 | 609 49 | ], 50 | "size": { 51 | "0": 315, 52 | "1": 106 53 | }, 54 | "flags": {}, 55 | "order": 0, 56 | "mode": 0, 57 | "outputs": [ 58 | { 59 | "name": "LATENT", 60 | "type": "LATENT", 61 | "links": [ 62 | 2 63 | ], 64 | "slot_index": 0 65 | } 66 | ], 67 | "properties": { 68 | "Node name for S&R": "EmptyLatentImage" 69 | }, 70 | "widgets_values": [ 71 | 512, 72 | 512, 73 | 1 74 | ] 75 | }, 76 | { 77 | "id": 8, 78 | "type": "VAEDecode", 79 | "pos": [ 80 | 1209, 81 | 188 82 | ], 83 | "size": { 84 | "0": 210, 85 | "1": 46 86 | }, 87 | "flags": {}, 88 | "order": 7, 89 | "mode": 0, 90 | "inputs": [ 91 | { 92 | "name": "samples", 93 | "type": "LATENT", 94 | "link": 7 95 | }, 96 | { 97 | "name": "vae", 98 | "type": "VAE", 99 | "link": 8 100 | } 101 | ], 102 | "outputs": [ 103 | { 104 | "name": "IMAGE", 105 | "type": "IMAGE", 106 | "links": [ 107 | 9 108 | ], 109 | "slot_index": 0 110 | } 111 | ], 112 | "properties": { 113 | "Node name for S&R": "VAEDecode" 114 | } 115 | }, 116 | { 117 | "id": 6, 118 | "type": "CLIPTextEncode", 119 | "pos": [ 120 | 415, 121 | 187 122 | ], 123 | "size": [ 124 | 422.84503173828125, 125 | 164.31304931640625 126 | ], 127 | "flags": {}, 128 | "order": 4, 129 | "mode": 0, 130 | "inputs": [ 131 | { 132 | "name": 
"clip", 133 | "type": "CLIP", 134 | "link": 3 135 | }, 136 | { 137 | "name": "text", 138 | "type": "STRING", 139 | "link": 10, 140 | "widget": { 141 | "name": "text" 142 | } 143 | } 144 | ], 145 | "outputs": [ 146 | { 147 | "name": "CONDITIONING", 148 | "type": "CONDITIONING", 149 | "links": [ 150 | 4 151 | ], 152 | "slot_index": 0 153 | } 154 | ], 155 | "properties": { 156 | "Node name for S&R": "CLIPTextEncode" 157 | }, 158 | "widgets_values": [ 159 | "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," 160 | ] 161 | }, 162 | { 163 | "id": 4, 164 | "type": "CheckpointLoaderSimple", 165 | "pos": [ 166 | 43, 167 | 437 168 | ], 169 | "size": { 170 | "0": 315, 171 | "1": 98 172 | }, 173 | "flags": {}, 174 | "order": 1, 175 | "mode": 0, 176 | "outputs": [ 177 | { 178 | "name": "MODEL", 179 | "type": "MODEL", 180 | "links": [ 181 | 1 182 | ], 183 | "slot_index": 0 184 | }, 185 | { 186 | "name": "CLIP", 187 | "type": "CLIP", 188 | "links": [ 189 | 3, 190 | 5 191 | ], 192 | "slot_index": 1 193 | }, 194 | { 195 | "name": "VAE", 196 | "type": "VAE", 197 | "links": [ 198 | 8 199 | ], 200 | "slot_index": 2 201 | } 202 | ], 203 | "properties": { 204 | "Node name for S&R": "CheckpointLoaderSimple" 205 | }, 206 | "widgets_values": [ 207 | "dreamshaperXL_lightningDPMSDE.safetensors" 208 | ] 209 | }, 210 | { 211 | "id": 3, 212 | "type": "KSampler", 213 | "pos": [ 214 | 863, 215 | 186 216 | ], 217 | "size": { 218 | "0": 315, 219 | "1": 262 220 | }, 221 | "flags": {}, 222 | "order": 6, 223 | "mode": 0, 224 | "inputs": [ 225 | { 226 | "name": "model", 227 | "type": "MODEL", 228 | "link": 1 229 | }, 230 | { 231 | "name": "positive", 232 | "type": "CONDITIONING", 233 | "link": 4 234 | }, 235 | { 236 | "name": "negative", 237 | "type": "CONDITIONING", 238 | "link": 6 239 | }, 240 | { 241 | "name": "latent_image", 242 | "type": "LATENT", 243 | "link": 2 244 | } 245 | ], 246 | "outputs": [ 247 | { 248 | "name": "LATENT", 249 | "type": "LATENT", 250 | "links": [ 251 
| 7 252 | ], 253 | "slot_index": 0 254 | } 255 | ], 256 | "properties": { 257 | "Node name for S&R": "KSampler" 258 | }, 259 | "widgets_values": [ 260 | 106695855998949, 261 | "randomize", 262 | 4, 263 | 2, 264 | "dpmpp_sde", 265 | "karras", 266 | 1 267 | ] 268 | }, 269 | { 270 | "id": 9, 271 | "type": "SaveImage", 272 | "pos": [ 273 | 1451, 274 | 189 275 | ], 276 | "size": [ 277 | 210, 278 | 270 279 | ], 280 | "flags": {}, 281 | "order": 8, 282 | "mode": 0, 283 | "inputs": [ 284 | { 285 | "name": "images", 286 | "type": "IMAGE", 287 | "link": 9 288 | } 289 | ], 290 | "properties": {}, 291 | "widgets_values": [ 292 | "ComfyUI" 293 | ] 294 | }, 295 | { 296 | "id": 12, 297 | "type": "ShowText|pysssss", 298 | "pos": [ 299 | 50, 300 | 85 301 | ], 302 | "size": [ 303 | 320.3125, 304 | 217.568359375 305 | ], 306 | "flags": {}, 307 | "order": 5, 308 | "mode": 0, 309 | "inputs": [ 310 | { 311 | "name": "text", 312 | "type": "STRING", 313 | "link": 11, 314 | "widget": { 315 | "name": "text" 316 | } 317 | } 318 | ], 319 | "outputs": [ 320 | { 321 | "name": "STRING", 322 | "type": "STRING", 323 | "links": null, 324 | "shape": 6 325 | } 326 | ], 327 | "properties": { 328 | "Node name for S&R": "ShowText|pysssss" 329 | }, 330 | "widgets_values": [ 331 | "", 332 | "cozy living room with large windows, plush couch, crackling fireplace, vintage rug, warm lighting, and a beautiful cityscape view outside" 333 | ] 334 | }, 335 | { 336 | "id": 10, 337 | "type": "Replicate meta/meta-llama-3-70b-instruct", 338 | "pos": [ 339 | -446, 340 | 83 341 | ], 342 | "size": [ 343 | 449.5546875, 344 | 696.453125 345 | ], 346 | "flags": {}, 347 | "order": 2, 348 | "mode": 0, 349 | "outputs": [ 350 | { 351 | "name": "STRING", 352 | "type": "STRING", 353 | "links": [ 354 | 10, 355 | 11 356 | ], 357 | "shape": 3, 358 | "slot_index": 0 359 | } 360 | ], 361 | "properties": { 362 | "Node name for S&R": "Replicate meta/meta-llama-3-70b-instruct" 363 | }, 364 | "widgets_values": [ 365 | "A living room", 
366 | "You are a helpful text to image prompt assistant. You write short comma separated prompts for image generators. Return only the prompt, do not use quotes. Embellish the request.", 367 | 512, 368 | 0, 369 | 1, 370 | 0.95, 371 | 0, 372 | "<|end_of_text|>,<|eot_id|>", 373 | 1, 374 | 0, 375 | 1840, 376 | "randomize", 377 | "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>", 378 | false, 379 | false 380 | ] 381 | } 382 | ], 383 | "links": [ 384 | [ 385 | 1, 386 | 4, 387 | 0, 388 | 3, 389 | 0, 390 | "MODEL" 391 | ], 392 | [ 393 | 2, 394 | 5, 395 | 0, 396 | 3, 397 | 3, 398 | "LATENT" 399 | ], 400 | [ 401 | 3, 402 | 4, 403 | 1, 404 | 6, 405 | 0, 406 | "CLIP" 407 | ], 408 | [ 409 | 4, 410 | 6, 411 | 0, 412 | 3, 413 | 1, 414 | "CONDITIONING" 415 | ], 416 | [ 417 | 5, 418 | 4, 419 | 1, 420 | 7, 421 | 0, 422 | "CLIP" 423 | ], 424 | [ 425 | 6, 426 | 7, 427 | 0, 428 | 3, 429 | 2, 430 | "CONDITIONING" 431 | ], 432 | [ 433 | 7, 434 | 3, 435 | 0, 436 | 8, 437 | 0, 438 | "LATENT" 439 | ], 440 | [ 441 | 8, 442 | 4, 443 | 2, 444 | 8, 445 | 1, 446 | "VAE" 447 | ], 448 | [ 449 | 9, 450 | 8, 451 | 0, 452 | 9, 453 | 0, 454 | "IMAGE" 455 | ], 456 | [ 457 | 10, 458 | 10, 459 | 0, 460 | 6, 461 | 1, 462 | "STRING" 463 | ], 464 | [ 465 | 11, 466 | 10, 467 | 0, 468 | 12, 469 | 0, 470 | "STRING" 471 | ] 472 | ], 473 | "groups": [], 474 | "config": {}, 475 | "extra": { 476 | "ds": { 477 | "scale": 1, 478 | "offset": { 479 | "0": 574.140625, 480 | "1": 77.529296875 481 | } 482 | } 483 | }, 484 | "version": 0.4 485 | } -------------------------------------------------------------------------------- /example_workflows/simple-llava.json: -------------------------------------------------------------------------------- 1 | { 2 | "last_node_id": 5, 3 | "last_link_id": 4, 4 | "nodes": [ 5 | { 6 | "id": 4, 7 | "type": "Replicate yorickvp/llava-v1.6-34b", 
8 | "pos": [ 9 | 900, 10 | 386 11 | ], 12 | "size": { 13 | "0": 400, 14 | "1": 200 15 | }, 16 | "flags": {}, 17 | "order": 1, 18 | "mode": 0, 19 | "inputs": [ 20 | { 21 | "name": "image", 22 | "type": "IMAGE", 23 | "link": 3 24 | } 25 | ], 26 | "outputs": [ 27 | { 28 | "name": "STRING", 29 | "type": "STRING", 30 | "links": [ 31 | 4 32 | ], 33 | "shape": 3, 34 | "slot_index": 0 35 | } 36 | ], 37 | "properties": { 38 | "Node name for S&R": "Replicate yorickvp/llava-v1.6-34b" 39 | }, 40 | "widgets_values": [ 41 | "Describe this image", 42 | 1, 43 | 0.2, 44 | 1024, 45 | "", 46 | false 47 | ] 48 | }, 49 | { 50 | "id": 5, 51 | "type": "ShowText|pysssss", 52 | "pos": [ 53 | 1348, 54 | 390 55 | ], 56 | "size": [ 57 | 366.6171875, 58 | 247.916015625 59 | ], 60 | "flags": {}, 61 | "order": 2, 62 | "mode": 0, 63 | "inputs": [ 64 | { 65 | "name": "text", 66 | "type": "STRING", 67 | "link": 4, 68 | "widget": { 69 | "name": "text" 70 | } 71 | } 72 | ], 73 | "outputs": [ 74 | { 75 | "name": "STRING", 76 | "type": "STRING", 77 | "links": null, 78 | "shape": 6 79 | } 80 | ], 81 | "properties": { 82 | "Node name for S&R": "ShowText|pysssss" 83 | }, 84 | "widgets_values": [ 85 | "", 86 | "The image appears to be a stylized drawing of two individuals, likely a man and a woman, depicted in a cartoon or caricature style. The man has short, light-colored hair and a beard, and is wearing a dark suit with a white shirt and a black tie. The woman has long, blonde hair and is wearing what seems to be a black dress. Both characters are smiling and looking directly at the viewer. The background is a solid light blue color. There is a watermark or logo in the upper right corner of the image, but the text is not legible in this description. The overall style of the image is playful and artistic, with a focus on the facial features and expressions of the characters." 
#!/usr/bin/env python3
"""Fetch and normalise Replicate model schemas listed in supported_models.json."""
import replicate
import json
import os
import argparse


def format_json_file(file_path):
    """Re-serialise one schema file with run_count zeroed and 4-space indent.

    run_count changes on every fetch; pinning it to 0 keeps repository diffs
    limited to genuine schema changes. Invalid JSON or I/O failures are
    reported and skipped rather than aborting the whole run.
    """
    try:
        # utf-8 explicitly: json.dump below uses ensure_ascii=False, so
        # non-ASCII schema text must round-trip regardless of the platform's
        # default encoding (update_schemas already opens files this way).
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        data["run_count"] = 0

        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=4, ensure_ascii=False)
    except json.JSONDecodeError:
        print(f"Error: {file_path} contains invalid JSON")
    except IOError:
        print(f"Error: Could not read or write to {file_path}")


def format_json_files_in_directory(directory):
    """Apply format_json_file to every .json file directly inside directory."""
    for filename in os.listdir(directory):
        if filename.endswith(".json"):
            file_path = os.path.join(directory, filename)
            format_json_file(file_path)


def update_schemas(update=False):
    """Download schemas for the models in supported_models.json into ./schemas.

    When update is False only missing schema files are fetched; when True
    every schema is re-fetched. Individual fetch failures are logged and the
    remaining models are still processed.
    """
    with open("supported_models.json", "r", encoding="utf-8") as f:
        supported_models = json.load(f)

    schemas_directory = "schemas"
    existing_schemas = set(os.listdir(schemas_directory))

    for model in supported_models["models"]:
        # e.g. "meta/musicgen" -> "meta_musicgen.json"
        schema_filename = f"{model.replace('/', '_')}.json"
        schema_path = os.path.join(schemas_directory, schema_filename)

        if update or schema_filename not in existing_schemas:
            try:
                m = replicate.models.get(model)
                with open(schema_path, "w", encoding="utf-8") as f:
                    f.write(m.json())
                print(f"{'Updated' if update else 'Added'} schema for {model}")
            except replicate.exceptions.ReplicateError as e:
                print(f"Error fetching schema for {model}: {str(e)}")
                continue

    format_json_files_in_directory(schemas_directory)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Update model schemas")
    parser.add_argument("--update", action="store_true", help="Update all schemas, not just new ones")
    args = parser.parse_args()

    update_schemas(update=args.update)
import os
import json
from PIL import Image
from io import BytesIO
import io
from torchvision import transforms
import torch
import base64
import time
import torchaudio
import soundfile as sf
from replicate.client import Client
from .schema_to_node import (
    schema_to_comfyui_input_types,
    get_return_type,
    name_and_version,
    inputs_that_need_arrays,
)

# Dedicated client so Replicate can attribute traffic to this integration.
replicate = Client(headers={"User-Agent": "comfyui-replicate/1.0.1"})


def create_comfyui_node(schema):
    """Build a ComfyUI node class for a single Replicate model schema.

    Returns (node_name, node_class). The class closes over `schema`, so its
    INPUT_TYPES, model identifier and RETURN_TYPES are all derived from the
    model's OpenAPI description.
    """
    replicate_model, node_name = name_and_version(schema)
    return_type = get_return_type(schema)

    class ReplicateToComfyUI:
        @classmethod
        def IS_CHANGED(cls, **kwargs):
            # Returning a fresh timestamp makes ComfyUI treat the node as
            # dirty and re-run it. Use .get: force_rerun is an *optional*
            # input, so a missing key must not raise KeyError.
            return time.time() if kwargs.get("force_rerun") else ""

        @classmethod
        def INPUT_TYPES(cls):
            return schema_to_comfyui_input_types(schema)

        RETURN_TYPES = (
            tuple(return_type.values())
            if isinstance(return_type, dict)
            else (return_type,)
        )
        FUNCTION = "run_replicate_model"
        CATEGORY = "Replicate"

        def convert_input_images_to_base64(self, kwargs):
            """Replace IMAGE/AUDIO inputs with base64 data URIs, in place."""
            for key, value in kwargs.items():
                if value is not None:
                    input_type = (
                        self.INPUT_TYPES()["required"].get(key, (None,))[0]
                        or self.INPUT_TYPES().get("optional", {}).get(key, (None,))[0]
                    )
                    if input_type == "IMAGE":
                        kwargs[key] = self.image_to_base64(value)
                    elif input_type == "AUDIO":
                        kwargs[key] = self.audio_to_base64(value)

        def image_to_base64(self, image):
            """Encode a ComfyUI image tensor or PIL image as a PNG data URI.

            Tensors are assumed to be ComfyUI-style (batch, H, W, C); only the
            first batch element is encoded.
            """
            if isinstance(image, torch.Tensor):
                image = image.permute(0, 3, 1, 2).squeeze(0)
                to_pil = transforms.ToPILImage()
                pil_image = to_pil(image)
            else:
                pil_image = image

            buffer = BytesIO()
            pil_image.save(buffer, format="PNG")
            buffer.seek(0)
            img_str = base64.b64encode(buffer.getvalue()).decode()
            return f"data:image/png;base64,{img_str}"

        def audio_to_base64(self, audio):
            """Encode ComfyUI audio (dict or (waveform, rate) pair) as a WAV data URI."""
            if (
                isinstance(audio, dict)
                and "waveform" in audio
                and "sample_rate" in audio
            ):
                waveform = audio["waveform"]
                sample_rate = audio["sample_rate"]
            else:
                waveform, sample_rate = audio

            # Ensure waveform is 2D (channels, samples) before handing it to
            # soundfile, which expects a 2D array after the transpose below.
            if waveform.dim() == 1:
                waveform = waveform.unsqueeze(0)
            elif waveform.dim() > 2:
                waveform = waveform.squeeze()
                if waveform.dim() > 2:
                    raise ValueError("Waveform must be 1D or 2D")

            buffer = BytesIO()
            sf.write(buffer, waveform.numpy().T, sample_rate, format="wav")
            buffer.seek(0)
            audio_str = base64.b64encode(buffer.getvalue()).decode()
            return f"data:audio/wav;base64,{audio_str}"

        def handle_array_inputs(self, kwargs):
            """Coerce inputs whose schema type is array.

            Strings are split on newlines ("" becomes an empty list); any
            other value is wrapped in a single-element list.
            """
            array_inputs = inputs_that_need_arrays(schema)
            for input_name in array_inputs:
                if input_name in kwargs:
                    if isinstance(kwargs[input_name], str):
                        if kwargs[input_name] == "":
                            kwargs[input_name] = []
                        else:
                            kwargs[input_name] = kwargs[input_name].split("\n")
                    else:
                        kwargs[input_name] = [kwargs[input_name]]

        def log_input(self, kwargs):
            """Print the model inputs, truncating bulky base64 data URIs."""
            truncated_kwargs = {
                k: v[:20] + "..."
                if isinstance(v, str)
                and (v.startswith("data:image") or v.startswith("data:audio"))
                else v
                for k, v in kwargs.items()
            }
            print(f"Running {replicate_model} with {truncated_kwargs}")

        def handle_image_output(self, output):
            """Convert model file output(s) into a ComfyUI image tensor batch.

            Returns a (batch, H, W, C) float tensor, or None when the model
            produced no image output.
            """
            if output is None:
                print("No image output received")
                return None

            output_list = [output] if not isinstance(output, list) else output
            if output_list:
                output_tensors = []
                transform = transforms.ToTensor()
                for file_obj in output_list:
                    image_data = file_obj.read()
                    image = Image.open(BytesIO(image_data))
                    if image.mode != "RGB":
                        image = image.convert("RGB")

                    tensor_image = transform(image)
                    tensor_image = tensor_image.unsqueeze(0)
                    tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float()
                    output_tensors.append(tensor_image)

                # Combine all tensors into a single batch if multiple images
                return (
                    torch.cat(output_tensors, dim=0)
                    if len(output_tensors) > 1
                    else output_tensors[0]
                )
            else:
                print("No output received from the model")
                return None

        def handle_audio_output(self, output):
            """Convert model audio output(s) into ComfyUI AUDIO dict(s).

            Returns a single {"waveform", "sample_rate"} dict, a list of them
            for multiple files, or None when nothing usable was produced.
            """
            if output is None:
                print("No audio output received from the model")
                return None

            output_list = [output] if not isinstance(output, list) else output

            audio_data = []
            for audio_file in output_list:
                if audio_file:
                    audio_content = BytesIO(audio_file.read())
                    waveform, sample_rate = torchaudio.load(audio_content)
                    audio_data.append({
                        "waveform": waveform.unsqueeze(0),
                        "sample_rate": sample_rate
                    })
                else:
                    print("Empty audio file received")

            if len(audio_data) == 1:
                return audio_data[0]
            elif len(audio_data) > 0:
                return audio_data
            else:
                print("No valid audio files processed")
                return None

        def remove_falsey_optional_inputs(self, kwargs):
            """Drop optional inputs with falsey values so Replicate applies the
            model's own defaults. Tensors are kept: their truthiness is
            ambiguous and an all-zero image is still a valid input.
            """
            optional_inputs = self.INPUT_TYPES().get("optional", {})
            for key in list(kwargs.keys()):
                if key in optional_inputs:
                    if isinstance(kwargs[key], torch.Tensor):
                        continue
                    elif not kwargs[key]:
                        del kwargs[key]

        def run_replicate_model(self, **kwargs):
            """Entry point called by ComfyUI: prepare inputs, run the model and
            convert its output(s) to the node's declared return types.
            """
            self.handle_array_inputs(kwargs)
            self.remove_falsey_optional_inputs(kwargs)
            self.convert_input_images_to_base64(kwargs)
            self.log_input(kwargs)
            # force_rerun only drives IS_CHANGED; the model must not see it.
            kwargs_without_force_rerun = {
                k: v for k, v in kwargs.items() if k != "force_rerun"
            }
            output = replicate.run(replicate_model, input=kwargs_without_force_rerun)
            print(f"Output: {output}")

            processed_outputs = []
            if isinstance(return_type, dict):
                for prop_name, prop_type in return_type.items():
                    if prop_type == "IMAGE":
                        processed_outputs.append(
                            self.handle_image_output(output.get(prop_name))
                        )
                    elif prop_type == "AUDIO":
                        processed_outputs.append(
                            self.handle_audio_output(output.get(prop_name))
                        )
                    elif prop_type == "STRING":
                        processed_outputs.append(
                            "".join(list(output.get(prop_name, ""))).strip()
                        )
            else:
                if return_type == "IMAGE":
                    processed_outputs.append(self.handle_image_output(output))
                elif return_type == "AUDIO":
                    processed_outputs.append(self.handle_audio_output(output))
                else:
                    # String outputs often arrive as token iterators; join them.
                    processed_outputs.append("".join(list(output)).strip())

            return tuple(processed_outputs)

    return node_name, ReplicateToComfyUI


def create_comfyui_nodes_from_schemas(schemas_dir):
    """Load every JSON schema in schemas_dir and build a node class for each."""
    nodes = {}
    current_path = os.path.dirname(os.path.abspath(__file__))
    schemas_dir_path = os.path.join(current_path, schemas_dir)
    for schema_file in os.listdir(schemas_dir_path):
        if schema_file.endswith(".json"):
            with open(
                os.path.join(schemas_dir_path, schema_file), "r", encoding="utf-8"
            ) as f:
                schema = json.load(f)
                node_name, node_class = create_comfyui_node(schema)
                nodes[node_name] = node_class
    return nodes


_cached_node_class_mappings = None


def get_node_class_mappings():
    """Build the node class mappings once and reuse them on later calls."""
    global _cached_node_class_mappings
    if _cached_node_class_mappings is None:
        _cached_node_class_mappings = create_comfyui_nodes_from_schemas("schemas")
    return _cached_node_class_mappings


NODE_CLASS_MAPPINGS = get_node_class_mappings()
DEFAULT_STEP = 0.01
DEFAULT_ROUND = 0.001

IMAGE_EXTENSIONS = (".png", ".jpg", ".jpeg", ".gif", ".webp")
VIDEO_EXTENSIONS = (".mp4", ".mkv", ".webm", ".mov", ".mpg", ".mpeg")
AUDIO_EXTENSIONS = (".mp3", ".wav", ".flac", ".mpga", ".m4a")

TYPE_MAPPING = {
    "string": "STRING",
    "integer": "INT",
    "number": "FLOAT",
    "boolean": "BOOLEAN",
}


def convert_to_comfyui_input_type(
    input_name, openapi_type, openapi_format=None, default_example_input=None
):
    """Map an OpenAPI property type to a ComfyUI socket type name.

    URI-typed strings are disambiguated first by the example input's file
    extension, then by hints in the input name; everything else goes through
    TYPE_MAPPING with STRING as the fallback.
    """
    if openapi_type == "string" and openapi_format == "uri":
        has_example_value = (
            bool(default_example_input)
            and isinstance(default_example_input, dict)
            and input_name in default_example_input
        )
        if has_example_value:
            example_value = default_example_input[input_name]
            for extensions, comfy_type in (
                (IMAGE_EXTENSIONS, "IMAGE"),
                (VIDEO_EXTENSIONS, "VIDEO"),
                (AUDIO_EXTENSIONS, "AUDIO"),
            ):
                if is_type(example_value, extensions):
                    return comfy_type
            # Example value matched no known extension: fall through below.
        else:
            lowered = input_name.lower()
            if "image" in lowered or "mask" in lowered:
                return "IMAGE"
            if "audio" in lowered:
                return "AUDIO"
            return "STRING"

    return TYPE_MAPPING.get(openapi_type, "STRING")


def name_and_version(schema):
    """Return (owner/name:version, node display name) for a model schema."""
    owner = schema["owner"]
    model_name = schema["name"]
    version_id = schema["latest_version"]["id"]
    return f"{owner}/{model_name}:{version_id}", f"Replicate {owner}/{model_name}"


def resolve_schema(prop_data, openapi_schema):
    """Follow a local $ref within openapi_schema.

    Returns prop_data untouched when there is no $ref or the reference path
    cannot be walked to the end.
    """
    if "$ref" not in prop_data:
        return prop_data

    node = openapi_schema
    for segment in prop_data["$ref"].split("/")[1:]:  # skip the leading '#'
        if segment not in node:
            return prop_data
        node = node[segment]
    return node


def schema_to_comfyui_input_types(schema):
    """Translate a model's OpenAPI Input schema into a ComfyUI INPUT_TYPES dict."""
    openapi_schema = schema["latest_version"]["openapi_schema"]
    input_schema = openapi_schema["components"]["schemas"]["Input"]
    default_example_input = get_default_example_input(schema)
    required_names = input_schema.get("required", [])

    input_types = {"required": {}, "optional": {}}

    for field_name, raw_field in input_schema["properties"].items():
        field = resolve_schema(raw_field, openapi_schema)
        # Default lives on the (possibly $ref-resolved) property itself,
        # not inside an allOf wrapper — read it before unwrapping.
        default = field.get("default", None)

        if "allOf" in field:
            field = resolve_schema(field["allOf"][0], openapi_schema)

        if "enum" in field:
            comfy_type = field["enum"]
        elif "type" in field:
            comfy_type = convert_to_comfyui_input_type(
                field_name,
                field["type"],
                field.get("format"),
                default_example_input,
            )
        else:
            comfy_type = "STRING"

        config = {} if default is None else {"default": default}

        if "minimum" in field:
            config["min"] = field["minimum"]
        if "maximum" in field:
            config["max"] = field["maximum"]
        if comfy_type == "FLOAT":
            config["step"] = DEFAULT_STEP
            config["round"] = DEFAULT_ROUND

        if "prompt" in field_name and field.get("type") == "string":
            config["multiline"] = True
            # Meta prompt_template needs `{prompt}` to be sent through
            # dynamicPrompts would strip it out
            if "template" not in field_name:
                config["dynamicPrompts"] = True

        bucket = "required" if field_name in required_names else "optional"
        input_types[bucket][field_name] = (comfy_type, config)

    input_types["optional"]["force_rerun"] = ("BOOLEAN", {"default": False})

    return order_inputs(input_types, input_schema)


def order_inputs(input_types, input_schema):
    """Reorder inputs by their x-order metadata, keeping force_rerun last."""
    ordered = {"required": {}, "optional": {}}
    by_x_order = sorted(
        input_schema["properties"].items(),
        key=lambda item: item[1].get("x-order", float("inf")),
    )

    for field_name, _ in by_x_order:
        if field_name in input_types["required"]:
            ordered["required"][field_name] = input_types["required"][field_name]
        elif field_name in input_types["optional"]:
            ordered["optional"][field_name] = input_types["optional"][field_name]

    ordered["optional"]["force_rerun"] = input_types["optional"]["force_rerun"]
    return ordered


def inputs_that_need_arrays(schema):
    """Names of Input properties declared as arrays in the OpenAPI schema."""
    properties = schema["latest_version"]["openapi_schema"]["components"][
        "schemas"
    ]["Input"]["properties"]
    return [
        field_name
        for field_name, field in properties.items()
        if field.get("type") == "array"
    ]


def is_type(default_example_output, extensions):
    """True when the value (or the first element of a list of values) is a
    string ending in one of the given extensions (case-insensitive)."""
    candidate = default_example_output
    if isinstance(candidate, list):
        if not candidate:
            return False
        candidate = candidate[0]
    return isinstance(candidate, str) and candidate.lower().endswith(extensions)


def get_default_example(schema):
    """The schema's default_example mapping, or None when absent or empty."""
    return schema.get("default_example") or None


def get_default_example_input(schema):
    """The input payload of the default example, if any."""
    example = get_default_example(schema)
    return example.get("input") if example else None


def get_default_example_output(schema):
    """The output payload of the default example, if any."""
    example = get_default_example(schema)
    return example.get("output") if example else None


def get_return_type(schema):
    """Work out the ComfyUI return type(s) for a model.

    Returns {property_name: type} when Output is an object schema with
    properties, otherwise a single type string. The default example output's
    file extensions take priority; STRING is the final fallback.
    """
    openapi_schema = schema["latest_version"]["openapi_schema"]
    output_schema = (
        openapi_schema.get("components", {}).get("schemas", {}).get("Output")
    )
    example_output = get_default_example_output(schema)

    if output_schema and "$ref" in output_schema:
        output_schema = resolve_schema(output_schema, openapi_schema)

    if isinstance(output_schema, dict) and output_schema.get("properties"):
        return_types = {}
        for prop_name, prop_spec in output_schema["properties"].items():
            if isinstance(example_output, dict):
                value = example_output.get(prop_name)
                if is_type(value, IMAGE_EXTENSIONS):
                    return_types[prop_name] = "IMAGE"
                elif is_type(value, AUDIO_EXTENSIONS):
                    return_types[prop_name] = "AUDIO"
                elif is_type(value, VIDEO_EXTENSIONS):
                    return_types[prop_name] = "VIDEO_URI"
                else:
                    return_types[prop_name] = "STRING"
            elif prop_spec.get("format") == "uri":
                lowered = prop_name.lower()
                if "audio" in lowered:
                    return_types[prop_name] = "AUDIO"
                elif "image" in lowered:
                    return_types[prop_name] = "IMAGE"
                else:
                    return_types[prop_name] = "STRING"
            else:
                return_types[prop_name] = "STRING"
        return return_types

    if is_type(example_output, IMAGE_EXTENSIONS):
        return "IMAGE"
    if is_type(example_output, VIDEO_EXTENSIONS):
        return "VIDEO_URI"
    if is_type(example_output, AUDIO_EXTENSIONS):
        return "AUDIO"

    if output_schema:
        items = output_schema.get("items", {})
        single_uri = (
            output_schema.get("type") == "string"
            and output_schema.get("format") == "uri"
        )
        uri_array = (
            output_schema.get("type") == "array"
            and items.get("type") == "string"
            and items.get("format") == "uri"
        )
        if single_uri or uri_array:
            # A bare URI (or list of URIs) with no other hints: assume images.
            return "IMAGE"

    return "STRING"
48 | ], 49 | "logs": null, 50 | "error": null, 51 | "metrics": { 52 | "predict_time": 1.035634819, 53 | "total_time": 1.141193 54 | }, 55 | "created_at": "2024-07-29T14:10:54.760000Z", 56 | "started_at": "2024-07-29T14:10:54.865558Z", 57 | "completed_at": "2024-07-29T14:10:55.901193Z", 58 | "urls": { 59 | "stream": "https://streaming-api.svc.us.c.replicate.net/v1/streams/ogen5dlda7b4ovivwyj2sogogvm42dibes6ocvkey5wlmsq7c7aa", 60 | "get": "https://api.replicate.com/v1/predictions/p9v8hg3mn1rgj0cgzvab9htxmg", 61 | "cancel": "https://api.replicate.com/v1/predictions/p9v8hg3mn1rgj0cgzvab9htxmg/cancel" 62 | } 63 | }, 64 | "latest_version": { 65 | "id": "72ccb656353c348c1385df54b237eeb7bfa874bf11486cf0b9473e691b662d31", 66 | "created_at": "2024-07-29T00:54:21.803600+00:00", 67 | "cog_version": "0.9.13", 68 | "openapi_schema": { 69 | "info": { 70 | "title": "Cog", 71 | "version": "0.1.0" 72 | }, 73 | "paths": { 74 | "/": { 75 | "get": { 76 | "summary": "Root", 77 | "responses": { 78 | "200": { 79 | "content": { 80 | "application/json": { 81 | "schema": { 82 | "title": "Response Root Get" 83 | } 84 | } 85 | }, 86 | "description": "Successful Response" 87 | } 88 | }, 89 | "operationId": "root__get" 90 | } 91 | }, 92 | "/shutdown": { 93 | "post": { 94 | "summary": "Start Shutdown", 95 | "responses": { 96 | "200": { 97 | "content": { 98 | "application/json": { 99 | "schema": { 100 | "title": "Response Start Shutdown Shutdown Post" 101 | } 102 | } 103 | }, 104 | "description": "Successful Response" 105 | } 106 | }, 107 | "operationId": "start_shutdown_shutdown_post" 108 | } 109 | }, 110 | "/predictions": { 111 | "post": { 112 | "summary": "Predict", 113 | "responses": { 114 | "200": { 115 | "content": { 116 | "application/json": { 117 | "schema": { 118 | "$ref": "#/components/schemas/PredictionResponse" 119 | } 120 | } 121 | }, 122 | "description": "Successful Response" 123 | }, 124 | "422": { 125 | "content": { 126 | "application/json": { 127 | "schema": { 128 | "$ref": 
"#/components/schemas/HTTPValidationError" 129 | } 130 | } 131 | }, 132 | "description": "Validation Error" 133 | } 134 | }, 135 | "parameters": [ 136 | { 137 | "in": "header", 138 | "name": "prefer", 139 | "schema": { 140 | "type": "string", 141 | "title": "Prefer" 142 | }, 143 | "required": false 144 | } 145 | ], 146 | "description": "Run a single prediction on the model", 147 | "operationId": "predict_predictions_post", 148 | "requestBody": { 149 | "content": { 150 | "application/json": { 151 | "schema": { 152 | "$ref": "#/components/schemas/PredictionRequest" 153 | } 154 | } 155 | } 156 | } 157 | } 158 | }, 159 | "/health-check": { 160 | "get": { 161 | "summary": "Healthcheck", 162 | "responses": { 163 | "200": { 164 | "content": { 165 | "application/json": { 166 | "schema": { 167 | "title": "Response Healthcheck Health Check Get" 168 | } 169 | } 170 | }, 171 | "description": "Successful Response" 172 | } 173 | }, 174 | "operationId": "healthcheck_health_check_get" 175 | } 176 | }, 177 | "/predictions/{prediction_id}": { 178 | "put": { 179 | "summary": "Predict Idempotent", 180 | "responses": { 181 | "200": { 182 | "content": { 183 | "application/json": { 184 | "schema": { 185 | "$ref": "#/components/schemas/PredictionResponse" 186 | } 187 | } 188 | }, 189 | "description": "Successful Response" 190 | }, 191 | "422": { 192 | "content": { 193 | "application/json": { 194 | "schema": { 195 | "$ref": "#/components/schemas/HTTPValidationError" 196 | } 197 | } 198 | }, 199 | "description": "Validation Error" 200 | } 201 | }, 202 | "parameters": [ 203 | { 204 | "in": "path", 205 | "name": "prediction_id", 206 | "schema": { 207 | "type": "string", 208 | "title": "Prediction ID" 209 | }, 210 | "required": true 211 | }, 212 | { 213 | "in": "header", 214 | "name": "prefer", 215 | "schema": { 216 | "type": "string", 217 | "title": "Prefer" 218 | }, 219 | "required": false 220 | } 221 | ], 222 | "description": "Run a single prediction on the model (idempotent creation).", 
223 | "operationId": "predict_idempotent_predictions__prediction_id__put", 224 | "requestBody": { 225 | "content": { 226 | "application/json": { 227 | "schema": { 228 | "allOf": [ 229 | { 230 | "$ref": "#/components/schemas/PredictionRequest" 231 | } 232 | ], 233 | "title": "Prediction Request" 234 | } 235 | } 236 | }, 237 | "required": true 238 | } 239 | } 240 | }, 241 | "/predictions/{prediction_id}/cancel": { 242 | "post": { 243 | "summary": "Cancel", 244 | "responses": { 245 | "200": { 246 | "content": { 247 | "application/json": { 248 | "schema": { 249 | "title": "Response Cancel Predictions Prediction Id Cancel Post" 250 | } 251 | } 252 | }, 253 | "description": "Successful Response" 254 | }, 255 | "422": { 256 | "content": { 257 | "application/json": { 258 | "schema": { 259 | "$ref": "#/components/schemas/HTTPValidationError" 260 | } 261 | } 262 | }, 263 | "description": "Validation Error" 264 | } 265 | }, 266 | "parameters": [ 267 | { 268 | "in": "path", 269 | "name": "prediction_id", 270 | "schema": { 271 | "type": "string", 272 | "title": "Prediction ID" 273 | }, 274 | "required": true 275 | } 276 | ], 277 | "description": "Cancel a running prediction", 278 | "operationId": "cancel_predictions__prediction_id__cancel_post" 279 | } 280 | } 281 | }, 282 | "openapi": "3.0.2", 283 | "components": { 284 | "schemas": { 285 | "Input": { 286 | "type": "object", 287 | "title": "Input", 288 | "required": [ 289 | "image" 290 | ], 291 | "properties": { 292 | "image": { 293 | "type": "string", 294 | "title": "Image", 295 | "format": "uri", 296 | "x-order": 0, 297 | "description": "Input image" 298 | }, 299 | "prompt": { 300 | "type": "string", 301 | "title": "Prompt", 302 | "default": "Describe this image", 303 | "x-order": 1, 304 | "description": "Input prompt" 305 | } 306 | } 307 | }, 308 | "Output": { 309 | "type": "array", 310 | "items": { 311 | "type": "string" 312 | }, 313 | "title": "Output", 314 | "x-cog-array-type": "iterator", 315 | "x-cog-array-display": 
"concatenate" 316 | }, 317 | "Status": { 318 | "enum": [ 319 | "starting", 320 | "processing", 321 | "succeeded", 322 | "canceled", 323 | "failed" 324 | ], 325 | "type": "string", 326 | "title": "Status", 327 | "description": "An enumeration." 328 | }, 329 | "WebhookEvent": { 330 | "enum": [ 331 | "start", 332 | "output", 333 | "logs", 334 | "completed" 335 | ], 336 | "type": "string", 337 | "title": "WebhookEvent", 338 | "description": "An enumeration." 339 | }, 340 | "ValidationError": { 341 | "type": "object", 342 | "title": "ValidationError", 343 | "required": [ 344 | "loc", 345 | "msg", 346 | "type" 347 | ], 348 | "properties": { 349 | "loc": { 350 | "type": "array", 351 | "items": { 352 | "anyOf": [ 353 | { 354 | "type": "string" 355 | }, 356 | { 357 | "type": "integer" 358 | } 359 | ] 360 | }, 361 | "title": "Location" 362 | }, 363 | "msg": { 364 | "type": "string", 365 | "title": "Message" 366 | }, 367 | "type": { 368 | "type": "string", 369 | "title": "Error Type" 370 | } 371 | } 372 | }, 373 | "PredictionRequest": { 374 | "type": "object", 375 | "title": "PredictionRequest", 376 | "properties": { 377 | "id": { 378 | "type": "string", 379 | "title": "Id" 380 | }, 381 | "input": { 382 | "$ref": "#/components/schemas/Input" 383 | }, 384 | "webhook": { 385 | "type": "string", 386 | "title": "Webhook", 387 | "format": "uri", 388 | "maxLength": 65536, 389 | "minLength": 1 390 | }, 391 | "created_at": { 392 | "type": "string", 393 | "title": "Created At", 394 | "format": "date-time" 395 | }, 396 | "output_file_prefix": { 397 | "type": "string", 398 | "title": "Output File Prefix" 399 | }, 400 | "webhook_events_filter": { 401 | "type": "array", 402 | "items": { 403 | "$ref": "#/components/schemas/WebhookEvent" 404 | }, 405 | "default": [ 406 | "start", 407 | "output", 408 | "logs", 409 | "completed" 410 | ] 411 | } 412 | } 413 | }, 414 | "PredictionResponse": { 415 | "type": "object", 416 | "title": "PredictionResponse", 417 | "properties": { 418 | "id": { 419 | 
"type": "string", 420 | "title": "Id" 421 | }, 422 | "logs": { 423 | "type": "string", 424 | "title": "Logs", 425 | "default": "" 426 | }, 427 | "error": { 428 | "type": "string", 429 | "title": "Error" 430 | }, 431 | "input": { 432 | "$ref": "#/components/schemas/Input" 433 | }, 434 | "output": { 435 | "$ref": "#/components/schemas/Output" 436 | }, 437 | "status": { 438 | "$ref": "#/components/schemas/Status" 439 | }, 440 | "metrics": { 441 | "type": "object", 442 | "title": "Metrics" 443 | }, 444 | "version": { 445 | "type": "string", 446 | "title": "Version" 447 | }, 448 | "created_at": { 449 | "type": "string", 450 | "title": "Created At", 451 | "format": "date-time" 452 | }, 453 | "started_at": { 454 | "type": "string", 455 | "title": "Started At", 456 | "format": "date-time" 457 | }, 458 | "completed_at": { 459 | "type": "string", 460 | "title": "Completed At", 461 | "format": "date-time" 462 | } 463 | } 464 | }, 465 | "HTTPValidationError": { 466 | "type": "object", 467 | "title": "HTTPValidationError", 468 | "properties": { 469 | "detail": { 470 | "type": "array", 471 | "items": { 472 | "$ref": "#/components/schemas/ValidationError" 473 | }, 474 | "title": "Detail" 475 | } 476 | } 477 | } 478 | } 479 | } 480 | } 481 | } 482 | } -------------------------------------------------------------------------------- /schemas/lucataco_paligemma-3b-pt-224.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://replicate.com/lucataco/paligemma-3b-pt-224", 3 | "owner": "lucataco", 4 | "name": "paligemma-3b-pt-224", 5 | "description": "PaliGemma 3B, an open VLM by Google, pre-trained with 224*224 input images and 128 token input/output text sequences", 6 | "visibility": "public", 7 | "github_url": "https://github.com/lucataco/cog-paligemma-3b-pt-224", 8 | "paper_url": "https://arxiv.org/abs/2310.09199", 9 | "license_url": "https://ai.google.dev/gemma/terms", 10 | "run_count": 0, 11 | "cover_image_url": 
"https://replicate.delivery/pbxt/Kv6Dn1Mk1tZe7vfVaRuPNBJcoDBYhRGQ33OTkq70l375ULSi/car.jpg", 12 | "default_example": { 13 | "id": "8m2yqtrtfdrgm0cff5cb3zasv8", 14 | "model": "lucataco/paligemma-3b-pt-224", 15 | "version": "c519755cce71af83c3831c3b3b7fe6c1de4a4dc27eff91f9e79639e14924a078", 16 | "status": "succeeded", 17 | "input": { 18 | "image": "https://replicate.delivery/pbxt/Kv6Dn1Mk1tZe7vfVaRuPNBJcoDBYhRGQ33OTkq70l375ULSi/car.jpg", 19 | "prompt": "caption es" 20 | }, 21 | "output": "persona estacionada en una calle", 22 | "logs": null, 23 | "error": null, 24 | "metrics": { 25 | "predict_time": 0.578439, 26 | "total_time": 0.625058 27 | }, 28 | "created_at": "2024-05-14T23:02:48.187000Z", 29 | "started_at": "2024-05-14T23:02:48.233619Z", 30 | "completed_at": "2024-05-14T23:02:48.812058Z", 31 | "urls": { 32 | "get": "https://api.replicate.com/v1/predictions/8m2yqtrtfdrgm0cff5cb3zasv8", 33 | "cancel": "https://api.replicate.com/v1/predictions/8m2yqtrtfdrgm0cff5cb3zasv8/cancel" 34 | } 35 | }, 36 | "latest_version": { 37 | "id": "c519755cce71af83c3831c3b3b7fe6c1de4a4dc27eff91f9e79639e14924a078", 38 | "created_at": "2024-05-14T22:58:12.484677+00:00", 39 | "cog_version": "0.9.6", 40 | "openapi_schema": { 41 | "info": { 42 | "title": "Cog", 43 | "version": "0.1.0" 44 | }, 45 | "paths": { 46 | "/": { 47 | "get": { 48 | "summary": "Root", 49 | "responses": { 50 | "200": { 51 | "content": { 52 | "application/json": { 53 | "schema": { 54 | "title": "Response Root Get" 55 | } 56 | } 57 | }, 58 | "description": "Successful Response" 59 | } 60 | }, 61 | "operationId": "root__get" 62 | } 63 | }, 64 | "/shutdown": { 65 | "post": { 66 | "summary": "Start Shutdown", 67 | "responses": { 68 | "200": { 69 | "content": { 70 | "application/json": { 71 | "schema": { 72 | "title": "Response Start Shutdown Shutdown Post" 73 | } 74 | } 75 | }, 76 | "description": "Successful Response" 77 | } 78 | }, 79 | "operationId": "start_shutdown_shutdown_post" 80 | } 81 | }, 82 | "/predictions": { 83 
| "post": { 84 | "summary": "Predict", 85 | "responses": { 86 | "200": { 87 | "content": { 88 | "application/json": { 89 | "schema": { 90 | "$ref": "#/components/schemas/PredictionResponse" 91 | } 92 | } 93 | }, 94 | "description": "Successful Response" 95 | }, 96 | "422": { 97 | "content": { 98 | "application/json": { 99 | "schema": { 100 | "$ref": "#/components/schemas/HTTPValidationError" 101 | } 102 | } 103 | }, 104 | "description": "Validation Error" 105 | } 106 | }, 107 | "parameters": [ 108 | { 109 | "in": "header", 110 | "name": "prefer", 111 | "schema": { 112 | "type": "string", 113 | "title": "Prefer" 114 | }, 115 | "required": false 116 | } 117 | ], 118 | "description": "Run a single prediction on the model", 119 | "operationId": "predict_predictions_post", 120 | "requestBody": { 121 | "content": { 122 | "application/json": { 123 | "schema": { 124 | "$ref": "#/components/schemas/PredictionRequest" 125 | } 126 | } 127 | } 128 | } 129 | } 130 | }, 131 | "/health-check": { 132 | "get": { 133 | "summary": "Healthcheck", 134 | "responses": { 135 | "200": { 136 | "content": { 137 | "application/json": { 138 | "schema": { 139 | "title": "Response Healthcheck Health Check Get" 140 | } 141 | } 142 | }, 143 | "description": "Successful Response" 144 | } 145 | }, 146 | "operationId": "healthcheck_health_check_get" 147 | } 148 | }, 149 | "/predictions/{prediction_id}": { 150 | "put": { 151 | "summary": "Predict Idempotent", 152 | "responses": { 153 | "200": { 154 | "content": { 155 | "application/json": { 156 | "schema": { 157 | "$ref": "#/components/schemas/PredictionResponse" 158 | } 159 | } 160 | }, 161 | "description": "Successful Response" 162 | }, 163 | "422": { 164 | "content": { 165 | "application/json": { 166 | "schema": { 167 | "$ref": "#/components/schemas/HTTPValidationError" 168 | } 169 | } 170 | }, 171 | "description": "Validation Error" 172 | } 173 | }, 174 | "parameters": [ 175 | { 176 | "in": "path", 177 | "name": "prediction_id", 178 | "schema": { 
179 | "type": "string", 180 | "title": "Prediction ID" 181 | }, 182 | "required": true 183 | }, 184 | { 185 | "in": "header", 186 | "name": "prefer", 187 | "schema": { 188 | "type": "string", 189 | "title": "Prefer" 190 | }, 191 | "required": false 192 | } 193 | ], 194 | "description": "Run a single prediction on the model (idempotent creation).", 195 | "operationId": "predict_idempotent_predictions__prediction_id__put", 196 | "requestBody": { 197 | "content": { 198 | "application/json": { 199 | "schema": { 200 | "allOf": [ 201 | { 202 | "$ref": "#/components/schemas/PredictionRequest" 203 | } 204 | ], 205 | "title": "Prediction Request" 206 | } 207 | } 208 | }, 209 | "required": true 210 | } 211 | } 212 | }, 213 | "/predictions/{prediction_id}/cancel": { 214 | "post": { 215 | "summary": "Cancel", 216 | "responses": { 217 | "200": { 218 | "content": { 219 | "application/json": { 220 | "schema": { 221 | "title": "Response Cancel Predictions Prediction Id Cancel Post" 222 | } 223 | } 224 | }, 225 | "description": "Successful Response" 226 | }, 227 | "422": { 228 | "content": { 229 | "application/json": { 230 | "schema": { 231 | "$ref": "#/components/schemas/HTTPValidationError" 232 | } 233 | } 234 | }, 235 | "description": "Validation Error" 236 | } 237 | }, 238 | "parameters": [ 239 | { 240 | "in": "path", 241 | "name": "prediction_id", 242 | "schema": { 243 | "type": "string", 244 | "title": "Prediction ID" 245 | }, 246 | "required": true 247 | } 248 | ], 249 | "description": "Cancel a running prediction", 250 | "operationId": "cancel_predictions__prediction_id__cancel_post" 251 | } 252 | } 253 | }, 254 | "openapi": "3.0.2", 255 | "components": { 256 | "schemas": { 257 | "Input": { 258 | "type": "object", 259 | "title": "Input", 260 | "required": [ 261 | "image" 262 | ], 263 | "properties": { 264 | "image": { 265 | "type": "string", 266 | "title": "Image", 267 | "format": "uri", 268 | "x-order": 0, 269 | "description": "Grayscale input image" 270 | }, 271 | 
"prompt": { 272 | "type": "string", 273 | "title": "Prompt", 274 | "default": "caption es", 275 | "x-order": 1, 276 | "description": "Input prompt" 277 | } 278 | } 279 | }, 280 | "Output": { 281 | "type": "string", 282 | "title": "Output" 283 | }, 284 | "Status": { 285 | "enum": [ 286 | "starting", 287 | "processing", 288 | "succeeded", 289 | "canceled", 290 | "failed" 291 | ], 292 | "type": "string", 293 | "title": "Status", 294 | "description": "An enumeration." 295 | }, 296 | "WebhookEvent": { 297 | "enum": [ 298 | "start", 299 | "output", 300 | "logs", 301 | "completed" 302 | ], 303 | "type": "string", 304 | "title": "WebhookEvent", 305 | "description": "An enumeration." 306 | }, 307 | "ValidationError": { 308 | "type": "object", 309 | "title": "ValidationError", 310 | "required": [ 311 | "loc", 312 | "msg", 313 | "type" 314 | ], 315 | "properties": { 316 | "loc": { 317 | "type": "array", 318 | "items": { 319 | "anyOf": [ 320 | { 321 | "type": "string" 322 | }, 323 | { 324 | "type": "integer" 325 | } 326 | ] 327 | }, 328 | "title": "Location" 329 | }, 330 | "msg": { 331 | "type": "string", 332 | "title": "Message" 333 | }, 334 | "type": { 335 | "type": "string", 336 | "title": "Error Type" 337 | } 338 | } 339 | }, 340 | "PredictionRequest": { 341 | "type": "object", 342 | "title": "PredictionRequest", 343 | "properties": { 344 | "id": { 345 | "type": "string", 346 | "title": "Id" 347 | }, 348 | "input": { 349 | "$ref": "#/components/schemas/Input" 350 | }, 351 | "webhook": { 352 | "type": "string", 353 | "title": "Webhook", 354 | "format": "uri", 355 | "maxLength": 65536, 356 | "minLength": 1 357 | }, 358 | "created_at": { 359 | "type": "string", 360 | "title": "Created At", 361 | "format": "date-time" 362 | }, 363 | "output_file_prefix": { 364 | "type": "string", 365 | "title": "Output File Prefix" 366 | }, 367 | "webhook_events_filter": { 368 | "type": "array", 369 | "items": { 370 | "$ref": "#/components/schemas/WebhookEvent" 371 | }, 372 | "default": [ 373 
| "start", 374 | "output", 375 | "logs", 376 | "completed" 377 | ] 378 | } 379 | } 380 | }, 381 | "PredictionResponse": { 382 | "type": "object", 383 | "title": "PredictionResponse", 384 | "properties": { 385 | "id": { 386 | "type": "string", 387 | "title": "Id" 388 | }, 389 | "logs": { 390 | "type": "string", 391 | "title": "Logs", 392 | "default": "" 393 | }, 394 | "error": { 395 | "type": "string", 396 | "title": "Error" 397 | }, 398 | "input": { 399 | "$ref": "#/components/schemas/Input" 400 | }, 401 | "output": { 402 | "$ref": "#/components/schemas/Output" 403 | }, 404 | "status": { 405 | "$ref": "#/components/schemas/Status" 406 | }, 407 | "metrics": { 408 | "type": "object", 409 | "title": "Metrics" 410 | }, 411 | "version": { 412 | "type": "string", 413 | "title": "Version" 414 | }, 415 | "created_at": { 416 | "type": "string", 417 | "title": "Created At", 418 | "format": "date-time" 419 | }, 420 | "started_at": { 421 | "type": "string", 422 | "title": "Started At", 423 | "format": "date-time" 424 | }, 425 | "completed_at": { 426 | "type": "string", 427 | "title": "Completed At", 428 | "format": "date-time" 429 | } 430 | } 431 | }, 432 | "HTTPValidationError": { 433 | "type": "object", 434 | "title": "HTTPValidationError", 435 | "properties": { 436 | "detail": { 437 | "type": "array", 438 | "items": { 439 | "$ref": "#/components/schemas/ValidationError" 440 | }, 441 | "title": "Detail" 442 | } 443 | } 444 | } 445 | } 446 | } 447 | } 448 | } 449 | } -------------------------------------------------------------------------------- /schemas/lucataco_qwen-vl-chat.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://replicate.com/lucataco/qwen-vl-chat", 3 | "owner": "lucataco", 4 | "name": "qwen-vl-chat", 5 | "description": "A multimodal LLM-based AI assistant, which is trained with alignment techniques. 
Qwen-VL-Chat supports more flexible interaction, such as multi-round question answering, and creative capabilities.", 6 | "visibility": "public", 7 | "github_url": "https://github.com/lucataco/cog-qwen-vl-chat", 8 | "paper_url": "https://huggingface.co/papers/2308.12966", 9 | "license_url": null, 10 | "run_count": 0, 11 | "cover_image_url": "https://tjzk.replicate.delivery/models_models_cover_image/7cd09060-b91e-4261-a03e-ed772aa2e044/qwen.jpg", 12 | "default_example": { 13 | "id": "hkeljvlbsmzjhsuoxqupubwi44", 14 | "model": "lucataco/qwen-vl-chat", 15 | "version": "50881b153b4d5f72b3db697e2bbad23bb1277ab741c5b52d80cd6ee17ea660e9", 16 | "status": "succeeded", 17 | "input": { 18 | "image": "https://replicate.delivery/pbxt/JSwt0WCMKtolbjYYo6WYIE01Iemz3etQD6ugKxxeiVVlMgjF/Menu.jpeg", 19 | "prompt": "How much would I pay if I want to order two Salmon Burger and three Meat Lover\\'s Pizza? Think carefully step by step." 20 | }, 21 | "output": "If you want to order two Salmon Burgers and three Meat Lover's Pizzas, the total cost would depend on the price of each item on the menu. \n\nLet's assume that the price of a Salmon Burger is $10 and the price of a Meat Lover's Pizza is $12. 
In this case, the total cost for two Salmon Burgers would be $20 and the total cost for three Meat Lover's Pizzas would be $36.\n\nSo, the total cost for two Salmon Burgers and three Meat Lover's Pizzas would be $56 ($20 for two Salmon Burgers + $36 for three Meat Lover's Pizzas).", 22 | "logs": null, 23 | "error": null, 24 | "metrics": { 25 | "predict_time": 6.278623, 26 | "total_time": 271.134786 27 | }, 28 | "created_at": "2023-10-15T03:37:52.522283Z", 29 | "started_at": "2023-10-15T03:42:17.378446Z", 30 | "completed_at": "2023-10-15T03:42:23.657069Z", 31 | "urls": { 32 | "get": "https://api.replicate.com/v1/predictions/hkeljvlbsmzjhsuoxqupubwi44", 33 | "cancel": "https://api.replicate.com/v1/predictions/hkeljvlbsmzjhsuoxqupubwi44/cancel" 34 | } 35 | }, 36 | "latest_version": { 37 | "id": "50881b153b4d5f72b3db697e2bbad23bb1277ab741c5b52d80cd6ee17ea660e9", 38 | "created_at": "2023-10-15T03:37:42.989695+00:00", 39 | "cog_version": "0.8.6", 40 | "openapi_schema": { 41 | "info": { 42 | "title": "Cog", 43 | "version": "0.1.0" 44 | }, 45 | "paths": { 46 | "/": { 47 | "get": { 48 | "summary": "Root", 49 | "responses": { 50 | "200": { 51 | "content": { 52 | "application/json": { 53 | "schema": { 54 | "title": "Response Root Get" 55 | } 56 | } 57 | }, 58 | "description": "Successful Response" 59 | } 60 | }, 61 | "operationId": "root__get" 62 | } 63 | }, 64 | "/shutdown": { 65 | "post": { 66 | "summary": "Start Shutdown", 67 | "responses": { 68 | "200": { 69 | "content": { 70 | "application/json": { 71 | "schema": { 72 | "title": "Response Start Shutdown Shutdown Post" 73 | } 74 | } 75 | }, 76 | "description": "Successful Response" 77 | } 78 | }, 79 | "operationId": "start_shutdown_shutdown_post" 80 | } 81 | }, 82 | "/predictions": { 83 | "post": { 84 | "summary": "Predict", 85 | "responses": { 86 | "200": { 87 | "content": { 88 | "application/json": { 89 | "schema": { 90 | "$ref": "#/components/schemas/PredictionResponse" 91 | } 92 | } 93 | }, 94 | "description": 
"Successful Response" 95 | }, 96 | "422": { 97 | "content": { 98 | "application/json": { 99 | "schema": { 100 | "$ref": "#/components/schemas/HTTPValidationError" 101 | } 102 | } 103 | }, 104 | "description": "Validation Error" 105 | } 106 | }, 107 | "parameters": [ 108 | { 109 | "in": "header", 110 | "name": "prefer", 111 | "schema": { 112 | "type": "string", 113 | "title": "Prefer" 114 | }, 115 | "required": false 116 | } 117 | ], 118 | "description": "Run a single prediction on the model", 119 | "operationId": "predict_predictions_post", 120 | "requestBody": { 121 | "content": { 122 | "application/json": { 123 | "schema": { 124 | "$ref": "#/components/schemas/PredictionRequest" 125 | } 126 | } 127 | } 128 | } 129 | } 130 | }, 131 | "/health-check": { 132 | "get": { 133 | "summary": "Healthcheck", 134 | "responses": { 135 | "200": { 136 | "content": { 137 | "application/json": { 138 | "schema": { 139 | "title": "Response Healthcheck Health Check Get" 140 | } 141 | } 142 | }, 143 | "description": "Successful Response" 144 | } 145 | }, 146 | "operationId": "healthcheck_health_check_get" 147 | } 148 | }, 149 | "/predictions/{prediction_id}": { 150 | "put": { 151 | "summary": "Predict Idempotent", 152 | "responses": { 153 | "200": { 154 | "content": { 155 | "application/json": { 156 | "schema": { 157 | "$ref": "#/components/schemas/PredictionResponse" 158 | } 159 | } 160 | }, 161 | "description": "Successful Response" 162 | }, 163 | "422": { 164 | "content": { 165 | "application/json": { 166 | "schema": { 167 | "$ref": "#/components/schemas/HTTPValidationError" 168 | } 169 | } 170 | }, 171 | "description": "Validation Error" 172 | } 173 | }, 174 | "parameters": [ 175 | { 176 | "in": "path", 177 | "name": "prediction_id", 178 | "schema": { 179 | "type": "string", 180 | "title": "Prediction ID" 181 | }, 182 | "required": true 183 | }, 184 | { 185 | "in": "header", 186 | "name": "prefer", 187 | "schema": { 188 | "type": "string", 189 | "title": "Prefer" 190 | }, 191 | 
"required": false 192 | } 193 | ], 194 | "description": "Run a single prediction on the model (idempotent creation).", 195 | "operationId": "predict_idempotent_predictions__prediction_id__put", 196 | "requestBody": { 197 | "content": { 198 | "application/json": { 199 | "schema": { 200 | "allOf": [ 201 | { 202 | "$ref": "#/components/schemas/PredictionRequest" 203 | } 204 | ], 205 | "title": "Prediction Request" 206 | } 207 | } 208 | }, 209 | "required": true 210 | } 211 | } 212 | }, 213 | "/predictions/{prediction_id}/cancel": { 214 | "post": { 215 | "summary": "Cancel", 216 | "responses": { 217 | "200": { 218 | "content": { 219 | "application/json": { 220 | "schema": { 221 | "title": "Response Cancel Predictions Prediction Id Cancel Post" 222 | } 223 | } 224 | }, 225 | "description": "Successful Response" 226 | }, 227 | "422": { 228 | "content": { 229 | "application/json": { 230 | "schema": { 231 | "$ref": "#/components/schemas/HTTPValidationError" 232 | } 233 | } 234 | }, 235 | "description": "Validation Error" 236 | } 237 | }, 238 | "parameters": [ 239 | { 240 | "in": "path", 241 | "name": "prediction_id", 242 | "schema": { 243 | "type": "string", 244 | "title": "Prediction ID" 245 | }, 246 | "required": true 247 | } 248 | ], 249 | "description": "Cancel a running prediction", 250 | "operationId": "cancel_predictions__prediction_id__cancel_post" 251 | } 252 | } 253 | }, 254 | "openapi": "3.0.2", 255 | "components": { 256 | "schemas": { 257 | "Input": { 258 | "type": "object", 259 | "title": "Input", 260 | "required": [ 261 | "image" 262 | ], 263 | "properties": { 264 | "image": { 265 | "type": "string", 266 | "title": "Image", 267 | "format": "uri", 268 | "x-order": 0, 269 | "description": "Input image" 270 | }, 271 | "prompt": { 272 | "type": "string", 273 | "title": "Prompt", 274 | "default": "What is the name of the movie in the poster?", 275 | "x-order": 1, 276 | "description": "Question" 277 | } 278 | } 279 | }, 280 | "Output": { 281 | "type": "string", 282 
| "title": "Output" 283 | }, 284 | "Status": { 285 | "enum": [ 286 | "starting", 287 | "processing", 288 | "succeeded", 289 | "canceled", 290 | "failed" 291 | ], 292 | "type": "string", 293 | "title": "Status", 294 | "description": "An enumeration." 295 | }, 296 | "WebhookEvent": { 297 | "enum": [ 298 | "start", 299 | "output", 300 | "logs", 301 | "completed" 302 | ], 303 | "type": "string", 304 | "title": "WebhookEvent", 305 | "description": "An enumeration." 306 | }, 307 | "ValidationError": { 308 | "type": "object", 309 | "title": "ValidationError", 310 | "required": [ 311 | "loc", 312 | "msg", 313 | "type" 314 | ], 315 | "properties": { 316 | "loc": { 317 | "type": "array", 318 | "items": { 319 | "anyOf": [ 320 | { 321 | "type": "string" 322 | }, 323 | { 324 | "type": "integer" 325 | } 326 | ] 327 | }, 328 | "title": "Location" 329 | }, 330 | "msg": { 331 | "type": "string", 332 | "title": "Message" 333 | }, 334 | "type": { 335 | "type": "string", 336 | "title": "Error Type" 337 | } 338 | } 339 | }, 340 | "PredictionRequest": { 341 | "type": "object", 342 | "title": "PredictionRequest", 343 | "properties": { 344 | "id": { 345 | "type": "string", 346 | "title": "Id" 347 | }, 348 | "input": { 349 | "$ref": "#/components/schemas/Input" 350 | }, 351 | "webhook": { 352 | "type": "string", 353 | "title": "Webhook", 354 | "format": "uri", 355 | "maxLength": 65536, 356 | "minLength": 1 357 | }, 358 | "created_at": { 359 | "type": "string", 360 | "title": "Created At", 361 | "format": "date-time" 362 | }, 363 | "output_file_prefix": { 364 | "type": "string", 365 | "title": "Output File Prefix" 366 | }, 367 | "webhook_events_filter": { 368 | "type": "array", 369 | "items": { 370 | "$ref": "#/components/schemas/WebhookEvent" 371 | }, 372 | "default": [ 373 | "start", 374 | "output", 375 | "logs", 376 | "completed" 377 | ] 378 | } 379 | } 380 | }, 381 | "PredictionResponse": { 382 | "type": "object", 383 | "title": "PredictionResponse", 384 | "properties": { 385 | "id": { 
386 | "type": "string", 387 | "title": "Id" 388 | }, 389 | "logs": { 390 | "type": "string", 391 | "title": "Logs", 392 | "default": "" 393 | }, 394 | "error": { 395 | "type": "string", 396 | "title": "Error" 397 | }, 398 | "input": { 399 | "$ref": "#/components/schemas/Input" 400 | }, 401 | "output": { 402 | "$ref": "#/components/schemas/Output" 403 | }, 404 | "status": { 405 | "$ref": "#/components/schemas/Status" 406 | }, 407 | "metrics": { 408 | "type": "object", 409 | "title": "Metrics" 410 | }, 411 | "version": { 412 | "type": "string", 413 | "title": "Version" 414 | }, 415 | "created_at": { 416 | "type": "string", 417 | "title": "Created At", 418 | "format": "date-time" 419 | }, 420 | "started_at": { 421 | "type": "string", 422 | "title": "Started At", 423 | "format": "date-time" 424 | }, 425 | "completed_at": { 426 | "type": "string", 427 | "title": "Completed At", 428 | "format": "date-time" 429 | } 430 | } 431 | }, 432 | "HTTPValidationError": { 433 | "type": "object", 434 | "title": "HTTPValidationError", 435 | "properties": { 436 | "detail": { 437 | "type": "array", 438 | "items": { 439 | "$ref": "#/components/schemas/ValidationError" 440 | }, 441 | "title": "Detail" 442 | } 443 | } 444 | } 445 | } 446 | } 447 | } 448 | } 449 | } -------------------------------------------------------------------------------- /schemas/lucataco_sdxl-clip-interrogator.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://replicate.com/lucataco/sdxl-clip-interrogator", 3 | "owner": "lucataco", 4 | "name": "sdxl-clip-interrogator", 5 | "description": "CLIP Interrogator for SDXL optimizes text prompts to match a given image", 6 | "visibility": "public", 7 | "github_url": "https://github.com/lucataco/cog-sdxl-clip-interrogator", 8 | "paper_url": null, 9 | "license_url": "https://github.com/pharmapsychotic/clip-interrogator/blob/main/LICENSE", 10 | "run_count": 0, 11 | "cover_image_url": 
"https://tjzk.replicate.delivery/models_models_cover_image/c200f919-4cc1-412b-8edf-e2863a5eef56/replicate-sdxl-inter.png", 12 | "default_example": { 13 | "id": "xwzgcke3tsrgp0cfg9hsapefg4", 14 | "model": "lucataco/sdxl-clip-interrogator", 15 | "version": "b8dd624ad312d215250b362af0ecff05d7ad4f8270f9beb034c483d70682e7b3", 16 | "status": "succeeded", 17 | "input": { 18 | "mode": "fast", 19 | "image": "https://replicate.delivery/pbxt/JLnoDw8UCQGRPTMy9zpMqs8g7EhVt1X0tEQrM4JFOdpibokp/replicate-sdxl-inter.png" 20 | }, 21 | "output": "a painting of a turtle swimming in the ocean, high detailed illustration, colored illustration for tattoo, highly detailed illustration.”, watercolor artwork of exotic, watercolor artstyle, watercolor digital painting, watercolor colored painting, detailed watercolor painting, colorful watercolor painting, colored illustration, watercolor illustration style, digital art highly detailed, highly detailed water colour 8 k, highly detailed water colour 8k", 22 | "logs": "0%| | 0/55 [00:00