├── .github
│   └── workflow
│       └── workflow.yml
├── .gitignore
├── .pdm-python
├── .pdmignore
├── DrawBridgeAPI
│   ├── __init__.py
│   ├── api_server.py
│   ├── app.py
│   ├── backend
│   │   ├── FLUX_falai.py
│   │   ├── FLUX_replicate.py
│   │   ├── SD_A1111_webui.py
│   │   ├── SD_civitai_API.py
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── comfyui.py
│   │   ├── liblibai.py
│   │   ├── midjourney.py
│   │   ├── novelai.py
│   │   ├── seaart.py
│   │   ├── tusiart.py
│   │   └── yunjie.py
│   ├── base_config.py
│   ├── comfyui_workflows
│   │   ├── diaopony-hr.json
│   │   ├── diaopony-hr_reflex.json
│   │   ├── diaopony-tipo.json
│   │   ├── diaopony-tipo_reflex.json
│   │   ├── flux-dev.json
│   │   ├── flux-dev_reflex.json
│   │   ├── flux-schnell.json
│   │   ├── flux-schnell_reflex.json
│   │   ├── flux修手.json
│   │   ├── flux修手_reflex.json
│   │   ├── sd3.5_txt2img.json
│   │   ├── sd3.5_txt2img_reflex.json
│   │   ├── sdbase_img2img.json
│   │   ├── sdbase_img2img_reflex.json
│   │   ├── sdbase_txt2img.json
│   │   ├── sdbase_txt2img_hr_fix.json
│   │   ├── sdbase_txt2img_hr_fix_reflex.json
│   │   ├── sdbase_txt2img_reflex.json
│   │   └── 创意融字 工作流Jianan_创意融字海报.json
│   ├── config_example.yaml
│   ├── dbapi.py
│   ├── locales
│   │   ├── __init__.py
│   │   └── zh_CN
│   │       └── LC_MESSAGES
│   │           ├── messages.mo
│   │           └── messages.po
│   ├── ui
│   │   └── __init__.py
│   └── utils
│       ├── __init__.py
│       ├── custom_class.py
│       ├── exceptions.py
│       ├── llm_caption_requirements.txt
│       ├── llm_captions.py
│       ├── request_model.py
│       ├── shared.py
│       ├── tagger-requirements.txt
│       ├── tagger.py
│       └── topaz.py
├── LICENSE
├── README.md
├── README_ZH.md
├── docs
│   └── API.md
├── images
│   ├── caption.png
│   ├── cover.png
│   ├── gradio.png
│   ├── how_to_enable_backend1.png
│   ├── how_to_enable_backend2.png
│   ├── idle.png
│   ├── posting.png
│   └── working.png
├── pyproject.toml
└── requirements.txt
/.github/workflow/workflow.yml:
--------------------------------------------------------------------------------
name: CI

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Check out the repository
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run tests
        run: |
          pytest
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
venv/
.idea/
__pycache__/
DrawBridgeAPI/__pycache__/
tmp/
.pdm-build/
dist/

*.log
DrawBridgeAPI/config.yaml
TODO

DrawBridgeAPI/saved_images
--------------------------------------------------------------------------------
/.pdm-python:
--------------------------------------------------------------------------------
C:/Users/43701/github/diao/sd_api/venv/Scripts/python.exe
--------------------------------------------------------------------------------
/.pdmignore:
--------------------------------------------------------------------------------
*.log
DrawBridgeAPI/config.yaml
TODO

DrawBridgeAPI/saved_images
--------------------------------------------------------------------------------
/DrawBridgeAPI/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/DrawBridgeAPI/__init__.py
--------------------------------------------------------------------------------
/DrawBridgeAPI/api_server.py:
--------------------------------------------------------------------------------
import base64
import os
import asyncio
import time
import traceback
import json
import itertools
import argparse
import shutil  # used for temp-dir cleanup in topaz_ai
import uvicorn
import logging
import warnings
import uuid
import aiofiles

os.environ['CIVITAI_API_TOKEN'] = 'kunkun'
os.environ['FAL_KEY'] = 'Daisuki'
path_env = os.getenv("CONF_PATH")

from .utils import request_model, topaz, run_later
from .base_config import setup_logger, init_instance

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.exceptions import HTTPException
from pathlib import Path

logger = setup_logger("[API]")
logging.getLogger("uvicorn.access").disabled = True
logging.getLogger("uvicorn.error").disabled = True
logging.getLogger("fastapi").disabled = True

app = FastAPI()

parser = argparse.ArgumentParser(description='Run the FastAPI application.')
parser.add_argument('--host', type=str, default='0.0.0.0',
                    help='The host IP address to listen on (default: 0.0.0.0).')
parser.add_argument('--port', type=int, default=8000,
                    help='The port number to listen on (default: 8000).')
parser.add_argument('--conf', '-c', type=str, default='./config.yaml',
                    help='Path to the configuration file', dest='conf')
parser.add_argument('--working-dir', '-wd', type=str, default='./',
                    help='Working directory', dest='working_dir')
parser.add_argument('--fastapi-log', action="store_true",
                    help='Enable FastAPI/uvicorn access logging', dest='log')

args = parser.parse_args()
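# Example invocation (illustrative, using the flags defined above):
#   python -m DrawBridgeAPI.api_server --host 127.0.0.1 --port 8000 -c ./config.yaml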
port = args.port
host = args.host
config_file_path = path_env or args.conf

from .locales import _

init_instance.init(config_file_path)
config = init_instance.config
redis_client = init_instance.redis_client
os.chdir(args.working_dir)
logger.info(_("Working directory: {0}").format(os.getcwd()))

from .backend import TaskHandler, Backend, StaticHandler

warnings.filterwarnings("ignore", category=DeprecationWarning)


class Api:
    def __init__(self):
        self.app = app
        self.backend_instance = Backend()

        self.add_api_route(
            "/sdapi/v1/txt2img",
            self.txt2img_api,
            methods=["POST"],
            # response_model=request_model.Txt2ImgRequest
        )
        self.add_api_route(
            "/sdapi/v1/img2img",
            self.img2img_api,
            methods=["POST"],
            # response_model=request_model.Img2ImgRequest
        )
        self.add_api_route(
            "/sdapi/v1/sd-models",
            self.get_sd_models,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/progress",
            self.get_progress,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/memory",
            self.get_memory,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/options",
            self.get_options,
            methods=["GET"]
        )
        self.add_api_route(
            "/sdapi/v1/options",
            self.set_options,
            methods=["POST"]
        )
        self.add_api_route(
            "/sdapi/v1/prompt-styles",
            self.get_prompt_styles,
            methods=["GET"]
        )

        if config.server_settings['build_in_tagger']:

            from .utils.tagger import wd_tagger_handler, wd_logger
            self.add_api_route(
                "/tagger/v1/interrogate",
                self.tagger,
                methods=["POST"],
                response_model=request_model.TaggerRequest
            )

        if config.server_settings['llm_caption']['enable']:
            from .utils.llm_captions import llm_logger, joy_caption_handler
            self.add_api_route(
                "/llm/caption",
                self.llm_caption,
                methods=["POST"],
                response_model=request_model.TaggerRequest
            )

        if config.server_settings['build_in_photoai']['exec_path']:
            self.add_api_route(
                "/topazai/image",
                self.topaz_ai,
                methods=["POST"]
            )

    def add_api_route(self, path: str, endpoint, **kwargs):
        return self.app.add_api_route(path, endpoint, **kwargs)

    @staticmethod
    async def generate_handle(data) -> TaskHandler:

        model_to_backend = None
        if data['override_settings'].get("sd_model_checkpoint", None):
            model_to_backend = data['override_settings'].get("sd_model_checkpoint", None)

        styles = data.get('styles', [])
        selected_style = []
        selected_comfyui_style = []

        if styles:
            api_styles = StaticHandler.get_prompt_style()
            for i in api_styles:
                for style in styles:
                    if not style.strip():
                        continue
                    if style in i['name']:
                        if 'comfyui' in i['name']:
                            logger.info(f"{_('Selected ComfyUI style')} - {i['name']}")
                            selected_comfyui_style.append(i['name'])
                        else:
                            # Keep the whole style dict so its prompt fields can be appended below
                            selected_style.append(i)

        if selected_style:
            for i in selected_style:
                data['prompt'] = data.get('prompt', '') + i['prompt']
                data['negative_prompt'] = data.get('negative_prompt', '') + i['negative_prompt']

        task_handler = TaskHandler(
            data,
            model_to_backend=model_to_backend,
            comfyui_json=selected_comfyui_style[0].replace('comfyui-work-flows-', '') if selected_comfyui_style else None
        )

        return task_handler

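    # For reference, a prompt-style entry as consumed by generate_handle above
    # (shape inferred from this file, mirroring the A1111 /sdapi/v1/prompt-styles format;
    # values are illustrative):
    #   {"name": "anime", "prompt": "masterpiece, ", "negative_prompt": "lowres, "}
    # ComfyUI workflows appear as styles whose names start with "comfyui-work-flows-".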
    @staticmethod
    async def txt2img_api(request: request_model.Txt2ImgRequest, api: Request):

        data = request.model_dump()
        client_host = api.client.host

        task_handler = await Api.generate_handle(data)

        try:
            logger.info(f"{_('Exec TXT2IMG')} - {client_host}")
            result = await task_handler.txt2img()
        except Exception as e:
            logger.error(traceback.format_exc())
            raise HTTPException(status_code=500, detail=str(e))

        if result is None:
            raise HTTPException(500, detail='Result not found')

        return result

    @staticmethod
    async def img2img_api(request: request_model.Img2ImgRequest, api: Request):
        data = request.model_dump()
        client_host = api.client.host

        if len(data['init_images']) == 0:
            raise HTTPException(status_code=400, detail=_('IMG2IMG Requires image to start'))

        task_handler = await Api.generate_handle(data)

        try:
            logger.info(f"{_('Exec IMG2IMG')} - {client_host}")
            result = await task_handler.img2img()
        except Exception as e:
            logger.error(traceback.format_exc())
            raise HTTPException(status_code=500, detail=str(e))

        if result is None:
            raise HTTPException(500, detail='Result not found')

        return result

    @staticmethod
    async def get_sd_models():

        task_list = []
        path = '/sdapi/v1/sd-models'

        task_handler = TaskHandler({}, None, path, reutrn_instance=True, override_model_select=True)
        instance_list: list[Backend] = await task_handler.sd_api()

        for i in instance_list:
            task_list.append(i.get_models())
        resp = await asyncio.gather(*task_list)

        models_dict = {}
        api_respond = []
        for i in resp:
            models_dict = models_dict | i
            api_respond = api_respond + list(i.values())

        api_respond = list(itertools.chain.from_iterable(api_respond))

        redis_resp: bytes = redis_client.get('models')
        # Guard against a cold cache: fall back to an empty dict if the key is missing
        redis_resp: dict = json.loads(redis_resp.decode('utf-8')) if redis_resp else {}
        redis_resp.update(models_dict)
        redis_client.set('models', json.dumps(redis_resp))
        return api_respond

    async def tagger(self, request: request_model.TaggerRequest):
        from .utils.tagger import wd_tagger_handler, wd_logger

        data = request.model_dump()
        base64_image = await self.download_img_from_url(data)
        caption = await wd_tagger_handler.tagger_main(base64_image, data['threshold'], data['exclude_tags'])
        resp = {}

        resp['caption'] = caption
        wd_logger.info(f"{_('Caption Successful')}, {caption}")
        return JSONResponse(resp)

    async def llm_caption(self, request: request_model.TaggerRequest):

        from .utils.llm_captions import llm_logger, joy_caption_handler
        from .utils.tagger import wd_tagger_handler, wd_logger

        data = request.model_dump()
        base64_image = await self.download_img_from_url(data)

        try:
            caption = await joy_caption_handler.get_caption(base64_image, data['exclude_tags'])
        except Exception as e:
            traceback.print_exc()
            raise HTTPException(status_code=500, detail=str(e))

        resp = {}

        resp['llm'] = caption
        llm_logger.info(f"{_('Caption Successful')}, {caption}")
        # caption = await wd_tagger_handler.tagger_main(
        #     base64_image,
        #     data['threshold'],
        #     data['exclude_tags']
        # )
        #
        # resp['caption'] = caption
        # wd_logger.info(f"Caption successful, {caption}")
        return JSONResponse(resp)

    async def get_progress(self):
        return JSONResponse(self.backend_instance.format_progress_api_resp(0.0, time.time()))

    async def get_memory(self):
        return JSONResponse(self.backend_instance.format_vram_api_resp())

    @staticmethod
    async def get_options():
        return JSONResponse(StaticHandler.get_backend_options())

    @staticmethod
    async def set_options(request: request_model.SetConfigRequest):

        data = request.model_dump()
        if data.get('sd_model_checkpoint', None):
            logger.info(_("Lock to backend has configured"))
            StaticHandler.set_lock_to_backend(data.get('sd_model_checkpoint'))

        return

    @staticmethod
    async def topaz_ai(request: request_model.TopazAiRequest):
        data = request.model_dump()

        unique_id = str(uuid.uuid4())
        save_dir = Path("saved_images") / unique_id
        processed_dir = save_dir / 'processed'
        save_dir.mkdir(parents=True, exist_ok=True)
        del data['output_folder']

        output, error, return_code = None, None, -1

        try:

            if data['image']:
                base64_image = data['image']
                input_image_path = save_dir / f"{unique_id}_image.png"
                async with aiofiles.open(input_image_path, "wb") as image_file:
                    await image_file.write(base64.b64decode(base64_image))
                # run_in_executor expects a callable, so wrap the blocking call in a lambda
                output, error, return_code = await asyncio.get_running_loop().run_in_executor(
                    None, lambda: topaz.run_tpai(
                        input_folder=str(save_dir.resolve()),
                        output_folder=str(processed_dir.resolve()),
                        **data
                    )
                )
            elif data['input_folder']:
                output, error, return_code = await asyncio.get_running_loop().run_in_executor(
                    None, lambda: topaz.run_tpai(
                        output_folder=str(processed_dir.resolve()),
                        **data
                    )
                )
        except Exception:
            traceback.print_exc()
            raise HTTPException(status_code=500, detail="Error occurred while processing the image.")

        if return_code == 0:
            files = list(processed_dir.glob("*"))

            if files and files[0].exists():
                processed_image_path = files[0]
                async with aiofiles.open(processed_image_path, "rb") as img_file:
                    encoded_image = base64.b64encode(await img_file.read()).decode('utf-8')
                # Remove the whole temp directory; rmdir() alone would fail on a non-empty dir
                shutil.rmtree(save_dir, ignore_errors=True)
                return {"status": "success", "image": encoded_image}
            else:
                raise HTTPException(status_code=500, detail="Processed image not found.")
        else:
            raise HTTPException(status_code=500, detail=f"Error: {error}")

    async def download_img_from_url(self, data):

        base64_image = data['image']

        if data['image'].startswith("http"):
            image_url = data['image']
            logger.info(f"{_('URL detected')}: {image_url}")
            response = await self.backend_instance.http_request(
                "GET",
                image_url,
                format=False
            )

            if response.status_code != 200:
                logger.warning(_("Image download failed!"))

            base64_image = base64.b64encode(response.read())

        return base64_image

    @staticmethod
    async def get_prompt_styles():

        task_list = []
        path = '/sdapi/v1/prompt-styles'

        task_handler = TaskHandler({}, None, path, reutrn_instance=True, override_model_select=True)
        instance_list: list[Backend] = await task_handler.sd_api()

        for i in instance_list:
            task_list.append(i.get_all_prompt_style())
        resp = await asyncio.gather(*task_list)

        api_respond = []
        for i in resp:
            api_respond += i

        StaticHandler.set_prompt_style(api_respond)

        return api_respond

    async def init_api(self):
        await self.get_sd_models()
        await self.get_prompt_styles()


api_instance = Api()


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
async def proxy(path: str, request: Request):
    client_host = request.client.host

    task_handler = TaskHandler({}, request, path)

    try:
        logger.info(f"{_('Exec forwarding')} - {client_host}")
        result = await task_handler.sd_api()
    except Exception as e:
        logger.error(traceback.format_exc())
        raise HTTPException(500, detail=str(e))

    if result is None:
        raise HTTPException(500, detail='Result not found')

    return result


@app.get("/backend-control")
async def get_backend_control(backend: str, key: str, value: bool):
    pass


@app.on_event("startup")
async def startup_event():
    logger.info(_('Waiting for API initialization'))
    await api_instance.init_api()
    logger.info(_('API initialization completed'))


if __name__ == "__main__":

    # if config.server_settings['start_gradio']:
    #     demo = create_gradio_interface(host, port)
    #     app = gradio.mount_gradio_app(api_instance.app, demo, path="/")

    uvicorn.run(api_instance.app, host=host, port=port, log_level="critical" if not args.log else "info")

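# A minimal client sketch (assumes the server is running on the defaults above;
# the response follows the A1111-style schema with a base64-encoded "images" list):
#
#   import httpx
#   payload = {"prompt": "1girl", "steps": 20, "width": 512, "height": 512}
#   r = httpx.post("http://127.0.0.1:8000/sdapi/v1/txt2img", json=payload, timeout=600)
#   images = r.json()["images"]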
--------------------------------------------------------------------------------
/DrawBridgeAPI/app.py:
--------------------------------------------------------------------------------
import json
import asyncio
import subprocess

import gradio as gr
import os
os.environ['CIVITAI_API_TOKEN'] = 'kunkun'
os.environ['FAL_KEY'] = 'Daisuki'
os.environ['CONF_PATH'] = './config.yaml'
from PIL import Image

import io
import base64
import httpx
from .base_config import init_instance
from .backend import TaskHandler
from .locales import _


class Gradio:
    def __init__(self, host, port):
        self.host = '127.0.0.1' if host == '0.0.0.0' else host
        self.port = port

    def get_caption(self, image):
        # Pass the JSON body directly; calling json.loads() on a dict raises a TypeError
        caption = httpx.post(
            f"http://{self.host}:{self.port}/tagger/v1/interrogate",
            json={"image": image}, timeout=600).json()
        return caption


def format_caption_output(caption_result):
    llm_text = caption_result.get("llm", '')
    word_scores = "\n".join([f"{word}: {score}" for word, score in caption_result["caption"].items()])
    word_ = ",".join([f"{word}" for word in caption_result["caption"].keys()])
    return llm_text, word_scores, word_

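# Expected shape of caption_result (inferred from format_caption_output above;
# values are illustrative):
#   {"llm": "a girl standing in the rain", "caption": {"1girl": 0.99, "rain": 0.87}}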

async def create_gradio_interface(host, port):

    gradio_api = Gradio(host, port)
    from .api_server import api_instance
    all_models = [i['title'] for i in await api_instance.get_sd_models()]
    init_instance.logger.info(f"{_('Server is ready!')} Listen on {host}:{port}")

    async def get_image(model, prompt, negative_prompt, width, height, cfg_scale, steps):

        payload = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "width": width,
            "height": height,
            "steps": steps,
            "cfg_scale": cfg_scale
        }

        task_handler = TaskHandler(payload, model_to_backend=model)
        result = await task_handler.txt2img()
        image_data = result.get("images")[0]
        image = Image.open(io.BytesIO(base64.b64decode(image_data)))
        return image

    with gr.Blocks() as demo:
        with gr.Tab("txt2img"):
            with gr.Row():
                with gr.Column():
                    model = gr.Dropdown(label="Model", choices=all_models)
                    prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
                    negative_prompt = gr.Textbox(label="Negative Prompt",
                                                 placeholder="Enter your negative prompt here...")
                    width = gr.Slider(label="Width", minimum=64, maximum=2048, step=1, value=512)
                    height = gr.Slider(label="Height", minimum=64, maximum=2048, step=1, value=512)
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=30, step=0.1, value=7.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=200, step=1, value=20)
                    generate_button = gr.Button("Generate Image")

                with gr.Column():
                    output_image = gr.Image(label="Generated Image")

            generate_button.click(get_image, [model, prompt, negative_prompt, width, height, cfg_scale, steps],
                                  output_image)

        with gr.Tab("Caption"):
            with gr.Row():
                with gr.Column():
                    input_image = gr.Image(label="Input Image")
                    caption_button = gr.Button("Get Caption")

                with gr.Column():
                    llm_output = gr.Textbox(label="Natural Language Description")
                    word_output_ = gr.Textbox(label="Keywords", lines=6)
                    word_output = gr.Textbox(label="Keywords with Scores", lines=6)

            caption_button.click(
                lambda image: format_caption_output(gradio_api.get_caption(image)),
                inputs=[input_image],
                outputs=[llm_output, word_output, word_output_]
            )

    return demo


async def run_gradio(host, port):
    interface = await create_gradio_interface(host, port)
    interface.launch(server_name=host, server_port=port + 1)
--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/FLUX_falai.py:
--------------------------------------------------------------------------------
import traceback
import piexif
import fal_client
import os

from io import BytesIO
from .base import Backend


class AIDRAW(Backend):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.logger = self.setup_logger('[FLUX-FalAI]')

    async def get_shape(self):

        aspect_ratio = self.width / self.height
        tolerance = 0.05

        def is_close_to_ratio(ratio):
            return abs(aspect_ratio - ratio) < tolerance

        if self.width == self.height:
            return "square"
        elif is_close_to_ratio(4 / 3):
            return "portrait_4_3" if self.height > self.width else "landscape_4_3"
        elif is_close_to_ratio(16 / 9):
            return "portrait_16_9" if self.height > self.width else "landscape_16_9"
        else:
            return "portrait_4_3"

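    # Worked examples for the branches above (illustrative; note that aspect_ratio is
    # width/height, so the "portrait_*" returns inside the ratio branches are only
    # reachable for landscape inputs, and true portrait sizes fall through to the
    # final "portrait_4_3"):
    #   512x512  -> "square"
    #   1024x768 -> "landscape_4_3"
    #   1280x720 -> "landscape_16_9"
    #   768x1024 -> "portrait_4_3" (via the else branch)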
    async def update_progress(self):
        # Overridden: this backend does not report incremental progress
        pass

    async def get_img_comment(self):

        image_data = self.img_btyes[0]
        image_file = BytesIO(image_data)
        image_bytes = image_file.getvalue()
        exif_dict = piexif.load(image_bytes)
        try:
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except Exception:
            return 'No Raw Data'

        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):

        await self.download_img()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):

        os.environ['FAL_KEY'] = self.backend_id
        image_shape = await self.get_shape()
        self.steps = int(self.steps / 3)

        handler = await fal_client.submit_async(
            "fal-ai/flux/schnell",
            arguments={
                "prompt": self.prompt,
                "image_size": image_shape,
                "seed": self.seed,
                "num_inference_steps": self.steps,  # FLUX does not need many steps
                "num_images": self.total_img_count,
                "enable_safety_checker": True
            },
        )

        response = await handler.get()

        try:
            if response['images']:
                images_list = response['images']
                for i in images_list:
                    self.img_url.append(i['url'])
            else:
                raise ValueError("No image was generated; the job may be unfinished or the result unavailable")
        except Exception as e:
            self.fail_on_requesting = True
            self.logger.error(f"API request failed: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()

--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/FLUX_replicate.py:
--------------------------------------------------------------------------------
import traceback
import piexif
import os
import replicate

from io import BytesIO

from .base import Backend


class AIDRAW(Backend):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.logger = self.setup_logger('[FLUX-Replicate]')

    async def get_shape(self):

        aspect_ratio = self.width / self.height
        tolerance = 0.05

        def is_close_to_ratio(ratio):
            return abs(aspect_ratio - ratio) < tolerance

        if self.width == self.height:
            return "1:1"
        elif is_close_to_ratio(16 / 9):
            return "16:9"
        elif is_close_to_ratio(21 / 9):
            return "21:9"
        elif is_close_to_ratio(2 / 3):
            return "2:3"
        elif is_close_to_ratio(3 / 2):
            return "3:2"
        elif is_close_to_ratio(4 / 5):
            return "4:5"
        elif is_close_to_ratio(5 / 4):
            return "5:4"
        elif is_close_to_ratio(9 / 16):
            return "9:16"
        elif is_close_to_ratio(9 / 21):
            return "9:21"
        else:
            return "2:3"

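    # Worked examples for the ratio matching above (illustrative):
    #   1024x1024 -> "1:1"    1280x720 -> "16:9"    720x1280 -> "9:16"
    #   512x768   -> "2:3"    any unmatched ratio falls back to "2:3"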
    async def update_progress(self):
        # Overridden: this backend does not report incremental progress
        pass

    async def get_img_comment(self):

        image_data = self.img_btyes[0]
        image_file = BytesIO(image_data)
        image_bytes = image_file.getvalue()
        exif_dict = piexif.load(image_bytes)
        try:
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except Exception:
            return 'No Raw Data'

        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):
        pass

    async def err_formating_to_sd_style(self):

        await self.download_img()

        self.format_api_respond()

        self.result = self.build_respond

    async def posting(self):

        os.environ['REPLICATE_API_TOKEN'] = self.backend_id
        image_shape = await self.get_shape()

        input_ = {
            "prompt": self.prompt,
            "seed": self.seed,
            "num_outputs": self.total_img_count,
            "aspect_ratio": image_shape,
            "output_format": 'png',
            "output_quality": 90
        }

        output = await replicate.async_run(
            "black-forest-labs/flux-schnell",
            input=input_
        )

        try:
            if output:
                for i in output:
                    self.img_url.append(i)
            else:
                raise ValueError("No image was generated; the job may be unfinished or the result unavailable")
        except Exception as e:
            self.fail_on_requesting = True
            self.logger.error(f"API request failed: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()

--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/SD_A1111_webui.py:
--------------------------------------------------------------------------------
from urllib.parse import urlencode

from .base import Backend


class AIDRAW(Backend):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.logger = self.setup_logger('[SD-A1111]')

    async def exec_login(self):
        login_data = {
            'username': self.current_config['username'][self.count],
            'password': self.current_config['password'][self.count]
        }
        encoded_data = urlencode(login_data)

        response = await self.http_request(
            method="POST",
            target_url=f"{self.backend_id}/login",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "accept": "application/json"
            },
            content=encoded_data,
        )
        if response.get('error') == "error":
            self.logger.warning(f"Backend {self.backend_name} login failed")
            self.fail_on_login = True
            return False, 500
        else:
            self.logger.info(f"Backend {self.backend_name} logged in successfully")
            return True, 200

    async def check_backend_usability(self):

        if self.login:
            resp = await self.exec_login()
            if not resp[0]:
                self.fail_on_login = True
                self.logger.warning(f"Backend {self.backend_name} login failed")
                return False, resp

    async def get_backend_working_progress(self):
        """
        Get the backend's working progress (A1111 by default).
        :return: (progress_json, status_code, backend_id, status_code)
        """
        respond = await self.http_request(
            "GET",
            f"{self.backend_id}/sdapi/v1/options",
            verify=False,
            proxy=False,
            use_aiohttp=False
        )

        self.model = respond['sd_model_checkpoint']
        self.model_hash = respond

        if self.current_config['auth'][self.count]:
            self.login = True
            await self.exec_login()

        api_url = f"{self.backend_id}/sdapi/v1/progress"

        resp = await self.http_request(
            method="GET",
            target_url=api_url,
            format=False
        )

        resp_json = resp.json()
        return resp_json, resp.status_code, self.backend_id, resp.status_code
--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/SD_civitai_API.py:
--------------------------------------------------------------------------------
import traceback
import piexif
import os
import civitai

from io import BytesIO

from .base import Backend


class AIDRAW(Backend):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.logger = self.setup_logger('[Civitai]')

    async def update_progress(self):
        # Overridden: this backend does not report incremental progress
        pass

    async def get_img_comment(self):

        image_data = self.img_btyes[0]
        image_file = BytesIO(image_data)
        image_bytes = image_file.getvalue()
        exif_dict = piexif.load(image_bytes)
        try:
            user_comment = exif_dict['Exif'].get(piexif.ExifIFD.UserComment)
        except KeyError:
            return 'No Raw Data'

        return user_comment.decode('utf-8', errors='ignore')

    async def check_backend_usability(self):

        self.headers['Authorization'] = f"Bearer {self.backend_id}"
        response = await self.http_request(
            method="GET",
            target_url='https://civitai.com/api/v1/models',
            headers=self.headers,
            params=None,
            format=True
        )

        if isinstance(response, dict) and 'error' in response:
            self.fail_on_login = True
            return False
        else:
            resp_json = response
            return True, (resp_json, 200)

    async def err_formating_to_sd_style(self):

        await self.download_img()
        self.format_api_respond()
        self.result = self.build_respond

    async def posting(self):

        self.logger.info(f"Fetching images with {self.backend_id}")

        os.environ['CIVITAI_API_TOKEN'] = self.backend_id
        os.environ['HTTP_PROXY'] = self.config.server_settings['proxy']
        os.environ['HTTPS_PROXY'] = self.config.server_settings['proxy']
        await self.check_backend_usability()

        input_ = {
            "model": "urn:air:sd1:checkpoint:civitai:4201@130072",
            "params": {
                "prompt": self.prompt,
                "negativePrompt": self.negative_prompt,
                "scheduler": self.sampler,
                "steps": self.steps,
                "cfgScale": self.scale,
                "width": self.width,
                "height": self.height,
                "clipSkip": 2,
                "seed": self.seed
            }
        }

        self.logger.info(f"Task submitted! Generating {self.total_img_count} image(s)")

        for i in range(self.total_img_count):

            try:
                response = await civitai.image.create(input_, wait=True)
                if response['jobs'][0]['result'].get('available'):
                    self.img_url.append(response['jobs'][0]['result'].get('blobUrl'))
                else:
                    raise ValueError("No image was generated; the job may be unfinished or the result unavailable")
            except Exception as e:
                self.fail_on_requesting = True
                self.logger.error(f"API request failed: {e}\n{traceback.format_exc()}")

        await self.err_formating_to_sd_style()

--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/__init__.py:
--------------------------------------------------------------------------------
import asyncio
import random
import json
import time
import traceback

import aiofiles

from tqdm import tqdm
from pathlib import Path
from fastapi import Request
from fastapi.responses import JSONResponse
from typing import Union
from colorama import Fore, Style
from colorama import init
init()

from ..base_config import setup_logger, init_instance
from .base import Backend

import_logger = setup_logger('[IMPORT_BACKEND]')

from DrawBridgeAPI.locales import _ as i18n


class BaseHandler:

    selected_instance_list: list[Backend] = []
    class_list: list[type[Backend]] = []
    class_dict: dict = {}
    selected_instance_dict: dict[str, list[Backend]] = {}
    init_parameters_list: list = []
    init_parameters_dict: dict = {}
    enable_backend: dict = {}

    def __init__(
        self,
        payload,
        request: Request = None,
        path: str = None,
        comfyui_task=None,
    ):
        self.task_list = []
        self.instance_list: list[Backend] = []
        self.payload = payload
        self.request = request
        self.path = path
        self.config = init_instance.config
        self.all_task_list = None
        self.comfyui_task: str = comfyui_task
        self.task_type: str = "enable_txt2img_backends"

    async def get_enable_task(
        self,
        task_type,
    ):
        """
        Fetch backend instances and keep only the backends enabled for this task type.
        :param task_type: key into config.enable_backends, e.g. "enable_txt2img_backends"
        :return:
        """

        self.init_parameters_list = []
        self.selected_instance_list = []
        self.class_list = []
        update_dict = {}

        if self.selected_instance_dict.get(task_type, None):

            self.instance_list = self.selected_instance_dict[task_type]
            return

        enable_backend_list = self.config.enable_backends[task_type]

        for enable_backend, backend_setting in enable_backend_list.items():

            def create_and_append_instances(
                enable_backend_type,
                AIDRAW_class,
                backend_setting,
                extra_args: dict = None
            ):

                enable_queue = False

                for counter in backend_setting:
                    if isinstance(counter, int):
                        enable_queue = False

                    elif isinstance(counter, dict):
                        # A dict entry maps a backend index to an operation, e.g. {1: "queue"}
                        operation = list(counter.values())[0]
                        counter = int(list(counter.keys())[0])
                        enable_queue = True if operation == "queue" else False

                    override = self.config.backends[enable_backend_type].get("override", None)
                    try:
                        if override is not None:
                            override = override[counter]
                        else:
                            override = None
                    except IndexError:
                        override = None

                    init_args = {
                        "count": counter,
                        "payload": self.payload,
                        "enable_queue": enable_queue,
                        "backend_type": enable_backend_type,
                        "override_setting": override,
                    }

                    if extra_args:
                        init_args.update(extra_args)

                    self.init_parameters_list.append(init_args)

                    aidraw_instance = AIDRAW_class(
                        **init_args
                    )
                    update_dict.update({
                        self.config.backends[enable_backend_type]["name"][counter]:
                            self.config.backends[enable_backend_type]["api"][counter]
                    })

                    aidraw_instance.init_backend_info()
                    import_logger.info("Backend {0} is enabled".format(self.config.backends[enable_backend_type]['name'][counter]))

                    self.selected_instance_list.append(aidraw_instance)
                    self.class_list.append(AIDRAW_class)

            if "civitai" in enable_backend:
                from .SD_civitai_API import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "a1111webui" in enable_backend:
                from .SD_A1111_webui import AIDRAW

                create_and_append_instances(enable_backend, AIDRAW, backend_setting,
                                            extra_args={"request": self.request, "path": self.path})

            elif "fal_ai" in enable_backend:
                from .FLUX_falai import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "replicate" in enable_backend:
                from .FLUX_replicate import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "liblibai" in enable_backend:
                from .liblibai import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "tusiart" in enable_backend:
                from .tusiart import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "seaart" in enable_backend:
                from .seaart import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "yunjie" in enable_backend:
                from .yunjie import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "comfyui" in enable_backend:
                from .comfyui import AIDRAW

                create_and_append_instances(enable_backend, AIDRAW, backend_setting, extra_args={
                    "request": self.request,
                    "path": self.path
                })

            elif "novelai" in enable_backend:
                from .novelai import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

            elif "midjourney" in enable_backend:
                from .midjourney import AIDRAW
                create_and_append_instances(enable_backend, AIDRAW, backend_setting)

        self.enable_backend[task_type] = update_dict
        self.init_parameters_dict[task_type] = self.init_parameters_list
        self.selected_instance_dict[task_type] = self.selected_instance_list
        self.instance_list = self.selected_instance_list
        self.class_dict[task_type] = self.class_list

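    # A plausible `enable_backends` config fragment matching the parsing above
    # (assumed illustration; see config_example.yaml for the authoritative format):
    #   enable_txt2img_backends:
    #     a1111webui: [0, {1: "queue"}]   # backend 0 plain, backend 1 with queueing
    #     fal_ai: [0]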


class TXT2IMGHandler(BaseHandler):

    def __init__(self, payload, comfyui_task: str = None):
        super().__init__(comfyui_task=comfyui_task, payload=payload)

    async def get_all_instance(self) -> tuple[list[Backend], dict, dict, dict]:

        self.task_type = "enable_txt2img_backends"
        await self.get_enable_task(self.task_type)
        return self.instance_list, self.enable_backend, self.init_parameters_dict, self.class_dict


class IMG2IMGHandler(BaseHandler):

    def __init__(self, payload, comfyui_task: str = None):
        super().__init__(comfyui_task=comfyui_task, payload=payload)

    async def get_all_instance(self) -> tuple[list[Backend], dict, dict, dict]:
        self.task_type = "enable_img2img_backends"
        await self.get_enable_task(self.task_type)
        return self.instance_list, self.enable_backend, self.init_parameters_dict, self.class_dict


class A1111WebuiHandlerAPI(BaseHandler):
    async def get_all_instance(self) -> tuple[list[Backend], dict, dict, dict]:

        self.task_type = "enable_sdapi_backends"
        await self.get_enable_task(self.task_type)
        return self.instance_list, self.enable_backend, self.init_parameters_dict, self.class_dict


class ComfyUIHandler(BaseHandler):

    async def get_all_instance(self) -> tuple[list[Backend], dict, dict, dict]:

        self.task_type = "enable_comfyui_backends"
        await self.get_enable_task(self.task_type)
        return self.instance_list, self.enable_backend, self.init_parameters_dict, self.class_dict


class StaticHandler:
    lock_to_backend = None
    prompt_style: list = None

    @classmethod
    def set_lock_to_backend(cls, selected_model: str):
        cls.lock_to_backend = selected_model

    @classmethod
    def get_lock_to_backend(cls):
        return cls.lock_to_backend

    @classmethod
    def get_prompt_style(cls):
        return cls.prompt_style

    @classmethod
    def set_prompt_style(cls, prompt_style: list):
        cls.prompt_style = prompt_style

    @classmethod
    def get_backend_options(cls):
        build_resp = {
            "samples_save": True,
            "samples_format": "png",
            "samples_filename_pattern": "",
            "save_images_add_number": True,
            "grid_save": True,
            "grid_format": "png",
            "grid_extended_filename": False,
            "grid_only_if_multiple": True,
            "grid_prevent_empty_spots": False,
            "grid_zip_filename_pattern": "",
            "n_rows": -1.0,
            "font": "",
            "grid_text_active_color": "#000000",
            "grid_text_inactive_color": "#999999",
            "grid_background_color": "#ffffff",
            "enable_pnginfo": True,
            "save_txt": False,
            "save_images_before_face_restoration": False,
            "save_images_before_highres_fix": False,
            "save_images_before_color_correction": False,
            "save_mask": False,
            "save_mask_composite": False,
            "jpeg_quality": 80.0,
            "webp_lossless": False,
            "export_for_4chan": True,
            "img_downscale_threshold": 4.0,
            "target_side_length": 4000.0,
            "img_max_size_mp": 200.0,
            "use_original_name_batch": True,
            "use_upscaler_name_as_suffix": False,
            "save_selected_only": True,
            "save_init_img": False,
            "temp_dir": "",
            "clean_temp_dir_at_start": False,
            "save_incomplete_images": False,
            "outdir_samples": "",
            "outdir_txt2img_samples": "outputs/txt2img-images",
            "outdir_img2img_samples": "outputs/img2img-images",
            "outdir_extras_samples": "outputs/extras-images",
            "outdir_grids": "",
            "outdir_txt2img_grids": "outputs/txt2img-grids",
            "outdir_img2img_grids": "outputs/img2img-grids",
            "outdir_save": "log/images",
            "outdir_init_images": "outputs/init-images",
            "save_to_dirs": True,
            "grid_save_to_dirs": True,
            "use_save_to_dirs_for_ui": False,
            "directories_filename_pattern": "[date]",
            "directories_max_prompt_words": 8.0,
            "ESRGAN_tile": 192.0,
            "ESRGAN_tile_overlap": 8.0,
            "realesrgan_enabled_models": [
                "R-ESRGAN 4x+",
                "R-ESRGAN 4x+ Anime6B"
            ],
            "upscaler_for_img2img": None,
            "face_restoration": False,
            "face_restoration_model": "CodeFormer",
            "code_former_weight": 0.5,
            "face_restoration_unload": False,
            "auto_launch_browser": "Local",
            "show_warnings": False,
            "show_gradio_deprecation_warnings": True,
            "memmon_poll_rate": 8.0,
            "samples_log_stdout": False,
            "multiple_tqdm": True,
            "print_hypernet_extra": False,
            "list_hidden_files": True,
            "disable_mmap_load_safetensors": False,
            "hide_ldm_prints": True,
            "api_enable_requests": True,
            "api_forbid_local_requests": True,
            "api_useragent": "",
            "unload_models_when_training": False,
            "pin_memory": False,
            "save_optimizer_state": False,
            "save_training_settings_to_txt": True,
            "dataset_filename_word_regex": "",
            "dataset_filename_join_string": " ",
            "training_image_repeats_per_epoch": 1.0,
            "training_write_csv_every": 500.0,
            "training_xattention_optimizations": False,
            "training_enable_tensorboard": False,
            "training_tensorboard_save_images": False,
            "training_tensorboard_flush_every": 120.0,
            "sd_model_checkpoint": cls.lock_to_backend if cls.lock_to_backend else 'DrawBridgeAPI-Auto-Select',
            "sd_checkpoints_limit": 1.0,
            "sd_checkpoints_keep_in_cpu": True,
            "sd_checkpoint_cache": 3,
            "sd_unet": "None",
            "enable_quantization": False,
            "enable_emphasis": True,
            "enable_batch_seeds": True,
            "comma_padding_backtrack": 20.0,
            "CLIP_stop_at_last_layers": 3.0,
            "upcast_attn": False,
            "randn_source": "GPU",
            "tiling": False,
            "hires_fix_refiner_pass": "second pass",
            "sdxl_crop_top": 0.0,
            "sdxl_crop_left": 0.0,
            "sdxl_refiner_low_aesthetic_score": 2.5,
            "sdxl_refiner_high_aesthetic_score": 6.0,
            "sd_vae_explanation": "VAE is a neural network that transforms a standard RGB\nimage into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling\n(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished.\nFor img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.",
            "sd_vae_checkpoint_cache": 0,
            "sd_vae": "None",
            "sd_vae_overrides_per_model_preferences": False,
            "auto_vae_precision": True,
            "sd_vae_encode_method": "Full",
            "sd_vae_decode_method": "Full",
            "inpainting_mask_weight": 1.0,
            "initial_noise_multiplier": 1.0,
            "img2img_extra_noise": 0,
            "img2img_color_correction": False,
            "img2img_fix_steps": False,
            "img2img_background_color": "#ffffff",
            "img2img_editor_height": 720.0,
            "img2img_sketch_default_brush_color": "#ffffff",
            "img2img_inpaint_mask_brush_color": "#ffffff",
            "img2img_inpaint_sketch_default_brush_color": "#ffffff",
            "return_mask": False,
            "return_mask_composite": False,
            "cross_attention_optimization": "Automatic",
            "s_min_uncond": 0.0,
            "token_merging_ratio": 0.0,
            "token_merging_ratio_img2img": 0.0,
            "token_merging_ratio_hr": 0.0,
            "pad_cond_uncond": False,
            "persistent_cond_cache": True,
            "batch_cond_uncond": True,
            "use_old_emphasis_implementation": False,
            "use_old_karras_scheduler_sigmas": False,
            "no_dpmpp_sde_batch_determinism": False,
            "use_old_hires_fix_width_height": False,
            "dont_fix_second_order_samplers_schedule": False,
            "hires_fix_use_firstpass_conds": False,
            "use_old_scheduling": False,
            "interrogate_keep_models_in_memory": False,
            "interrogate_return_ranks": False,
            "interrogate_clip_num_beams": 1.0,
            "interrogate_clip_min_length": 24.0,
            "interrogate_clip_max_length": 48.0,
            "interrogate_clip_dict_limit": 1500.0,
            "interrogate_clip_skip_categories": [],
            "interrogate_deepbooru_score_threshold": 0.5,
            "deepbooru_sort_alpha": True,
            "deepbooru_use_spaces": True,
            "deepbooru_escape": True,
            "deepbooru_filter_tags": "",
            "extra_networks_show_hidden_directories": True,
            "extra_networks_hidden_models": "When searched",
            "extra_networks_default_multiplier": 1.0,
            "extra_networks_card_width": 0,
            "extra_networks_card_height": 0,
            "extra_networks_card_text_scale": 1.0,
            "extra_networks_card_show_desc": True,
            "extra_networks_add_text_separator": " ",
            "ui_extra_networks_tab_reorder": "",
            "textual_inversion_print_at_load": False,
            "textual_inversion_add_hashes_to_infotext": True,
            "sd_hypernetwork": "None",
            "localization": "None",
            "gradio_theme": "Default",
            "gradio_themes_cache": True,
            "gallery_height": "",
            "return_grid": True,
            "do_not_show_images": False,
            "send_seed": True,
            "send_size": True,
            "js_modal_lightbox": True,
            "js_modal_lightbox_initially_zoomed": True,
            "js_modal_lightbox_gamepad": False,
            "js_modal_lightbox_gamepad_repeat": 250.0,
            "show_progress_in_title": True,
            "samplers_in_dropdown": True,
            "dimensions_and_batch_together": True,
            "keyedit_precision_attention": 0.1,
            "keyedit_precision_extra": 0.05,
            "keyedit_delimiters": ".,\\/!?%^*;:{}=`~()",
            "keyedit_move": True,
            "quicksettings_list": [
                "sd_model_checkpoint",
                "sd_unet",
                "sd_vae",
                "CLIP_stop_at_last_layers"
            ],
            "ui_tab_order": [],
            "hidden_tabs": [],
            "ui_reorder_list": [],
            "hires_fix_show_sampler": False,
            "hires_fix_show_prompts": False,
            "disable_token_counters": False,
            "add_model_hash_to_info": True,
            "add_model_name_to_info": True,
            "add_user_name_to_info": False,
            "add_version_to_infotext": True,
            "disable_weights_auto_swap": True,
            "infotext_styles": "Apply if any",
            "show_progressbar": True,
            "live_previews_enable": True,
            "live_previews_image_format": "png",
            "show_progress_grid": True,
            "show_progress_every_n_steps": 10.0,
            "show_progress_type": "Approx NN",
            "live_preview_allow_lowvram_full": False,
            "live_preview_content": "Prompt",
            "live_preview_refresh_period": 1000.0,
            "live_preview_fast_interrupt": False,
            "hide_samplers": [],
            "eta_ddim": 0.0,
            "eta_ancestral": 1.0,
            "ddim_discretize": "uniform",
            "s_churn": 0.0,
            "s_tmin": 0.0,
            "s_tmax": 0,
            "s_noise": 1.0,
            "k_sched_type": "Automatic",
            "sigma_min": 0.0,
            "sigma_max": 0.0,
            "rho": 0.0,
            "eta_noise_seed_delta": 0,
            "always_discard_next_to_last_sigma": False,
            "sgm_noise_multiplier": False,
            "uni_pc_variant": "bh1",
            "uni_pc_skip_type": "time_uniform",
            "uni_pc_order": 3.0,
            "uni_pc_lower_order_final": True,
            "postprocessing_enable_in_main_ui": [],
            "postprocessing_operation_order": [],
            "upscaling_max_images_in_cache": 5.0,
            "disabled_extensions": [],
            "disable_all_extensions": "none",
            "restore_config_state_file": "",
            "sd_checkpoint_hash": "91e0f7cbaf70676153810c231e8703bf26b3208c116a3d1f2481cbc666905471"
        }

        return build_resp


class TaskHandler(StaticHandler):

    backend_avg_dict: dict = {}
    write_count: dict = {}
    backend_images: dict = {}

    backend_site_list = None
    load_balance_logger = setup_logger('[AvgTimeCalculator]')
    load_balance_sample = 10

    redis_client = None
    backend_status = None

    @classmethod
    def update_backend_status(cls):
        cls.backend_status = json.loads(cls.redis_client.get("workload"))

    @classmethod
    def get_redis_client(cls):
        cls.redis_client = init_instance.redis_client

    @classmethod
    async def get_backend_avg_work_time(cls) -> dict:
        backend_sites = cls.backend_site_list

        # Read and write back under the same redis key
        avg_time_key = "backend_avg_time"

        avg_time_data = cls.redis_client.get(avg_time_key)
        if avg_time_data is None:
            cls.redis_client.set(avg_time_key, json.dumps(cls.backend_avg_dict))
        else:
            new_data = json.loads(avg_time_data)
            for key, values in new_data.items():
                if key in cls.backend_avg_dict:
                    cls.backend_avg_dict[key].extend(
                        values[-cls.load_balance_sample:] if len(values) >= cls.load_balance_sample else
                        values
                    )
                else:
                    cls.backend_avg_dict[key] = (values[-cls.load_balance_sample:] if
                                                 len(values) >= cls.load_balance_sample else values)

                cls.backend_avg_dict[key] = cls.backend_avg_dict[key][-cls.load_balance_sample:]

        avg_time_dict = {}
        for backend_site in backend_sites:
            spend_time_list = cls.backend_avg_dict.get(backend_site, [])
            if spend_time_list and len(spend_time_list) >= cls.load_balance_sample:
                sorted_list = sorted(spend_time_list)
                # Trim the fastest and the slowest sample before averaging
                trimmed_list = sorted_list[1:-1]
                avg_time = sum(trimmed_list) / len(trimmed_list) if trimmed_list else None
                avg_time_dict[backend_site] = avg_time
            else:
                avg_time_dict[backend_site] = None

        return avg_time_dict

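    # Worked example of the trimmed average above: with load_balance_sample = 10 and
    # samples [8, 9, 9, 10, 10, 10, 11, 11, 12, 30], the min (8) and max (30) are
    # dropped and the average is taken over the remaining eight values -> 10.25 s.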
    @classmethod
    async def set_backend_work_time(cls, spend_time, backend_site, total_images=1):
        spend_time_list = cls.backend_avg_dict.get(backend_site, [])
        spend_time_list.append(int(spend_time / total_images))

        if len(spend_time_list) >= cls.load_balance_sample:
            spend_time_list = spend_time_list[-cls.load_balance_sample:]

        cls.backend_avg_dict[backend_site] = spend_time_list

        cls.write_count[backend_site] = cls.write_count.get(backend_site, 0) + 1

        if cls.write_count.get(backend_site, 0) >= cls.load_balance_sample:
            cls.redis_client.set("backend_avg_time", json.dumps(cls.backend_avg_dict))
            cls.write_count[backend_site] = 0

        # info_str = ''

        # for key, values in cls.backend_avg_dict.items():
        #     info_str += f"{key}: last 10 generation times {values}\n"
        #
        # cls.load_balance_logger.info(info_str)

    @classmethod
    def set_backend_image(cls, num=0, backend_site=None, get=False) -> Union[None, dict]:
        all_backend_dict = {}

        if backend_site:
            working_images = cls.backend_images.get(backend_site, 1)
            working_images += num
            cls.backend_images[backend_site] = working_images

        if get:
            for site in cls.backend_site_list:
                all_backend_dict[site] = cls.backend_images.get(site, 1)
            return all_backend_dict

    @classmethod
    def set_backend_list(cls, backend_dict):
        cls.backend_site_list = list(backend_dict.values())

    def __init__(
        self,
        payload=None,
        request: Request = None,
        path: str = None,
        select_backend: int = None,
        reutrn_instance: bool = False,
        model_to_backend: str = None,
        disable_loadbalance: bool = False,
        comfyui_json: str = "",
        override_model_select: bool = False,
    ):
        self.payload = payload
        self.instance_list: list[Backend] = []
        self.class_dict: dict = {}
        self.parameters_list = {}
        self.result = None
        self.request = request
        self.path = path
        self.enable_backend: dict[str, dict] = None
        self.reutrn_instance = reutrn_instance
        self.select_backend = select_backend
        self.model_to_backend = model_to_backend  # name of the requested model
        self.disable_loadbalance = disable_loadbalance
        self.lock_to_backend = self.get_lock_to_backend() if override_model_select is False else None
        self.comfyui_json: str = comfyui_json

        # payload may be None when only proxying requests
        payload = self.payload or {}
        self.total_images = (payload.get("batch_size", 1) * payload.get("n_iter", 1)) or 1

        self.ava_backend_url = None
        self.ava_backend_index = None

        self.task_type: str = None

    @staticmethod
    def get_backend_name(model_name) -> str:
        all_model: bytes = init_instance.redis_client.get('models')
        all_model: dict = json.loads(all_model.decode('utf-8'))
        for key, models in all_model.items():
            if isinstance(models, list):
                for model in models:
                    if model.get("title") == model_name or model.get("model_name") == model_name:
                        return key

    @staticmethod
    def get_backend_index(mapping_dict, key_to_find) -> int:
        keys = list(mapping_dict.keys())
        if key_to_find in keys:
            return keys.index(key_to_find)
        return None

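    # The redis 'models' cache read by get_backend_name maps a backend name to its
    # model list, roughly like this (shape inferred from the lookup above; values
    # are illustrative):
    #   {"my-a1111": [{"title": "sd_xl_base.safetensors [31e35c80fc]", "model_name": "sd_xl_base"}]}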
641 | async def txt2img(self):
642 |
643 | self.instance_list, self.enable_backend, self.parameters_list, self.class_dict = await TXT2IMGHandler(
644 | self.payload,
645 | comfyui_task=self.comfyui_json
646 | ).get_all_instance()
647 |
648 | await self.choice_backend(task_type="enable_txt2img_backends")
649 | return self.result
650 |
651 | async def img2img(self):
652 |
653 | self.instance_list, self.enable_backend, self.parameters_list, self.class_dict = await IMG2IMGHandler(
654 | self.payload,
655 | comfyui_task=self.comfyui_json
656 | ).get_all_instance()
657 |
658 | await self.choice_backend(task_type="enable_img2img_backends")
659 | return self.result
660 |
661 | async def sd_api(self) -> JSONResponse | list[Backend]:
662 |
663 | self.instance_list, self.enable_backend, self.parameters_list, self.class_dict = await A1111WebuiHandlerAPI(
664 | self.payload,
665 | self.request,
666 | self.path
667 | ).get_all_instance()
668 |
669 | await self.choice_backend(task_type="enable_sdapi_backends")
670 | return self.result
671 |
672 | async def comfyui_api(self) -> JSONResponse | list[Backend]:
673 |
674 | self.instance_list, self.enable_backend, self.parameters_list, self.class_dict = await ComfyUIHandler(
675 | self.payload,
676 | self.request,
677 | self.path
678 | ).get_all_instance()
679 |
680 | await self.choice_backend(task_type="enable_comfyui_backends")
681 | return self.result
682 |
683 | async def choice_backend(self, task_type: str):
684 |
685 | self.task_type = task_type
686 |
687 | from DrawBridgeAPI.locales import _ as i18n
688 |
689 | if self.disable_loadbalance:
690 | return
691 |
692 | backend_url_dict = self.enable_backend[task_type]
693 |
694 | self.set_backend_list(backend_url_dict)
695 | self.get_redis_client()
696 |
697 | tasks = []
698 | is_avaiable = 0
699 | status_dict = {}
700 | ava_url = None
701 | n = -1
702 | e = -1
703 | normal_backend = []
704 | idle_backend = []
705 |
706 | logger = setup_logger(custom_prefix='[LOAD_BALANCE]')
707 |
708 | if self.reutrn_instance:
709 | self.result = self.instance_list
710 | return
711 | for i in self.instance_list:
712 | task = i.get_backend_working_progress()
713 | tasks.append(task)
714 | # 获取api队列状态
715 | key = self.get_backend_name(self.model_to_backend or self.lock_to_backend)
716 | if self.model_to_backend and key is not None:
717 |
718 | backend_index = self.get_backend_index(backend_url_dict, key)
719 | logger.info(f"{i18n('Manually select model')}: {self.model_to_backend}, {i18n('Backend select')}{key[:24]}")
720 |
721 | self.ava_backend_url = backend_url_dict[key]
722 | self.ava_backend_index = backend_index
723 |
724 | await self.exec_generate()
725 |
726 |         elif self.lock_to_backend:
727 |             if key is not None:
728 | backend_index = self.get_backend_index(backend_url_dict, key)
729 | logger.info(f"{i18n('Backend locked')}: {key[:24]}")
730 |
731 | self.ava_backend_url = backend_url_dict[key]
732 | self.ava_backend_index = backend_index
733 |
734 | await self.exec_generate()
735 |
736 | else:
737 | all_resp = await asyncio.gather(*tasks, return_exceptions=True)
738 | logger.info(i18n('Starting backend selection'))
739 | for resp_tuple in all_resp:
740 | e += 1
741 |             if resp_tuple is None or isinstance(resp_tuple, Exception):
742 | logger.warning(i18n('Backend {0} is down').format(self.instance_list[e].workload_name))
743 | else:
744 | try:
745 | if resp_tuple[3] in [200, 201]:
746 | n += 1
747 | status_dict[resp_tuple[2]] = resp_tuple[0]["eta_relative"]
748 |                             normal_backend = list(status_dict.keys())
749 | else:
750 | raise RuntimeError
751 | except (RuntimeError, TypeError):
752 | logger.warning(i18n('Backend {0} is failed or locked').format(self.instance_list[e].workload_name))
753 | continue
754 | else:
755 |                         # a backend counts as idle when its progress is exactly 0
756 |                         if resp_tuple[0]["progress"] in [0, 0.0]:
757 |                             is_available += 1
758 | idle_backend.append(normal_backend[n])
759 | else:
760 | pass
761 |                         # display progress
762 | total = 100
763 | progress = int(resp_tuple[0]["progress"] * 100)
764 | show_str = f"{self.instance_list[e].workload_name}"
765 | show_str = show_str.ljust(50, "-")
766 |
767 | bar_format = f"{Fore.CYAN}[Progress] {{l_bar}}{{bar}}|{Style.RESET_ALL}"
768 |
769 | with tqdm(
770 | total=total,
771 | desc=show_str + "-->",
772 | bar_format=bar_format
773 | ) as pbar:
774 | pbar.update(progress)
775 | if len(normal_backend) == 0:
776 | logger.error(i18n('No available backend'))
777 | raise RuntimeError(i18n('No available backend'))
778 |
779 | backend_total_work_time = {}
780 | avg_time_dict = await self.get_backend_avg_work_time()
781 | backend_image = self.set_backend_image(get=True)
782 |
783 | eta = 0
784 |
785 | for (site, time_), (_, image_count) in zip(avg_time_dict.items(), backend_image.items()):
786 | self.load_balance_logger.info(
787 | i18n('Backend: {0} Average work time: {1} seconds, Current tasks: {2}').format(site, time_, image_count - 1)
788 | )
789 |
790 | if site in normal_backend:
791 | self.update_backend_status()
792 | for key in self.backend_status:
793 | if site in key:
794 | end_time = self.backend_status[key].get('end_time', None)
795 | start_time = self.backend_status[key].get('start_time', None)
796 | if start_time:
797 | if end_time:
798 | eta = 0
799 | else:
800 | current_time = time.time()
801 | eta = int(current_time - start_time)
802 |
803 | effective_time = 1 if time_ is None else time_
804 | total_work_time = effective_time * int(image_count)
805 |
806 | eta = eta if time_ else 0
807 | self.load_balance_logger.info(f"{i18n('Extra time weight')} {eta}")
808 |
809 | backend_total_work_time[site] = total_work_time - eta if (total_work_time - eta) >= 0 else total_work_time
810 |
811 | total_time_dict = list(backend_total_work_time.values())
812 |         rev_dict = {}
813 |         for key, value in backend_total_work_time.items():
814 |             # keep the first backend seen for each work-time value
815 |             rev_dict.setdefault(value, key)
818 |
819 | sorted_list = sorted(total_time_dict)
820 | fastest_backend = sorted_list[0]
821 | ava_url = rev_dict[fastest_backend]
822 | self.load_balance_logger.info(i18n('Backend: {0} is the fastest, has been selected').format(ava_url[:35]))
823 |
824 | ava_url_index = list(backend_url_dict.values()).index(ava_url)
825 |
826 | self.ava_backend_url = ava_url
827 | self.ava_backend_index = ava_url_index
828 |
829 | await self.exec_generate()
830 | # ava_url_tuple = (ava_url, reverse_dict[ava_url], all_resp, len(normal_backend), vram_dict[ava_url])
831 |
832 | async def exec_generate(self):
833 | fifo = None
834 | try:
835 | self.set_backend_image(self.total_images, self.ava_backend_url)
836 | backend_class = self.class_dict[self.task_type][self.ava_backend_index]
837 |
838 | self.parameters_list[self.task_type][self.ava_backend_index]['payload'] = self.payload
839 | select_instance = backend_class(**self.parameters_list[self.task_type][self.ava_backend_index])
840 |
841 | select_instance.init_backend_info()
842 | fifo = await select_instance.send_result_to_api()
843 |         except Exception:
844 |             traceback.print_exc()
845 |         finally:
846 |             self.set_backend_image(-self.total_images, self.ava_backend_url)
847 |             if fifo is not None:
848 |                 await self.set_backend_work_time(fifo.spend_time, self.ava_backend_url, fifo.total_img_count)
849 |                 self.result = fifo.result
849 |
850 |
--------------------------------------------------------------------------------
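
Note: choice_backend above estimates each healthy backend's pending work as average work time x queued image count and then inverts that mapping to find the minimum. A minimal standalone sketch of the scoring step, with hypothetical numbers (in the real flow the averages and queue counts come from Redis):

    # Hypothetical per-backend stats; Redis supplies these in the real flow.
    avg_time = {"http://backend-a:7860": 12.0, "http://backend-b:7860": 8.5}
    queued_images = {"http://backend-a:7860": 1, "http://backend-b:7860": 3}

    # Same formula as choice_backend: effective average time (fallback 1) * queue depth.
    scores = {site: (avg_time[site] or 1) * queued_images[site] for site in avg_time}

    # min() over items replaces the value -> key reverse dict in the original.
    fastest_site = min(scores.items(), key=lambda kv: kv[1])[0]
    print(fastest_site)  # -> http://backend-a:7860 (12.0 * 1 < 8.5 * 3)
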
/DrawBridgeAPI/backend/comfyui.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import copy
3 | import json
4 | import random
5 | import time
6 | import traceback
7 | import uuid
8 | from pathlib import Path
9 | from tqdm import tqdm
10 | import os
11 | import base64
12 | import aiohttp
13 |
14 | from .base import Backend
15 | from ..utils import run_later
16 |
17 | global __ALL_SUPPORT_NODE__
18 | MAX_SEED = 2 ** 32
19 |
20 |
21 | class AIDRAW(Backend):
22 |
23 | def __init__(self, **kwargs):
24 | super().__init__(**kwargs)
25 |         # needs revision
26 | self.logger = self.setup_logger('[Comfyui]')
27 |
28 | self.comfyui_api_json = None
29 | self.comfyui_api_json_reflex = None
30 |
31 | self.reflex_dict['sampler'] = {
32 | "DPM++ 2M": "dpmpp_2m",
33 | "DPM++ SDE": "dpmpp_sde",
34 | "DPM++ 2M SDE": "dpmpp_2m_sde",
35 | "DPM++ 2M SDE Heun": "dpmpp_2m_sde",
36 | "DPM++ 2S a": "dpmpp_2s_ancestral",
37 | "DPM++ 3M SDE": "dpmpp_3m_sde",
38 | "Euler a": "euler_ancestral",
39 | "Euler": "euler",
40 | "LMS": "lms",
41 | "Heun": "heun",
42 | "DPM2": "dpm_2",
43 | "DPM2 a": "dpm_2_ancestral",
44 | "DPM fast": "dpm_fast",
45 | "DPM adaptive": "dpm_adaptive",
46 | "Restart": "restart",
47 | "HeunPP2": "heunpp2",
48 | "IPNDM": "ipndm",
49 | "IPNDM_V": "ipndm_v",
50 | "DEIS": "deis",
51 | "DDIM": "ddim",
52 | "DDIM CFG++": "ddim",
53 | "PLMS": "plms",
54 | "UniPC": "uni_pc",
55 | "LCM": "lcm",
56 | "DDPM": "ddpm",
57 | # "[Forge] Flux Realistic": None,
58 | # "[Forge] Flux Realistic (Slow)": None,
59 | }
60 | self.reflex_dict['scheduler'] = {
61 | "Automatic": "normal",
62 | "Karras": "karras",
63 | "Exponential": "exponential",
64 | "SGM Uniform": "sgm_uniform",
65 | "Simple": "simple",
66 | "Normal": "normal",
67 | "DDIM": "ddim_uniform",
68 | "Beta": "beta"
69 | }
70 |
71 | self.reflex_dict['parameters'] = {}
72 |
73 | self.scheduler = self.reflex_dict['scheduler'].get(self.scheduler, "normal")
74 | self.sampler = self.reflex_dict['sampler'].get(self.sampler, "euler")
75 |
76 | async def heart_beat(self, id_):
77 |         self.logger.info(f"{id_} request started")
78 |
79 | async def get_images():
80 |
81 | response = await self.http_request(
82 | method="GET",
83 | target_url=f"{self.backend_id}/history/{id_}",
84 | )
85 |
86 | if response:
87 | for img in response[id_]['outputs'][str(self.comfyui_api_json_reflex.get('output', 9))]['images']:
88 | if img['subfolder'] == "":
89 | img_url = f"{self.backend_id}/view?filename={img['filename']}"
90 | else:
91 | img_url = f"{self.backend_id}/view?filename={img['filename']}&subfolder={img['subfolder']}"
92 | self.img_url.append(img_url)
93 |
94 | async with aiohttp.ClientSession() as session:
95 | ws_url = f'{self.backend_id}/ws?clientId={self.client_id}'
96 | async with session.ws_connect(ws_url) as ws:
97 |
98 |             self.logger.info(f"WebSocket connected: {ws_url}")
99 | progress_bar = None
100 |
101 | async for msg in ws:
102 | if msg.type == aiohttp.WSMsgType.TEXT:
103 | ws_msg = json.loads(msg.data)
104 | #
105 | # current_node = ws_msg['data']['node']
106 |
107 | if ws_msg['type'] == 'progress':
108 | value = ws_msg['data']['value']
109 | max_value = ws_msg['data']['max']
110 |
111 | if progress_bar is None:
112 | progress_bar = await asyncio.to_thread(
113 | tqdm, total=max_value,
114 | desc=f"Prompt ID: {ws_msg['data']['prompt_id']}",
115 | unit="steps"
116 | )
117 |
118 | delta = value - progress_bar.n
119 | await asyncio.to_thread(progress_bar.update, delta)
120 |
121 | if ws_msg['type'] == 'executing':
122 | if ws_msg['data']['node'] is None:
123 |                         self.logger.info(f"{id_} image generation finished!")
124 | await get_images()
125 | await ws.close()
126 | #
127 | # elif msg.type == aiohttp.WSMsgType.BINARY:
128 | # if current_node == 'save_image_websocket_node':
129 | # bytes_msg = msg.data
130 | # images_output = output_images.get(current_node, [])
131 | # images_output.append(bytes_msg[8:])
132 | # output_images[current_node] = images_output
133 |
134 | elif msg.type == aiohttp.WSMsgType.ERROR:
135 | self.logger.error(f"Error: {msg.data}")
136 | await ws.close()
137 | break
138 |
139 | if progress_bar is not None:
140 | await asyncio.to_thread(progress_bar.close)
141 |
142 | async def update_progress(self):
143 |         # overrides the base method (no-op)
144 | pass
145 |
146 | async def get_backend_working_progress(self):
147 |
148 | try:
149 | response = await self.http_request(
150 | method="GET",
151 | target_url=f"{self.backend_id}/queue",
152 | )
153 | if response.get("error", None):
154 | available = False
155 | else:
156 | available = True
157 |
158 | if len(response.get("queue_running", [])) == 0:
159 | progress = 0
160 | else:
161 | progress = 0.99
162 |
163 | build_resp = self.format_progress_api_resp(progress, self.start_time)
164 |
165 | sc = 200 if available is True else 500
166 |         except:
167 |             traceback.print_exc()
168 |             # fall back to a 500 response so the finally-return never hits unbound locals
169 |             build_resp, sc = self.format_progress_api_resp(0.0, self.start_time), 500
170 |         finally:
171 |             return build_resp, sc, self.backend_id, sc
170 |
171 | async def check_backend_usability(self):
172 | pass
173 |
174 | async def err_formating_to_sd_style(self):
175 |
176 | await self.download_img()
177 | self.format_api_respond()
178 | self.result = self.build_respond
179 |
180 | async def posting(self):
181 |
182 |         self.logger.info(f"Selected workflow: {self.comfyui_api_json}")
183 | path_to_json = self.comfyui_api_json
184 | if self.comfyui_api_json:
185 |
186 | with open(
187 | Path(f"{os.path.dirname(os.path.abspath(__file__))}/../comfyui_workflows/{self.comfyui_api_json}.json").resolve(), 'r', encoding='utf-8') as f:
188 | self.comfyui_api_json = json.load(f)
189 | with open(
190 | Path(f"{os.path.dirname(os.path.abspath(__file__))}/../comfyui_workflows/{path_to_json}_reflex.json").resolve(), 'r', encoding='utf-8') as f:
191 | self.comfyui_api_json_reflex = json.load(f)
192 |
193 | upload_img_resp_list = []
194 |
195 | if self.init_images:
196 | for image in self.init_images:
197 | resp = await self.upload_base64_image(image, uuid.uuid4().hex)
198 | upload_img_resp_list.append(resp)
199 |
200 | await self.update_api_json(upload_img_resp_list)
201 |
202 | input_ = {
203 | "client_id": self.client_id,
204 | "prompt": self.comfyui_api_json
205 | }
206 |
207 |         response = await self.http_request(
208 |             method="POST",
209 |             target_url=f"{self.backend_id}/prompt",
210 |             headers=self.headers,
211 |             content=json.dumps(input_)
212 |         )
213 | 
214 |         if response.get("error", None):
215 |             self.logger.error(response)
216 |             raise RuntimeError(response["status_code"])
217 | 
218 |         self.task_id = response['prompt_id']
219 |
220 | await self.heart_beat(self.task_id)
221 | await self.err_formating_to_sd_style()
222 |
223 | async def update_api_json(self, init_images):
224 | api_json = copy.deepcopy(self.comfyui_api_json)
225 | raw_api_json = copy.deepcopy(self.comfyui_api_json)
226 |
227 | update_mapping = {
228 | "sampler": {
229 | "seed": self.seed,
230 | "steps": self.steps,
231 | "cfg": self.scale,
232 | "sampler_name": self.sampler,
233 | "scheduler": self.scheduler,
234 | "denoise": self.denoising_strength
235 | },
236 | "seed": {
237 | "seed": self.seed,
238 | "noise_seed": self.seed
239 | },
240 | "image_size": {
241 | "width": self.width,
242 | "height": self.height,
243 | "batch_size": self.batch_size
244 | },
245 | "prompt": {
246 | "text": self.prompt
247 | },
248 | "negative_prompt": {
249 | "text": self.negative_prompt
250 | },
251 | "checkpoint": {
252 | "ckpt_name": self.model_path if self.model_path else None
253 | },
254 | "latentupscale": {
255 | "width": int(self.width*self.hr_scale) if not self.hr_resize_x else self.hr_resize_x,
256 | "height": int(self.height*self.hr_scale) if not self.hr_resize_y else self.hr_resize_y,
257 | },
258 | "load_image": {
259 | "image": init_images[0]['name'] if self.init_images else None
260 | },
261 | "resize": {
262 | "width": int(self.width*self.hr_scale) if not self.hr_resize_x else self.hr_resize_x,
263 | "height": int(self.height*self.hr_scale) if not self.hr_resize_y else self.hr_resize_y,
264 | },
265 | "hr_steps": {
266 | "seed": self.seed,
267 | "steps": self.hr_second_pass_steps,
268 | "cfg": self.hr_scale,
269 | "sampler_name": self.sampler,
270 | "scheduler": self.scheduler,
271 | "denoise": self.denoising_strength,
272 | },
273 | "hr_prompt": {
274 | "text": self.hr_prompt
275 | },
276 | "hr_negative_prompt": {
277 | "text": self.hr_negative_prompt
278 | },
279 | "tipo": {
280 | "width": self.width,
281 | "height": self.height,
282 | "seed": self.seed,
283 | "prompt": self.prompt,
284 | },
285 | "append_prompt": {
286 |
287 | }
288 | }
289 |
290 | __OVERRIDE_SUPPORT_KEYS__ = {
291 | 'keep',
292 | 'value',
293 | 'append_prompt',
294 | 'append_negative_prompt',
295 | 'remove',
296 | "randint",
297 | "get_text",
298 | "upscale",
299 | 'image'
300 |
301 | }
302 | __ALL_SUPPORT_NODE__ = set(update_mapping.keys())
303 |
304 | for item, node_id in self.comfyui_api_json_reflex.items():
305 |
306 | if node_id and item not in ("override", "note"):
307 |
308 | org_node_id = node_id
309 |
310 | if isinstance(node_id, list):
311 | node_id = node_id
312 | elif isinstance(node_id, (int, str)):
313 | node_id = [node_id]
314 | elif isinstance(node_id, dict):
315 | node_id = list(node_id.keys())
316 |
317 | for id_ in node_id:
318 | id_ = str(id_)
319 | update_dict = api_json.get(id_, None)
320 | if update_dict and item in update_mapping:
321 | api_json[id_]['inputs'].update(update_mapping[item])
322 |
323 | if isinstance(org_node_id, dict):
324 | for node, override_dict in org_node_id.items():
325 | single_node_or = override_dict.get("override", {})
326 |
327 | if single_node_or:
328 | for key, override_action in single_node_or.items():
329 |
330 | if override_action == "randint":
331 | api_json[node]['inputs'][key] = random.randint(0, MAX_SEED)
332 |
333 |                                     elif override_action == "keep":
334 |                                         # keep the workflow's original value for this key
335 |                                         api_json[node]['inputs'][key] = raw_api_json[node]['inputs'][key]
335 |
336 | elif override_action == "append_prompt":
337 | prompt = raw_api_json[node]['inputs'][key]
338 | prompt = self.prompt + prompt
339 | api_json[node]['inputs'][key] = prompt
340 |
341 | elif override_action == "append_negative_prompt":
342 | prompt = raw_api_json[node]['inputs'][key]
343 | prompt = self.negative_prompt + prompt
344 | api_json[node]['inputs'][key] = prompt
345 |
346 | elif "upscale" in override_action:
347 |                                         scale = 1.5
348 |                                         if "_" in override_action:
349 |                                             scale = float(override_action.split("_")[1])
350 |
351 | if key == 'width':
352 | res = self.width
353 | elif key == 'height':
354 | res = self.height
355 |
356 | upscale_size = int(res * scale)
357 | api_json[node]['inputs'][key] = upscale_size
358 |
359 | elif "value" in override_action:
360 | override_value = raw_api_json[node]['inputs'][key]
361 | if "_" in override_action:
362 | override_value = override_action.split("_")[1]
363 | override_type = override_action.split("_")[2]
364 | if override_type == "int":
365 | override_value = int(override_value)
366 | elif override_type == "float":
367 | override_value = float(override_value)
368 | elif override_type == "str":
369 | override_value = str(override_value)
370 |
371 | api_json[node]['inputs'][key] = override_value
372 |
373 | elif "image" in override_action:
374 | image_id = int(override_action.split("_")[1])
375 | api_json[node]['inputs'][key] = init_images[image_id]['name']
376 |
377 | else:
378 | update_dict = api_json.get(node, None)
379 | if update_dict and item in update_mapping:
380 | api_json[node]['inputs'].update(update_mapping[item])
381 |
382 | await run_later(self.compare_dicts(api_json, self.comfyui_api_json), 0.5)
383 | self.comfyui_api_json = api_json
384 |
385 | async def compare_dicts(self, dict1, dict2):
386 |
387 | modified_keys = {k for k in dict1.keys() & dict2.keys() if dict1[k] != dict2[k]}
388 |         build_info = "Node mapping summary: \n"
389 |         for key in modified_keys:
390 |             build_info += f"Node ID: {key} -> \n"
391 |             for (key1, value1), (key2, value2) in zip(dict1[key].items(), dict2[key].items()):
392 |                 if value1 != value2:
393 |                     build_info += f"New value: {key1} -> {value1}\nOld value: {key2} -> {value2}\n"
396 |
397 | self.logger.info(build_info)
398 |
399 | async def upload_base64_image(self, b64_image, name, image_type="input", overwrite=False):
400 |
401 | if b64_image.startswith("data:image"):
402 | header, b64_image = b64_image.split(",", 1)
403 | file_type = header.split(";")[0].split(":")[1].split("/")[1]
404 | else:
405 | raise ValueError("Invalid base64 image format.")
406 |
407 | image_data = base64.b64decode(b64_image)
408 |
409 | data = aiohttp.FormData()
410 | data.add_field('image', image_data, filename=f"{name}.{file_type}", content_type=f'image/{file_type}')
411 | data.add_field('type', image_type)
412 | data.add_field('overwrite', str(overwrite).lower())
413 |
414 | async with aiohttp.ClientSession() as session:
415 | async with session.post(f"{self.backend_id}/upload/image", data=data) as response:
416 | return json.loads(await response.read())
417 |
--------------------------------------------------------------------------------
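
Note: update_api_json above patches a ComfyUI workflow JSON using its companion *_reflex.json mapping (item name -> node id(s), plus optional per-node "override" actions). A minimal sketch of the core patching loop; both dicts here are hypothetical stand-ins for a workflow and its reflex file:

    workflow = {
        "3": {"inputs": {"seed": 1, "steps": 20}},
        "2": {"inputs": {"text": "a tank"}},
    }
    reflex = {"seed": 3, "prompt": 2}  # item name -> node id
    update_mapping = {
        "seed": {"seed": 42, "noise_seed": 42},
        "prompt": {"text": "a red tank, sunset"},
    }

    for item, node_id in reflex.items():
        ids = node_id if isinstance(node_id, list) else [node_id]
        for id_ in ids:
            node = workflow.get(str(id_))
            if node and item in update_mapping:
                node["inputs"].update(update_mapping[item])

    print(workflow["3"]["inputs"])  # {'seed': 42, 'steps': 20, 'noise_seed': 42}
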
/DrawBridgeAPI/backend/liblibai.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import traceback
4 |
5 | from .base import Backend
6 |
7 |
8 | class AIDRAW(Backend):
9 |
10 | def __init__(self, **kwargs):
11 | super().__init__(**kwargs)
12 | self.logger = self.setup_logger('[LiblibAI]')
13 |
14 | async def heart_beat(self, id_):
15 |         self.logger.info(f"{id_} request started")
16 | for i in range(60):
17 |
18 | response = await self.http_request(
19 | method="POST",
20 | target_url=f"https://liblib-api.vibrou.com/gateway/sd-api/generate/progress/msg/v3/{id_}",
21 | headers=self.headers,
22 | content=json.dumps({"flag": 0}),
23 | verify=False
24 | )
25 |
26 |             # check the request result and handle errors
27 |             if response.get('error') == "error":
28 |                 self.logger.warning(f"Failed to request: {response}")
29 |                 raise RuntimeError('Server returned an error')
30 |             # '执行异常' ("execution error") is the literal status string the API returns
31 |             if response['code'] != 0 or response['data']['statusMsg'] == '执行异常':
32 |                 raise RuntimeError('Server returned an error')
32 |
33 | images = response['data']['images']
34 |
35 | if images is None:
36 |                 self.logger.info(f"Heartbeat {i+1}: no result yet")
37 | await asyncio.sleep(5)
38 | continue
39 | else:
40 |                 # await self.set_backend_working_status(available=True)
41 |                 for img in images:  # renamed from "i" to avoid shadowing the heartbeat counter
42 |                     if 'porn' in img['previewPath']:
43 |                         self.nsfw_detected = True
44 |                         self.logger.warning("NSFW image detected on the API side")
45 |                     else:
46 |                         self.logger.img(f"Image url: {img['previewPath']}")
47 |                         self.img_url.append(img['previewPath'])
48 |                         self.comment = img['imageInfo']
49 |                 break
50 |
51 | async def update_progress(self):
52 |         # overrides the base method (no-op)
53 | pass
54 |
55 | async def check_backend_usability(self):
56 | pass
57 |
58 | async def err_formating_to_sd_style(self):
59 |
60 | if self.nsfw_detected:
61 | await self.return_build_image()
62 | else:
63 | await self.download_img()
64 |
65 | self.format_api_respond()
66 |
67 | self.result = self.build_respond
68 |
69 | async def posting(self):
70 |
71 | if self.flux:
72 | input_ = {
73 | "checkpointId": 2295774,
74 | "generateType": 17,
75 | "frontCustomerReq": {
76 | "windowId": "",
77 | "tabType": "txt2img",
78 | "conAndSegAndGen": "gen"
79 | },
80 | "adetailerEnable": 0,
81 | "text2imgV3": {
82 | "clipSkip": 2,
83 | "checkPointName": 2295774,
84 | "prompt": self.prompt,
85 | "negPrompt": self.negative_prompt,
86 | "seed": self.seed,
87 | "randnSource": 0,
88 | "samplingMethod": 31,
89 | "imgCount": self.batch_size,
90 | "samplingStep": self.steps,
91 | "cfgScale": self.scale,
92 | "width": self.width,
93 | "height": self.height
94 | },
95 | "taskQueuePriority": 1
96 | }
97 |
98 | else:
99 | input_ = {
100 | "checkpointId": self.model_path,
101 | "generateType": 1,
102 | "frontCustomerReq": {
103 | # "frontId": "f46f8e35-5728-4ded-b163-832c3b85009d",
104 | "windowId": "",
105 | "tabType": "txt2img",
106 | "conAndSegAndGen": "gen"
107 | }
108 | ,
109 | "adetailerEnable": 0,
110 | "text2img": {
111 | "prompt": self.prompt,
112 | "negativePrompt": self.negative_prompt,
113 | "extraNetwork": "",
114 | "samplingMethod": 0,
115 | "samplingStep": self.steps,
116 | "width": self.width,
117 | "height": self.height,
118 | "imgCount": self.batch_size,
119 | "cfgScale": self.scale,
120 | "seed": self.seed,
121 | "seedExtra": 0,
122 | "hiResFix": 0,
123 | "restoreFaces": 0,
124 | "tiling": 0,
125 | "clipSkip": 2,
126 | "randnSource": 0,
127 | "tileDiffusion": None
128 | }
129 | ,
130 | "taskQueuePriority": 1
131 | }
132 |
133 | if self.enable_hr:
134 |
135 | hr_payload = {
136 | "hiresSteps": self.hr_second_pass_steps,
137 | "denoisingStrength": self.denoising_strength,
138 | "hiResFix": 1 if self.enable_hr else 0,
139 | "hiResFixInfo": {
140 | "upscaler": 6,
141 | "upscaleBy": self.hr_scale,
142 | "resizeWidth": int(self.width * self.hr_scale),
143 | "resizeHeight": int(self.height * self.hr_scale)
144 | }
145 | }
146 |
147 | input_['text2img'].update(hr_payload)
148 |
149 | new_headers = {
150 | "Accept": "application/json, text/plain, */*",
151 | "Token": self.backend_id
152 | }
153 | self.headers.update(new_headers)
154 |
155 | response = await self.http_request(
156 | method="POST",
157 | target_url="https://liblib-api.vibrou.com/gateway/sd-api/generate/image",
158 | headers=self.headers,
159 | content=json.dumps(input_),
160 | verify=False
161 | )
162 |
163 | # 检查请求结果
164 | if response.get('error') == "error":
165 | self.logger.warning(f"Failed to request: {response}")
166 | else:
167 | task = response
168 |             if task.get('msg') == 'Insufficient power':
169 |                 self.logger.warning('Insufficient credits!')
170 |             self.logger.info(f"API returned {task}")
171 | task_id = task['data']
172 | await self.heart_beat(task_id)
173 |
174 | await self.err_formating_to_sd_style()
175 |
176 |
177 |
--------------------------------------------------------------------------------
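
Note: the hosted backends (LiblibAI above, SeaArt/TusiArt/YunJie below) all share the same heartbeat shape: submit a task, then poll a status endpoint every few seconds until images appear or the attempt budget runs out. The generic pattern as a sketch; fetch_status is a hypothetical coroutine standing in for the per-backend http_request call:

    import asyncio

    async def poll_until_done(fetch_status, task_id, attempts=60, interval=5):
        # Poll the status endpoint; a non-None result means the task finished.
        for _ in range(attempts):
            result = await fetch_status(task_id)
            if result is not None:
                return result
            await asyncio.sleep(interval)
        raise RuntimeError(f"Task {task_id} not finished after {attempts} polls")
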
/DrawBridgeAPI/backend/midjourney.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import aiohttp
4 |
5 | from .base import Backend
6 | from PIL import Image
7 | import asyncio
8 | import json
9 | import traceback
10 | import math
11 | import zipfile
12 | import io
13 | import os
14 | import aiofiles
15 | import base64
16 |
17 | from pathlib import Path
18 |
19 | class AIDRAW(Backend):
20 |
21 | def __init__(self, **kwargs):
22 | super().__init__(**kwargs)
23 |
24 | self.logger = self.setup_logger('[MidJourney]')
25 |
26 | async def heart_beat(self, id_):
27 | task_url = f"{self.backend_id}/mj/task/{id_}/fetch"
28 |
29 | while True:
30 | try:
31 | resp = await self.http_request("GET", task_url, format=True)
32 | status = resp.get('status')
33 | content = ''
34 |
35 |                 if status == "SUCCESS":
36 |                     content = resp['imageUrl']
37 |                     self.img_url.append(resp['imageUrl'])
38 |                     self.logger.img(f"Task {id_} completed, image URL: {resp['imageUrl']}")
39 |                     return content
40 | 
41 |                 elif status == "FAILED":
42 |                     content = resp.get('failReason') or 'unknown reason'
43 |                     self.logger.error(f"Task failed, reason: {content}")
44 | 
45 |                     raise Exception(f"Task failed, reason: {content}")
46 | 
47 |                 elif status == "NOT_START":
48 |                     content = 'task not started'
49 | 
50 |                 elif status == "IN_PROGRESS":
51 |                     content = 'task in progress'
52 |                     if resp.get('progress'):
53 |                         content += f", progress: {resp['progress']}"
54 | 
55 |                 elif status == "SUBMITTED":
56 |                     content = 'task submitted for processing'
57 | 
58 |                 elif status == "FAILURE":
59 |                     fail_reason = resp.get('failReason') or 'unknown reason'
60 |                     self.logger.error(f"Task failed, reason: {fail_reason}")
61 |                     if "Banned prompt detected" in fail_reason:
62 |                         await self.return_build_image("NSFW Prompt Detected")
63 |                         return
64 |                     else:
65 |                         raise Exception(f"Task failed, reason: {fail_reason}")
66 | 
67 |                 else:
68 |                     content = status
69 | 
70 |                 self.logger.info(f"Task {id_} status: {content}")
71 |
72 | await asyncio.sleep(5)
73 |
74 | except Exception as e:
75 |                 self.logger.error(f"Heartbeat monitor for task {id_} errored: {str(e)}")
76 | raise
77 |
78 | async def update_progress(self):
79 |         # overrides the base method (no-op)
80 | pass
81 |
82 | async def get_shape(self):
83 |
84 | gcd = math.gcd(self.width, self.height)
85 |
86 | simplified_width = self.width // gcd
87 | simplified_height = self.height // gcd
88 |
89 | ar = f"{simplified_width}:{simplified_height}"
90 |
91 | return ar
92 |
93 | async def check_backend_usability(self):
94 | pass
95 |
96 | async def split_image(self):
97 | img = Image.open(io.BytesIO(self.img_btyes[0]))
98 | width, height = img.size
99 |
100 | half_width = width // 2
101 | half_height = height // 2
102 |
103 | coordinates = [(0, 0, half_width, half_height),
104 | (half_width, 0, width, half_height),
105 | (0, half_height, half_width, height),
106 | (half_width, half_height, width, height)]
107 |
108 | images = [img.crop(c) for c in coordinates]
109 |
110 | images_bytes = [io.BytesIO() for _ in range(4)]
111 | base64_images = []
112 |
113 | for i in range(4):
114 | images[i].save(images_bytes[i], format='PNG')
115 |
116 | images_bytes[i].seek(0)
117 | base64_image = base64.b64encode(images_bytes[i].getvalue()).decode('utf-8')
118 |
119 | base64_images.append(base64_image)
120 |
121 | self.img_btyes += images_bytes
122 | self.img += base64_images
123 |
124 | # async def formating_to_sd_style(self):
125 | #
126 | # await self.download_img()
127 | # await self.split_image()
128 | #
129 | # self.format_api_respond()
130 | # self.result = self.build_respond
131 |
132 | async def posting(self):
133 |
134 | accept_ratio = await self.get_shape()
135 |
136 | ntags = f"--no {self.negative_prompt}" if self.negative_prompt else ""
137 |
138 | build_prompt = f"{self.prompt} --ar {accept_ratio} --seed {self.seed}" + ' ' + ntags + ' '
139 |
140 | payload = {
141 | "prompt": build_prompt
142 | }
143 |
144 | if self.config.midjourney['auth_toekn'][self.count]:
145 | self.headers.update({"mj-api-secret": self.config.midjourney['auth_toekn'][self.count]})
146 |
147 | resp = await self.http_request(
148 | "POST",
149 | f"{self.backend_url}/mj/submit/imagine",
150 | headers=self.headers,
151 | content=json.dumps(payload),
152 | format=True
153 | )
154 |
155 | if resp.get('code') == 24:
156 | await self.return_build_image(text="NSFW Prompt Detected")
157 |
158 | elif resp.get('code') == 1:
159 | task_id = resp.get('result')
160 | self.task_id = task_id
161 |             self.logger.info(f"Task submitted, task id: {task_id}")
162 |
163 | await self.heart_beat(task_id)
164 | await self.download_img()
165 | await self.split_image()
166 |
167 | self.format_api_respond()
168 | self.result = self.build_respond
169 |
--------------------------------------------------------------------------------
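
Note: the two pure-image helpers in midjourney.py can be verified in isolation: get_shape reduces width:height by their gcd to build the --ar flag, and split_image crops the 2x2 grid Midjourney returns into four quadrants. A self-contained sketch of both:

    import math
    from PIL import Image

    def aspect_ratio(width: int, height: int) -> str:
        g = math.gcd(width, height)
        return f"{width // g}:{height // g}"

    def quadrants(img: Image.Image) -> list[Image.Image]:
        w, h = img.size
        boxes = [(0, 0, w // 2, h // 2), (w // 2, 0, w, h // 2),
                 (0, h // 2, w // 2, h), (w // 2, h // 2, w, h)]
        return [img.crop(b) for b in boxes]

    print(aspect_ratio(1024, 768))  # -> "4:3"
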
/DrawBridgeAPI/backend/novelai.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import aiohttp
4 |
5 | from .base import Backend
6 | import asyncio
7 | import json
8 | import traceback
9 | import zipfile
10 | import io
11 | import os
12 | import aiofiles
13 | import base64
14 |
15 | from pathlib import Path
16 |
17 | class AIDRAW(Backend):
18 |
19 | def __init__(self, **kwargs):
20 | super().__init__(**kwargs)
21 |
22 | self.logger = self.setup_logger('[NovelAI]')
23 |
24 | self.reflex_dict['sampler'] = {
25 | "DPM++ 2M": "k_dpmpp_2m",
26 | "DPM++ SDE": "k_dpmpp_sde",
27 | "DPM++ 2M SDE": "k_dpmpp_2m_sde",
28 | "DPM++ 2S a": "k_dpmpp_2s_ancestral",
29 | "Euler a": "k_euler_ancestral",
30 | "Euler": "k_euler",
31 | "DDIM": "ddim_v3"
32 | }
33 |
34 | async def update_progress(self):
35 |         # overrides the base method (no-op)
36 | pass
37 |
38 | async def get_shape(self):
39 | aspect_ratio = self.width / self.height
40 |
41 | resolutions = {
42 | "832x1216": (832, 1216),
43 | "1216x832": (1216, 832),
44 | "1024x1024": (1024, 1024),
45 | }
46 |
47 | closest_resolution = min(resolutions.keys(),
48 | key=lambda r: abs((resolutions[r][0] / resolutions[r][1]) - aspect_ratio))
49 |
50 | self.width, self.height = resolutions[closest_resolution]
51 |
52 | return closest_resolution
53 |
54 | async def check_backend_usability(self):
55 | pass
56 |
57 | async def err_formating_to_sd_style(self):
58 |
59 | if self.nsfw_detected:
60 | await self.return_build_image()
61 |
62 | self.format_api_respond()
63 |
64 | self.result = self.build_respond
65 |
66 | async def posting(self):
67 |
68 | self.sampler = self.reflex_dict['sampler'].get(self.sampler, "k_euler_ancestral")
69 |
70 | header = {
71 | "authorization": "Bearer " + self.backend_id,
72 | ":authority": "https://api.novelai.net",
73 | ":path": "/ai/generate-image",
74 | "content-type": "application/json",
75 | "referer": "https://novelai.net",
76 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
77 | }
78 |
79 | post_api = "https://image.novelai.net/ai/generate-image"
80 |
81 | await self.get_shape()
82 |
83 | parameters = {
84 | "width": self.width,
85 | "height": self.height,
86 | "qualityToggle": False,
87 | "scale": self.scale,
88 | "sampler": self.sampler,
89 | "steps": self.steps,
90 | "seed": self.seed,
91 | "n_samples": 1,
92 | "ucPreset": 0,
93 | "negative_prompt": self.negative_prompt,
94 | }
95 |
96 | json_data = {
97 | "input": self.prompt,
98 | "model": self.model_path,
99 | "parameters": parameters
100 | }
101 |
102 | async def send_request():
103 |
104 | async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=300)) as session:
105 | while True:
106 | async with session.post(
107 | post_api,
108 | headers=header,
109 | json=json_data,
110 | ssl=False,
111 | proxy=self.config.server_settings['proxy']
112 | ) as response:
113 |
114 | if response.status == 429:
115 | resp_text = await response.json()
116 | if resp_text['message'] == 'Rate limited':
117 |                                 raise Exception("Rate limit triggered")
118 |                             self.logger.warning(f"Token busy..., {resp_text}")
119 | wait_time = 5
120 | await asyncio.sleep(wait_time)
121 |                         else:
122 |                             response_data = await response.read()
123 |                             try:
124 |                                 with zipfile.ZipFile(io.BytesIO(response_data)) as z:
125 |                                     z.extractall(self.save_path)
126 |                                 return
127 |                             except zipfile.BadZipFile:
128 |                                 # not a zip archive: the API returned a JSON error body instead
129 |                                 try:
130 |                                     resp_text = await response.json()
131 |                                 except Exception:
132 |                                     return
133 |                                 if resp_text['statusCode'] == 402:
134 |                                     self.logger.warning(f"Insufficient token balance, {resp_text}")
135 |                                 return
133 |
134 | await send_request()
135 |
136 | await self.images_to_base64(self.save_path)
137 | await self.err_formating_to_sd_style()
138 |
139 | async def images_to_base64(self, save_path):
140 |
141 | for filename in os.listdir(save_path):
142 | if filename.endswith('.png'):
143 | file_path = os.path.join(save_path, filename)
144 | async with aiofiles.open(file_path, "rb") as image_file:
145 | image_data = await image_file.read()
146 | encoded_string = base64.b64encode(image_data).decode('utf-8')
147 | self.img.append(encoded_string)
148 | self.img_btyes.append(image_data)
149 |
--------------------------------------------------------------------------------
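
Note: NovelAI only accepts a fixed set of resolutions, so get_shape above snaps the requested size to the supported resolution with the closest aspect ratio. The same logic standalone:

    RESOLUTIONS = [(832, 1216), (1216, 832), (1024, 1024)]

    def snap(width: int, height: int) -> tuple[int, int]:
        target = width / height
        return min(RESOLUTIONS, key=lambda wh: abs(wh[0] / wh[1] - target))

    print(snap(512, 768))  # -> (832, 1216)
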
/DrawBridgeAPI/backend/seaart.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import traceback
4 | from ..locales import _
5 |
6 | from .base import Backend
7 |
8 |
9 | class AIDRAW(Backend):
10 |
11 | def __init__(self, **kwargs):
12 | super().__init__(**kwargs)
13 |
14 | self.logger = self.setup_logger('[SeaArt]')
15 |
16 | async def heart_beat(self, id_):
17 |         self.logger.info(f"{id_} request started")
19 | data = json.dumps({"task_ids": [id_]})
20 | for i in range(60):
21 | response = await self.http_request(
22 | method="POST",
23 | target_url="https://www.seaart.me/api/v1/task/batch-progress",
24 | headers=self.headers,
25 | content=data
26 | )
27 |
28 | if isinstance(response, dict) and 'error' in response:
29 |                 raise RuntimeError(f"Request failed, error: {response.get('details')}")
30 | else:
31 | items = response.get('data', {}).get('items', [])
32 |
33 | if not items:
34 |                     self.logger.info(f"Heartbeat {i + 1}: no result yet")
35 | await asyncio.sleep(5)
36 | continue
37 |
38 | for item in items:
39 | urls = item.get("img_uris")
40 |
41 | if urls is None:
42 |                         self.logger.info(f"Heartbeat {i + 1}: no result yet")
43 | await asyncio.sleep(5)
44 | continue
45 |
46 | elif isinstance(urls, list):
47 | for url in urls:
48 |                         self.logger.img(f"Image url: {url['url']}")
49 | self.img_url.append(url['url'])
50 | return
51 |
52 |         raise RuntimeError(f"Task {id_} still not finished after 60 heartbeats")
53 |
54 |
55 | async def update_progress(self):
56 |         # overrides the base method (no-op)
57 | pass
58 |
59 | async def check_backend_usability(self):
60 | pass
61 |
62 | async def err_formating_to_sd_style(self):
63 |
64 | await self.download_img()
65 |
66 | self.format_api_respond()
67 |
68 | self.result = self.build_respond
69 |
70 | async def posting(self):
71 |
72 | input_ = {
73 | "action": 1,
74 | "art_model_no": self.model_path or "1a486c58c2aa0601b57ddc263fc350d0",
75 | "category": 1,
76 | "speed_type": 1,
77 | "meta":
78 | {
79 | "prompt": self.prompt,
80 | "negative_prompt": self.negative_prompt,
81 | "restore_faces": self.restore_faces,
82 | "seed": self.seed,
83 | "sampler_name": self.sampler,
84 | "width": self.width,
85 | "height": self.height,
86 | "steps": self.steps,
87 | "cfg_scale": self.scale,
88 | "lora_models": [],
89 | "vae": "vae-ft-mse-840000-ema-pruned",
90 | "clip_skip": 1,
91 | "hr_second_pass_steps": 20,
92 | "lcm_mode": 0,
93 | "n_iter": 1,
94 | "embeddings": []
95 | }
96 | }
97 |
98 | if self.enable_hr:
99 |
100 | hr_payload = {
101 | "hr_second_pass_steps": self.hr_second_pass_steps,
102 | "enable_hr": True,
103 | "hr_upscaler": "4x-UltraSharp",
104 | "hr_scale": self.hr_scale,
105 | }
106 |
107 | input_['meta'].update(hr_payload)
108 |
109 | new_headers = {
110 | "Accept": "application/json, text/plain, */*",
111 | "Token": self.backend_id
112 | }
113 |
114 | self.headers.update(new_headers)
115 |
116 | data = json.dumps(input_)
117 | response = await self.http_request(
118 | method="POST",
119 | target_url="https://www.seaart.me/api/v1/task/create",
120 | headers=self.headers,
121 | content=data
122 | )
123 |
124 | if isinstance(response, dict) and 'error' in response:
125 | self.logger.warning(f"{response.get('details')}")
126 | else:
127 | task = response
128 | task_id = task.get('data', {}).get('id')
129 |
130 | if task_id:
131 | await self.heart_beat(task_id)
132 |
133 | await self.err_formating_to_sd_style()
134 |
135 |
--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/tusiart.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import traceback
4 |
5 | from .base import Backend
6 |
7 |
8 | class AIDRAW(Backend):
9 |
10 | def __init__(self, **kwargs):
11 | super().__init__(**kwargs)
12 |
13 | self.logger = self.setup_logger('[TusiArt]')
14 |
15 | async def heart_beat(self, id_):
16 |         self.logger.info(f"{id_} request started")
17 |         self.headers['referer'] = "https://tusiart.com/models"
18 |         self.headers.pop('sec-ch-ua', None)  # avoid KeyError when the header is absent
19 |
20 | for i in range(60):
21 | await asyncio.sleep(5)
22 |             self.logger.info(f"Heartbeat {i + 1}")
23 | response = await self.http_request(
24 | method="GET",
25 | target_url='https://api.tusiart.cn/works/v1/works/tasks?size=20&cursor=0&returnAllTask=true',
26 | headers=self.headers
27 | )
28 |
29 | if isinstance(response, dict) and 'error' in response:
30 | raise RuntimeError(f"Request failed with error: {response.get('details')}")
31 | else:
32 | resp_json = response
33 | all_tasks = resp_json['data']['tasks']
34 | task_found = False
35 | for task in all_tasks:
36 | if task['taskId'] == id_:
37 | task_found = True
38 | if task['status'] == 'WAITING':
39 | break
40 | elif task['status'] == 'FINISH':
41 | matched = False
42 | for img in task['items']:
43 | if 'workspace.tusiassets.com' in img['url']:
44 |                             self.logger.img(f"Image url: {img['url']}")
45 | self.img_url.append(img['url'])
46 | matched = True
47 |
48 | if matched:
49 | return
50 | else:
51 |                             self.logger.info(f"Heartbeat {i + 1}: task FINISHED but no matching URL found")
52 | await asyncio.sleep(5)
53 | break
54 | if not task_found:
55 |                 self.logger.info(f"Task {id_} not found")
56 | await asyncio.sleep(5)
57 | continue
58 |
59 |         raise RuntimeError(f"Task {id_} still not finished after 60 polls")
60 |
61 | async def update_progress(self):
62 |         # overrides the base method (no-op)
63 | pass
64 |
65 | async def check_backend_usability(self):
66 | pass
67 |
68 | async def err_formating_to_sd_style(self):
69 |
70 | await self.download_img()
71 |
72 | self.format_api_respond()
73 |
74 | self.result = self.build_respond
75 |
76 | async def posting(self):
77 |
78 | self.sampler = "Euler a"
79 |
80 | input_ = {
81 | "params":
82 | {
83 | "baseModel":
84 | {
85 | "modelId": self.model_path or "758751795863586176",
86 | "modelFileId": "708770380970509676"
87 | },
88 | "sdxl":
89 | {"refiner": False},
90 | "models": [],
91 | "embeddingModels": [],
92 | "sdVae": "Automatic",
93 | "prompt": self.prompt,
94 | "negativePrompt": self.negative_prompt,
95 | "height": self.height,
96 | "width": self.width,
97 | "imageCount": self.total_img_count,
98 | "steps": self.steps,
99 | "images": [],
100 | "cfgScale": self.scale,
101 | "seed": str(self.seed),
102 | "clipSkip": 2,
103 | "etaNoiseSeedDelta": 31337,
104 | "v1Clip": False,
105 | "samplerName": self.sampler
106 | },
107 | "taskType": "TXT2IMG",
108 | "isRemix": False,
109 | "captchaType": "CLOUDFLARE_TURNSTILE"
110 | }
111 |
112 | if self.enable_hr:
113 |
114 | hr_payload = {
115 | "enableHr": True,
116 | "hrUpscaler": "R-ESRGAN 4x+ Anime6B",
117 | "hrSecondPassSteps": self.hr_second_pass_steps,
118 | "denoisingStrength": self.denoising_strength,
119 | "hrResizeX": int(self.width*self.hr_scale),
120 | "hrResizeY": int(self.height*self.hr_scale)
121 | }
122 |
123 | input_['params'].update(hr_payload)
124 |
125 | new_headers = {
126 | "Authorization": f"Bearer {self.backend_id}",
127 | "Token": self.backend_id,
128 | "referer": self.config.backends[self.backend_type]['referer'][self.count],
129 | "sec-ch-ua": 'Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127'
130 | }
131 | self.headers.update(new_headers)
132 |
133 | data = json.dumps(input_)
134 |
135 | response = await self.http_request(
136 | method="POST",
137 | target_url="https://api.tusiart.cn/works/v1/works/task",
138 | headers=self.headers,
139 | content=data
140 | )
141 |
142 | if isinstance(response, dict) and 'error' in response:
143 | pass
144 | else:
145 | task = response
146 | if task['code'] == '1300100':
147 |                 error_text = f"""
148 |                 Backend: {self.config.tusiart_setting['note'][self.count]} hit a captcha challenge.
149 |                 Please visit https://tusiart.com/ and run one image generation to trigger and solve the captcha.
150 |                 The backend has been marked unavailable; restart the API to use it again.
151 |                 """
152 |                 self.logger.warning("Captcha challenge encountered!")
153 | raise RuntimeError(error_text)
154 | task_id = task['data']['task']['taskId']
155 | await self.heart_beat(task_id)
156 |
157 | await self.err_formating_to_sd_style()
158 |
159 |
160 |
--------------------------------------------------------------------------------
/DrawBridgeAPI/backend/yunjie.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import traceback
4 |
5 | from .base import Backend
6 |
7 |
8 | class AIDRAW(Backend):
9 |
10 | def __init__(self, **kwargs):
11 | super().__init__(**kwargs)
12 |
13 | self.logger = self.setup_logger('[YunJie]')
14 |
15 | async def heart_beat(self, id_):
16 |         self.logger.info(f"{id_} request started")
17 | for i in range(60):
18 | await asyncio.sleep(5)
19 |
20 | data = json.dumps({"taskId": id_})
21 | response = await self.http_request(
22 | method="POST",
23 | target_url="https://www.yunjie.art/rayvision/aigc/customer/task/progress",
24 | headers=self.headers,
25 | content=data
26 | )
27 |
28 | if isinstance(response, dict) and 'error' in response:
29 |                 raise RuntimeError(f"Request failed, error: {response.get('details')}")
30 | else:
31 | resp_json = response
32 |                 if resp_json['code'] == "Account.Token.Expired":
33 |                     error_text = f"""
34 |                     Backend: {self.config.yunjie_setting['note'][self.count]} token expired.
35 |                     Please log in at https://www.yunjie.art/ to obtain a new token.
36 |                     """
37 |                     self.logger.warning("Token expired")
38 |                     raise RuntimeError(error_text)
39 |                 items = resp_json.get('data', {}).get('data', [])
40 | 
41 |                 if not items:
42 |                     self.logger.info(f"Heartbeat {i + 1}: no result yet")
43 |                     continue
44 |
45 | for item in items:
46 | url = item.get("url")
47 |
48 | if url:
49 |                         self.logger.img(f"Image url: {url}")
50 | self.img_url.append(url)
51 | return
52 |
53 |         raise RuntimeError(f"Task {id_} still not finished after 60 heartbeats")
54 |
55 | async def update_progress(self):
56 |         # overrides the base method (no-op)
57 | pass
58 |
59 | async def check_backend_usability(self):
60 | pass
61 |
62 | async def err_formating_to_sd_style(self):
63 |
64 | await self.download_img()
65 |
66 | self.format_api_respond()
67 |
68 | self.result = self.build_respond
69 |
70 | async def posting(self):
71 |
72 |         input_ = {
73 |             "genModel": "advance",
74 |             "initImage": "",
75 |             "modelUuid": self.model_path or "MGC-17d172ee37c1b000",
76 |             "samplingMethod": self.sampler,
77 |             "cfgScale": self.scale,
78 |             "samplingSteps": self.steps,
79 |             "plugins": [],
80 |             "clipSkip": 2,
81 |             "etaNoiseSeedDelta": 31337,
82 |             "prompt": self.prompt,
83 |             "negativePrompt": self.negative_prompt,
84 |             "resolutionX": self.width,
85 |             "resolutionY": self.height,
86 |             "genCount": self.total_img_count,
87 |             "seed": self.seed
88 |         }
91 |
92 | if self.enable_hr:
93 |
94 | hr_payload = {
95 | "hires":
96 | {"hrSecondPassSteps": self.hr_second_pass_steps,
97 | "denoisingStrength": self.denoising_strength,
98 | "hrScale": self.hr_scale,
99 | "hrUpscaler": "R-ESRGAN 4x+"
100 | }
101 | }
102 |
103 | input_.update(hr_payload)
104 |
105 | new_headers = {
106 | "Token": self.backend_id
107 | }
108 | self.headers.update(new_headers)
109 | data = json.dumps(input_)
110 |
111 | response = await self.http_request(
112 | method="POST",
113 | target_url="https://www.yunjie.art/rayvision/aigc/customer/task/imageGen",
114 | headers=self.headers,
115 | content=data
116 | )
117 |
118 | if response.get("error", None):
119 |             self.logger.error(f"Request failed, error: {response.get('details')}")
120 | else:
121 | task = response
122 | task_id = task['data']['taskId']
123 | await self.heart_beat(task_id)
124 | await self.err_formating_to_sd_style()
125 |
126 |
--------------------------------------------------------------------------------
/DrawBridgeAPI/base_config.py:
--------------------------------------------------------------------------------
1 | import yaml as yaml_
2 | import shutil
3 | import redis
4 | import json
5 | import logging
6 | import os
7 |
8 | from pydantic import BaseModel
9 | from typing import Dict, List
10 | from pathlib import Path
11 |
12 | from .locales import _
13 |
14 |
15 | redis_client = None
16 |
17 | api_current_dir = os.path.dirname(os.path.abspath(__file__))
18 |
19 |
20 | class CustomFormatter(logging.Formatter):
21 | def __init__(self, fmt=None, datefmt=None, style='%', prefix="[MAIN]"):
22 | super().__init__(fmt, datefmt, style)
23 | self.prefix = prefix
24 |
25 | def format(self, record):
26 | original_msg = record.msg
27 | record.msg = f"{self.prefix} {original_msg}"
28 | formatted_msg = super().format(record)
29 |         record.msg = original_msg  # restore the original message
30 | return formatted_msg
31 |
32 |
33 | # dictionary tracking the loggers that have been created
34 | 
35 | empty_dict = {"token": None}
36 | 
39 |
40 | class CustomFormatter(logging.Formatter):
41 | """Custom formatter to add a fixed color for the prefix and variable colors for the log levels."""
42 |
43 | def __init__(self, prefix="", img_prefix="", *args, **kwargs):
44 | super().__init__(*args, **kwargs)
45 |         self.prefix = f"\033[94m{prefix}\033[0m"  # fixed blue prefix
46 |         self.img_prefix = f"\033[93m{img_prefix}\033[0m"  # fixed yellow prefix
47 | self.FORMATS = {
48 | logging.DEBUG: f"{self.prefix} \033[94m[DEBUG]\033[0m %(message)s",
49 | logging.INFO: f"{self.prefix} \033[92m[INFO]\033[0m %(message)s",
50 | logging.WARNING: f"{self.prefix} \033[93m[WARNING]\033[0m %(message)s",
51 | logging.ERROR: f"{self.prefix} \033[91m[ERROR]\033[0m %(message)s",
52 | logging.CRITICAL: f"{self.prefix} \033[95m[CRITICAL]\033[0m %(message)s",
53 |             "IMG": f"{self.img_prefix} \033[93m[IMG]\033[0m %(message)s"  # IMG logs with the yellow prefix
54 | }
55 |
56 | def format(self, record):
57 | log_fmt = self.FORMATS.get(record.levelno, self.FORMATS.get("IMG"))
58 | formatter = logging.Formatter(log_fmt)
59 | return formatter.format(record)
60 |
61 |
62 | class CustomLogger(logging.Logger):
63 | """Custom logger class to add an img method."""
64 |
65 | def __init__(self, name, level=logging.DEBUG):
66 | super().__init__(name, level)
67 |         self.img_level = 25  # custom log level
68 | logging.addLevelName(self.img_level, "IMG")
69 |
70 | def img(self, msg, *args, **kwargs):
71 | if self.isEnabledFor(self.img_level):
72 | self._log(self.img_level, msg, args, **kwargs)
73 |
74 |
75 | loggers = {}
76 |
77 |
78 | def setup_logger(custom_prefix="[MAIN]"):
79 |     # return the existing logger if one with this prefix was already created
80 |     if custom_prefix in loggers:
81 |         return loggers[custom_prefix]
82 | 
83 |     # use the custom Logger class
84 |     logger = CustomLogger(custom_prefix)
85 |     logger.setLevel(logging.DEBUG)
86 | 
87 |     # console handler at DEBUG level
88 |     console_handler = logging.StreamHandler()
89 |     console_handler.setLevel(logging.DEBUG)
90 | 
91 |     # file handler that saves all logs to log.log
92 |     file_handler = logging.FileHandler('log.log')
93 |     file_handler.setLevel(logging.DEBUG)
94 | 
95 |     # file handler that saves error logs to log_error.log
96 |     error_file_handler = logging.FileHandler('log_error.log')
97 |     error_file_handler.setLevel(logging.ERROR)
98 | 
99 |     # file handler that saves IMG logs to log_img.log
100 |     img_file_handler = logging.FileHandler('log_img.log')
101 |     img_file_handler.setLevel(logger.img_level)
102 | 
103 |     # create the formatter and attach it to every handler
104 |     formatter = CustomFormatter(prefix=custom_prefix, img_prefix=custom_prefix)
105 |     console_handler.setFormatter(formatter)
106 |     file_handler.setFormatter(formatter)
107 |     error_file_handler.setFormatter(formatter)
108 |     img_file_handler.setFormatter(formatter)
109 | 
110 |     # attach the handlers to the logger
111 |     logger.addHandler(console_handler)
112 |     logger.addHandler(file_handler)
113 |     logger.addHandler(error_file_handler)
114 |     logger.addHandler(img_file_handler)
115 | 
116 |     # cache the created logger
117 |     loggers[custom_prefix] = logger
118 |
119 | return logger
120 |
121 |
122 | class Config(BaseModel):
123 |
124 | backend_name_list: list = []
125 |
126 | server_settings: dict = {}
127 | enable_backends: Dict[str, Dict[str, List[int | Dict]]] = {}
128 | backends: dict = {}
129 |
130 | retry_times: int = 3
131 | proxy: str = ''
132 |
133 | workload_dict: dict = {}
134 |
135 | base_workload_dict: dict = {
136 | "start_time": None,
137 | "end_time": None,
138 | "idle": True,
139 | "available": True,
140 | "fault": False
141 | }
142 |
143 | models_list: list = []
144 |
145 | name_url: dict = {}
146 |
147 |
148 | def package_import(copy_to_config_path):
149 | current_dir = os.path.dirname(os.path.abspath(__file__))
150 | source_template = Path(os.path.join(current_dir, "config_example.yaml")).resolve()
151 | shutil.copy(source_template, copy_to_config_path)
152 |
153 |
154 | class ConfigInit:
155 |
156 | def __init__(self):
157 | self.config = None
158 | self.config_file_path = None
159 | self.logger = setup_logger(custom_prefix="[INIT]")
160 | self.redis_client = None
161 |
162 | def load_config(self):
163 |
164 | with open(self.config_file_path, "r", encoding="utf-8") as f:
165 | yaml_config = yaml_.load(f, Loader=yaml_.FullLoader)
166 | config = Config(**yaml_config)
167 | self.logger.info(_('Loading config file completed'))
168 |
169 | return config
170 |
171 | def init(self, config_file_path):
172 |
173 | self.config_file_path = config_file_path
174 | config = self.load_config()
175 |
176 | welcome_txt = '''
177 | Welcome to
178 | _____ ____ _ _ _____ _____
179 | | __ \ | _ \ (_) | | /\ | __ \ |_ _|
180 | | | | | _ __ __ _ __ __ | |_) | _ __ _ __| | __ _ ___ / \ | |__) | | |
181 | | | | | | '__| / _` | \ \ /\ / / | _ < | '__| | | / _` | / _` | / _ \ / /\ \ | ___/ | |
182 | | |__| | | | | (_| | \ V V / | |_) | | | | | | (_| | | (_| | | __/ / ____ \ | | _| |_
183 | |_____/ |_| \__,_| \_/\_/ |____/ |_| |_| \__,_| \__, | \___| /_/ \_\ |_| |_____|
184 | __/ |
185 | |___/
186 | Follow DiaoDiao, follow DiaoDiao meow
187 | Project repo: https://github.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI
188 | '''
189 |
190 | print(welcome_txt)
191 |
192 | for backend_type, api in config.backends.items():
193 | if api:
194 | for name in api['name']:
195 | key = f"{backend_type}-{name}"
196 |                 config.workload_dict[key] = config.base_workload_dict.copy()
197 |
198 | models_dict = {}
199 | models_dict['is_loaded'] = False
200 | for back_name in list(config.workload_dict.keys()):
201 | models_dict[back_name] = config.models_list
202 |
203 | try:
204 | db_index = config.server_settings['redis_server'][3]
205 | except IndexError:
206 | db_index = 15
207 |
208 | self.redis_client = redis.Redis(
209 | host=config.server_settings['redis_server'][0],
210 | port=config.server_settings['redis_server'][1],
211 | password=config.server_settings['redis_server'][2],
212 | db=db_index
213 | )
214 |
215 | self.logger.info(_('Redis connection successful'))
216 |
217 | workload_json = json.dumps(config.workload_dict)
218 |
219 | rp = self.redis_client.pipeline()
220 | rp.set('workload', workload_json)
221 | rp.set('models', json.dumps(models_dict))
222 | rp.set('styles', json.dumps([]))
223 | rp.execute()
224 |
225 | self.config = config
226 |
227 |
228 | init_instance = ConfigInit()
229 |
--------------------------------------------------------------------------------
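
Note: the custom IMG level registered in base_config.py sits at 25, between INFO (20) and WARNING (30), and gets its own log_img.log handler. A short usage sketch of the module's setup_logger:

    from DrawBridgeAPI.base_config import setup_logger

    logger = setup_logger(custom_prefix='[DEMO]')
    logger.info('regular message')                   # console + log.log
    logger.img('https://example.com/generated.png')  # also written to log_img.log
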
/DrawBridgeAPI/comfyui_workflows/diaopony-hr.json:
--------------------------------------------------------------------------------
1 | {
2 | "4": {
3 | "inputs": {
4 | "ckpt_name": "models\\DiaoDaiaPony - 100 Artists - testing.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderSimple",
7 | "_meta": {
8 | "title": "Load Checkpoint"
9 | }
10 | },
11 | "7": {
12 | "inputs": {
13 | "text": "score_3,poorly drawn,bad anatomy,bad proportions, watercolor painting, brush strokes,3d,2.5d,signature,watermark,bad face,distorted face,messed up eyes,deformed,(low quality, bad quality, worst quality:1.2),bad hand",
14 | "clip": [
15 | "4",
16 | 1
17 | ]
18 | },
19 | "class_type": "CLIPTextEncode",
20 | "_meta": {
21 | "title": "CLIP Text Encode (Negative Prompt)"
22 | }
23 | },
24 | "53": {
25 | "inputs": {
26 | "width": 768,
27 | "height": 1152,
28 | "batch_size": 1
29 | },
30 | "class_type": "EmptyLatentImage",
31 | "_meta": {
32 | "title": "Empty Latent Image"
33 | }
34 | },
35 | "79": {
36 | "inputs": {
37 | "seed": 657283391776279,
38 | "steps": 30,
39 | "cfg": 8,
40 | "sampler_name": "euler",
41 | "scheduler": "karras",
42 | "denoise": 1,
43 | "model": [
44 | "4",
45 | 0
46 | ],
47 | "positive": [
48 | "103",
49 | 0
50 | ],
51 | "negative": [
52 | "7",
53 | 0
54 | ],
55 | "latent_image": [
56 | "53",
57 | 0
58 | ]
59 | },
60 | "class_type": "KSampler",
61 | "_meta": {
62 | "title": "KSampler"
63 | }
64 | },
65 | "88": {
66 | "inputs": {
67 | "filename_prefix": "ComfyUI",
68 | "images": [
69 | "91",
70 | 0
71 | ]
72 | },
73 | "class_type": "SaveImage",
74 | "_meta": {
75 | "title": "Save Image"
76 | }
77 | },
78 | "91": {
79 | "inputs": {
80 | "upscale_by": 2,
81 | "seed": 291655160144038,
82 | "steps": 12,
83 | "cfg": 8,
84 | "sampler_name": "dpmpp_2m",
85 | "scheduler": "karras",
86 | "denoise": 0.2,
87 | "mode_type": "Linear",
88 | "tile_width": 1024,
89 | "tile_height": 1024,
90 | "mask_blur": 8,
91 | "tile_padding": 32,
92 | "seam_fix_mode": "None",
93 | "seam_fix_denoise": 1,
94 | "seam_fix_width": 64,
95 | "seam_fix_mask_blur": 8,
96 | "seam_fix_padding": 16,
97 | "force_uniform_tiles": true,
98 | "tiled_decode": false,
99 | "image": [
100 | "92",
101 | 0
102 | ],
103 | "model": [
104 | "4",
105 | 0
106 | ],
107 | "positive": [
108 | "103",
109 | 0
110 | ],
111 | "negative": [
112 | "7",
113 | 0
114 | ],
115 | "vae": [
116 | "4",
117 | 2
118 | ],
119 | "upscale_model": [
120 | "93",
121 | 0
122 | ]
123 | },
124 | "class_type": "UltimateSDUpscale",
125 | "_meta": {
126 | "title": "Ultimate SD Upscale"
127 | }
128 | },
129 | "92": {
130 | "inputs": {
131 | "samples": [
132 | "99",
133 | 0
134 | ],
135 | "vae": [
136 | "4",
137 | 2
138 | ]
139 | },
140 | "class_type": "VAEDecode",
141 | "_meta": {
142 | "title": "VAE Decode"
143 | }
144 | },
145 | "93": {
146 | "inputs": {
147 | "model_name": "4x-UltraSharp.pth"
148 | },
149 | "class_type": "UpscaleModelLoader",
150 | "_meta": {
151 | "title": "Load Upscale Model"
152 | }
153 | },
154 | "98": {
155 | "inputs": {
156 | "upscale_method": "nearest-exact",
157 | "width": 1152,
158 | "height": 1536,
159 | "crop": "disabled",
160 | "samples": [
161 | "79",
162 | 0
163 | ]
164 | },
165 | "class_type": "LatentUpscale",
166 | "_meta": {
167 | "title": "Upscale Latent"
168 | }
169 | },
170 | "99": {
171 | "inputs": {
172 | "seed": 641400482051274,
173 | "steps": 20,
174 | "cfg": 8,
175 | "sampler_name": "euler",
176 | "scheduler": "normal",
177 | "denoise": 1,
178 | "model": [
179 | "4",
180 | 0
181 | ],
182 | "positive": [
183 | "103",
184 | 0
185 | ],
186 | "negative": [
187 | "7",
188 | 0
189 | ],
190 | "latent_image": [
191 | "98",
192 | 0
193 | ]
194 | },
195 | "class_type": "KSampler",
196 | "_meta": {
197 | "title": "KSampler"
198 | }
199 | },
200 | "103": {
201 | "inputs": {
202 | "text": ",(qianqianjie:1.1),(shinyo yukino:1),roku 6,(miyu (miy u1308):1.1),momoko (momopoco), score_9,score_8_up,score_7_up,score_anime,amazing quality,very aesthetic,absurdres,",
203 | "clip": [
204 | "4",
205 | 1
206 | ]
207 | },
208 | "class_type": "CLIPTextEncode",
209 | "_meta": {
210 | "title": "CLIP Text Encode (Prompt)"
211 | }
212 | }
213 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/diaopony-hr_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "prompt": {"103": {"override": {"text": "append_prompt"}}},
3 | "negative_prompt": {"7": {"override": {"text": "append_negative_prompt"}}},
4 | "sampler": ["79", "99"],
5 | "image_size": {"53": {}, "98": {"override": {"width": "upscale", "height": "upscale"}}},
6 | "output": 88
7 | }
--------------------------------------------------------------------------------
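
Note: in the reflex files, string values under "override" encode an action plus optional underscore-separated arguments, e.g. "upscale_1.5" or "value_20_int"; update_api_json in backend/comfyui.py splits them apart. A sketch of the "value" parsing with a hypothetical helper name:

    def parse_value_override(action: str, original):
        # "value" keeps the original; "value_<raw>_<type>" casts <raw> to <type>.
        if "_" not in action:
            return original
        _, raw, type_name = action.split("_", 2)
        return {"int": int, "float": float, "str": str}[type_name](raw)

    print(parse_value_override("value_20_int", 8))  # -> 20
    print(parse_value_override("value", 8))         # -> 8
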
/DrawBridgeAPI/comfyui_workflows/diaopony-tipo.json:
--------------------------------------------------------------------------------
1 | {
2 | "4": {
3 | "inputs": {
4 | "ckpt_name": "models\\DiaoDaiaPony - 100 Artists - testing.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderSimple",
7 | "_meta": {
8 | "title": "Load Checkpoint"
9 | }
10 | },
11 | "6": {
12 | "inputs": {
13 | "text": [
14 | "50",
15 | 0
16 | ],
17 | "clip": [
18 | "4",
19 | 1
20 | ]
21 | },
22 | "class_type": "CLIPTextEncode",
23 | "_meta": {
24 | "title": "CLIP Text Encode (TIPO Prompt)"
25 | }
26 | },
27 | "7": {
28 | "inputs": {
29 | "text": "score_3,poorly drawn,bad anatomy,bad proportions, watercolor painting, brush strokes,3d,2.5d,signature,watermark,bad face,distorted face,messed up eyes,deformed,(low quality, bad quality, worst quality:1.2),bad hand",
30 | "clip": [
31 | "4",
32 | 1
33 | ]
34 | },
35 | "class_type": "CLIPTextEncode",
36 | "_meta": {
37 | "title": "CLIP Text Encode (Negative Prompt)"
38 | }
39 | },
40 | "8": {
41 | "inputs": {
42 | "samples": [
43 | "52",
44 | 0
45 | ],
46 | "vae": [
47 | "4",
48 | 2
49 | ]
50 | },
51 | "class_type": "VAEDecode",
52 | "_meta": {
53 | "title": "VAE Decode"
54 | }
55 | },
56 | "50": {
57 | "inputs": {
58 | "tags": "\n\nscore_9,score_8_up,score_7_up,score_anime,amazing quality,very aesthetic,absurdres",
59 | "nl_prompt": "An illustration of",
60 | "ban_tags": "text, censor, speech, say, illustrations, doll",
61 | "tipo_model": "KBlueLeaf/TIPO-500M",
62 | "format": "<|special|>, \n<|characters|>, <|copyrights|>, \n<|artist|>, \n\n<|general|>,\n\n<|extended|>.\n\n<|quality|>, <|meta|>, <|rating|>",
63 | "width": 1024,
64 | "height": 1024,
65 | "temperature": 0.5,
66 | "top_p": 0.95,
67 | "min_p": 0.05,
68 | "top_k": 80,
69 | "tag_length": "long",
70 | "nl_length": "long",
71 | "seed": 1763
72 | },
73 | "class_type": "TIPO",
74 | "_meta": {
75 | "title": "TIPO"
76 | }
77 | },
78 | "52": {
79 | "inputs": {
80 | "seed": 11451,
81 | "steps": 20,
82 | "cfg": 8,
83 | "sampler_name": "euler",
84 | "scheduler": "normal",
85 | "denoise": 1,
86 | "model": [
87 | "4",
88 | 0
89 | ],
90 | "positive": [
91 | "6",
92 | 0
93 | ],
94 | "negative": [
95 | "7",
96 | 0
97 | ],
98 | "latent_image": [
99 | "53",
100 | 0
101 | ]
102 | },
103 | "class_type": "KSampler",
104 | "_meta": {
105 | "title": "KSampler"
106 | }
107 | },
108 | "53": {
109 | "inputs": {
110 | "width": 1152,
111 | "height": 1536,
112 | "batch_size": 1
113 | },
114 | "class_type": "EmptyLatentImage",
115 | "_meta": {
116 | "title": "Empty Latent Image"
117 | }
118 | },
119 | "72": {
120 | "inputs": {
121 | "filename_prefix": "ComfyUI",
122 | "images": [
123 | "8",
124 | 0
125 | ]
126 | },
127 | "class_type": "SaveImage",
128 | "_meta": {
129 | "title": "Save Image"
130 | }
131 | }
132 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/diaopony-tipo_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "tipo": {"50": {"override": {"tags": "append_prompt"}}},
3 | "sampler": 52,
4 | "image_size": 53,
5 | "output": 72
6 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/flux-dev.json:
--------------------------------------------------------------------------------
1 | {
2 | "1": {
3 | "inputs": {
4 | "ckpt_name": "models\\flux1-dev-bnb-nf4-v2.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderNF4",
7 | "_meta": {
8 | "title": "CheckpointLoaderNF4"
9 | }
10 | },
11 | "2": {
12 | "inputs": {
13 | "text": "a tank",
14 | "clip": [
15 | "1",
16 | 1
17 | ]
18 | },
19 | "class_type": "CLIPTextEncode",
20 | "_meta": {
21 | "title": "CLIP Text Encode (Prompt)"
22 | }
23 | },
24 | "3": {
25 | "inputs": {
26 | "seed": 861133332082627,
27 | "steps": 20,
28 | "cfg": 1,
29 | "sampler_name": "euler",
30 | "scheduler": "simple",
31 | "denoise": 1,
32 | "model": [
33 | "1",
34 | 0
35 | ],
36 | "positive": [
37 | "2",
38 | 0
39 | ],
40 | "negative": [
41 | "2",
42 | 0
43 | ],
44 | "latent_image": [
45 | "4",
46 | 0
47 | ]
48 | },
49 | "class_type": "KSampler",
50 | "_meta": {
51 | "title": "KSampler"
52 | }
53 | },
54 | "4": {
55 | "inputs": {
56 | "width": 512,
57 | "height": 768,
58 | "batch_size": 1
59 | },
60 | "class_type": "EmptyLatentImage",
61 | "_meta": {
62 | "title": "Empty Latent Image"
63 | }
64 | },
65 | "5": {
66 | "inputs": {
67 | "samples": [
68 | "3",
69 | 0
70 | ],
71 | "vae": [
72 | "1",
73 | 2
74 | ]
75 | },
76 | "class_type": "VAEDecode",
77 | "_meta": {
78 | "title": "VAE Decode"
79 | }
80 | },
81 | "6": {
82 | "inputs": {
83 | "filename_prefix": "ComfyUI",
84 | "images": [
85 | "5",
86 | 0
87 | ]
88 | },
89 | "class_type": "SaveImage",
90 | "_meta": {
91 | "title": "Save Image"
92 | }
93 | }
94 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/flux-dev_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "prompt": 2,
3 | "image_size": 4,
4 | "output": 6,
5 | "seed": 3
6 | }
--------------------------------------------------------------------------------
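FLUX runs with `cfg: 1`, which is why the workflow wires the same `CLIPTextEncode` node (2) into both `positive` and `negative`. Once patched, an API-format workflow like this can be queued against ComfyUI's standard `/prompt` endpoint; a minimal sketch (the server address is an assumption):

```python
import json
import uuid

import httpx

COMFYUI_URL = "http://127.0.0.1:8188"  # assumed server address

def queue_workflow(path: str) -> str:
    """Queue an API-format workflow file and return its prompt_id."""
    with open(path, encoding="utf-8") as f:
        workflow = json.load(f)
    payload = {"prompt": workflow, "client_id": uuid.uuid4().hex}
    resp = httpx.post(f"{COMFYUI_URL}/prompt", json=payload)
    resp.raise_for_status()
    return resp.json()["prompt_id"]

prompt_id = queue_workflow("flux-dev.json")
# progress can then be polled via GET {COMFYUI_URL}/history/<prompt_id>
```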
/DrawBridgeAPI/comfyui_workflows/flux-schnell.json:
--------------------------------------------------------------------------------
1 | {
2 | "1": {
3 | "inputs": {
4 | "ckpt_name": "models\\flux1-schnell-bnb-nf4.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderNF4",
7 | "_meta": {
8 | "title": "CheckpointLoaderNF4"
9 | }
10 | },
11 | "2": {
12 | "inputs": {
13 | "text": "a tank",
14 | "clip": [
15 | "1",
16 | 1
17 | ]
18 | },
19 | "class_type": "CLIPTextEncode",
20 | "_meta": {
21 | "title": "CLIP Text Encode (Prompt)"
22 | }
23 | },
24 | "3": {
25 | "inputs": {
26 | "seed": 0,
27 | "steps": 4,
28 | "cfg": 1,
29 | "sampler_name": "euler",
30 | "scheduler": "simple",
31 | "denoise": 1,
32 | "model": [
33 | "1",
34 | 0
35 | ],
36 | "positive": [
37 | "2",
38 | 0
39 | ],
40 | "negative": [
41 | "2",
42 | 0
43 | ],
44 | "latent_image": [
45 | "4",
46 | 0
47 | ]
48 | },
49 | "class_type": "KSampler",
50 | "_meta": {
51 | "title": "KSampler"
52 | }
53 | },
54 | "4": {
55 | "inputs": {
56 | "width": 512,
57 | "height": 768,
58 | "batch_size": 1
59 | },
60 | "class_type": "EmptyLatentImage",
61 | "_meta": {
62 | "title": "Empty Latent Image"
63 | }
64 | },
65 | "5": {
66 | "inputs": {
67 | "samples": [
68 | "3",
69 | 0
70 | ],
71 | "vae": [
72 | "1",
73 | 2
74 | ]
75 | },
76 | "class_type": "VAEDecode",
77 | "_meta": {
78 | "title": "VAE Decode"
79 | }
80 | },
81 | "6": {
82 | "inputs": {
83 | "filename_prefix": "ComfyUI",
84 | "images": [
85 | "5",
86 | 0
87 | ]
88 | },
89 | "class_type": "SaveImage",
90 | "_meta": {
91 | "title": "Save Image"
92 | }
93 | }
94 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/flux-schnell_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "prompt": 2,
3 | "image_size": 4,
4 | "output": 6,
5 | "seed": 3
6 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/flux修手.json:
--------------------------------------------------------------------------------
1 | {
2 | "1": {
3 | "inputs": {
4 | "context_expand_pixels": 100,
5 | "context_expand_factor": 1,
6 | "fill_mask_holes": true,
7 | "blur_mask_pixels": 16,
8 | "invert_mask": false,
9 | "blend_pixels": 16,
10 | "rescale_algorithm": "bicubic",
11 | "mode": "ranged size",
12 | "force_width": 1024,
13 | "force_height": 1024,
14 | "rescale_factor": 1,
15 | "min_width": 512,
16 | "min_height": 512,
17 | "max_width": 768,
18 | "max_height": 768,
19 | "padding": 32,
20 | "image": [
21 | "47",
22 | 0
23 | ],
24 | "mask": [
25 | "50",
26 | 0
27 | ]
28 | },
29 | "class_type": "InpaintCrop",
30 | "_meta": {
31 | "title": "✂️ Inpaint Crop"
32 | }
33 | },
34 | "2": {
35 | "inputs": {
36 | "rescale_algorithm": "bislerp",
37 | "stitch": [
38 | "1",
39 | 0
40 | ],
41 | "inpainted_image": [
42 | "15",
43 | 0
44 | ]
45 | },
46 | "class_type": "InpaintStitch",
47 | "_meta": {
48 | "title": "✂️ Inpaint Stitch"
49 | }
50 | },
51 | "3": {
52 | "inputs": {
53 | "image": "a87ed50d8e69b8bfb62df848bac69d12.png",
54 | "upload": "image"
55 | },
56 | "class_type": "LoadImage",
57 | "_meta": {
58 | "title": "Load Image"
59 | }
60 | },
61 | "15": {
62 | "inputs": {
63 | "samples": [
64 | "100",
65 | 0
66 | ],
67 | "vae": [
68 | "99",
69 | 2
70 | ]
71 | },
72 | "class_type": "VAEDecode",
73 | "_meta": {
74 | "title": "VAE Decode"
75 | }
76 | },
77 | "19": {
78 | "inputs": {
79 | "positive": [
80 | "32",
81 | 0
82 | ],
83 | "negative": [
84 | "32",
85 | 0
86 | ],
87 | "vae": [
88 | "99",
89 | 2
90 | ],
91 | "pixels": [
92 | "1",
93 | 1
94 | ],
95 | "mask": [
96 | "1",
97 | 2
98 | ]
99 | },
100 | "class_type": "InpaintModelConditioning",
101 | "_meta": {
102 | "title": "InpaintModelConditioning"
103 | }
104 | },
105 | "25": {
106 | "inputs": {
107 | "rescale_algorithm": "bicubic",
108 | "mode": "ensure minimum size",
109 | "min_width": 0,
110 | "min_height": 1536,
111 | "rescale_factor": 1,
112 | "image": [
113 | "26",
114 | 0
115 | ],
116 | "mask": [
117 | "26",
118 | 1
119 | ]
120 | },
121 | "class_type": "InpaintResize",
122 | "_meta": {
123 | "title": "✂️ Resize Image Before Inpainting"
124 | }
125 | },
126 | "26": {
127 | "inputs": {
128 | "sam_model": "sam_vit_h (2.56GB)",
129 | "grounding_dino_model": "GroundingDINO_SwinB (938MB)",
130 | "threshold": 0.3,
131 | "detail_method": "VITMatte",
132 | "detail_erode": 6,
133 | "detail_dilate": 6,
134 | "black_point": 0.15,
135 | "white_point": 0.99,
136 | "process_detail": false,
137 | "prompt": "hand",
138 | "device": "cuda",
139 | "max_megapixels": 2,
140 | "cache_model": false,
141 | "image": [
142 | "3",
143 | 0
144 | ]
145 | },
146 | "class_type": "LayerMask: SegmentAnythingUltra V2",
147 | "_meta": {
148 | "title": "LayerMask: SegmentAnythingUltra V2"
149 | }
150 | },
151 | "32": {
152 | "inputs": {
153 | "text": "Masterpiece, High Definition, Real Person Portrait, 5 Fingers, Girl's Hand",
154 | "clip": [
155 | "99",
156 | 1
157 | ]
158 | },
159 | "class_type": "CLIPTextEncode",
160 | "_meta": {
161 | "title": "CLIP Text Encode (Prompt)"
162 | }
163 | },
164 | "47": {
165 | "inputs": {
166 | "fill_background": false,
167 | "background_color": "#000000",
168 | "RGBA_image": [
169 | "25",
170 | 0
171 | ],
172 | "mask": [
173 | "25",
174 | 1
175 | ]
176 | },
177 | "class_type": "LayerUtility: ImageRemoveAlpha",
178 | "_meta": {
179 | "title": "LayerUtility: ImageRemoveAlpha"
180 | }
181 | },
182 | "50": {
183 | "inputs": {
184 | "expand": 30,
185 | "incremental_expandrate": 0.1,
186 | "tapered_corners": false,
187 | "flip_input": false,
188 | "blur_radius": 10,
189 | "lerp_alpha": 1,
190 | "decay_factor": 1,
191 | "fill_holes": false,
192 | "mask": [
193 | "25",
194 | 1
195 | ]
196 | },
197 | "class_type": "GrowMaskWithBlur",
198 | "_meta": {
199 | "title": "Grow Mask With Blur"
200 | }
201 | },
202 | "94": {
203 | "inputs": {
204 | "filename_prefix": "hand_fix",
205 | "images": [
206 | "2",
207 | 0
208 | ]
209 | },
210 | "class_type": "SaveImage",
211 | "_meta": {
212 | "title": "Save Image"
213 | }
214 | },
215 | "99": {
216 | "inputs": {
217 | "ckpt_name": "models\\flux1-dev-bnb-nf4-v2.safetensors"
218 | },
219 | "class_type": "CheckpointLoaderNF4",
220 | "_meta": {
221 | "title": "CheckpointLoaderNF4"
222 | }
223 | },
224 | "100": {
225 | "inputs": {
226 | "seed": 266696528873091,
227 | "steps": 20,
228 | "cfg": 1,
229 | "sampler_name": "euler",
230 | "scheduler": "simple",
231 | "denoise": 0.5,
232 | "model": [
233 | "99",
234 | 0
235 | ],
236 | "positive": [
237 | "19",
238 | 0
239 | ],
240 | "negative": [
241 | "19",
242 | 1
243 | ],
244 | "latent_image": [
245 | "19",
246 | 2
247 | ]
248 | },
249 | "class_type": "KSampler",
250 | "_meta": {
251 | "title": "KSampler"
252 | }
253 | }
254 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/flux修手_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "prompt": 32,
3 | "output": 94,
4 | "load_image":3
5 | }
--------------------------------------------------------------------------------
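This hand-repair workflow masks hands with GroundingDINO + SAM (node 26) and inpaints them with FLUX; its reflex file only exposes the prompt, the output, and the `LoadImage` node (3). A sketch of supplying that input image through ComfyUI's standard `/upload/image` endpoint and pointing node 3 at it (the server address is an assumption):

```python
import httpx

COMFYUI_URL = "http://127.0.0.1:8188"  # assumed server address

def upload_and_patch(png_path: str, workflow: dict) -> dict:
    """Upload an input image and point the LoadImage node (id "3") at it."""
    with open(png_path, "rb") as f:
        files = {"image": (png_path, f, "image/png")}
        resp = httpx.post(f"{COMFYUI_URL}/upload/image", files=files)
    resp.raise_for_status()
    # the server responds with the stored filename, e.g. {"name": "...png", ...}
    workflow["3"]["inputs"]["image"] = resp.json()["name"]
    return workflow
```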
/DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img.json:
--------------------------------------------------------------------------------
1 | {
2 | "4": {
3 | "inputs": {
4 | "ckpt_name": "models\\sd3.5_large.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderSimple",
7 | "_meta": {
8 | "title": "Load Checkpoint"
9 | }
10 | },
11 | "6": {
12 | "inputs": {
13 | "text": "beautiful scenery nature glass bottle landscape, purple galaxy bottle,",
14 | "clip": [
15 | "11",
16 | 0
17 | ]
18 | },
19 | "class_type": "CLIPTextEncode",
20 | "_meta": {
21 | "title": "CLIP Text Encode (Prompt)"
22 | }
23 | },
24 | "8": {
25 | "inputs": {
26 | "samples": [
27 | "294",
28 | 0
29 | ],
30 | "vae": [
31 | "4",
32 | 2
33 | ]
34 | },
35 | "class_type": "VAEDecode",
36 | "_meta": {
37 | "title": "VAE Decode"
38 | }
39 | },
40 | "11": {
41 | "inputs": {
42 | "clip_name1": "clip_g.pth",
43 | "clip_name2": "clip_l.safetensors",
44 | "clip_name3": "t5xxl_fp16.safetensors"
45 | },
46 | "class_type": "TripleCLIPLoader",
47 | "_meta": {
48 | "title": "TripleCLIPLoader"
49 | }
50 | },
51 | "13": {
52 | "inputs": {
53 | "shift": 3,
54 | "model": [
55 | "4",
56 | 0
57 | ]
58 | },
59 | "class_type": "ModelSamplingSD3",
60 | "_meta": {
61 | "title": "ModelSamplingSD3"
62 | }
63 | },
64 | "67": {
65 | "inputs": {
66 | "conditioning": [
67 | "71",
68 | 0
69 | ]
70 | },
71 | "class_type": "ConditioningZeroOut",
72 | "_meta": {
73 | "title": "ConditioningZeroOut"
74 | }
75 | },
76 | "68": {
77 | "inputs": {
78 | "start": 0.1,
79 | "end": 1,
80 | "conditioning": [
81 | "67",
82 | 0
83 | ]
84 | },
85 | "class_type": "ConditioningSetTimestepRange",
86 | "_meta": {
87 | "title": "ConditioningSetTimestepRange"
88 | }
89 | },
90 | "69": {
91 | "inputs": {
92 | "conditioning_1": [
93 | "68",
94 | 0
95 | ],
96 | "conditioning_2": [
97 | "70",
98 | 0
99 | ]
100 | },
101 | "class_type": "ConditioningCombine",
102 | "_meta": {
103 | "title": "Conditioning (Combine)"
104 | }
105 | },
106 | "70": {
107 | "inputs": {
108 | "start": 0,
109 | "end": 0.1,
110 | "conditioning": [
111 | "71",
112 | 0
113 | ]
114 | },
115 | "class_type": "ConditioningSetTimestepRange",
116 | "_meta": {
117 | "title": "ConditioningSetTimestepRange"
118 | }
119 | },
120 | "71": {
121 | "inputs": {
122 | "text": "",
123 | "clip": [
124 | "11",
125 | 0
126 | ]
127 | },
128 | "class_type": "CLIPTextEncode",
129 | "_meta": {
130 | "title": "CLIP Text Encode (Prompt)"
131 | }
132 | },
133 | "135": {
134 | "inputs": {
135 | "width": 1024,
136 | "height": 1024,
137 | "batch_size": 1
138 | },
139 | "class_type": "EmptySD3LatentImage",
140 | "_meta": {
141 | "title": "EmptySD3LatentImage"
142 | }
143 | },
144 | "294": {
145 | "inputs": {
146 | "seed": 143084108695924,
147 | "steps": 20,
148 | "cfg": 4.5,
149 | "sampler_name": "dpmpp_2m",
150 | "scheduler": "sgm_uniform",
151 | "denoise": 1,
152 | "model": [
153 | "13",
154 | 0
155 | ],
156 | "positive": [
157 | "6",
158 | 0
159 | ],
160 | "negative": [
161 | "69",
162 | 0
163 | ],
164 | "latent_image": [
165 | "135",
166 | 0
167 | ]
168 | },
169 | "class_type": "KSampler",
170 | "_meta": {
171 | "title": "KSampler"
172 | }
173 | },
174 | "302": {
175 | "inputs": {
176 | "filename_prefix": "ComfyUI",
177 | "images": [
178 | "8",
179 | 0
180 | ]
181 | },
182 | "class_type": "SaveImage",
183 | "_meta": {
184 | "title": "Save Image"
185 | }
186 | }
187 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sd3.5_txt2img_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "prompt": 6,
3 | "negative_prompt": 71,
4 | "image_size": 135,
5 | "output": 302,
6 | "seed": 294
7 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sdbase_img2img.json:
--------------------------------------------------------------------------------
1 | {
2 | "3": {
3 | "inputs": {
4 | "seed": 280823642470253,
5 | "steps": 20,
6 | "cfg": 8,
7 | "sampler_name": "dpmpp_2m",
8 | "scheduler": "normal",
9 | "denoise": 0.8700000000000001,
10 | "model": [
11 | "14",
12 | 0
13 | ],
14 | "positive": [
15 | "6",
16 | 0
17 | ],
18 | "negative": [
19 | "7",
20 | 0
21 | ],
22 | "latent_image": [
23 | "12",
24 | 0
25 | ]
26 | },
27 | "class_type": "KSampler",
28 | "_meta": {
29 | "title": "KSampler"
30 | }
31 | },
32 | "6": {
33 | "inputs": {
34 | "text": "photograph of victorian woman with wings, sky clouds, meadow grass\n",
35 | "clip": [
36 | "14",
37 | 1
38 | ]
39 | },
40 | "class_type": "CLIPTextEncode",
41 | "_meta": {
42 | "title": "CLIP Text Encode (Prompt)"
43 | }
44 | },
45 | "7": {
46 | "inputs": {
47 | "text": "watermark, text\n",
48 | "clip": [
49 | "14",
50 | 1
51 | ]
52 | },
53 | "class_type": "CLIPTextEncode",
54 | "_meta": {
55 | "title": "CLIP Text Encode (Prompt)"
56 | }
57 | },
58 | "8": {
59 | "inputs": {
60 | "samples": [
61 | "3",
62 | 0
63 | ],
64 | "vae": [
65 | "14",
66 | 2
67 | ]
68 | },
69 | "class_type": "VAEDecode",
70 | "_meta": {
71 | "title": "VAE Decode"
72 | }
73 | },
74 | "9": {
75 | "inputs": {
76 | "filename_prefix": "ComfyUI",
77 | "images": [
78 | "8",
79 | 0
80 | ]
81 | },
82 | "class_type": "SaveImage",
83 | "_meta": {
84 | "title": "Save Image"
85 | }
86 | },
87 | "10": {
88 | "inputs": {
89 | "image": "example.png",
90 | "upload": "image"
91 | },
92 | "class_type": "LoadImage",
93 | "_meta": {
94 | "title": "Load Image"
95 | }
96 | },
97 | "12": {
98 | "inputs": {
99 | "pixels": [
100 | "10",
101 | 0
102 | ],
103 | "vae": [
104 | "14",
105 | 2
106 | ]
107 | },
108 | "class_type": "VAEEncode",
109 | "_meta": {
110 | "title": "VAE Encode"
111 | }
112 | },
113 | "14": {
114 | "inputs": {
115 | "ckpt_name": "v1-5-pruned-emaonly.ckpt"
116 | },
117 | "class_type": "CheckpointLoaderSimple",
118 | "_meta": {
119 | "title": "Load Checkpoint"
120 | }
121 | }
122 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sdbase_img2img_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "sampler": 3,
3 | "prompt": 6,
4 | "image_size": 5,
5 | "negative_prompt": 7,
6 | "checkpoint": 14,
7 | "output": 9,
8 | "load_image":10
9 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sdbase_txt2img.json:
--------------------------------------------------------------------------------
1 | {
2 | "3": {
3 | "inputs": {
4 | "seed": 567570346829551,
5 | "steps": 20,
6 | "cfg": 8,
7 | "sampler_name": "euler",
8 | "scheduler": "normal",
9 | "denoise": 1,
10 | "model": [
11 | "4",
12 | 0
13 | ],
14 | "positive": [
15 | "6",
16 | 0
17 | ],
18 | "negative": [
19 | "7",
20 | 0
21 | ],
22 | "latent_image": [
23 | "5",
24 | 0
25 | ]
26 | },
27 | "class_type": "KSampler",
28 | "_meta": {
29 | "title": "KSampler"
30 | }
31 | },
32 | "4": {
33 | "inputs": {
34 | "ckpt_name": "models\\DiaoDaia_mix_4.5.ckpt"
35 | },
36 | "class_type": "CheckpointLoaderSimple",
37 | "_meta": {
38 | "title": "Load Checkpoint"
39 | }
40 | },
41 | "5": {
42 | "inputs": {
43 | "width": 512,
44 | "height": 512,
45 | "batch_size": 1
46 | },
47 | "class_type": "EmptyLatentImage",
48 | "_meta": {
49 | "title": "Empty Latent Image"
50 | }
51 | },
52 | "6": {
53 | "inputs": {
54 | "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
55 | "clip": [
56 | "4",
57 | 1
58 | ]
59 | },
60 | "class_type": "CLIPTextEncode",
61 | "_meta": {
62 | "title": "CLIP Text Encode (Prompt)"
63 | }
64 | },
65 | "7": {
66 | "inputs": {
67 | "text": "text, watermark",
68 | "clip": [
69 | "4",
70 | 1
71 | ]
72 | },
73 | "class_type": "CLIPTextEncode",
74 | "_meta": {
75 | "title": "CLIP Text Encode (Prompt)"
76 | }
77 | },
78 | "8": {
79 | "inputs": {
80 | "samples": [
81 | "3",
82 | 0
83 | ],
84 | "vae": [
85 | "4",
86 | 2
87 | ]
88 | },
89 | "class_type": "VAEDecode",
90 | "_meta": {
91 | "title": "VAE Decode"
92 | }
93 | },
94 | "9": {
95 | "inputs": {
96 | "filename_prefix": "ComfyUI",
97 | "images": [
98 | "8",
99 | 0
100 | ]
101 | },
102 | "class_type": "SaveImage",
103 | "_meta": {
104 | "title": "Save Image"
105 | }
106 | }
107 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix.json:
--------------------------------------------------------------------------------
1 | {
2 | "3": {
3 | "inputs": {
4 | "seed": 213416933995644,
5 | "steps": 20,
6 | "cfg": 8,
7 | "sampler_name": "euler_ancestral",
8 | "scheduler": "normal",
9 | "denoise": 1,
10 | "model": [
11 | "4",
12 | 0
13 | ],
14 | "positive": [
15 | "6",
16 | 0
17 | ],
18 | "negative": [
19 | "7",
20 | 0
21 | ],
22 | "latent_image": [
23 | "5",
24 | 0
25 | ]
26 | },
27 | "class_type": "KSampler",
28 | "_meta": {
29 | "title": "KSampler"
30 | }
31 | },
32 | "4": {
33 | "inputs": {
34 | "ckpt_name": "models\\1053-S.ckpt"
35 | },
36 | "class_type": "CheckpointLoaderSimple",
37 | "_meta": {
38 | "title": "Load Checkpoint"
39 | }
40 | },
41 | "5": {
42 | "inputs": {
43 | "width": 768,
44 | "height": 512,
45 | "batch_size": 1
46 | },
47 | "class_type": "EmptyLatentImage",
48 | "_meta": {
49 | "title": "Empty Latent Image"
50 | }
51 | },
52 | "6": {
53 | "inputs": {
54 | "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
55 | "clip": [
56 | "4",
57 | 1
58 | ]
59 | },
60 | "class_type": "CLIPTextEncode",
61 | "_meta": {
62 | "title": "CLIP Text Encode (Prompt)"
63 | }
64 | },
65 | "7": {
66 | "inputs": {
67 | "text": "text, watermark",
68 | "clip": [
69 | "4",
70 | 1
71 | ]
72 | },
73 | "class_type": "CLIPTextEncode",
74 | "_meta": {
75 | "title": "CLIP Text Encode (Prompt)"
76 | }
77 | },
78 | "8": {
79 | "inputs": {
80 | "samples": [
81 | "3",
82 | 0
83 | ],
84 | "vae": [
85 | "4",
86 | 2
87 | ]
88 | },
89 | "class_type": "VAEDecode",
90 | "_meta": {
91 | "title": "VAE Decode"
92 | }
93 | },
94 | "9": {
95 | "inputs": {
96 | "filename_prefix": "ComfyUI",
97 | "images": [
98 | "18",
99 | 0
100 | ]
101 | },
102 | "class_type": "SaveImage",
103 | "_meta": {
104 | "title": "Save Image"
105 | }
106 | },
107 | "10": {
108 | "inputs": {
109 | "upscale_method": "nearest-exact",
110 | "width": 1536,
111 | "height": 1152,
112 | "crop": "disabled",
113 | "samples": [
114 | "16",
115 | 0
116 | ]
117 | },
118 | "class_type": "LatentUpscale",
119 | "_meta": {
120 | "title": "Upscale Latent"
121 | }
122 | },
123 | "12": {
124 | "inputs": {
125 | "model_name": "RealESRGAN_x4plus.pth"
126 | },
127 | "class_type": "UpscaleModelLoader",
128 | "_meta": {
129 | "title": "Load Upscale Model"
130 | }
131 | },
132 | "14": {
133 | "inputs": {
134 | "upscale_model": [
135 | "12",
136 | 0
137 | ],
138 | "image": [
139 | "8",
140 | 0
141 | ]
142 | },
143 | "class_type": "ImageUpscaleWithModel",
144 | "_meta": {
145 | "title": "Upscale Image (using Model)"
146 | }
147 | },
148 | "15": {
149 | "inputs": {
150 | "upscale_method": "area",
151 | "width": 1152,
152 | "height": 768,
153 | "crop": "disabled",
154 | "image": [
155 | "14",
156 | 0
157 | ]
158 | },
159 | "class_type": "ImageScale",
160 | "_meta": {
161 | "title": "Upscale Image"
162 | }
163 | },
164 | "16": {
165 | "inputs": {
166 | "pixels": [
167 | "15",
168 | 0
169 | ],
170 | "vae": [
171 | "4",
172 | 2
173 | ]
174 | },
175 | "class_type": "VAEEncode",
176 | "_meta": {
177 | "title": "VAE Encode"
178 | }
179 | },
180 | "18": {
181 | "inputs": {
182 | "samples": [
183 | "19",
184 | 0
185 | ],
186 | "vae": [
187 | "4",
188 | 2
189 | ]
190 | },
191 | "class_type": "VAEDecode",
192 | "_meta": {
193 | "title": "VAE Decode"
194 | }
195 | },
196 | "19": {
197 | "inputs": {
198 | "seed": 1069147258069384,
199 | "steps": 8,
200 | "cfg": 8,
201 | "sampler_name": "euler",
202 | "scheduler": "sgm_uniform",
203 | "denoise": 0.6,
204 | "model": [
205 | "4",
206 | 0
207 | ],
208 | "positive": [
209 | "21",
210 | 0
211 | ],
212 | "negative": [
213 | "22",
214 | 0
215 | ],
216 | "latent_image": [
217 | "10",
218 | 0
219 | ]
220 | },
221 | "class_type": "KSampler",
222 | "_meta": {
223 | "title": "KSampler"
224 | }
225 | },
226 | "20": {
227 | "inputs": {
228 | "seed": 85387134314530,
229 | "steps": 20,
230 | "cfg": 5.74,
231 | "sampler_name": "dpm_2",
232 | "scheduler": "normal",
233 | "denoise": 1
234 | },
235 | "class_type": "KSampler",
236 | "_meta": {
237 | "title": "KSampler"
238 | }
239 | },
240 | "21": {
241 | "inputs": {
242 | "text": "",
243 | "clip": [
244 | "4",
245 | 1
246 | ]
247 | },
248 | "class_type": "CLIPTextEncode",
249 | "_meta": {
250 | "title": "CLIP Text Encode (Prompt)"
251 | }
252 | },
253 | "22": {
254 | "inputs": {
255 | "text": "",
256 | "clip": [
257 | "4",
258 | 1
259 | ]
260 | },
261 | "class_type": "CLIPTextEncode",
262 | "_meta": {
263 | "title": "CLIP Text Encode (Prompt)"
264 | }
265 | }
266 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_hr_fix_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "sampler": 3,
3 | "prompt": 6,
4 | "image_size": 5,
5 | "negative_prompt": 7,
6 | "checkpoint": 4,
7 | "output": 9,
8 | "latentupscale": 10,
9 | "resize": 15,
10 | "hr_steps": 19,
11 | "hr_prompt": 21,
12 | "hr_negative_prompt": 22
13 | }
--------------------------------------------------------------------------------
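Beyond the base roles, this reflex exposes the hires-fix chain: `latentupscale` (node 10) and `resize` (node 15) carry the upscaled resolution, while `hr_steps`, `hr_prompt`, and `hr_negative_prompt` point at the second sampling pass. A sketch of how those keys could drive a hires pass; `apply_hr_fix` is an illustrative name, not the backend's actual code:

```python
def apply_hr_fix(workflow: dict, reflex: dict,
                 width: int, height: int, scale: float) -> dict:
    """Illustrative sketch: drive the hires-fix nodes from the reflex mapping."""
    # Base resolution goes to the EmptyLatentImage node ("image_size": 5)
    base = workflow[str(reflex["image_size"])]["inputs"]
    base["width"], base["height"] = width, height
    # Upscaled resolution goes to LatentUpscale (10) and ImageScale (15)
    for key in ("latentupscale", "resize"):
        inputs = workflow[str(reflex[key])]["inputs"]
        inputs["width"] = int(width * scale)
        inputs["height"] = int(height * scale)
    return workflow
```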
/DrawBridgeAPI/comfyui_workflows/sdbase_txt2img_reflex.json:
--------------------------------------------------------------------------------
1 | {
2 | "sampler": 3,
3 | "prompt": 6,
4 | "image_size": 5,
5 | "negative_prompt": 7,
6 | "checkpoint": 4,
7 | "output": 9
8 | }
--------------------------------------------------------------------------------
/DrawBridgeAPI/config_example.yaml:
--------------------------------------------------------------------------------
1 | enable_backends:
2 | enable_txt2img_backends:
3 | comfyui: [0]
4 | a1111webui: [0,1]
5 |     novelai: [{"0": "queue"}] # use queue management (concurrency disabled)
6 | enable_sdapi_backends:
7 | a1111webui: [0,1]
8 | enable_img2img_backends:
9 | a1111webui: [0,1]
10 | enable_comfyui_backends:
11 | comfyui: [0]
12 |
13 | backends:
14 | civitai: # {"token": []}
15 | api:
16 | - b0ab2e10591a....
17 | name:
18 | - civitaiAPI
19 | a1111webui:
20 | api:
21 | - http://192.168.1.104:7860
22 | - http://127.0.0.1:20224
23 | name:
24 | - RTX2060
25 | - RTX2080
26 | auth:
27 | - false
28 | - false
29 | username:
30 | - admin
31 | - admin
32 | password:
33 | - admin
34 | - admin
35 | fal_ai:
36 | api:
37 | - 6e3c0665-f...
38 | name:
39 | - myfirsttoken
40 | replicate:
41 | api:
42 | - r8_bBAjALaO..
43 | name:
44 | - default
45 | liblibai:
46 |   # https://www.liblib.art/ # Press F12 -> Application -> Cookies -> https://www.liblib.art -> the value of usertoken, e.g. d812c12d83c640.....
47 |   api:
48 |   - f45123447d # main account
49 |   - 4de16d49bf7c44c0 # 2580120068
50 |   - 66c70f855fd # WeChat
51 | name:
52 | - google
53 | - 2580120068
54 | - wechat
55 | override:
56 | -
57 | model_path: 2931607
58 | model: NoobXL
59 | prompt:
60 | negative_prompt:
61 | xl: true
62 | steps: 35
63 |
64 | - model_path: 2675606
65 | model: liblib.art/modelinfo/fe3aac47589d4a20b24d0a6b045d607e
66 | prompt:
67 | negative_prompt: easynegative
68 | xl: false
69 | steps: 20
70 |
71 | - model_path: 2676318
72 | model: liblib.art/modelinfo/5ecc3218f1ef483ab63eeb4e4cff30cc
73 | prompt: "score_9,score_8_up,score_7_up,score_anime,score_anime_8_up"
74 | negative_prompt: "score_3,poorly drawn,bad anatomy,bad proportions, watercolor painting, brush strokes,3d,2.5d,signature,watermark,bad face,distorted face,messed up eyes,deformed,(low quality, bad quality, worst quality:1.2),bad hand"
75 | xl: true
76 | steps: 28
77 |
78 | tusiart:
79 | api:
80 | - eyJhbGciOiJIUzI1NiIsInR5...
81 | name:
82 | - 移动
83 | referer:
84 | - https://tusiart.com/u/759779980971380287
85 | override:
86 | - model_path: 758751795863586176
87 | prompt: "best quality"
88 |
89 | seaart:
90 | api:
91 | - eyJhbGciOiJSUzUxMiIs...
92 | name:
93 | - default
94 |
95 | yunjie:
96 | api:
97 | - "rsat:NUh5MOBfEccVuUmuwsyqT4fmX7O"
98 | name:
99 | - 移动
100 |
101 | comfyui:
102 | api:
103 | - http://10.147.20.155:8188
104 | name:
105 | - RTX2080TI
106 | override:
107 | - model_path: noobaiXLNAIXL_epsilonPred075.safetensors
108 | prompt: "best quality"
109 | negative_prompt: ",bad hands, worst quality, low quality,"
110 | comfyui_api_json: sdbase_txt2img
111 |
112 | novelai:
113 | api:
114 | - eyJhbGciOi...
115 | name:
116 | - default
117 | override:
118 | - model_path: nai-diffusion-3
119 | midjourney:
120 | api:
121 | - http://192.168.5.206:8081
122 | name:
123 | - myserver
124 | auth_toekn:
125 | - null
126 |
127 |
128 | server_settings:
129 | redis_server:
130 | - 127.0.0.1
131 | - 6379
132 | - null
133 | - 2
134 | enable_nsfw_check:
135 | false
136 | save_image:
137 | true
138 | build_in_tagger:
139 | false
140 | llm_caption:
141 | enable:
142 | false
143 | clip:
144 | google/siglip-so400m-patch14-384
145 | llm:
146 | unsloth/Meta-Llama-3.1-8B-bnb-4bit
147 | image_adapter: # https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/tree/main/wpkklhc6
148 | image_adapter.pt
149 | build_in_photoai:
150 | exec_path:
151 | "C:\\Program Files\\Topaz Labs LLC\\Topaz Photo AI\\tpai.exe"
152 |
153 | start_gradio:
154 | False
155 | same_port_with_api:
156 | False
157 |
158 | proxy:
159 | null
160 |
161 |
--------------------------------------------------------------------------------
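Backends are configured as parallel lists, so `api[i]`, `name[i]`, and (where present) the i-th `override` entry describe the same backend instance. A minimal loading sketch with PyYAML; reading `redis_server`'s third and fourth entries as password and DB index is an assumption:

```python
import yaml

# "config.yaml" is assumed to be a filled-in copy of config_example.yaml
with open("config.yaml", encoding="utf-8") as f:
    config = yaml.safe_load(f)

# Parallel lists: api[i] pairs with name[i]
a1111 = config["backends"]["a1111webui"]
for url, name in zip(a1111["api"], a1111["name"]):
    print(f"{name}: {url}")

# Positional unpack; password/DB-index for entries 3-4 is our reading
host, port, password, db = config["server_settings"]["redis_server"]
```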
/DrawBridgeAPI/dbapi.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 |
4 | os.chdir('/root')
5 |
6 | # Route pip and Hugging Face downloads through domestic mirrors
7 | os.environ['PIP_INDEX_URL'] = 'https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple'
8 | os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
9 |
10 | subprocess.run(['apt', 'update'])
11 | subprocess.run(['apt', 'install', '-y', 'aria2', 'sshpass'])  # the apt package is "aria2"; it ships the aria2c binary
12 | subprocess.run(['pip', 'install', 'jupyterlab', '--break-system-packages'])
13 |
14 | # Keep the API server and JupyterLab running; relaunch them if the shell command exits
15 | while True:
16 |     os.system('python app.py & jupyter-lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir=/ --port=65432 --LabApp.allow_origin=* --LabApp.token= --LabApp.base_url=/diao')
--------------------------------------------------------------------------------
/DrawBridgeAPI/locales/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import gettext
3 | import locale
4 |
5 | locale_dir = os.path.join(os.path.dirname(__file__), '../locales')
6 | env_lang = os.getenv('LANG')
7 | system_lang, _encoding = locale.getdefaultlocale()
8 |
9 | translation = gettext.translation(
10 |     'messages', localedir=locale_dir, languages=[env_lang or system_lang or 'en'], fallback=True
11 | )
12 |
13 | translation.install()
14 |
15 | _ = translation.gettext
16 |
--------------------------------------------------------------------------------
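`_` is the gettext lookup installed above. A brief usage sketch:

```python
from DrawBridgeAPI.locales import _

# Resolves the English msgid against locales/<lang>/LC_MESSAGES/messages.mo
# (compiled from messages.po, e.g. `msgfmt messages.po -o messages.mo`);
# falls back to the msgid itself when no catalog matches the locale.
print(_("Loading config file completed"))  # "加载配置文件完成" under zh_CN
```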
/DrawBridgeAPI/locales/zh_CN/LC_MESSAGES/messages.mo:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/DrawBridgeAPI/locales/zh_CN/LC_MESSAGES/messages.mo
--------------------------------------------------------------------------------
/DrawBridgeAPI/locales/zh_CN/LC_MESSAGES/messages.po:
--------------------------------------------------------------------------------
1 | msgid "Loading config file completed"
2 | msgstr "加载配置文件完成"
3 |
4 | msgid "API initialization completed"
5 | msgstr "API初始化完成"
6 |
7 | msgid "Redis connection successful"
8 | msgstr "Redis连接成功"
9 |
10 | msgid "Exec TXT2IMG"
11 | msgstr "开始进行文生图"
12 |
13 | msgid "IMG2IMG Requires image to start"
14 | msgstr "图生图需要图片来启动"
15 |
16 | msgid "Exec IMG2IMG"
17 | msgstr "开始进行图生图"
18 |
19 | msgid "Caption Successful"
20 | msgstr "打标成功"
21 |
22 | msgid "Lock to backend has configured"
23 | msgstr "设置已经锁定后端"
24 |
25 | msgid "URL detected"
26 | msgstr "检测到url"
27 |
28 | msgid "Image download failed!"
29 | msgstr "图片下载失败!"
30 |
31 | msgid "Exec forwarding"
32 | msgstr "开始进行转发"
33 |
34 | msgid "Waiting for API initialization"
35 | msgstr "请等待API初始化"
36 |
37 | msgid "Loading LLM"
38 | msgstr "LLM加载中"
39 |
40 | msgid "LLM loading completed, waiting for command"
41 | msgstr "LLM加载完成,等待命令"
42 |
43 | msgid "Loading Checkpoint"
44 | msgstr "模型加载中"
45 |
46 | msgid "Checkpoint loading completed, waiting for command"
47 | msgstr "模型加载完成,等待命令"
48 |
49 | msgid "Server is ready!"
50 | msgstr "服务器准备就绪!"
51 |
52 | msgid "Manually select model"
53 | msgstr "手动选择模型"
54 |
55 | msgid "Backend select"
56 | msgstr "已选择后端"
57 |
58 | msgid "Backend locked"
59 | msgstr "已锁定后端"
60 |
61 | msgid "Starting backend selection"
62 | msgstr "开始进行后端选择"
63 |
64 | msgid "Backend {0} is down"
65 | msgstr "后端 {0} 掉线"
66 |
67 | msgid "Backend {0} is failed or locked"
68 | msgstr "后端 {0} 出错或者锁定中"
69 |
70 | msgid "No available backend"
71 | msgstr "没有可用后端"
72 |
73 | msgid "Backend: {0} Average work time: {1} seconds, Current tasks: {2}"
74 | msgstr "后端: {0} 平均工作时间: {1}秒, 现在进行中的任务: {2}"
75 |
76 | msgid "Extra time weight"
77 | msgstr "额外的时间权重"
78 |
79 | msgid "Backend: {0} is the fastest, has been selected"
80 | msgstr "后端{0}最快, 已经选择"
81 |
82 | msgid "Task completed successfully"
83 | msgstr "任务成功完成"
84 |
85 | msgid "Task failed"
86 | msgstr "任务失败"
87 |
88 | msgid "Remaining tasks in the queue"
89 | msgstr "队列中的剩余任务"
90 |
91 | msgid "No remaining tasks in the queue"
92 | msgstr "队列中已无任务"
93 |
94 | msgid "Forwarding request"
95 | msgstr "已转发请求"
96 |
97 | msgid "Backend returned error"
98 | msgstr "后端返回错误"
99 |
100 | msgid "Backend not using built-in multi-image generation management"
101 | msgstr "后端不使用内置多图生成管理"
102 |
103 | msgid "A1111 Backend, not using built-in multi-image generation management"
104 | msgstr "A1111后端, 不使用内置多图生成管理"
105 |
106 | msgid "Over maximum retry times, posting still failed"
107 | msgstr "超过最大重试次数之后依然失败"
108 |
109 | msgid "Request completed, took {0} seconds"
110 | msgstr "请求完成, 花费{0}秒"
111 |
112 | msgid "VRAM OOM detected, auto model unload and reload"
113 | msgstr "检测到爆显存,执行自动模型释放并加载"
114 |
115 | msgid "Get a respond image, processing"
116 | msgstr "获取到返回图片,正在处理"
117 |
118 | msgid "Request failed, error message:"
119 | msgstr "请求失败,错误信息:"
120 |
121 | msgid "Downloading image successful"
122 | msgstr "图片下载成功"
123 |
124 | msgid "Selected ComfyUI style"
125 | msgstr "已选择ComfyUI工作流"
126 |
127 | msgid "Working directory: {0}"
128 | msgstr "工作目录:{0}"
129 |
130 | msgid "Starting image audit"
131 | msgstr "开始图片审核"
132 |
133 | msgid "NSFW Detected"
134 | msgstr "检测到不合适图片"
135 |
136 | msgid "Image safe"
137 | msgstr "图片安全"
138 |
139 | msgid "Backend {0} is enabled"
140 | msgstr "后端 {0} 已启用"
--------------------------------------------------------------------------------
/DrawBridgeAPI/ui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/DrawBridgeAPI/ui/__init__.py
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import json
2 | import asyncio
3 |
4 | import httpx
5 | from fastapi.exceptions import HTTPException
6 | from ..base_config import init_instance
7 | config = init_instance.config
8 |
9 |
10 | async def http_request(
11 | method,
12 | target_url,
13 | headers=None,
14 | params=None,
15 | content=None,
16 | format=True
17 | ):
18 | async with httpx.AsyncClient() as client:
19 |
20 | response = await client.request(
21 | method,
22 | target_url,
23 | headers=headers,
24 | params=params,
25 | content=content
26 | )
27 |
28 | if response.status_code != 200:
29 | raise HTTPException(500)
30 | if format:
31 | return response.json()
32 | else:
33 | return response
34 |
35 |
36 | async def run_later(func, delay=1):
37 | loop = asyncio.get_running_loop()
38 | loop.call_later(
39 | delay,
40 | lambda: loop.create_task(
41 | func
42 | )
43 | )
44 |
45 |
46 | async def txt_audit(
47 | msg,
48 | prompt='''
49 | 接下来请你对一些聊天内容进行审核,
50 | 如果内容出现政治/暴恐内容(特别是我国的政治人物/或者和我国相关的政治)则请你输出,
51 | 如果没有则输出
52 | '''
53 | ):
54 |
55 | from ..backend import Backend
56 |
57 | system = [
58 | {"role": "system",
59 | "content": prompt}
60 | ]
61 |
62 | prompt = [{"role": "user", "content": msg}]
63 |
64 |     try:
65 |         # Backend.http_request is a coroutine, so it must be awaited
66 |         resp = await Backend.http_request(
67 |             "POST",
68 |             f"http://{config['prompt_audit']['site']}/v1/chat/completions",
69 |             {"Authorization": config['prompt_audit']['api_key']},
70 |             timeout=300,
71 |             format=True,
72 |             content=json.dumps(
73 |                 {
74 |                     "model": "gpt-3.5-turbo",
75 |                     "messages": system + prompt,
76 |                     "max_tokens": 4000,
77 |                 }
78 |             )
79 |         )
80 |     except Exception:
81 |         return "yes"
82 |     else:
83 |         res: str = remove_punctuation(resp['choices'][0]['message']['content'].strip())
84 |         return res
84 |
85 |
86 | def remove_punctuation(text):
87 | import string
88 | for i in range(len(text)):
89 | if text[i] not in string.punctuation:
90 | return text[i:]
91 | return ""
--------------------------------------------------------------------------------
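`run_later` defers a coroutine: it registers a callback with `loop.call_later` that wraps the still-unawaited coroutine object in a task once the delay elapses. A usage sketch (the `cleanup` coroutine is illustrative):

```python
import asyncio

from DrawBridgeAPI.utils import run_later

async def cleanup():  # illustrative coroutine
    print("cleaning up")

async def main():
    # Hand run_later the un-awaited coroutine object; it is wrapped
    # in a task via loop.call_later once the delay elapses.
    await run_later(cleanup(), delay=2)
    await asyncio.sleep(3)  # keep the loop alive long enough to fire

asyncio.run(main())
```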
/DrawBridgeAPI/utils/custom_class.py:
--------------------------------------------------------------------------------
1 | from fal_client.client import AsyncClient
2 | from fal_client.auth import fetch_credentials
3 |
4 | import httpx
5 | import os
6 |
7 | USER_AGENT = "fal-client/0.2.2 (python)"
8 |
9 |
10 | class CustomAsyncClient(AsyncClient):
11 | def __init__(self, key=None, default_timeout=120.0):
12 | if key is None:
13 | key = os.getenv("FAL_KEY")
14 | super().__init__(key=key, default_timeout=default_timeout)
15 |
16 | @property
17 | def _client(self):
18 | key = self.key
19 | if key is None:
20 | key = fetch_credentials()
21 |
22 | return httpx.AsyncClient(
23 | headers={
24 | "Authorization": f"Key {key}",
25 | "User-Agent": USER_AGENT,
26 | },
27 | timeout=self.default_timeout,
28 | )
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/exceptions.py:
--------------------------------------------------------------------------------
1 | class DrawBridgeAPIException(Exception):
2 |     pass
3 |
4 |
5 | class DBAPIExceptions(Exception):
6 |     pass
7 |
8 |
9 | class TokenExpired(DBAPIExceptions):
10 |     def __init__(self, message="Token expired."):
11 |         self.message = message
12 |         super().__init__(self.message)
13 |
14 |
15 | class NeedRecaptcha(DBAPIExceptions):
16 |     def __init__(self, message="Need Recaptcha."):
17 |         self.message = message
18 |         super().__init__(self.message)
19 |
--------------------------------------------------------------------------------
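These exceptions let backend adapters signal recoverable auth failures distinctly. A usage sketch; the response shapes checked here are hypothetical:

```python
from DrawBridgeAPI.utils.exceptions import TokenExpired, NeedRecaptcha

def check_backend_response(resp: dict):
    # The response shapes below are hypothetical, for illustration only
    if resp.get("error") == "token_expired":
        raise TokenExpired()
    if resp.get("captcha_required"):
        raise NeedRecaptcha()

try:
    check_backend_response({"error": "token_expired"})
except TokenExpired as e:
    print(e.message)  # "Token expired."
```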
/DrawBridgeAPI/utils/llm_caption_requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | numpy
3 | pillow
4 | transformers>=4.43.3
5 | huggingface_hub
6 | protobuf
7 | bitsandbytes
8 | sentencepiece
9 | accelerate
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/llm_captions.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import base64
3 | import warnings
4 | warnings.simplefilter(action='ignore', category=UserWarning)
5 | from torch import nn
6 | from io import BytesIO
7 | from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, \
8 | AutoModelForCausalLM
9 | import torch
10 | import torch.amp.autocast_mode
11 | from PIL import Image
12 | import numpy as np
13 |
14 |
15 | from ..base_config import init_instance , setup_logger
16 | from ..locales import _
17 |
18 | llm_logger = setup_logger('[LLM-Caption]')
19 |
20 | class JoyPipeline:
21 | def __init__(self):
22 | self.clip_model = None
23 | self.clip_processor = None
24 | self.tokenizer = None
25 | self.text_model = None
26 | self.image_adapter = None
27 | self.parent = None
28 |
29 | def clearCache(self):
30 | self.clip_model = None
31 | self.clip_processor = None
32 | self.tokenizer = None
33 | self.text_model = None
34 | self.image_adapter = None
35 |
36 |
37 | class ImageAdapter(nn.Module):
38 | def __init__(self, input_features: int, output_features: int):
39 | super().__init__()
40 | self.linear1 = nn.Linear(input_features, output_features)
41 | self.activation = nn.GELU()
42 | self.linear2 = nn.Linear(output_features, output_features)
43 |
44 | def forward(self, vision_outputs: torch.Tensor):
45 | x = self.linear1(vision_outputs)
46 | x = self.activation(x)
47 | x = self.linear2(x)
48 | return x
49 |
50 |
51 | class Joy_caption_load:
52 |
53 | def __init__(self):
54 | self.model = None
55 | self.pipeline = JoyPipeline()
56 | self.pipeline.parent = self
57 | self.config = init_instance.config
58 | pass
59 |
60 |     def loadCheckPoint(self):
61 |         # Clear any previously cached pipeline first
62 |         if self.pipeline is not None:
63 |             self.pipeline.clearCache()
64 |
65 |         # CLIP: load the SigLIP checkpoint once and keep
66 |         # only its vision tower for image feature extraction
67 |         model_id = self.config.server_settings['llm_caption']['clip']
68 |
69 |         clip_processor = AutoProcessor.from_pretrained(model_id)
70 |         clip_model = AutoModel.from_pretrained(
71 |             model_id,
72 |             trust_remote_code=True
73 |         )
74 |
75 | clip_model = clip_model.vision_model
76 | clip_model.eval()
77 | clip_model.requires_grad_(False)
78 | clip_model.to("cuda")
79 |
80 | # LLM
81 | model_path_llm = self.config.server_settings['llm_caption']['llm']
82 | tokenizer = AutoTokenizer.from_pretrained(model_path_llm, use_fast=False)
83 | assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer,
84 | PreTrainedTokenizerFast), f"Tokenizer is of type {type(tokenizer)}"
85 |
86 | text_model = AutoModelForCausalLM.from_pretrained(model_path_llm, device_map="auto", trust_remote_code=True)
87 | text_model.eval()
88 |
89 |         # Image Adapter
90 |
91 | image_adapter = ImageAdapter(clip_model.config.hidden_size,
92 | text_model.config.hidden_size) # ImageAdapter(clip_model.config.hidden_size, 4096)
93 | image_adapter.load_state_dict(torch.load(self.config.server_settings['llm_caption']['image_adapter'], map_location="cpu", weights_only=True))
94 | adjusted_adapter = image_adapter # AdjustedImageAdapter(image_adapter, text_model.config.hidden_size)
95 | adjusted_adapter.eval()
96 | adjusted_adapter.to("cuda")
97 |
98 | self.pipeline.clip_model = clip_model
99 | self.pipeline.clip_processor = clip_processor
100 | self.pipeline.tokenizer = tokenizer
101 | self.pipeline.text_model = text_model
102 | self.pipeline.image_adapter = adjusted_adapter
103 |
104 | def clearCache(self):
105 |         if self.pipeline is not None:
106 | self.pipeline.clearCache()
107 |
108 | def gen(self, model):
109 |         if self.model is None or self.model != model or self.pipeline is None:
110 | self.model = model
111 | self.loadCheckPoint()
112 | return (self.pipeline,)
113 |
114 |
115 | class Joy_caption:
116 |
117 | def __init__(self):
118 | pass
119 |
120 | @staticmethod
121 |     def tensor2pil(t_image: torch.Tensor) -> Image.Image:
122 | return Image.fromarray(np.clip(255.0 * t_image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
123 |
124 | def gen(
125 | self,
126 | joy_pipeline=JoyPipeline,
127 | image=Image,
128 | prompt="A descriptive caption for this image",
129 | max_new_tokens=300,
130 | temperature=0.5,
131 | cache=False
132 | ):
133 |
134 |         if joy_pipeline.clip_processor is None:
135 | joy_pipeline.parent.loadCheckPoint()
136 |
137 | clip_processor = joy_pipeline.clip_processor
138 | tokenizer = joy_pipeline.tokenizer
139 | clip_model = joy_pipeline.clip_model
140 | image_adapter = joy_pipeline.image_adapter
141 | text_model = joy_pipeline.text_model
142 |
143 | input_image = image
144 |
145 | # Preprocess image
146 | pImge = clip_processor(images=input_image, return_tensors='pt').pixel_values
147 | pImge = pImge.to('cuda')
148 |
149 | # Tokenize the prompt
150 | prompt = tokenizer.encode(prompt, return_tensors='pt', padding=False, truncation=False,
151 | add_special_tokens=False)
152 | # Embed image
153 | with torch.amp.autocast_mode.autocast('cuda', enabled=True):
154 | vision_outputs = clip_model(pixel_values=pImge, output_hidden_states=True)
155 | image_features = vision_outputs.hidden_states[-2]
156 | embedded_images = image_adapter(image_features)
157 | embedded_images = embedded_images.to('cuda')
158 |
159 | # Embed prompt
160 | prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))
161 | assert prompt_embeds.shape == (1, prompt.shape[1],
162 | text_model.config.hidden_size), f"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}"
163 | embedded_bos = text_model.model.embed_tokens(
164 | torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))
165 |
166 | # Construct prompts
167 | inputs_embeds = torch.cat([
168 | embedded_bos.expand(embedded_images.shape[0], -1, -1),
169 | embedded_images.to(dtype=embedded_bos.dtype),
170 | prompt_embeds.expand(embedded_images.shape[0], -1, -1),
171 | ], dim=1)
172 |
173 | input_ids = torch.cat([
174 | torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),
175 | torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),
176 | prompt,
177 | ], dim=1).to('cuda')
178 | attention_mask = torch.ones_like(input_ids)
179 |
180 | generate_ids = text_model.generate(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask,
181 | max_new_tokens=max_new_tokens, do_sample=True, top_k=10,
182 | temperature=temperature, suppress_tokens=None)
183 |
184 | # Trim off the prompt
185 | generate_ids = generate_ids[:, input_ids.shape[1]:]
186 | if generate_ids[0][-1] == tokenizer.eos_token_id:
187 | generate_ids = generate_ids[:, :-1]
188 |
189 | caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
190 | r = caption.strip()
191 |
192 |         if not cache:
193 | joy_pipeline.parent.clearCache()
194 |
195 | return (r,)
196 |
197 |
198 | class JoyCaptionHandler:
199 | def __init__(self, config):
200 | self.config = config
201 | self.pipeline, self.joy_caption = self._initialize()
202 |
203 | def _initialize(self):
204 | llm_logger.info(_("Loading LLM"))
205 | joy_caption_load = Joy_caption_load()
206 | model_path = self.config.server_settings['llm_caption']['llm']
207 | pipeline, = joy_caption_load.gen(model_path)
208 | joy_caption = Joy_caption()
209 | llm_logger.info(_("LLM loading completed, waiting for command"))
210 | return pipeline, joy_caption
211 |
212 | async def get_caption(self, image, ntags=[]):
213 |         if image.startswith("data:image/png;base64,"):
214 | image = image.replace("data:image/png;base64,", "")
215 | image = Image.open(BytesIO(base64.b64decode(image))).convert(mode="RGB")
216 |
217 |         extra_ = f"do not describe {','.join(ntags)} if they exist" if ntags else ''
218 | loop = asyncio.get_event_loop()
219 |
220 | caption = await loop.run_in_executor(
221 | None,
222 | self.joy_caption.gen,
223 | self.pipeline,
224 | image,
225 | f"A descriptive caption for this image, do not describe a signature or text in the image,{extra_}",
226 | 300,
227 | 0.5,
228 | True
229 | )
230 |
231 | return caption[0]
232 |
233 |
234 | config = init_instance.config
235 | if config.server_settings['llm_caption']['enable']:
236 | joy_caption_handler = JoyCaptionHandler(config)
237 |
--------------------------------------------------------------------------------
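`joy_caption_handler` is only instantiated when `llm_caption.enable` is true in the config, and loading it pulls the configured SigLIP and LLM checkpoints onto CUDA. A usage sketch (the image path is a placeholder):

```python
import asyncio
import base64

# joy_caption_handler only exists when llm_caption.enable is true
from DrawBridgeAPI.utils.llm_captions import joy_caption_handler

async def demo():
    with open("sample.png", "rb") as f:  # placeholder image path
        b64 = base64.b64encode(f.read()).decode()
    caption = await joy_caption_handler.get_caption(b64, ntags=["watermark"])
    print(caption)

asyncio.run(demo())
```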
/DrawBridgeAPI/utils/request_model.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, conint
2 | from dataclasses import field
3 | from typing import Optional, List, Dict, Any
4 | from pathlib import Path
5 | import random
6 |
7 |
8 | class RequetModelClass(BaseModel):
9 | pass
10 |
11 |
12 | class Txt2ImgRequest(RequetModelClass):
13 | prompt: Optional[str] = ""
14 | negative_prompt: Optional[str] = ""
15 | styles: List[str] = []
16 | seed: int = -1
17 | subseed: int = -1
18 | subseed_strength: float = 0
19 | seed_resize_from_h: int = -1
20 | seed_resize_from_w: int = -1
21 | sampler_name: str = "Euler a"
22 | batch_size: int = 1
23 | n_iter: int = 1
24 | steps: int = 20
25 | cfg_scale: float = 7
26 | width: int = 512
27 | height: int = 512
28 | restore_faces: bool = False
29 | tiling: bool = False
30 | do_not_save_samples: bool = False
31 | do_not_save_grid: bool = False
32 | eta: float = 0
33 | denoising_strength: float = 1
34 | s_min_uncond: float = 0
35 | s_churn: float = 0
36 | s_tmax: float = 0
37 | s_tmin: float = 0
38 | s_noise: float = 0
39 | override_settings: Dict[str, Any] = {}
40 | override_settings_restore_afterwards: bool = False
41 | refiner_checkpoint: str = ""
42 | refiner_switch_at: int = 0
43 | disable_extra_networks: bool = False
44 | comments: Dict[str, Any] = {}
45 | enable_hr: bool = False
46 | firstphase_width: int = 0
47 | firstphase_height: int = 0
48 | hr_scale: float = 2
49 | hr_upscaler: str = ""
50 | hr_second_pass_steps: int = 10
51 | hr_resize_x: int = 0
52 | hr_resize_y: int = 0
53 | hr_checkpoint_name: str = ""
54 | hr_sampler_name: str = ""
55 | hr_prompt: str = ""
56 | hr_negative_prompt: str = ""
57 | sampler_index: str = "Euler a"
58 | script_name: str = ""
59 | script_args: List[Any] = []
60 | send_images: bool = True
61 | save_images: bool = True
62 | alwayson_scripts: Dict[str, Any] = {}
63 | scheduler: str = "Automatic"
64 |
65 |
66 | class Img2ImgRequest(RequetModelClass):
67 | prompt: Optional[str] = ""
68 | negative_prompt: Optional[str] = ""
69 | styles: List[str] = []
70 | seed: int = -1
71 | subseed: int = -1
72 | subseed_strength: float = 0
73 | seed_resize_from_h: int = -1
74 | seed_resize_from_w: int = -1
75 | sampler_name: str = "Euler a"
76 | batch_size: int = 1
77 | n_iter: int = 1
78 | steps: int = 50
79 | cfg_scale: float = 7
80 | width: int = 512
81 | height: int = 512
82 | restore_faces: bool = False
83 | tiling: bool = False
84 | do_not_save_samples: bool = False
85 | do_not_save_grid: bool = False
86 | eta: float = 0
87 | denoising_strength: float = 0.75
88 | s_min_uncond: float = 0
89 | s_churn: float = 0
90 | s_tmax: float = 0
91 | s_tmin: float = 0
92 | s_noise: float = 0
93 | override_settings: Dict[str, Any] = {}
94 | override_settings_restore_afterwards: bool = False
95 | refiner_checkpoint: str = ""
96 | refiner_switch_at: int = 0
97 | disable_extra_networks: bool = False
98 | comments: Dict[str, Any] = {}
99 | init_images: List[str] = [""]
100 | resize_mode: int = 0
101 | image_cfg_scale: float = 0
102 |     mask: Optional[str] = None
103 | mask_blur_x: int = 4
104 | mask_blur_y: int = 4
105 | mask_blur: int = 0
106 | inpainting_fill: int = 0
107 | inpaint_full_res: bool = True
108 | inpaint_full_res_padding: int = 0
109 | inpainting_mask_invert: int = 0
110 | initial_noise_multiplier: float = 0
111 | latent_mask: str = ""
112 | sampler_index: str = "Euler a"
113 | include_init_images: bool = False
114 | script_name: str = ""
115 | script_args: List[Any] = []
116 | send_images: bool = True
117 | save_images: bool = True
118 | alwayson_scripts: Dict[str, Any] = {}
119 | scheduler: str = "Automatic"
120 |     # Extension fields below
121 |
122 |
123 | class TaggerRequest(RequetModelClass):
124 |     image: str = ''
125 |     model: Optional[str] = 'wd14-vit-v2'
126 |     threshold: Optional[float] = 0.35
127 |     exclude_tags: Optional[List[str]] = []
128 |
129 |
130 | class TopazAiRequest(BaseModel):
131 | image: Optional[str] = None
132 | input_folder: Optional[str | Path]
133 | output_folder: Optional[str] = None
134 | overwrite: Optional[bool] = False
135 | recursive: Optional[bool] = False
136 |     format: Optional[str] = "preserve"  # allowed values: jpg, jpeg, png, tif, tiff, dng, preserve
137 |     quality: Optional[conint(ge=0, le=100)] = 95  # JPEG quality, 0 to 100
138 |     compression: Optional[conint(ge=0, le=10)] = 2  # PNG compression, 0 to 10
139 |     bit_depth: Optional[conint(strict=True, ge=8, le=16)] = 16  # TIFF bit depth, 8 or 16
140 |     tiff_compression: Optional[str] = "zip"  # allowed values: none, lzw, zip
141 | show_settings: Optional[bool] = False
142 | skip_processing: Optional[bool] = False
143 | verbose: Optional[bool] = False
144 | upscale: Optional[bool] = None
145 | noise: Optional[bool] = None
146 | sharpen: Optional[bool] = None
147 | lighting: Optional[bool] = None
148 | color: Optional[bool] = None
149 |
150 |
151 | class SetConfigRequest(BaseModel):
152 | class Config:
153 | extra = "allow"
154 |
--------------------------------------------------------------------------------
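`Txt2ImgRequest` mirrors the payload of A1111's `/sdapi/v1/txt2img`, so a validated request can be serialized straight back into a forwardable JSON body. A brief sketch; `model_dump()` assumes pydantic v2 (use `.dict()` on v1):

```python
from DrawBridgeAPI.utils.request_model import Txt2ImgRequest

req = Txt2ImgRequest(prompt="1girl, solo", width=832, height=1216, steps=28)
payload = req.model_dump()  # pydantic v2; use req.dict() on v1
# `payload` now has the same shape as the JSON body accepted by
# A1111's /sdapi/v1/txt2img, so it can be forwarded as-is.
```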
/DrawBridgeAPI/utils/shared.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | # Directory holding the bundled ComfyUI workflow JSON files
4 | PATH_TO_COMFYUI_WORKFLOWS = Path(__file__).resolve().parent.parent / "comfyui_workflows"
5 |
6 |
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/tagger-requirements.txt:
--------------------------------------------------------------------------------
1 | pandas
2 | numpy
3 | pillow
4 | huggingface_hub
5 | onnxruntime
6 |
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/tagger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 |
4 | import pandas as pd
5 | import numpy as np
6 | import base64
7 |
8 | from typing import Tuple, List, Dict
9 | from io import BytesIO
10 | from PIL import Image
11 |
12 | from pathlib import Path
13 | from huggingface_hub import hf_hub_download
14 |
15 | from ..base_config import setup_logger, init_instance
16 | from ..locales import _
17 |
18 |
19 | use_cpu = True
20 | tf_device_name = '/gpu:0' if not use_cpu else '/cpu:0'
21 |
22 | wd_logger = setup_logger('[TAGGER]')
23 | # https://github.com/toriato/stable-diffusion-webui-wd14-tagger
24 |
25 |
26 | class Interrogator:
27 | @staticmethod
28 | def postprocess_tags(
29 | tags: Dict[str, float],
30 | threshold=0.35,
31 | additional_tags: List[str] = [],
32 | exclude_tags: List[str] = [],
33 | sort_by_alphabetical_order=False,
34 | add_confident_as_weight=False,
35 | replace_underscore=False,
36 | replace_underscore_excludes: List[str] = [],
37 | escape_tag=False
38 | ) -> Dict[str, float]:
39 | for t in additional_tags:
40 | tags[t] = 1.0
41 |
42 | tags = {
43 | t: c
44 | for t, c in sorted(
45 | tags.items(),
46 | key=lambda i: i[0 if sort_by_alphabetical_order else 1],
47 | reverse=not sort_by_alphabetical_order
48 | )
49 | if (
50 | c >= threshold
51 | and t not in exclude_tags
52 | )
53 | }
54 |
55 | new_tags = []
56 | for tag in list(tags):
57 | new_tag = tag
58 |
59 | if replace_underscore and tag not in replace_underscore_excludes:
60 | new_tag = new_tag.replace('_', ' ')
61 |
62 | if escape_tag:
63 |                 new_tag = new_tag.replace('_', '\\_')
64 |
65 | if add_confident_as_weight:
66 | new_tag = f'({new_tag}:{tags[tag]})'
67 |
68 | new_tags.append((new_tag, tags[tag]))
69 | tags = dict(new_tags)
70 |
71 | return tags
72 |
73 | def __init__(self, name: str) -> None:
74 | self.name = name
75 |
76 | def load(self):
77 | raise NotImplementedError()
78 |
79 | def unload(self) -> bool:
80 | unloaded = False
81 |
82 | if hasattr(self, 'model') and self.model is not None:
83 | del self.model
84 | unloaded = True
85 | print(f'Unloaded {self.name}')
86 |
87 |         if hasattr(self, 'tags'):
88 | del self.tags
89 |
90 | return unloaded
91 |
92 | def interrogate(
93 | self,
94 | image: Image
95 | ) -> Tuple[
96 | Dict[str, float], # rating confidents
97 | Dict[str, float] # tag confidents
98 | ]:
99 | raise NotImplementedError()
100 |
101 |
102 | class WaifuDiffusionInterrogator(Interrogator):
103 | def __init__(
104 | self,
105 | name: str,
106 | model_path='model.onnx',
107 | tags_path='selected_tags.csv',
108 | **kwargs
109 | ) -> None:
110 | super().__init__(name)
111 | self.model_path = model_path
112 | self.tags_path = tags_path
113 | self.kwargs = kwargs
114 |
115 | def download(self) -> Tuple[os.PathLike, os.PathLike]:
116 | wd_logger.info(f"Loading {self.name} model file from {self.kwargs['repo_id']}")
117 |
118 | model_path = Path(hf_hub_download(
119 | **self.kwargs, filename=self.model_path))
120 | tags_path = Path(hf_hub_download(
121 | **self.kwargs, filename=self.tags_path))
122 | return model_path, tags_path
123 |
124 | def load(self) -> None:
125 | model_path, tags_path = self.download()
126 |
127 | from onnxruntime import InferenceSession
128 |
129 | providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
130 | if use_cpu:
131 | providers.pop(0)
132 |
133 | self.model = InferenceSession(str(model_path), providers=providers)
134 |
135 | wd_logger.info(f'Loaded {self.name} model from {model_path}')
136 |
137 | self.tags = pd.read_csv(tags_path)
138 |
139 | def interrogate(
140 | self,
141 | image: Image
142 | ) -> Tuple[
143 | Dict[str, float], # rating confidents
144 | Dict[str, float] # tag confidents
145 | ]:
146 | if not hasattr(self, 'model') or self.model is None:
147 | self.load()
148 |
149 | _, height, _, _ = self.model.get_inputs()[0].shape
150 |
151 | image = image.convert('RGBA')
152 | new_image = Image.new('RGBA', image.size, 'WHITE')
153 | new_image.paste(image, mask=image)
154 | image = new_image.convert('RGB')
155 | image = np.asarray(image)
156 |
157 | image = image[:, :, ::-1]
158 |
159 |         # Mimic dbimutils' make_square and smart_resize behaviour
160 | image = self.make_square(image, height)
161 | image = self.smart_resize(image, height)
162 | image = image.astype(np.float32)
163 | image = np.expand_dims(image, 0)
164 |
165 | input_name = self.model.get_inputs()[0].name
166 | label_name = self.model.get_outputs()[0].name
167 | confidents = self.model.run([label_name], {input_name: image})[0]
168 |
169 | tags = self.tags[:][['name']]
170 | tags['confidents'] = confidents[0]
171 |
172 | ratings = dict(tags[:4].values)
173 | tags = dict(tags[4:].values)
174 |
175 | return ratings, tags
176 |
177 |     @staticmethod
178 |     def make_square(image, size):
179 |         old_h, old_w = image.shape[:2]  # numpy arrays are (height, width)
180 |         ratio = float(size) / max(old_h, old_w)
181 |         new_w, new_h = int(old_w * ratio), int(old_h * ratio)
182 |         image = Image.fromarray(image)
183 |         image = image.resize((new_w, new_h), Image.LANCZOS)  # PIL expects (width, height)
184 |         new_image = Image.new("RGB", (size, size))
185 |         new_image.paste(image, ((size - new_w) // 2,
186 |                                 (size - new_h) // 2))
187 |         return np.array(new_image)
188 |
189 | @staticmethod
190 | def smart_resize(image, size):
191 | image = Image.fromarray(image)
192 | image = image.resize((size, size), Image.LANCZOS)
193 | return np.array(image)
194 |
195 |
196 | class WaifuDiffusionTaggerHandler:
197 | def __init__(self, name, repo_id, revision, model_path, tags_path):
198 | self.name = name
199 | self.repo_id = repo_id
200 | self.revision = revision
201 | self.model_path = model_path
202 | self.tags_path = tags_path
203 | self.wd_instance = self._initialize()
204 |
205 | def _initialize(self):
206 | wd_instance = WaifuDiffusionInterrogator(
207 | name=self.name,
208 | repo_id=self.repo_id,
209 | revision=self.revision,
210 | model_path=self.model_path,
211 | tags_path=self.tags_path
212 | )
213 | wd_logger.info(_("Loading Checkpoint"))
214 | wd_instance.load()
215 | wd_logger.info(_("Checkpoint loading completed, waiting for command"))
216 | return wd_instance
217 |
218 | async def tagger_main(self, base64_img, threshold, ntags=None, audit=False, ratings_=False):
219 | ntags = ntags or []  # avoid the mutable-default-argument pitfall
220 | if base64_img.startswith("data:image/png;base64,"):
221 | base64_img = base64_img.replace("data:image/png;base64,", "", 1)
222 | image_data = base64.b64decode(base64_img)
223 | image = Image.open(BytesIO(image_data))
224 |
225 | loop = asyncio.get_event_loop()
226 | ratings, tags = await loop.run_in_executor(
227 | None,
228 | self.wd_instance.interrogate,
229 | image
230 | )
231 | if ratings_:
232 | return ratings
233 | if audit:
234 | # flag the image when the highest-scoring rating
235 | # is "questionable" or "explicit"
236 | top_rating = max(ratings, key=ratings.get)
237 | return top_rating in ("questionable", "explicit")
238 |
239 |
240 | # post-process the tags
241 | processed_tags = Interrogator.postprocess_tags(
242 | tags=tags,
243 | threshold=threshold,
244 | additional_tags=['best quality', 'highres'],
245 | exclude_tags=['lowres'] + ntags,
246 | sort_by_alphabetical_order=False,
247 | add_confident_as_weight=True,
248 | replace_underscore=True,
249 | replace_underscore_excludes=[],
250 | escape_tag=False
251 | )
252 |
253 | def process_dict(input_dict):  # strip the "(tag:weight)" wrapper back to bare tag names
254 | processed_dict = {}
255 | for key, value in input_dict.items():
256 | cleaned_key = key.strip('()').split(':')[0]
257 | processed_dict[cleaned_key] = value
258 | return processed_dict
259 |
260 | processed_tags = process_dict(processed_tags)
261 |
262 | return {**ratings, **processed_tags}
263 |
264 |
265 | config = init_instance.config
266 | if config.server_settings['build_in_tagger']:
267 | wd_tagger_handler = WaifuDiffusionTaggerHandler(
268 | name='WaifuDiffusion',
269 | repo_id='SmilingWolf/wd-convnext-tagger-v3',
270 | revision='v2.0',
271 | model_path='model.onnx',
272 | tags_path='selected_tags.csv'
273 | )
--------------------------------------------------------------------------------
/DrawBridgeAPI/utils/topaz.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from ..base_config import init_instance, setup_logger
3 |
4 | topazai_logger = setup_logger('[TopazAI]')
5 |
6 |
7 | def run_tpai(
8 | input_folder, output_folder=None, overwrite=False, recursive=False,
9 | format="preserve", quality=95, compression=2, bit_depth=16,
10 | tiff_compression="zip", show_settings=False, skip_processing=False,
11 | verbose=False, upscale=None, noise=None, sharpen=None,
12 | lighting=None, color=None, **kwargs
13 | ):
14 | # Base command and input folder
15 | command = [rf'"{init_instance.config.server_settings["build_in_photoai"]["exec_path"]}"', f'"{input_folder}"']
16 |
17 | # Output folder
18 | if output_folder:
19 | command.extend(["--output", f'"{output_folder}"'])
20 |
21 | # Overwrite existing files
22 | if overwrite:
23 | command.append("--overwrite")
24 |
25 | # Process subfolders recursively
26 | if recursive:
27 | command.append("--recursive")
28 |
29 | # File format options
30 | if format:
31 | command.extend(["--format", format])
32 | if quality is not None:
33 | command.extend(["--quality", str(quality)])
34 | if compression is not None:
35 | command.extend(["--compression", str(compression)])
36 | if bit_depth is not None:
37 | command.extend(["--bit-depth", str(bit_depth)])
38 | if tiff_compression:
39 | command.extend(["--tiff-compression", tiff_compression])
40 |
41 | # Debug options
42 | if show_settings:
43 | command.append("--showSettings")
44 | if skip_processing:
45 | command.append("--skipProcessing")
46 | if verbose:
47 | command.append("--verbose")
48 |
49 | # Enhancement settings (experimental)
50 | if upscale is not None:
51 | command.extend(["--upscale", f"enabled={str(upscale).lower()}"])
52 | if noise is not None:
53 | command.extend(["--noise", f"enabled={str(noise).lower()}"])
54 | if sharpen is not None:
55 | command.extend(["--sharpen", f"enabled={str(sharpen).lower()}"])
56 | if lighting is not None:
57 | command.extend(["--lighting", f"enabled={str(lighting).lower()}"])
58 | if color is not None:
59 | command.extend(["--color", f"enabled={str(color).lower()}"])
60 |
61 | # Log and execute the command; the arguments are pre-quoted, so pass a single string to the shell
62 | topazai_logger.info(" ".join(command))
63 | result = subprocess.run(" ".join(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
64 | # Return stdout/stderr, ignoring undecodable characters
65 | return result.stdout.decode(errors='ignore'), result.stderr.decode(errors='ignore'), result.returncode
66 |
67 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Sena
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Calling Various Online AI Drawing Website APIs With a Simple Gradio UI
2 |
3 | ## An API Compatible with the A1111 WebUI API
4 |
5 | https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API
6 |
7 | ## If you have a backend that needs adapting, feel free to open an issue and I will work on it.
8 |
9 | ## The project will continue to be improved, and I will write some API usage tutorials.
10 |
11 |
12 | ## [中文说明 (Chinese)](README_ZH.md)
13 | This project plans to implement i18n (working on it).
14 | ## 
15 | Checkpoint: https://huggingface.co/nyanko7/flux-dev-anime-cg
16 |
17 | ## Environment Requirements: Python 3.10 and Redis
18 |
19 | ## Features
20 | - A simple Gradio GUI where you can switch models (served on the FastAPI port + 1)
21 | 
22 | - Multi-backend load balancing
23 | 
24 | - Automatic locking for backends that do not support concurrency
25 | - Built-in caption feature (https://github.com/toriato/stable-diffusion-webui-wd14-tagger), currently supporting natural-language tagging (CUDA / 8 GB VRAM) and the WD tagger; see the usage sketch below
26 | 
27 |
28 | ## Supported Backends
29 | - https://github.com/AUTOMATIC1111/stable-diffusion-webui
30 | - https://civitai.com/
31 | - https://fal.ai/models/fal-ai/flux/schnell
32 | - https://replicate.com/black-forest-labs/flux-schnell
33 | - https://www.liblib.art/
34 | - https://tusiart.com/
35 | - https://www.seaart.ai/
36 | - https://www.yunjie.art/
37 | - https://github.com/comfyanonymous/ComfyUI
38 | - https://novelai.net/image
39 | - https://github.com/novicezk/midjourney-proxy
40 |
41 | ### Deployment Guide (Windows CMD)
42 | ### A Redis server must be running on the host. Please install it yourself.
43 |
44 | Python 3.10
45 | ```
46 | git clone https://github.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI
47 | cd Stable-Diffusion-DrawBridgeAPI
48 | ```
49 |
50 | #### Install Dependencies
51 | ```
52 | python -m venv venv
53 | .\venv\Scripts\python -m pip install -r .\requirements.txt
54 | ```
55 |
56 | #### Modify Configuration File
57 | Copy `config_example.yaml` to `config.yaml`.
58 | [See Detailed Instructions](DrawBridgeAPI/config_example.yaml)
59 | #### How to enable a backend
60 | 
61 | #### Start
62 | 
63 | ```
64 | .\venv\Scripts\python.exe -m DrawBridgeAPI.api_server --port=8000 --host=127.0.0.1 --conf .\DrawBridgeAPI\config.yaml
65 | ```
66 |
67 | #### Access
68 | Visit http://localhost:8000/docs# for help.
69 |
70 | #### Warning
71 | The API currently has no authentication; do not expose this API to the public, or it may be abused.
72 |
73 | Testing with curl:
74 | ```
75 | curl -X POST -H "Content-Type: application/json" -d '{"prompt": "reimu", "width": 512, "height": 768}' http://localhost:8000/sdapi/v1/txt2img
76 | ```
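The response should follow the standard A1111 schema, where `images` is a list of base64-encoded images. A minimal decoding sketch in Python (using `httpx`, which is already a project dependency; the output filenames are placeholders):

```python
import base64

import httpx

payload = {"prompt": "reimu", "width": 512, "height": 768}
resp = httpx.post("http://localhost:8000/sdapi/v1/txt2img", json=payload, timeout=300)
resp.raise_for_status()

# Each entry in "images" is a base64-encoded image file.
for i, img_b64 in enumerate(resp.json()["images"]):
    with open(f"txt2img_{i}.png", "wb") as f:
        f.write(base64.b64decode(img_b64))
```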
77 | The server can also be started from inside the `DrawBridgeAPI` directory: `..\venv\Scripts\python api_server.py --port=8000 --host=127.0.0.1`
78 |
79 | #### Optional Service
80 | Start the built-in tagging server.
81 | Set `server_settings - build_in_tagger` to true in the `config.yaml` file to start it, and install the dependencies.
82 |
83 | If the working directory is `Stable-Diffusion-DrawBridgeAPI\DrawBridgeAPI`:
84 | ```
85 | ..\venv\Scripts\python -m pip install -r utils/tagger-requirements.txt
86 | ```
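For reference, a sketch of how that switch likely nests in `config.yaml` (the code reads `config.server_settings['build_in_tagger']`; see `config_example.yaml` for the authoritative layout):

```yaml
server_settings:
  build_in_tagger: true  # serve the built-in WD tagger alongside the API
```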
87 | ## TODO
88 | - A simple frontend for easy drawing, tagging, and other operations.
89 | ## Changelog
90 |
91 | ### 2024-08-28
92 | Updated to use LLM for natural language image tagging, adapted from https://github.com/StartHua/Comfyui_CXH_joy_caption.
93 | Note: Requires GPU and 8G VRAM.
94 | 
95 | ```yaml
96 | build_in_tagger:
97 | true
98 | llm_caption: # Use LLM for natural language tagging
99 | enable:
100 | true
101 | clip:
102 | google/siglip-so400m-patch14-384
103 | llm:
104 | unsloth/Meta-Llama-3.1-8B-bnb-4bit
105 | image_adapter: # https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/tree/main/wpkklhc6
106 | image_adapter.pt
107 | ```
108 | Dependencies are listed in `llm_caption_requirements.txt`.
--------------------------------------------------------------------------------
/README_ZH.md:
--------------------------------------------------------------------------------
1 | # Calling Various Online AI Drawing Site APIs, With a Simple GUI
2 | This project plans to implement i18n.
3 | [[English]](README.md)
4 |
5 | ## An API Compatible with the A1111 WebUI API
6 | https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API
7 | ## 
8 | Checkpoint: https://huggingface.co/nyanko7/flux-dev-anime-cg
9 | ## Environment Requirements
10 | Python 3.10 and Redis
11 |
12 | ## Features
13 | - A simple Gradio UI (served on the FastAPI port + 1) where you can switch models
14 | ![](images/gradio.png)
15 | - Multi-backend load balancing
16 | ![](images/posting.png)
17 | - Automatic locking for backends that do not support concurrency
18 | - Built-in captioning, currently supporting natural-language tagging (CUDA / 8 GB VRAM) and the WD tagger
19 | 
20 |
21 | ## Supported Backends
22 | - https://github.com/AUTOMATIC1111/stable-diffusion-webui
23 | - https://civitai.com/
24 | - https://fal.ai/models/fal-ai/flux/schnell
25 | - https://replicate.com/black-forest-labs/flux-schnell
26 | - https://www.liblib.art/
27 | - https://tusiart.com/
28 | - https://www.seaart.ai/
29 | - https://www.yunjie.art/
30 | - https://github.com/comfyanonymous/ComfyUI
31 |
32 | ## QQ Group: 575601916
33 |
34 | ### Deployment Guide (Windows CMD)
35 | ### A Redis server must be running on the host. Please install it yourself.
36 |
37 | Python 3.10
38 | ```
39 | git clone https://github.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI
40 | cd Stable-Diffusion-DrawBridgeAPI
41 | ```
42 |
43 | #### Install Dependencies
44 | ```
45 | python -m venv venv
46 | .\venv\Scripts\python -m pip install -r .\requirements.txt
47 | ```
48 |
49 | #### Modify the Configuration File
50 | Copy `config_example.yaml` to `config.yaml`.
51 | [See detailed instructions](DrawBridgeAPI/config_example.yaml)
52 |
53 | #### Start
54 | ```
55 | .\venv\Scripts\python.exe -m DrawBridgeAPI.api_server --port=8000 --host=127.0.0.1 --conf .\DrawBridgeAPI\config.yaml
56 | ```
57 |
58 | #### Access
59 | Visit http://localhost:8000/docs# for help.
60 |
61 | #### Warning
62 | The API currently has no authentication; do not expose it to the public internet, or it may be abused.
63 |
64 | Testing with curl:
65 | ```
66 | curl -X POST -H "Content-Type: application/json" -d '{"prompt": "reimu", "width": 512, "height": 768}' http://localhost:8000/sdapi/v1/txt2img
67 | ```
68 |
69 | #### Optional Services
70 | Start the built-in tagging server.
71 | Set `server_settings - build_in_tagger` to `true` in `config.yaml` to enable it, and install the dependencies.
72 |
73 | If the working directory is `Stable-Diffusion-DrawBridgeAPI\DrawBridgeAPI`:
74 | ```
75 | ..\venv\Scripts\python -m pip install -r utils/tagger-requirements.txt
76 | ```
77 |
78 | ### TODO
79 | A simple frontend for drawing, tagging, and other operations.
80 |
81 | ### Changelog
82 | ### 2024-08-28
83 | Added LLM-based natural-language image captioning, adapted from https://github.com/StartHua/Comfyui_CXH_joy_caption
84 | Note: requires a GPU with 8 GB of VRAM
85 | 
86 | ```yaml
87 | build_in_tagger:
88 | false
89 | llm_caption: # use an LLM for natural-language captioning
90 | enable:
91 | true
92 | clip:
93 | google/siglip-so400m-patch14-384
94 | llm:
95 | unsloth/Meta-Llama-3.1-8B-bnb-4bit
96 | image_adapter: # https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha/tree/main/wpkklhc6
97 | image_adapter.pt
98 | ```
99 | Dependencies are listed in `llm_caption_requirements.txt`.
100 |
--------------------------------------------------------------------------------
/docs/API.md:
--------------------------------------------------------------------------------
1 | # API Features
2 |
--------------------------------------------------------------------------------
/images/caption.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/caption.png
--------------------------------------------------------------------------------
/images/cover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/cover.png
--------------------------------------------------------------------------------
/images/gradio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/gradio.png
--------------------------------------------------------------------------------
/images/how_to_enable_backend1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/how_to_enable_backend1.png
--------------------------------------------------------------------------------
/images/how_to_enable_backend2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/how_to_enable_backend2.png
--------------------------------------------------------------------------------
/images/idle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/idle.png
--------------------------------------------------------------------------------
/images/posting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/posting.png
--------------------------------------------------------------------------------
/images/working.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DiaoDaiaChan/Stable-Diffusion-DrawBridgeAPI/81f059531ee62d49fed089f80bfd77f8ecdf05ed/images/working.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "stable-diffusion-drawbridge-api"
3 | version = "1.4"
4 | description = "A multi-functional load-balancing API that calls various drawing backends through the standard A1111-SDWebUI API"
5 | authors = [
6 | {name = "DiaoDaiaChan", email = "diaodaiachan@qq.com"},
7 | ]
8 | dependencies = [
9 | "aiohttp",
10 | "aiofiles",
11 | "Pillow>=10.4.0",
12 | "civitai-py",
13 | "civitai",
14 | "colorama",
15 | "fastapi",
16 | "fal-client",
17 | "httpx==0.27.0",
18 | "piexif",
19 | "pydantic",
20 | "PyYAML",
21 | "redis",
22 | "replicate",
23 | "tqdm",
24 | "uvicorn",
25 | "gradio"
26 | ]
27 | requires-python = ">=3.10"
28 | readme = "README.md"
29 | license = {text = "MIT"}
30 |
31 | [tool.pdm]
32 | exclude = ["./DrawBridgeAPI/config.yaml", "./DrawBridgeAPI/*.log", "./DrawBridgeAPI/saved_images"]
33 |
34 |
35 | [build-system]
36 | requires = ["pdm-backend"]
37 | build-backend = "pdm.backend"
38 |
39 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiofiles
2 | aiohttp
3 | Pillow
4 | civitai-py
5 | civitai
6 | colorama
7 | fastapi
8 | fal-client
9 | httpx
10 | httpx[http2]
11 | piexif
12 | pydantic
13 | PyYAML
14 | redis
15 | replicate
16 |
17 | tqdm
18 | uvicorn
19 |
20 | gradio>=4.0.0
--------------------------------------------------------------------------------