├── .gitignore
├── LICENSE
├── README.md
├── dev-requirements.txt
├── main.py
├── ml.py
├── requirements.txt
└── token.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 FourthBrain
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
9 |
10 | # FastAPI for Stable Diffusion LLMs Demo
11 |
12 | This repository contains the files to build your very own AI image generation web application! Outlined are the core components of the FastAPI web framework, and application leverage the newly-released Stable Diffusion text-to-image deep learning model.
13 |
14 | 📺 You can checkout the full video [here](https://www.youtube.com/watch?v=_BZGtifh_gw)!
15 |
16 | 
17 |
18 | 
19 |
--------------------------------------------------------------------------------
/dev-requirements.txt:
--------------------------------------------------------------------------------
1 | mypy
2 | ruff
3 | black
4 | ipykernel
5 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import io
2 |
3 | from fastapi import FastAPI
4 | from fastapi.responses import FileResponse, StreamingResponse
5 | from pydantic import BaseModel
6 |
7 | from ml import obtain_image
8 |
9 | app = FastAPI()
10 |
11 |
12 | @app.get("/")
13 | def read_root():
14 | return {"Hello": "World"}
15 |
16 |
17 | @app.get("/items/{item_id}")
18 | def read_item(item_id: int):
19 | return {"item_id": item_id}
20 |
21 |
22 | class Item(BaseModel):
23 | name: str
24 | price: float
25 | tags: list[str] = []
26 |
27 |
28 | @app.post("/items/")
29 | def create_item(item: Item):
30 | return item
31 |
32 |
33 | @app.get("/generate")
34 | def generate_image(
35 | prompt: str,
36 | *,
37 | seed: int | None = None,
38 | num_inference_steps: int = 50,
39 | guidance_scale: float = 7.5
40 | ):
41 | image = obtain_image(
42 | prompt,
43 | num_inference_steps=num_inference_steps,
44 | seed=seed,
45 | guidance_scale=guidance_scale,
46 | )
47 | image.save("image.png")
48 | return FileResponse("image.png")
49 |
50 |
51 | @app.get("/generate-memory")
52 | def generate_image_memory(
53 | prompt: str,
54 | *,
55 | seed: int | None = None,
56 | num_inference_steps: int = 50,
57 | guidance_scale: float = 7.5
58 | ):
59 | image = obtain_image(
60 | prompt,
61 | num_inference_steps=num_inference_steps,
62 | seed=seed,
63 | guidance_scale=guidance_scale,
64 | )
65 | memory_stream = io.BytesIO()
66 | image.save(memory_stream, format="PNG")
67 | memory_stream.seek(0)
68 | return StreamingResponse(memory_stream, media_type="image/png")
69 |
--------------------------------------------------------------------------------
/ml.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import torch
4 | from diffusers import StableDiffusionPipeline
5 | from PIL.Image import Image
6 |
7 | token_path = Path("token.txt")
8 | token = token_path.read_text().strip()
9 |
10 | # get your token at https://huggingface.co/settings/tokens
11 | pipe = StableDiffusionPipeline.from_pretrained(
12 | "CompVis/stable-diffusion-v1-4",
13 | revision="fp16",
14 | torch_dtype=torch.float16,
15 | use_auth_token=token,
16 | )
17 |
18 | pipe.to("cuda")
19 |
20 | # prompt = "a photograph of an astronaut riding a horse"
21 |
22 |
23 | # image = pipe(prompt)["sample"][0]
24 |
25 |
26 | def obtain_image(
27 | prompt: str,
28 | *,
29 | seed: int | None = None,
30 | num_inference_steps: int = 50,
31 | guidance_scale: float = 7.5,
32 | ) -> Image:
33 | generator = None if seed is None else torch.Generator("cuda").manual_seed(seed)
34 | print(f"Using device: {pipe.device}")
35 | image: Image = pipe(
36 | prompt,
37 | guidance_scale=guidance_scale,
38 | num_inference_steps=num_inference_steps,
39 | generator=generator,
40 | ).images[0]
41 | return image
42 |
43 |
44 | # image = obtain_image(prompt, num_inference_steps=5, seed=1024)
45 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi[all]
2 | diffusers==0.4.0
3 | transformers
4 | scipy
5 | ftfy
6 |
--------------------------------------------------------------------------------
/token.txt:
--------------------------------------------------------------------------------
1 | hf_fill-this-in-with-your-token
--------------------------------------------------------------------------------