├── .gitignore ├── Dockerfile ├── LICENSE.txt ├── README.md ├── build.sh ├── handler.py └── test_input.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock
93 | 
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | permutations
131 | tests/output
132 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG FROM_IMAGE="gadicc/diffusers-api"
2 | FROM ${FROM_IMAGE} as base
3 | ENV FROM_IMAGE=${FROM_IMAGE}
4 | 
5 | ARG MODEL_ID="stabilityai/stable-diffusion-2-1-base"
6 | ENV MODEL_ID=${MODEL_ID}
7 | ARG HF_MODEL_ID=""
8 | ENV HF_MODEL_ID=${HF_MODEL_ID}
9 | ARG MODEL_PRECISION="fp16"
10 | ENV MODEL_PRECISION=${MODEL_PRECISION}
11 | ARG MODEL_REVISION="fp16"
12 | ENV MODEL_REVISION=${MODEL_REVISION}
13 | #ARG MODEL_URL="s3://"
14 | ARG MODEL_URL=""
15 | ENV MODEL_URL=${MODEL_URL}
16 | 
17 | ARG PIPELINE="ALL"
18 | ENV PIPELINE=${PIPELINE}
19 | 
20 | # Build-time credentials only (ARG, not ENV): needed by download.py during
21 | # the build, but deliberately not baked into the final image's environment.
22 | ARG AWS_ACCESS_KEY_ID
23 | ARG AWS_SECRET_ACCESS_KEY
24 | ARG AWS_DEFAULT_REGION
25 | ARG AWS_S3_ENDPOINT_URL
26 | ARG AWS_S3_DEFAULT_BUCKET
27 | # build.sh passes --build-arg HF_AUTH_TOKEN; build args are NOT inherited
28 | # from the base image and must be declared after FROM, otherwise Docker
29 | # drops it and download.py cannot fetch gated/private HuggingFace models.
30 | ARG HF_AUTH_TOKEN
31 | 
32 | ARG CHECKPOINT_URL=""
33 | ENV CHECKPOINT_URL=${CHECKPOINT_URL}
34 | ARG CHECKPOINT_CONFIG_URL=""
35 | ENV CHECKPOINT_CONFIG_URL=${CHECKPOINT_CONFIG_URL}
36 | #ARG RUNTIME_DOWNLOADS=1
37 | #ENV RUNTIME_DOWNLOADS=${RUNTIME_DOWNLOADS}
38 | ENV RUNTIME_DOWNLOADS=0
39 | 
40 | # Optional corporate-proxy support; the CA bundle is only set when a proxy
41 | # is configured (shell ":+" expansion).
42 | ARG http_proxy
43 | ARG https_proxy
44 | ARG REQUESTS_CA_BUNDLE=${http_proxy:+/usr/local/share/ca-certificates/squid-self-signed.crt}
45 | 
46 | # Bake the model weights into the image at build time (download.py comes
47 | # from the base image).
48 | RUN python3 download.py
49 | RUN pip install runpod==0.9.10 nest_asyncio
50 | 
51 | ARG SAFETENSORS_FAST_GPU=1
52 | ENV SAFETENSORS_FAST_GPU=${SAFETENSORS_FAST_GPU}
53 | 
54 | ADD handler.py .
55 | 
56 | # Uncomment this to perform a basic local test on run (then exit)
57 | # ADD test_input.json .
58 | 
59 | # CMD [ "python", "-u", "handler.py" ] # <-- doesn't honor SHELL
60 | CMD python -u handler.py # <-- does honor SHELL
61 | 
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 by Gadi Cohen
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # docker-diffusers-api-runpod
2 | 
3 | Copyright (c) 2022 by Gadi Cohen. MIT Licensed.
4 | 
5 | ## Quick Start
6 | 
7 | 1. Clone the repo
8 | 
9 | 2. Set any necessary environment variables and then build with e.g.
10 | 
11 | ```bash
12 | ./build.sh -t user/runpod:sd-v2-1-512 \
13 |     --build-arg MODEL_ID="stabilityai/stable-diffusion-2-1-base"
14 | ```
15 | 
16 | (or just use `docker build` as you see fit).
17 | 
18 | 3. Upload to your repository of choice, e.g.
19 | 
20 | ```bash
21 | $ docker push user/runpod:sd-v2-1-512
22 | ```
23 | 
24 | See https://forums.kiri.art/t/running-on-runpod-io/102?u=gadicc
25 | for deployment instructions.
26 | 
27 | ## More complete examples
28 | 
29 | Download Anything v3 from the `diffusers` branch/revision on HuggingFace.
30 | It is an `fp16` model and is already prebuilt on our own S3 storage
31 | in the default location.
32 | 33 | ```bash 34 | $ ./build.sh -t user/runpod:sd-v2-1-512 \ 35 | --build-arg MODEL_ID="Linaqruf/anything-v3.0" \ 36 | --build-arg MODEL_PRECISION="fp16" \ 37 | --build-arg MODEL_REVISION="diffusers" \ 38 | --build-arg MODEL_URL="s3://" 39 | ``` -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z "$1" ]; then 4 | echo "build.sh usage:" 5 | echo "" 6 | echo 'Set any env variables (see script) and then e.g.:' 7 | echo "" 8 | echo './build.sh -t user/runpod:sd-v2-1-512 \' 9 | echo ' --build-arg MODEL_ID="stabilityai/stable-diffusion-2-1-base"' 10 | else 11 | DOCKER_BUILDKIT=1 BUILDKIT_PROGRESS=plain \ 12 | docker build \ 13 | --build-arg http_proxy="$http_proxy" \ 14 | --build-arg https_proxy="$https_proxy" \ 15 | --build-arg HF_AUTH_TOKEN="$HF_AUTH_TOKEN" \ 16 | --build-arg AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ 17 | --build-arg AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ 18 | --build-arg AWS_DEFAULT_REGION="$AWS_DEFAULT_REGION" \ 19 | --build-arg AWS_S3_ENDPOINT_URL="$AWS_S3_ENDPOINT_URL" \ 20 | --build-arg AWS_S3_DEFAULT_BUCKET="$AWS_S3_DEFAULT_BUCKET" \ 21 | "$@" . 22 | fi 23 | -------------------------------------------------------------------------------- /handler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import runpod 4 | import app as user_src 5 | import traceback 6 | import json 7 | import asyncio 8 | import nest_asyncio 9 | 10 | nest_asyncio.apply() 11 | 12 | ## Load models into VRAM here so they can be warm between requests 13 | user_src.init() 14 | 15 | 16 | def handler(job): 17 | """ 18 | This is the handler function that will be called by the serverless. 
19 | """ 20 | print(type(job)) 21 | print(job) 22 | 23 | # do the things 24 | inputs = job["input"] 25 | print(type(inputs)) 26 | print(inputs) 27 | input = json.loads(json.dumps(inputs)) 28 | print(type(inputs)) 29 | print(inputs) 30 | 31 | loop = asyncio.get_event_loop() 32 | 33 | try: 34 | output = loop.run_until_complete(user_src.inference(input, None)) 35 | except Exception as err: 36 | output = { 37 | "$error": { 38 | "code": "APP_HANDLER_ERROR", 39 | "name": type(err).__name__, 40 | "message": str(err), 41 | "stack": traceback.format_exc(), 42 | } 43 | } 44 | 45 | return output 46 | 47 | # return the output that you want to be returned like pre-signed URLs to output artifacts 48 | # return "Hello World" 49 | 50 | 51 | runpod.serverless.start({"handler": handler}) 52 | -------------------------------------------------------------------------------- /test_input.json: -------------------------------------------------------------------------------- 1 | { 2 | "input": { 3 | "modelInputs": { 4 | "prompt": "realistic field of grass", 5 | "num_inference_steps": 20 6 | }, 7 | "callInputs": {} 8 | } 9 | } 10 | --------------------------------------------------------------------------------