├── .gitignore
├── Dockerfile
├── README.md
├── app.py
├── models
│   └── README.md
└── requirements.txt

/.gitignore:
--------------------------------------------------------------------------------
.vscode
*.onnx

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM nvidia/cuda:11.6.0-cudnn8-runtime-ubuntu20.04

# Install Python 3.10
RUN apt-get update && \
    apt-get install -y software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get update && \
    apt-get install -y python3.10 python3.10-dev python3.10-distutils curl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install pip
RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
    python3.10 get-pip.py && \
    rm get-pip.py

# Install Python packages
WORKDIR /app
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r requirements.txt

EXPOSE 6873

# Copy the app and the models directory separately. A single
# `COPY app.py models/ /app/` would flatten the *contents* of models/
# into /app/, but app.py looks for models under /app/models/.
COPY app.py /app/
COPY models/ /app/models/

CMD ["python3.10", "app.py"]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# OpenUTAU remote ONNX hosting

This repository hosts the ONNX models for DiffSinger OpenUTAU.
Put your `*.onnx` files in the `models` directory. After you start the server, their MD5 hashes are printed to the console.

## Usage

Set your `acoustic` in `dsconfig.yaml` to `http://${ADDRESS}/v1/${md5}/inference`.

To start the server, run `python3 app.py` in the root directory of this repository.
There is also a Docker image for this server, but it has not been tested yet.
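The server identifies each model by the MD5 hash of its file contents, so you can also compute the `${md5}` part of the URL locally. A minimal sketch, assuming your model sits at `models/acoustic.onnx` (a placeholder path):

```python
# Compute the MD5 hash the server uses to identify a model,
# i.e. the ${md5} part of the inference URL.
from hashlib import md5
from pathlib import Path

print(md5(Path('models/acoustic.onnx').read_bytes()).hexdigest())
```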

--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
from flask import Flask, request
from pathlib import Path
from hashlib import md5
import onnxruntime as ort
from loguru import logger
import numpy as np

app = Flask(__name__)
all_models = {}

# Load every ONNX model in the models directory, keyed by the MD5 hash of its bytes.
for model_path in Path('models').glob('*.onnx'):
    name = md5(model_path.read_bytes()).hexdigest()
    all_models[name] = ort.InferenceSession(
        str(model_path),
        providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
    )
    logger.info(f'Loaded model {model_path} as {name}')


@app.route('/v1/<model_name>/inference', methods=['POST'])
def inference(model_name):
    if model_name not in all_models:
        return dict(error=f'Model {model_name} not found'), 404

    model = all_models[model_name]
    payload = request.get_json(silent=True)
    if payload is None:
        return dict(error='Invalid payload'), 400

    # Build the feed dict: every model input must appear in the payload
    # as flattened data plus a target shape.
    inputs = {}
    for model_input in model.get_inputs():
        name = model_input.name

        if name not in payload:
            return dict(error=f'Input {name} not found'), 400

        tensor = payload[name]
        if 'data' not in tensor or 'shape' not in tensor:
            return dict(error=f'Input {name} is invalid'), 400

        if model_input.type == 'tensor(float)':
            np_type = np.float32
        elif model_input.type == 'tensor(int64)':
            np_type = np.int64
        else:
            return dict(error=f'Input {name} has unsupported type {model_input.type}'), 400

        inputs[name] = np.array(tensor['data'], dtype=np_type).reshape(tensor['shape'])

    outputs = model.run(None, inputs)

    # Only the first model output is returned, flattened row-major.
    return dict(
        type='float32',
        shape=outputs[0].shape,
        data=outputs[0].flatten().tolist()
    )


if __name__ == '__main__':
    # Flask's built-in server; debug should be disabled outside development.
    app.run(host='0.0.0.0', port=6873, debug=True, threaded=True)

--------------------------------------------------------------------------------
/models/README.md:
--------------------------------------------------------------------------------
PUT YOUR MODELS HERE

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
flask
onnxruntime-gpu
numpy
loguru
--------------------------------------------------------------------------------
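For reference, a minimal sketch of a client for the `/v1/<md5>/inference` endpoint defined in `app.py`. It assumes the `requests` package is installed; the host, model hash, input name, and shape below are placeholders, since the real input names and types depend on your ONNX model:

```python
# Hypothetical client; the URL hash, input name, and shape are placeholders.
import requests

url = 'http://localhost:6873/v1/0123456789abcdef0123456789abcdef/inference'

# app.py expects each model input as flattened values plus the target shape.
payload = {
    'example_input': {        # placeholder: use your model's actual input names
        'data': [0.0] * 10,   # flattened tensor values
        'shape': [1, 10],     # shape the server reshapes the data into
    },
}

response = requests.post(url, json=payload)
print(response.json())  # {'type': 'float32', 'shape': [...], 'data': [...]}
```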