├── .github
│ ├── ISSUE_TEMPLATE
│ │ └── feature_request.md
│ ├── create-api-issue.md
│ └── workflows
│   ├── python-api-adapter-transformers.yaml
│   ├── python-api-allennlp.yaml
│   ├── python-api-bertopic-cd.yaml
│   ├── python-api-bertopic.yaml
│   ├── python-api-diffusers-cd.yaml
│   ├── python-api-diffusers.yaml
│   ├── python-api-doctr.yaml
│   ├── python-api-espnet.yaml
│   ├── python-api-export-tasks.yaml
│   ├── python-api-fairseq-cd.yaml
│   ├── python-api-fairseq.yaml
│   ├── python-api-fastai.yaml
│   ├── python-api-fasttext-cd.yaml
│   ├── python-api-fasttext.yaml
│   ├── python-api-flair-cd.yaml
│   ├── python-api-flair.yaml
│   ├── python-api-k2-cd.yaml
│   ├── python-api-k2.yaml
│   ├── python-api-latent-image-cd.yaml
│   ├── python-api-latent-image.yaml
│   ├── python-api-mindspore-cd.yaml
│   ├── python-api-mindspore.yaml
│   ├── python-api-nemo-cd.yaml
│   ├── python-api-nemo.yaml
│   ├── python-api-open-clip-cd.yaml
│   ├── python-api-open-clip.yaml
│   ├── python-api-paddlenlp-cd.yaml
│   ├── python-api-paddlenlp.yaml
│   ├── python-api-peft-cd.yaml
│   ├── python-api-peft.yaml
│   ├── python-api-pyannote.yaml
│   ├── python-api-quality.yaml
│   ├── python-api-sentence-transformers-cd.yaml
│   ├── python-api-sentence-transformers.yaml
│   ├── python-api-setfit-cd.yaml
│   ├── python-api-setfit.yaml
│   ├── python-api-sklearn-cd.yaml
│   ├── python-api-sklearn.yaml
│   ├── python-api-spacy.yaml
│   ├── python-api-span_marker-cd.yaml
│   ├── python-api-span_marker.yaml
│   ├── python-api-speechbrain-cd.yaml
│   ├── python-api-speechbrain.yaml
│   ├── python-api-stanza.yaml
│   ├── python-api-tests.yaml
│   ├── python-api-timm-cd.yaml
│   └── python-api-timm.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── api_inference_community
│ ├── hub.py
│ ├── normalizers.py
│ ├── routes.py
│ └── validation.py
├── build.sh
├── build_docker.py
├── docker_images
│ ├── adapter_transformers
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── batch.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── question_answering.py
│ │ │   ├── summarization.py
│ │ │   ├── text_classification.py
│ │ │   ├── text_generation.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_question_answering.py
│ │   ├── test_api_summarization.py
│ │   ├── test_api_text_classification.py
│ │   ├── test_api_text_generation.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ ├── allennlp
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── question_answering.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_question_answering.py
│ │   └── test_docker_build.py
│ ├── asteroid
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── audio_source_separation.py
│ │ │   ├── audio_to_audio.py
│ │ │   └── base.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_audio_source_separation.py
│ │   ├── test_api_audio_to_audio.py
│ │   └── test_docker_build.py
│ ├── bertopic
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── text_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_text_classification.py
│ │   └── test_docker_build.py
│ ├── common
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── audio_classification.py
│ │ │   ├── audio_to_audio.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   ├── base.py
│ │ │   ├── conversational.py
│ │ │   ├── feature_extraction.py
│ │ │   ├── fill_mask.py
│ │ │   ├── image_classification.py
│ │ │   ├── image_to_image.py
│ │ │   ├── question_answering.py
│ │ │   ├── sentence_similarity.py
│ │ │   ├── speech_segmentation.py
│ │ │   ├── summarization.py
│ │ │   ├── tabular_classification_pipeline.py
│ │ │   ├── tabular_regression_pipeline.py
│ │ │   ├── text2text_generation.py
│ │ │   ├── text_classification.py
│ │ │   ├── text_to_image.py
│ │ │   ├── text_to_speech.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_audio_classification.py
│ │   ├── test_api_audio_to_audio.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   ├── test_api_feature_extraction.py
│ │   ├── test_api_image_classification.py
│ │   ├── test_api_image_to_image.py
│ │   ├── test_api_question_answering.py
│ │   ├── test_api_sentence_similarity.py
│ │   ├── test_api_speech_segmentation.py
│ │   ├── test_api_summarization.py
│ │   ├── test_api_tabular_classification.py
│ │   ├── test_api_tabular_regression.py
│ │   ├── test_api_text2text_generation.py
│ │   ├── test_api_text_classification.py
│ │   ├── test_api_text_to_image.py
│ │   ├── test_api_text_to_speech.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ ├── diffusers
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── healthchecks.py
│ │ │ ├── idle.py
│ │ │ ├── lora.py
│ │ │ ├── main.py
│ │ │ ├── offline.py
│ │ │ ├── pipelines
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ ├── image_to_image.py
│ │ │ │ └── text_to_image.py
│ │ │ ├── timing.py
│ │ │ └── validation.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_image_to_image.py
│ │   ├── test_api_text_to_image.py
│ │   └── test_docker_build.py
│ ├── doctr
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── object_detection.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ └── artefacts.jpg
│ │   ├── test_api.py
│ │   ├── test_api_object_detection.py
│ │   └── test_docker_build.py
│ ├── espnet
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   ├── base.py
│ │ │   └── text_to_speech.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   ├── test_api_text_to_speech.py
│ │   └── test_docker_build.py
│ ├── fairseq
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── audio_to_audio.py
│ │ │   ├── base.py
│ │ │   ├── text_to_speech.py
│ │ │   └── utils.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ ├── sample1_dual.ogg
│ │   │ └── sample2.flac
│ │   ├── test_api.py
│ │   ├── test_api_audio_to_audio.py
│ │   ├── test_api_text_to_speech.py
│ │   └── test_docker_build.py
│ ├── fastai
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── image_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── plane.jpg
│ │   │ └── plane2.jpg
│ │   ├── test_api.py
│ │   ├── test_api_image_classification.py
│ │   └── test_docker_build.py
│ ├── fasttext
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── feature_extraction.py
│ │ │   └── text_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_feature_extraction.py
│ │   ├── test_api_text_classification.py
│ │   └── test_docker_build.py
│ ├── flair
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ ├── k2
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── common.py
│ │ │ ├── decode.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   └── base.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   └── test_docker_build.py
│ ├── latent-to-image
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── healthchecks.py
│ │ │ ├── idle.py
│ │ │ ├── main.py
│ │ │ ├── offline.py
│ │ │ ├── pipelines
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ └── latent_to_image.py
│ │ │ ├── timing.py
│ │ │ └── validation.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_latent_to_image.py
│ │   └── test_docker_build.py
│ ├── mindspore
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── image_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── samples
│ │   │ ├── 0.jpg
│ │   │ └── 5.jpg
│ │   ├── test_api.py
│ │   ├── test_api_image_classification.py
│ │   └── test_docker_build.py
│ ├── nemo
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   └── base.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   └── test_docker_build.py
│ ├── open_clip
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── zero_shot_image_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── plane.jpg
│ │   │ └── plane2.jpg
│ │   ├── test_api.py
│ │   ├── test_api_zero_shot_image_classification.py
│ │   └── test_docker_build.py
│ ├── paddlenlp
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── conversational.py
│ │ │   ├── fill_mask.py
│ │ │   ├── summarization.py
│ │ │   └── zero_shot_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_conversational.py
│ │   ├── test_api_fill_mask.py
│ │   ├── test_api_summarization.py
│ │   ├── test_api_zero_shot_classification.py
│ │   └── test_docker_build.py
│ ├── peft
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── idle.py
│ │ │ ├── main.py
│ │ │ ├── pipelines
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base.py
│ │ │ │ └── text_generation.py
│ │ │ └── timing.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   └── test_api_text_generation.py
│ ├── pyannote_audio
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   └── base.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   └── test_docker_build.py
│ ├── sentence_transformers
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── feature_extraction.py
│ │ │   └── sentence_similarity.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_feature_extraction.py
│ │   ├── test_api_sentence_similarity.py
│ │   └── test_docker_build.py
│ ├── setfit
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── text_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_text_classification.py
│ │   └── test_docker_build.py
│ ├── sklearn
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── common.py
│ │ │   ├── tabular_classification.py
│ │ │   ├── tabular_regression.py
│ │ │   └── text_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ ├── run_app.sh
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── generators
│ │   │ ├── generate.py
│ │   │ ├── run.sh
│ │   │ ├── samples
│ │   │ │ ├── iris-1.0-input.json
│ │   │ │ ├── iris-hist_gradient_boosting-1.0-output.json
│ │   │ │ ├── iris-hist_gradient_boosting-latest-output.json
│ │   │ │ ├── iris-latest-input.json
│ │   │ │ ├── iris-logistic_regression-1.0-output.json
│ │   │ │ ├── iris-logistic_regression-latest-output.json
│ │   │ │ ├── tabularregression-1.0-input.json
│ │   │ │ ├── tabularregression-hist_gradient_boosting_regressor-1.0-output.json
│ │   │ │ ├── tabularregression-hist_gradient_boosting_regressor-latest-output.json
│ │   │ │ ├── tabularregression-latest-input.json
│ │   │ │ ├── tabularregression-linear_regression-1.0-output.json
│ │   │ │ ├── tabularregression-linear_regression-latest-output.json
│ │   │ │ ├── textclassification-1.0-input.json
│ │   │ │ ├── textclassification-hist_gradient_boosting-1.0-output.json
│ │   │ │ ├── textclassification-hist_gradient_boosting-latest-output.json
│ │   │ │ ├── textclassification-latest-input.json
│ │   │ │ ├── textclassification-logistic_regression-1.0-output.json
│ │   │ │ └── textclassification-logistic_regression-latest-output.json
│ │   │ ├── sklearn-1.0.yml
│ │   │ └── sklearn-latest.yml
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_tabular_classification.py
│ │   ├── test_api_tabular_regression.py
│ │   ├── test_api_text_classification.py
│ │   └── test_docker_build.py
│ ├── spacy
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   ├── sentence_similarity.py
│ │ │   ├── text_classification.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_sentence_similarity.py
│ │   ├── test_api_text_classification.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ ├── span_marker
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── test_api.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ ├── speechbrain
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── common.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── audio_classification.py
│ │ │   ├── audio_to_audio.py
│ │ │   ├── automatic_speech_recognition.py
│ │ │   ├── base.py
│ │ │   ├── text2text_generation.py
│ │ │   └── text_to_speech.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_audio_classification.py
│ │   ├── test_api_audio_to_audio.py
│ │   ├── test_api_automatic_speech_recognition.py
│ │   ├── test_api_text2text_generation.py
│ │   ├── test_api_text_to_speech.py
│ │   └── test_docker_build.py
│ ├── stanza
│ │ ├── Dockerfile
│ │ ├── app
│ │ │ ├── __init__.py
│ │ │ ├── batch.py
│ │ │ ├── main.py
│ │ │ └── pipelines
│ │ │   ├── __init__.py
│ │ │   ├── base.py
│ │ │   └── token_classification.py
│ │ ├── prestart.sh
│ │ ├── requirements.txt
│ │ └── tests
│ │   ├── __init__.py
│ │   ├── samples
│ │   │ ├── malformed.flac
│ │   │ ├── plane.jpg
│ │   │ ├── plane2.jpg
│ │   │ ├── sample1.flac
│ │   │ ├── sample1.webm
│ │   │ └── sample1_dual.ogg
│ │   ├── test_api.py
│ │   ├── test_api_token_classification.py
│ │   └── test_docker_build.py
│ └── timm
│   ├── Dockerfile
│   ├── app
│   │ ├── __init__.py
│   │ ├── main.py
│   │ └── pipelines
│   │   ├── __init__.py
│   │   ├── base.py
│   │   └── image_classification.py
│   ├── prestart.sh
│   ├── requirements.txt
│   └── tests
│     ├── __init__.py
│     ├── samples
│     │ ├── malformed.flac
│     │ ├── plane.jpg
│     │ ├── plane2.jpg
│     │ ├── sample1.flac
│     │ ├── sample1.webm
│     │ └── sample1_dual.ogg
│     ├── test_api.py
│     ├── test_api_image_classification.py
│     └── test_docker_build.py
├── manage.py
├── requirements.txt
├── scripts
│ └── export_tasks.py
├── setup.cfg
├── setup.py
└── tests
  ├── samples
  │ ├── malformed.flac
  │ ├── plane.jpg
  │ ├── plane2.jpg
  │ ├── sample1.flac
  │ ├── sample1.webm
  │ └── sample1_dual.ogg
  ├── test_audio.py
  ├── test_dockers.py
  ├── test_hub.py
  ├── test_image.py
  ├── test_nlp.py
  ├── test_normalizers.py
  └── test_routes.py
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
--------------------------------------------------------------------------------
/.github/create-api-issue.md:
--------------------------------------------------------------------------------
---
title: A new community image was pushed, need to update API
assignees: Narsil
labels: api-inference-community
---
{{ payload.sender.login }} just pushed new code:

https://github.com/huggingface/api-inference-community/commit/{{ env.GITHUB_SHA }}
--------------------------------------------------------------------------------
/.github/workflows/python-api-adapter-transformers.yaml:
--------------------------------------------------------------------------------
name: adapter-transformers-docker

on:
  pull_request:
    paths:
      - "docker_images/adapter_transformers/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_adapter_transformers
--------------------------------------------------------------------------------
/.github/workflows/python-api-allennlp.yaml:
--------------------------------------------------------------------------------
name: allennlp-docker

on:
  pull_request:
    paths:
      - "docker_images/allennlp/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_allennlp
--------------------------------------------------------------------------------
/.github/workflows/python-api-bertopic.yaml:
--------------------------------------------------------------------------------
name: bertopic-docker

on:
  pull_request:
    paths:
      - "docker_images/bertopic/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_bertopic
--------------------------------------------------------------------------------
/.github/workflows/python-api-diffusers.yaml:
--------------------------------------------------------------------------------
name: diffusers-docker

on:
  pull_request:
    paths:
      - "docker_images/diffusers/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_diffusers
--------------------------------------------------------------------------------
/.github/workflows/python-api-doctr.yaml:
--------------------------------------------------------------------------------
name: doctr-docker

on:
  pull_request:
    paths:
      - "docker_images/doctr/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_doctr
--------------------------------------------------------------------------------
/.github/workflows/python-api-espnet.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: espnet-docker

on:
  pull_request:
    paths:
      - "docker_images/espnet/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_espnet
--------------------------------------------------------------------------------
/.github/workflows/python-api-export-tasks.yaml:
--------------------------------------------------------------------------------
name: Export tasks
on:
  push:
    branches:
      - main
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8"]

    steps:
      - run: |
          sudo apt-get update

      - uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - run: python scripts/export_tasks.py
--------------------------------------------------------------------------------
/.github/workflows/python-api-fairseq.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: fairseq-docker

on:
  pull_request:
    paths:
      - "docker_images/fairseq/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_fairseq
--------------------------------------------------------------------------------
/.github/workflows/python-api-fastai.yaml:
--------------------------------------------------------------------------------
name: fastai-docker

on:
  pull_request:
    paths:
      - "docker_images/fastai/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_fastai
--------------------------------------------------------------------------------
/.github/workflows/python-api-fasttext.yaml:
--------------------------------------------------------------------------------
name: fasttext-docker

on:
  pull_request:
    paths:
      - "docker_images/fasttext/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_fasttext
--------------------------------------------------------------------------------
/.github/workflows/python-api-flair.yaml:
--------------------------------------------------------------------------------
name: flair-docker

on:
  pull_request:
    paths:
      - "docker_images/flair/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_flair
--------------------------------------------------------------------------------
/.github/workflows/python-api-k2.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: k2-docker

on:
  pull_request:
    paths:
      - "docker_images/k2/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_k2
--------------------------------------------------------------------------------
/.github/workflows/python-api-latent-image.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: latent-to-image-docker

on:
  pull_request:
    paths:
      - "docker_images/latent-to-image/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_latent_to_image
--------------------------------------------------------------------------------
/.github/workflows/python-api-mindspore.yaml:
--------------------------------------------------------------------------------
name: mindspore-docker

on:
  workflow_dispatch:
  pull_request:
    paths:
      - "docker_images/mindspore/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_mindspore
--------------------------------------------------------------------------------
/.github/workflows/python-api-nemo.yaml:
--------------------------------------------------------------------------------
name: nemo-docker

on:
  pull_request:
    paths:
      - "docker_images/nemo/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_nemo
--------------------------------------------------------------------------------
/.github/workflows/python-api-open-clip.yaml:
--------------------------------------------------------------------------------
name: open_clip-docker

on:
  pull_request:
    paths:
      - "docker_images/open_clip/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_open_clip
--------------------------------------------------------------------------------
/.github/workflows/python-api-paddlenlp.yaml:
--------------------------------------------------------------------------------
name: paddlenlp-docker

on:
  workflow_dispatch:
  pull_request:
    paths:
      - "docker_images/paddlenlp/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_paddlenlp
--------------------------------------------------------------------------------
/.github/workflows/python-api-peft.yaml:
--------------------------------------------------------------------------------
name: peft-docker

on:
  pull_request:
    paths:
      - "docker_images/peft/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_peft
--------------------------------------------------------------------------------
/.github/workflows/python-api-pyannote.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: pyannote-docker

on:
  pull_request:
    paths:
      - "docker_images/pyannote_audio/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_pyannote_audio
--------------------------------------------------------------------------------
/.github/workflows/python-api-quality.yaml:
--------------------------------------------------------------------------------
name: Inference API code quality

on:
  pull_request:
    paths:
      - "**"

jobs:
  run_tests:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          pip install -e .[quality]
      - name: Make quality
        run: |
          make quality
--------------------------------------------------------------------------------
/.github/workflows/python-api-sentence-transformers.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: sentence-transformers-docker

on:
  pull_request:
    paths:
      - "docker_images/sentence_transformers/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_sentence_transformers
--------------------------------------------------------------------------------
/.github/workflows/python-api-setfit.yaml:
--------------------------------------------------------------------------------
name: setfit-docker

on:
  pull_request:
    paths:
      - "docker_images/setfit/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_setfit
--------------------------------------------------------------------------------
/.github/workflows/python-api-sklearn.yaml:
--------------------------------------------------------------------------------
name: sklearn-docker

on:
  pull_request:
    paths:
      - "docker_images/sklearn/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_sklearn
--------------------------------------------------------------------------------
/.github/workflows/python-api-spacy.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: spacy-docker

on:
  pull_request:
    paths:
      - "docker_images/spacy/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_spacy
--------------------------------------------------------------------------------
/.github/workflows/python-api-span_marker.yaml:
--------------------------------------------------------------------------------
name: span_marker-docker

on:
  pull_request:
    paths:
      - "docker_images/span_marker/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_span_marker
--------------------------------------------------------------------------------
/.github/workflows/python-api-speechbrain.yaml:
--------------------------------------------------------------------------------
# TODO: Merge this with AllenNLP to have a single workflow for all docker images.
name: speechbrain-docker

on:
  pull_request:
    paths:
      - "docker_images/speechbrain/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_speechbrain
--------------------------------------------------------------------------------
/.github/workflows/python-api-stanza.yaml:
--------------------------------------------------------------------------------
name: stanza-docker

on:
  pull_request:
    paths:
      - "docker_images/stanza/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_stanza
--------------------------------------------------------------------------------
/.github/workflows/python-api-tests.yaml:
--------------------------------------------------------------------------------
name: Inference API Python-tests
on:
  pull_request:
    paths:
      - "**"
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8"]

    steps:
      - run: |
          sudo apt-get update
          sudo apt-get install ffmpeg

      - uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install -e .[test]
      - run: make test
        env: # Or as an environment variable
          API_TOKEN: ${{ secrets.USER_API_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/python-api-timm.yaml:
--------------------------------------------------------------------------------
name: timm-docker

on:
  workflow_dispatch:
  pull_request:
    paths:
      - "docker_images/timm/**"
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: "3.8"
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install pytest pillow httpx
          pip install -e .
      - run: RUN_DOCKER_TESTS=1 pytest -sv tests/test_dockers.py::DockerImageTests::test_timm
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
__pycache__/
*.py[cod]

# Speechbrain artefact.
model_checkpoints
pretrained_models
pretrained_checkpoints

*.egg-info/
build/
dist/
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include README.md requirements.txt
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: quality style test


check_dirs := api_inference_community tests docker_images



quality:
	black --check $(check_dirs)
	isort --check-only $(check_dirs)
	flake8 $(check_dirs)

style:
	black $(check_dirs)
	isort $(check_dirs)


test:
	pytest -sv --log-level=DEBUG tests/
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
pip install -U pip build twine
python -m build
python -m twine upload dist/*
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/app/__init__.py
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/app/batch.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import os

from api_inference_community.batch import batch
from app.main import get_pipeline


DATASET_NAME = os.getenv("DATASET_NAME")
DATASET_CONFIG = os.getenv("DATASET_CONFIG", None)
DATASET_SPLIT = os.getenv("DATASET_SPLIT")
DATASET_COLUMN = os.getenv("DATASET_COLUMN")
USE_GPU = os.getenv("USE_GPU", "0").lower() in {"1", "true"}
TOKEN = os.getenv("TOKEN")
REPO_ID = os.getenv("REPO_ID")

if __name__ == "__main__":
    batch(
        dataset_name=DATASET_NAME,
        dataset_config=DATASET_CONFIG,
        dataset_split=DATASET_SPLIT,
        dataset_column=DATASET_COLUMN,
        token=TOKEN,
        repo_id=REPO_ID,
        use_gpu=USE_GPU,
        pipeline=get_pipeline(),
    )
--------------------------------------------------------------------------------
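The batch script above is configured entirely through environment variables. A minimal sketch of a local invocation (illustrative only; the dataset, token, and repo values below are placeholders, not values taken from this repository):

import os
import subprocess

# Hypothetical values; DATASET_NAME, DATASET_SPLIT, DATASET_COLUMN, TOKEN and
# REPO_ID are the variables read at the top of app/batch.py above.
env = dict(
    os.environ,
    DATASET_NAME="imdb",
    DATASET_SPLIT="test",
    DATASET_COLUMN="text",
    TOKEN="hf_xxx",           # a Hugging Face access token (placeholder)
    REPO_ID="user/my-model",  # placeholder repo id
)
# Run from inside docker_images/adapter_transformers/.
subprocess.run(["python", "app/batch.py"], env=env, check=True)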
/docker_images/adapter_transformers/app/pipelines/__init__.py:
--------------------------------------------------------------------------------
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.question_answering import QuestionAnsweringPipeline
from app.pipelines.summarization import SummarizationPipeline
from app.pipelines.text_classification import TextClassificationPipeline
from app.pipelines.text_generation import TextGenerationPipeline
from app.pipelines.token_classification import TokenClassificationPipeline
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/app/pipelines/summarization.py:
--------------------------------------------------------------------------------
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import SummarizationPipeline as TransformersSummarizationPipeline


class SummarizationPipeline(Pipeline):
    def __init__(self, adapter_id: str):
        self.pipeline = self._load_pipeline_instance(
            TransformersSummarizationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`): a string to be summarized
        Return:
            A :obj:`list` of :obj:`dict` in the form of {"summary_text": "The string after summarization"}
        """
        return self.pipeline(inputs, truncation=True)
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/app/pipelines/text_classification.py:
--------------------------------------------------------------------------------
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import (
    TextClassificationPipeline as TransformersClassificationPipeline,
)


class TextClassificationPipeline(Pipeline):
    def __init__(
        self,
        adapter_id: str,
    ):
        self.pipeline = self._load_pipeline_instance(
            TransformersClassificationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, float]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`. The object returned should be like [{"label": "LABEL", "score": 0.9939950108528137}] containing:
            - "label": A string representing what the label/class is. There can be multiple labels.
            - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        try:
            return self.pipeline(inputs, return_all_scores=True)
        except Exception as e:
            raise ValueError(e)
--------------------------------------------------------------------------------
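For illustration, a pipeline like the one above could be exercised as follows once a suitable adapter is available (a sketch only; the adapter id is a placeholder and loading goes through the inherited `_load_pipeline_instance` helper):

# Sketch only -- "my-org/my-sst2-adapter" is a placeholder adapter id.
pipe = TextClassificationPipeline("my-org/my-sst2-adapter")
result = pipe("I love this movie!")
# With return_all_scores=True, transformers returns one list of
# {"label": ..., "score": ...} dicts per input, e.g.:
# [[{"label": "positive", "score": 0.99}, {"label": "negative", "score": 0.01}]]
print(result)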
/docker_images/adapter_transformers/app/pipelines/text_generation.py:
--------------------------------------------------------------------------------
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import TextGenerationPipeline as TransformersTextGenerationPipeline


class TextGenerationPipeline(Pipeline):
    def __init__(self, adapter_id: str):
        self.pipeline = self._load_pipeline_instance(
            TransformersTextGenerationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`):
                The input text
        Return:
            A :obj:`list`. The list contains a single item that is a dict {"text": the model output}
        """
        return self.pipeline(inputs, truncation=True)
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/prestart.sh:
--------------------------------------------------------------------------------
python app/main.py
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/requirements.txt:
--------------------------------------------------------------------------------
starlette==0.37.2
api-inference-community==0.0.32
torch==2.3.0
adapters==0.2.1
huggingface_hub==0.23.0
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/__init__.py
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/malformed.flac:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/plane.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/samples/plane.jpg
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/plane2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/samples/plane2.jpg
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/sample1.flac:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/samples/sample1.flac
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/sample1.webm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/samples/sample1.webm
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/samples/sample1_dual.ogg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/adapter_transformers/tests/samples/sample1_dual.ogg
--------------------------------------------------------------------------------
/docker_images/adapter_transformers/tests/test_docker_build.py:
--------------------------------------------------------------------------------
import os
import subprocess
from unittest import TestCase


class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
--------------------------------------------------------------------------------
/docker_images/allennlp/app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/app/__init__.py
--------------------------------------------------------------------------------
/docker_images/allennlp/app/pipelines/__init__.py:
--------------------------------------------------------------------------------
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.question_answering import QuestionAnsweringPipeline
--------------------------------------------------------------------------------
/docker_images/allennlp/app/pipelines/base.py:
--------------------------------------------------------------------------------
from abc import ABC, abstractmethod
from typing import Any, Optional


class Pipeline(ABC):
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
--------------------------------------------------------------------------------
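This abstract base class is the entire contract a framework integration has to satisfy. A minimal concrete subclass might look like the following (a toy sketch for illustration; no such pipeline exists in this repository):

from typing import Any

from app.pipelines.base import Pipeline


class EchoPipeline(Pipeline):
    """Toy pipeline that simply echoes its input back."""

    def __init__(self, model_id: str):
        # A real pipeline would download and load model weights here.
        self.model_id = model_id

    def __call__(self, inputs: Any) -> Any:
        return {"model": self.model_id, "inputs": inputs}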
5 | allennlp_models>=2.5.0,<3.0.0 6 | api-inference-community==0.0.23 7 | huggingface_hub==0.5.1 8 | -------------------------------------------------------------------------------- /docker_images/allennlp/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/allennlp/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/allennlp/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/allennlp/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/asteroid/app/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/app/__init__.py -------------------------------------------------------------------------------- /docker_images/asteroid/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.audio_source_separation import AudioSourceSeparationPipeline 4 | from app.pipelines.audio_to_audio import AudioToAudioPipeline 5 | -------------------------------------------------------------------------------- /docker_images/asteroid/app/pipelines/audio_source_separation.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | from app.pipelines import Pipeline 5 | from asteroid import separate 6 | from asteroid.models import BaseModel 7 | 8 | 9 | class AudioSourceSeparationPipeline(Pipeline): 10 | def __init__(self, model_id: str): 11 | self.model = BaseModel.from_pretrained(model_id) 12 | self.sampling_rate = self.model.sample_rate 13 | 14 | def __call__(self, inputs: np.array) -> Tuple[np.array, int]: 15 | """ 16 | Args: 17 | inputs (:obj:`np.array`): 18 | The raw waveform of audio received. By default at 16KHz. 19 | Check `app.validation` if a different sample rate is required 20 | or if it depends on the model 21 | Return: 22 | A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int. 23 | """ 24 | # Pass wav as [batch, n_chan, time]; here: [1, 1, time] 25 | separated = separate.numpy_separate(self.model, inputs.reshape((1, 1, -1))) 26 | # FIXME: how to deal with multiple sources? 
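# numpy_separate returns estimates shaped (batch, n_src, time); as a
# stopgap for the FIXME above, only the first estimated source of the
# single batch item is returned, along with the model's sampling rate.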
27 | return separated[0, 0], int(self.model.sample_rate) 28 | -------------------------------------------------------------------------------- /docker_images/asteroid/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/asteroid/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/asteroid/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | huggingface_hub==0.5.1 4 | asteroid==0.4.4 5 | -------------------------------------------------------------------------------- /docker_images/asteroid/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/asteroid/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/asteroid/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/asteroid/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/bertopic/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="Daniel van Strien " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using workers is a HUGE 13 | # slowdown because of the fork and the GIL with Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary when running docker in a GPU environment. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose, 22 | # making the TIMEOUT variable defined by uvicorn impossible to use correctly. 23 | # We're renaming it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will 27 | # keep killing workers before they finish loading.
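# For example (hypothetical value), a model that needs ten minutes to load
# could be launched with:
#   docker run -e UVICORN_TIMEOUT=600 <your-image>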
28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/bertopic/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/bertopic/app/__init__.py -------------------------------------------------------------------------------- /docker_images/bertopic/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | from app.pipelines.text_classification import TextClassificationPipeline 3 | -------------------------------------------------------------------------------- /docker_images/bertopic/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/bertopic/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/bertopic/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.25 3 | huggingface_hub==0.14.0 4 | bertopic==0.15.0 5 | safetensors==0.3.1 6 | -------------------------------------------------------------------------------- /docker_images/bertopic/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/bertopic/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/bertopic/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/common/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="me " 3 | 4 | # Add any system 
dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using workers is a HUGE 13 | # slowdown because of the fork and the GIL with Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary when running docker in a GPU environment. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose, 22 | # making the TIMEOUT variable defined by uvicorn impossible to use correctly. 23 | # We're renaming it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will 27 | # keep killing workers before they finish loading. 28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/common/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/app/__init__.py -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.audio_classification import AudioClassificationPipeline 4 | from app.pipelines.audio_to_audio import AudioToAudioPipeline 5 | from app.pipelines.automatic_speech_recognition import ( 6 | AutomaticSpeechRecognitionPipeline, 7 | ) 8 | from app.pipelines.feature_extraction import FeatureExtractionPipeline 9 | from app.pipelines.image_classification import ImageClassificationPipeline 10 | from app.pipelines.question_answering import QuestionAnsweringPipeline 11 | from app.pipelines.sentence_similarity import SentenceSimilarityPipeline 12 | from app.pipelines.speech_segmentation import SpeechSegmentationPipeline 13 | from app.pipelines.tabular_classification_pipeline import TabularClassificationPipeline 14 | from app.pipelines.tabular_regression_pipeline import TabularRegressionPipeline 15 | from app.pipelines.text_to_speech import TextToSpeechPipeline 16 | from app.pipelines.token_classification import TokenClassificationPipeline 17 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | --------------------------------------------------------------------------------
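Since every library image re-implements this same `Pipeline` contract, a minimal sketch of a concrete subclass may help; `EchoPipeline` and its behavior are hypothetical illustrations, not code shipped in this repository:

from typing import Any

from app.pipelines import Pipeline


class EchoPipeline(Pipeline):
    def __init__(self, model_id: str):
        # All heavy, one-time work (downloading weights, building a
        # tokenizer, ...) belongs here: the app instantiates the pipeline once.
        self.model_id = model_id

    def __call__(self, inputs: Any) -> Any:
        # Called once per request with the decoded payload.
        return {"model_id": self.model_id, "inputs": inputs}

This split is why the templates below keep repeating "This function is only called once, so do all the heavy processing I/O here": `__init__` runs at worker startup, `__call__` on every request.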
/docker_images/common/app/pipelines/feature_extraction.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class FeatureExtractionPipeline(Pipeline): 7 | def __init__( 8 | self, 9 | model_id: str, 10 | ): 11 | # IMPLEMENT_THIS 12 | # Preload all the elements you are going to need at inference. 13 | # For instance your model, processors, tokenizer that might be needed. 14 | # This function is only called once, so do all the heavy processing I/O here 15 | raise NotImplementedError( 16 | "Please implement FeatureExtractionPipeline __init__ function" 17 | ) 18 | 19 | def __call__(self, inputs: str) -> List[float]: 20 | """ 21 | Args: 22 | inputs (:obj:`str`): 23 | a string to get the features of. 24 | Return: 25 | A :obj:`list` of floats: The features computed by the model. 26 | """ 27 | # IMPLEMENT_THIS 28 | raise NotImplementedError( 29 | "Please implement FeatureExtractionPipeline __call__ function" 30 | ) 31 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/image_to_image.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Optional 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | if TYPE_CHECKING: 7 | from PIL import Image 8 | 9 | 10 | class ImageToImagePipeline(Pipeline): 11 | def __init__(self, model_id: str): 12 | # IMPLEMENT_THIS 13 | # Preload all the elements you are going to need for inference. 14 | # For instance your model, processors, tokenizer that might be needed. 15 | # This function is only called once, so do all the heavy processing I/O here 16 | raise NotImplementedError( 17 | "Please implement ImageToImagePipeline.__init__ function" 18 | ) 19 | 20 | def __call__(self, image: "Image.Image", prompt: Optional[str] = "") -> "Image.Image": 21 | """ 22 | Args: 23 | image (:obj:`PIL.Image.Image`): 24 | a condition image 25 | prompt (:obj:`str`, *optional*): 26 | a string containing some text 27 | Return: 28 | A :obj:`PIL.Image.Image` containing the raw output image. 29 | """ 30 | # IMPLEMENT_THIS 31 | raise NotImplementedError( 32 | "Please implement ImageToImagePipeline.__call__ function" 33 | ) 34 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/summarization.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class SummarizationPipeline(Pipeline): 7 | def __init__(self, model_id: str): 8 | # IMPLEMENT_THIS 9 | # Preload all the elements you are going to need at inference. 10 | # For instance your model, processors, tokenizer that might be needed.
11 | # This function is only called once, so do all the heavy processing I/O here 12 | raise NotImplementedError( 13 | "Please implement SummarizationPipeline __init__ function" 14 | ) 15 | 16 | def __call__(self, inputs: str) -> List[Dict[str, str]]: 17 | """ 18 | Args: 19 | inputs (:obj:`str`): a string to be summarized 20 | Return: 21 | A :obj:`list` of :obj:`dict` in the form of {"summary_text": "The string after summarization"} 22 | """ 23 | # IMPLEMENT_THIS 24 | raise NotImplementedError( 25 | "Please implement SummarizationPipeline __call__ function" 26 | ) 27 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/tabular_classification_pipeline.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class TabularClassificationPipeline(Pipeline): 7 | def __init__(self, model_id: str): 8 | # IMPLEMENT_THIS 9 | # Preload all the elements you are going to need at inference. 10 | # For instance your model, processors, tokenizer that might be needed. 11 | # This function is only called once, so do all the heavy processing I/O here 12 | raise NotImplementedError( 13 | "Please implement TabularClassificationPipeline __init__ function" 14 | ) 15 | 16 | def __call__( 17 | self, inputs: Dict[str, Dict[str, List[Union[int, str, float]]]] 18 | ) -> List[Union[int, str, float]]: 19 | """ 20 | Args: 21 | inputs (:obj:`dict`): 22 | a dictionary containing a key 'data' mapping to a dict in which 23 | the values represent each column. 24 | Return: 25 | A :obj:`list` of int, str, or float: The classification output for each row. 26 | """ 27 | # IMPLEMENT_THIS 28 | raise NotImplementedError( 29 | "Please implement TabularClassificationPipeline __call__ function" 30 | ) 31 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/tabular_regression_pipeline.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class TabularRegressionPipeline(Pipeline): 7 | def __init__(self, model_id: str): 8 | # IMPLEMENT_THIS 9 | # Preload all the elements you are going to need at inference. 10 | # For instance your model, processors, tokenizer that might be needed. 11 | # This function is only called once, so do all the heavy processing I/O here 12 | raise NotImplementedError( 13 | "Please implement TabularRegressionPipeline __init__ function" 14 | ) 15 | 16 | def __call__( 17 | self, inputs: Dict[str, Dict[str, List[Union[int, str, float]]]] 18 | ) -> List[float]: 19 | """ 20 | Args: 21 | inputs (:obj:`dict`): 22 | a dictionary containing a key 'data' mapping to a dict in which 23 | the values represent each column. 24 | Return: 25 | A :obj:`list` of float: The regression output for each row.
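Example (hypothetical columns): {"data": {"width": [1.0, 2.5], "height": [0.7, 1.4]}}
describes two rows with two columns each, so the output would be a list of two floats, one per row.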
26 | """ 27 | # IMPLEMENT_THIS 28 | raise NotImplementedError( 29 | "Please implement TabularRegressionPipeline __call__ function" 30 | ) 31 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/text2text_generation.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class TextToTextPipeline(Pipeline): 7 | def __init__(self, model_id: str): 8 | # IMPLEMENT_THIS 9 | # Preload all the elements you are going to need at inference. 10 | # For instance your model, processors, tokenizer that might be needed. 11 | # This function is only called once, so do all the heavy processing I/O here 12 | raise NotImplementedError( 13 | "Please implement TextToTextPipeline __init__ function" 14 | ) 15 | 16 | def __call__(self, inputs: str) -> List[Dict[str, str]]: 17 | """ 18 | Args: 19 | inputs (:obj:`str`): 20 | The input text 21 | Return: 22 | A :obj:`list`: The list contains a single item that is a dict {"text": the model output} 23 | """ 24 | # IMPLEMENT_THIS 25 | raise NotImplementedError( 26 | "Please implement TextToTextPipeline __call__ function" 27 | ) 28 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/text_to_image.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | if TYPE_CHECKING: 7 | from PIL import Image 8 | 9 | 10 | class TextToImagePipeline(Pipeline): 11 | def __init__(self, model_id: str): 12 | # IMPLEMENT_THIS 13 | # Preload all the elements you are going to need for inference. 14 | # For instance your model, processors, tokenizer that might be needed. 15 | # This function is only called once, so do all the heavy processing I/O here 16 | raise NotImplementedError( 17 | "Please implement TextToImagePipeline.__init__ function" 18 | ) 19 | 20 | def __call__(self, inputs: str) -> "Image.Image": 21 | """ 22 | Args: 23 | inputs (:obj:`str`): 24 | a string containing some text 25 | Return: 26 | A :obj:`PIL.Image.Image` containing the raw output image. 27 | """ 28 | # IMPLEMENT_THIS 29 | raise NotImplementedError( 30 | "Please implement TextToImagePipeline.__call__ function" 31 | ) 32 | -------------------------------------------------------------------------------- /docker_images/common/app/pipelines/text_to_speech.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | from app.pipelines import Pipeline 5 | 6 | 7 | class TextToSpeechPipeline(Pipeline): 8 | def __init__(self, model_id: str): 9 | # IMPLEMENT_THIS 10 | # Preload all the elements you are going to need at inference. 11 | # For instance your model, processors, tokenizer that might be needed. 12 | # This function is only called once, so do all the heavy processing I/O here 13 | raise NotImplementedError( 14 | "Please implement TextToSpeechPipeline __init__ function" 15 | ) 16 | 17 | def __call__(self, inputs: str) -> Tuple[np.array, int]: 18 | """ 19 | Args: 20 | inputs (:obj:`str`): 21 | The text to generate audio from 22 | Return: 23 | A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
24 | """ 25 | # IMPLEMENT_THIS 26 | raise NotImplementedError( 27 | "Please implement TextToSpeechPipeline __call__ function" 28 | ) 29 | -------------------------------------------------------------------------------- /docker_images/common/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/common/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | huggingface_hub==0.11.0 4 | -------------------------------------------------------------------------------- /docker_images/common/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/common/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/common/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/common/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/common/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/common/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/common/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/common/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/common/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def 
__enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/diffusers/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/diffusers/app/__init__.py -------------------------------------------------------------------------------- /docker_images/diffusers/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.image_to_image import ImageToImagePipeline 4 | from app.pipelines.text_to_image import TextToImagePipeline 5 | -------------------------------------------------------------------------------- /docker_images/diffusers/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/diffusers/app/timing.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from functools import wraps 3 | from time import time 4 | 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def timing(f): 10 | @wraps(f) 11 | def inner(*args, **kwargs): 12 | start = time() 13 | try: 14 | ret = f(*args, **kwargs) 15 | finally: 16 | end = time() 17 | logger.debug("Func: %r took: %.2f sec to execute", f.__name__, end - start) 18 | return ret 19 | 20 | return inner 21 | -------------------------------------------------------------------------------- /docker_images/diffusers/app/validation.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | STR_TO_BOOL = re.compile(r"^\s*(?:true|yes|1)\s*$", re.IGNORECASE) 5 | 6 | 7 | def str_to_bool(s): 8 | return STR_TO_BOOL.match(str(s)) 9 | -------------------------------------------------------------------------------- /docker_images/diffusers/prestart.sh: -------------------------------------------------------------------------------- 1 | echo "Prestart start at " $(date) 2 | 3 | METRICS_ENABLED=${METRICS_ENABLED:-"0"} 4 | 5 | if [ "$METRICS_ENABLED" = "1" ];then 6 | echo "Spawning metrics server" 7 | gunicorn -k "uvicorn.workers.UvicornWorker" --bind :${METRICS_PORT:-9400} "app.healthchecks:app" & 8 | pid=$!
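# ($! expands to the PID of the most recently backgrounded process,
# i.e. the gunicorn metrics server launched above)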
9 | echo "Metrics server pid: $pid" 10 | fi 11 | 12 | echo "Prestart done at " $(date) 13 | -------------------------------------------------------------------------------- /docker_images/diffusers/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.36 3 | # to be replaced with diffusers 0.31.0 as soon as released 4 | git+https://github.com/huggingface/diffusers.git@0f079b932d4382ad6675593f9a140b2a74c8cfb4 5 | transformers==4.41.2 6 | accelerate==0.31.0 7 | hf_transfer==0.1.3 8 | pydantic>=2 9 | ftfy==6.1.1 10 | sentencepiece==0.1.97 11 | scipy==1.10.0 12 | torch==2.0.1 13 | torchvision==0.15.2 14 | torchaudio==2.0.2 15 | invisible-watermark>=0.2.0 16 | uvicorn>=0.23.2 17 | gunicorn>=21.2.0 18 | psutil>=5.9.5 19 | aiohttp>=3.8.5 20 | peft==0.11.1 21 | protobuf==5.27.1 22 | -------------------------------------------------------------------------------- /docker_images/diffusers/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/diffusers/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/diffusers/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/doctr/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/doctr/app/__init__.py -------------------------------------------------------------------------------- /docker_images/doctr/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.object_detection import ObjectDetectionPipeline 4 | -------------------------------------------------------------------------------- /docker_images/doctr/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- 
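Stepping back for a moment, here is a minimal, hypothetical usage sketch for the `timing` decorator from docker_images/diffusers/app/timing.py above (`run_inference` is a stand-in, not a function from this repository):

import logging

from app.timing import timing  # assumes the diffusers app package is importable

logging.basicConfig(level=logging.DEBUG)


@timing
def run_inference(prompt: str) -> str:
    # Stand-in for a real pipeline call.
    return prompt.upper()


run_inference("hello")  # logs: Func: 'run_inference' took: 0.00 sec to execute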
/docker_images/doctr/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/doctr/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | python-doctr[torch]==0.5.1 4 | huggingface_hub==0.5.1 5 | -------------------------------------------------------------------------------- /docker_images/doctr/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/doctr/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/doctr/tests/samples/artefacts.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/doctr/tests/samples/artefacts.jpg -------------------------------------------------------------------------------- /docker_images/doctr/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/espnet/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/app/__init__.py -------------------------------------------------------------------------------- /docker_images/espnet/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.automatic_speech_recognition import ( 4 | AutomaticSpeechRecognitionPipeline, 5 | ) 6 | from app.pipelines.text_to_speech import TextToSpeechPipeline 7 | -------------------------------------------------------------------------------- /docker_images/espnet/app/pipelines/automatic_speech_recognition.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import numpy as np 4 | from app.pipelines import Pipeline 5 | from espnet2.bin.asr_inference import Speech2Text 6 | 7 | 8 | class AutomaticSpeechRecognitionPipeline(Pipeline): 9 | def __init__(self, model_id: str): 10 | self.model = Speech2Text.from_pretrained(model_id, device="cpu", beam_size=1) 11 | self.sampling_rate = 16000 12 | 13 | def __call__(self, inputs: np.array) -> Dict[str, str]: 14 | """ 15 | Args: 16 | inputs 
(:obj:`np.array`): 17 | The raw waveform of audio received. By default at 16KHz. 18 | Check `app.validation` if a different sample rate is required 19 | or if it depends on the model 20 | Return: 21 | A :obj:`dict`: The returned object should look like {"text": "XXX"}, containing 22 | the text detected in the input audio 23 | """ 24 | outputs = self.model(inputs) 25 | text, *_ = outputs[0] 26 | return {"text": text} 27 | -------------------------------------------------------------------------------- /docker_images/espnet/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/espnet/app/pipelines/text_to_speech.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | from app.pipelines import Pipeline 5 | from espnet2.bin.tts_inference import Text2Speech 6 | 7 | 8 | class TextToSpeechPipeline(Pipeline): 9 | def __init__(self, model_id: str): 10 | self.model = Text2Speech.from_pretrained(model_id, device="cpu") 11 | 12 | if hasattr(self.model, "fs"): 13 | self.sampling_rate = self.model.fs 14 | else: 15 | # 16000 by default if not specified 16 | self.sampling_rate = 16000 17 | 18 | def __call__(self, inputs: str) -> Tuple[np.array, int]: 19 | """ 20 | Args: 21 | inputs (:obj:`str`): 22 | The text to generate audio from 23 | Return: 24 | A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
25 | """ 26 | outputs = self.model(inputs) 27 | speech = outputs["wav"] 28 | return speech.numpy(), self.sampling_rate 29 | -------------------------------------------------------------------------------- /docker_images/espnet/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/espnet/requirements.txt: -------------------------------------------------------------------------------- 1 | api-inference-community==0.0.32 2 | huggingface_hub==0.18.0 3 | espnet==202310 4 | torch<2.0.1 5 | torchaudio 6 | torch_optimizer 7 | espnet_model_zoo==0.1.7 8 | -------------------------------------------------------------------------------- /docker_images/espnet/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/espnet/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/espnet/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/espnet/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | 
self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/fairseq/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/app/__init__.py -------------------------------------------------------------------------------- /docker_images/fairseq/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.audio_to_audio import SpeechToSpeechPipeline 4 | from app.pipelines.text_to_speech import TextToSpeechPipeline 5 | -------------------------------------------------------------------------------- /docker_images/fairseq/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/fairseq/app/pipelines/utils.py: -------------------------------------------------------------------------------- 1 | ARG_OVERRIDES_MAP = { 2 | "facebook/xm_transformer_s2ut_800m-es-en-st-asr-bt_h1_2022": { 3 | "config_yaml": "config.yaml", 4 | "task": "speech_to_text", 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /docker_images/fairseq/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/fairseq/requirements.txt: -------------------------------------------------------------------------------- 1 | api-inference-community==0.0.23 2 | g2p_en==2.1.0 3 | g2pc==0.9.9.3 4 | phonemizer==2.2.1 5 | librosa==0.8.1 6 | hanziconv==0.3.2 7 | sentencepiece==0.1.96 8 | # Dummy comment to trigger automatic deploy. 
9 | git+https://github.com/facebookresearch/fairseq.git@d47119871c2ac9a0a0aa2904dd8cfc1929b113d9#egg=fairseq 10 | huggingface_hub==0.5.1 11 | -------------------------------------------------------------------------------- /docker_images/fairseq/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/fairseq/tests/samples/sample2.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fairseq/tests/samples/sample2.flac -------------------------------------------------------------------------------- /docker_images/fairseq/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, 
traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/fastai/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fastai/app/__init__.py -------------------------------------------------------------------------------- /docker_images/fastai/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.image_classification import ImageClassificationPipeline 4 | 5 | 6 | # from app.pipelines.audio_classification import AudioClassificationPipeline 7 | # from app.pipelines.audio_to_audio import AudioToAudioPipeline 8 | # from app.pipelines.automatic_speech_recognition import ( 9 | # AutomaticSpeechRecognitionPipeline, 10 | # ) 11 | # from app.pipelines.feature_extraction import FeatureExtractionPipeline 12 | # from app.pipelines.question_answering import QuestionAnsweringPipeline 13 | # from app.pipelines.sentence_similarity import SentenceSimilarityPipeline 14 | # from app.pipelines.speech_segmentation import SpeechSegmentationPipeline 15 | # from app.pipelines.tabular_classification import ( 16 | # TabularDataPipeline, 17 | # ) 18 | # from app.pipelines.text_to_speech import TextToSpeechPipeline 19 | # from app.pipelines.token_classification import TokenClassificationPipeline 20 | -------------------------------------------------------------------------------- /docker_images/fastai/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | 4 | 5 | class Pipeline(ABC): 6 | task: Optional[str] = None 7 | model_id: Optional[str] = None 8 | 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | raise NotImplementedError("Pipelines should implement an __init__ method") 12 | 13 | @abstractmethod 14 | def __call__(self, inputs: Any) -> Any: 15 | raise NotImplementedError("Pipelines should implement a __call__ method") 16 | 17 | 18 | class PipelineException(Exception): 19 | pass 20 | -------------------------------------------------------------------------------- /docker_images/fastai/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/fastai/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | huggingface_hub[fastai]==0.6.0 4 | timm==0.5.4 5 | -------------------------------------------------------------------------------- /docker_images/fastai/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fastai/tests/__init__.py -------------------------------------------------------------------------------- 
/docker_images/fastai/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fastai/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/fastai/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fastai/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/fastai/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/fasttext/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="me " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is a HUGE 13 | # slowdown because of forking and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary in GPU docker environments. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose, 22 | # rendering the TIMEOUT read by uvicorn impossible to use correctly. 23 | # We therefore rename it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as gunicorn 27 | # will keep killing workers before they finish loading.
28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/fasttext/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/app/__init__.py -------------------------------------------------------------------------------- /docker_images/fasttext/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.feature_extraction import FeatureExtractionPipeline 4 | from app.pipelines.text_classification import TextClassificationPipeline 5 | -------------------------------------------------------------------------------- /docker_images/fasttext/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | import fasttext 5 | from huggingface_hub import hf_hub_download 6 | 7 | 8 | class Pipeline(ABC): 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | model_path = hf_hub_download(model_id, "model.bin", library_name="fasttext") 12 | self.model = fasttext.load_model(model_path) 13 | self.model_id = model_id 14 | 15 | @abstractmethod 16 | def __call__(self, inputs: Any) -> Any: 17 | raise NotImplementedError("Pipelines should implement a __call__ method") 18 | 19 | 20 | class PipelineException(Exception): 21 | pass 22 | -------------------------------------------------------------------------------- /docker_images/fasttext/app/pipelines/feature_extraction.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from app.pipelines import Pipeline 4 | 5 | 6 | class FeatureExtractionPipeline(Pipeline): 7 | def __init__( 8 | self, 9 | model_id: str, 10 | ): 11 | # IMPLEMENT_THIS 12 | # Preload all the elements you are going to need at inference. 13 | # For instance your model, processors, tokenizer that might be needed. 14 | # This function is only called once, so do all the heavy processing I/O here 15 | super().__init__(model_id) 16 | 17 | def __call__(self, inputs: str) -> List[float]: 18 | """ 19 | Args: 20 | inputs (:obj:`str`): 21 | a string to get the features of. 22 | Return: 23 | A :obj:`list` of floats: The features computed by the model. 24 | """ 25 | return self.model.get_sentence_vector(inputs).tolist() 26 | -------------------------------------------------------------------------------- /docker_images/fasttext/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/fasttext/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | fasttext==0.9.2 4 | huggingface_hub==0.5.1 5 | # Dummy change. 
6 | -------------------------------------------------------------------------------- /docker_images/fasttext/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/fasttext/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/fasttext/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/fasttext/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/flair/app/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/app/__init__.py -------------------------------------------------------------------------------- /docker_images/flair/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.token_classification import TokenClassificationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/flair/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | 4 | 5 | class Pipeline(ABC): 6 | task: Optional[str] = None 7 | model_id: Optional[str] = None 8 | 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | raise NotImplementedError("Pipelines should implement an __init__ method") 12 | 13 | @abstractmethod 14 | def __call__(self, inputs: Any) -> Any: 15 | raise NotImplementedError("Pipelines should implement a __call__ method") 16 | 17 | 18 | class PipelineException(Exception): 19 | pass 20 | -------------------------------------------------------------------------------- /docker_images/flair/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/flair/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | pydantic==1.8.2 3 | flair @ git+https://github.com/flairNLP/flair@e17ab1234fcfed2b089d8ef02b99949d520382d2 4 | api-inference-community==0.0.25 5 | -------------------------------------------------------------------------------- /docker_images/flair/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/flair/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/flair/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/flair/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/flair/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/samples/sample1.flac 
-------------------------------------------------------------------------------- /docker_images/flair/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/flair/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/flair/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/flair/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/k2/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/k2/app/__init__.py -------------------------------------------------------------------------------- /docker_images/k2/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.automatic_speech_recognition import ( 4 | AutomaticSpeechRecognitionPipeline, 5 | ) 6 | -------------------------------------------------------------------------------- /docker_images/k2/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/k2/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/k2/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | huggingface_hub==0.5.1 4 | 
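As with the other audio images, the k2 pipeline package exposes a single AutomaticSpeechRecognitionPipeline. Here is a hedged sketch of the contract such a subclass fills in, assuming the convention used elsewhere in this repository (a mono numpy array resampled to self.sampling_rate in, a {"text": ...} dict out); the sampling rate and the decoding logic below are placeholders, not k2's actual implementation.

from typing import Dict

import numpy as np

from app.pipelines.base import Pipeline


class SketchASRPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.model_id = model_id
        # Rate the server is expected to resample incoming audio to (assumption).
        self.sampling_rate = 16_000

    def __call__(self, inputs: np.ndarray) -> Dict[str, str]:
        # A real pipeline would run the acoustic model and decoder here.
        duration = inputs.shape[0] / self.sampling_rate
        return {"text": f"<transcription of {duration:.1f}s of audio>"}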
-------------------------------------------------------------------------------- /docker_images/k2/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/k2/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/k2/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/k2/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/k2/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/k2/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/k2/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/k2/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/k2/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/k2/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/latent-to-image/app/__init__.py -------------------------------------------------------------------------------- /docker_images/latent-to-image/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.latent_to_image import LatentToImagePipeline 4 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class 
Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/app/timing.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from functools import wraps 3 | from time import time 4 | 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def timing(f): 10 | @wraps(f) 11 | def inner(*args, **kwargs): 12 | start = time() 13 | try: 14 | ret = f(*args, **kwargs) 15 | finally: 16 | end = time() 17 | logger.debug("Func: %r took: %.2f sec to execute", f.__name__, end - start) 18 | return ret 19 | 20 | return inner 21 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/app/validation.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | # Group the alternatives so the anchors apply to all of them; without the 4 | # group, "truex" or "yesterday" would also match. 5 | STR_TO_BOOL = re.compile(r"^\s*(?:true|yes|1)\s*$", re.IGNORECASE) 6 | 7 | 8 | def str_to_bool(s): 9 | return STR_TO_BOOL.match(str(s)) 10 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/prestart.sh: -------------------------------------------------------------------------------- 1 | echo "Prestart start at " $(date) 2 | 3 | METRICS_ENABLED=${METRICS_ENABLED:-"0"} 4 | 5 | if [ "$METRICS_ENABLED" = "1" ];then 6 | echo "Spawning metrics server" 7 | gunicorn -k "uvicorn.workers.UvicornWorker" --bind :${METRICS_PORT:-9400} "app.healthchecks:app" & 8 | pid=$!
9 | echo "Metrics server pid: $pid" 10 | fi 11 | 12 | echo "Prestart done at " $(date) 13 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette 2 | api-inference-community>=0.0.37 3 | diffusers 4 | transformers 5 | accelerate 6 | safetensors 7 | hf_transfer 8 | pydantic 9 | ftfy 10 | scipy 11 | torch 12 | torchvision 13 | torchaudio 14 | uvicorn 15 | gunicorn 16 | psutil 17 | aiohttp 18 | peft 19 | protobuf 20 | -------------------------------------------------------------------------------- /docker_images/latent-to-image/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/latent-to-image/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/latent-to-image/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/mindspore/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.image_classification import ImageClassificationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/mindspore/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | 4 | 5 | class Pipeline(ABC): 6 | task: Optional[str] = None 7 | model_id: Optional[str] = None 8 | 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | raise NotImplementedError("Pipelines should implement an __init__ method") 12 | 13 | @abstractmethod 14 | def __call__(self, inputs: Any) -> Any: 15 | raise NotImplementedError("Pipelines should implement a __call__ method") 16 | 17 | 18 | class PipelineException(Exception): 19 | pass 20 | -------------------------------------------------------------------------------- /docker_images/mindspore/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/mindspore/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.25 3 | huggingface_hub==0.11.0 4 | tinyms>=0.3.2 
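Note that the timing decorator in latent-to-image's app/timing.py above logs at DEBUG level, so its output only appears when logging is configured accordingly; the try/finally also means the duration is logged even when the wrapped call raises. A small usage sketch (the decorated function is a made-up stand-in for a real pipeline call):

import logging
import time

from app.timing import timing

logging.basicConfig(level=logging.DEBUG)


@timing
def slow_inference():
    # Stand-in for a real model call.
    time.sleep(0.5)
    return "done"


slow_inference()
# Logs roughly: Func: 'slow_inference' took: 0.50 sec to execute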
-------------------------------------------------------------------------------- /docker_images/mindspore/tests/samples/0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/mindspore/tests/samples/0.jpg -------------------------------------------------------------------------------- /docker_images/mindspore/tests/samples/5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/mindspore/tests/samples/5.jpg -------------------------------------------------------------------------------- /docker_images/mindspore/tests/test_api.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict 3 | from unittest import TestCase, skipIf 4 | 5 | from app.main import ALLOWED_TASKS, get_pipeline 6 | 7 | 8 | # Must contain at least one example of each implemented pipeline 9 | # Tests do not check the actual values of the model output, so small dummy 10 | # models are recommended for faster tests. 11 | TESTABLE_MODELS: Dict[str, str] = {"image-classification": "mindspore-ai/LeNet"} 12 | 13 | 14 | ALL_TASKS = {"image-classification"} 15 | 16 | 17 | class PipelineTestCase(TestCase): 18 | @skipIf( 19 | os.path.dirname(os.path.dirname(__file__)).endswith("common"), 20 | "common is a special case", 21 | ) 22 | def test_has_at_least_one_task_enabled(self): 23 | self.assertGreater( 24 | len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task" 25 | ) 26 | 27 | def test_unsupported_tasks(self): 28 | unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys() 29 | for unsupported_task in unsupported_tasks: 30 | with self.subTest(msg=unsupported_task, task=unsupported_task): 31 | with self.assertRaises(EnvironmentError): 32 | get_pipeline(unsupported_task, model_id="XX") 33 | -------------------------------------------------------------------------------- /docker_images/mindspore/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/nemo/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/app/__init__.py -------------------------------------------------------------------------------- /docker_images/nemo/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 
3 | from app.pipelines.automatic_speech_recognition import ( 4 | AutomaticSpeechRecognitionPipeline, 5 | ) 6 | -------------------------------------------------------------------------------- /docker_images/nemo/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/nemo/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/nemo/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.28.0 2 | api-inference-community==0.0.27 3 | nemo_toolkit[all]>=1.18.1 4 | huggingface_hub==0.15.1 5 | # Dummy 6 | -------------------------------------------------------------------------------- /docker_images/nemo/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/nemo/tests/samples/sample1_dual.ogg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/nemo/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/nemo/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/open_clip/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/open_clip/app/__init__.py -------------------------------------------------------------------------------- /docker_images/open_clip/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.zero_shot_image_classification import ( 4 | ZeroShotImageClassificationPipeline, 5 | ) 6 | -------------------------------------------------------------------------------- /docker_images/open_clip/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | 4 | 5 | class Pipeline(ABC): 6 | task: Optional[str] = None 7 | model_id: Optional[str] = None 8 | 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | raise NotImplementedError("Pipelines should implement an __init__ method") 12 | 13 | @abstractmethod 14 | def __call__(self, inputs: Any) -> Any: 15 | raise NotImplementedError("Pipelines should implement a __call__ method") 16 | 17 | 18 | class PipelineException(Exception): 19 | pass 20 | -------------------------------------------------------------------------------- /docker_images/open_clip/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/open_clip/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | huggingface_hub>=0.12.1 4 | timm>=0.9.10 5 | transformers>=4.34.0 6 | open_clip_torch>=2.23.0 7 | #dummy. 
8 | -------------------------------------------------------------------------------- /docker_images/open_clip/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/open_clip/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/open_clip/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/open_clip/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/open_clip/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/open_clip/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/open_clip/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="PaddleNLP " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is a HUGE 13 | # slowdown because of forking and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | 21 | # Necessary in GPU docker environments. 22 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose, 23 | # rendering the TIMEOUT read by uvicorn impossible to use correctly. 24 | # We therefore rename it to UVICORN_TIMEOUT. 25 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 26 | # than 30s (the default) to load in memory. 27 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as gunicorn 28 | # will keep killing workers before they finish loading.
29 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 30 | COPY ./app /app/app 31 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/paddlenlp/app/__init__.py -------------------------------------------------------------------------------- /docker_images/paddlenlp/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.conversational import ConversationalPipeline 4 | from app.pipelines.fill_mask import FillMaskPipeline 5 | from app.pipelines.summarization import SummarizationPipeline 6 | from app.pipelines.zero_shot_classification import ZeroShotClassificationPipeline 7 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/app/pipelines/fill_mask.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List 2 | 3 | from app.pipelines import Pipeline 4 | from paddlenlp.taskflow import Taskflow 5 | 6 | 7 | class FillMaskPipeline(Pipeline): 8 | def __init__(self, model_id: str): 9 | self.taskflow = Taskflow("fill_mask", task_path=model_id, from_hf_hub=True) 10 | 11 | def __call__(self, inputs: str) -> List[Dict[str, Any]]: 12 | """ 13 | Args: 14 | inputs (:obj:`str`): a string to be filled; it must contain exactly one [MASK] token (check the model card for the exact mask token) 15 | Return: 16 | A :obj:`list` of dicts, each containing the following: 17 | - "sequence": The actual sequence of tokens that ran against the model (may contain special tokens) 18 | - "score": The probability for this token.
19 | - "token": The id of the token 20 | - "token_str": The string representation of the token 21 | """ 22 | results = self.taskflow(inputs) 23 | # since paddlenlp taskflow takes batch requests and returns batch results, we take the first element here 24 | return results[0] 25 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/app/pipelines/summarization.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from app.pipelines import Pipeline 4 | from paddlenlp.taskflow import Taskflow 5 | 6 | 7 | class SummarizationPipeline(Pipeline): 8 | def __init__(self, model_id: str): 9 | self.taskflow = Taskflow( 10 | "text_summarization", task_path=model_id, from_hf_hub=True 11 | ) 12 | 13 | def __call__(self, inputs: str) -> List[Dict[str, str]]: 14 | """ 15 | Args: 16 | inputs (:obj:`str`): a string to be summarized 17 | Return: 18 | A :obj:`list` of :obj:`dict` in the form of {"summary_text": "The string after summarization"} 19 | """ 20 | results = self.taskflow(inputs) 21 | return [{"summary_text": results[0]}] 22 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.27 3 | huggingface_hub>=0.10.1 4 | paddlepaddle==2.5.0 5 | paddlenlp>=2.5.0 6 | #Dummy 7 | -------------------------------------------------------------------------------- /docker_images/paddlenlp/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/paddlenlp/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/paddlenlp/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/peft/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/peft/app/__init__.py -------------------------------------------------------------------------------- /docker_images/peft/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, 
PipelineException # isort:skip 2 | 3 | from app.pipelines.text_generation import TextGenerationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/peft/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/peft/app/timing.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from functools import wraps 3 | from time import time 4 | 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def timing(f): 10 | @wraps(f) 11 | def inner(*args, **kwargs): 12 | start = time() 13 | try: 14 | ret = f(*args, **kwargs) 15 | finally: 16 | end = time() 17 | logger.debug("Func: %r took: %.2f sec to execute", f.__name__, end - start) 18 | return ret 19 | 20 | return inner 21 | -------------------------------------------------------------------------------- /docker_images/peft/prestart.sh: -------------------------------------------------------------------------------- 1 | echo "Prestart start at " $(date) 2 | python app/main.py 3 | echo "Prestart done at " $(date) -------------------------------------------------------------------------------- /docker_images/peft/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.31 3 | huggingface_hub==0.18.0 4 | safetensors==0.3.1 5 | peft==0.6.2 6 | transformers==4.35.2 7 | accelerate>=0.21.0 8 | hf_transfer==0.1.3 9 | pydantic==1.8.2 10 | ftfy==6.1.1 11 | sentencepiece==0.1.97 12 | scipy==1.10.0 13 | torch==2.0.1 14 | pydantic<2 15 | #Dummy. 
16 | -------------------------------------------------------------------------------- /docker_images/peft/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/peft/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/pyannote_audio/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/app/__init__.py -------------------------------------------------------------------------------- /docker_images/pyannote_audio/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.automatic_speech_recognition import ( 4 | AutomaticSpeechRecognitionPipeline, 5 | ) 6 | -------------------------------------------------------------------------------- /docker_images/pyannote_audio/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/pyannote_audio/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/pyannote_audio/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.25 3 | torch==1.13.1 4 | torchvision==0.12.0 5 | torchaudio==0.11.0 6 | torchtext==0.12.0 7 | speechbrain==0.5.12 8 | pyannote-audio==2.0.1 9 | huggingface_hub==0.8.1 10 | -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/samples/plane.jpg -------------------------------------------------------------------------------- 
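Aside: the cd context manager copied into every tests/test_docker_build.py in this repository is, apart from its os.path.expanduser call, equivalent to contextlib.chdir, which only became available in Python 3.11; since these images pin Python 3.8, the hand-rolled helper is still needed. For comparison, a sketch of the same test on 3.11+:

import contextlib
import os
import subprocess
from unittest import TestCase


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        # contextlib.chdir restores the previous working directory on exit,
        # just like the hand-rolled cd helper used throughout this repo.
        with contextlib.chdir(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])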
/docker_images/pyannote_audio/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/pyannote_audio/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/pyannote_audio/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/app/__init__.py -------------------------------------------------------------------------------- /docker_images/sentence_transformers/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.feature_extraction import FeatureExtractionPipeline 4 | from app.pipelines.sentence_similarity import SentenceSimilarityPipeline 5 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | 
raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/app/pipelines/feature_extraction.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | from app.pipelines import Pipeline 5 | from sentence_transformers import SentenceTransformer 6 | 7 | 8 | class FeatureExtractionPipeline(Pipeline): 9 | def __init__( 10 | self, 11 | model_id: str, 12 | ): 13 | self.model = SentenceTransformer( 14 | model_id, use_auth_token=os.getenv("HF_API_TOKEN") 15 | ) 16 | 17 | def __call__(self, inputs: str) -> List[float]: 18 | """ 19 | Args: 20 | inputs (:obj:`str`): 21 | a string to get the features of. 22 | Return: 23 | A :obj:`list` of floats: The features computed by the model. 24 | """ 25 | return self.model.encode(inputs).tolist() 26 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | sentence-transformers==3.3.1 4 | transformers==4.48.0 5 | tokenizers==0.21.0 6 | protobuf==3.18.3 7 | huggingface_hub==0.27.1 8 | sacremoses==0.0.53 9 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/sample1.flac: -------------------------------------------------------------------------------- 
-------------------------------------------------------------------------------- /docker_images/sentence_transformers/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | sentence-transformers==3.3.1 4 | transformers==4.48.0 5 | tokenizers==0.21.0 6 | protobuf==3.18.3 7 | huggingface_hub==0.27.1 8 | sacremoses==0.0.53 9 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/sample1.flac: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sentence_transformers/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/sentence_transformers/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/setfit/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="Tom Aarsen " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is 13 | # a HUGE slowdown because of the fork and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary on GPU environment docker. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another 22 | # purpose, making the TIMEOUT read by uvicorn impossible to use correctly, 23 | # so we rename it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as the 27 | # workers will be killed before they finish loading.
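# As a hedged illustration (600 is an arbitrary value, not a repo default), a
# model that needs ~10 minutes to load could be served with
#   docker run -e UVICORN_TIMEOUT=600 <image>
# because the sed below rewrites gunicorn_conf.py to read UVICORN_TIMEOUT.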
28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/setfit/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/setfit/app/__init__.py -------------------------------------------------------------------------------- /docker_images/setfit/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException 2 | from app.pipelines.text_classification import TextClassificationPipeline 3 | -------------------------------------------------------------------------------- /docker_images/setfit/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/setfit/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/setfit/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | git+https://github.com/huggingface/api-inference-community.git@f06a71e72e92caeebabaeced979eacb3542bf2ca 3 | huggingface_hub==0.20.2 4 | setfit==1.0.3 5 | -------------------------------------------------------------------------------- /docker_images/setfit/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/setfit/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/setfit/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/sklearn/app/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/app/__init__.py -------------------------------------------------------------------------------- /docker_images/sklearn/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.tabular_classification import TabularClassificationPipeline 4 | from app.pipelines.tabular_regression import TabularRegressionPipeline 5 | from app.pipelines.text_classification import TextClassificationPipeline 6 | -------------------------------------------------------------------------------- /docker_images/sklearn/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/sklearn/app/pipelines/tabular_classification.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Union 2 | 3 | import pandas as pd 4 | from app.pipelines.common import SklearnBasePipeline 5 | 6 | 7 | class TabularClassificationPipeline(SklearnBasePipeline): 8 | def _get_output( 9 | self, inputs: Dict[str, Dict[str, List[Union[str, float]]]] 10 | ) -> List[Union[str, float]]: 11 | # We convert the inputs to a pandas DataFrame, and use self.columns 12 | # to order the columns in the order they're expected, ignore extra 13 | # columns given if any, and put NaN for missing columns. 
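# For example (illustrative values): with self.columns == ["a", "b"] and
# inputs["data"] == {"b": [1.0], "c": [2.0]}, pandas fills column "a"
# with NaN and silently drops the extra "c" column.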
14 | data = pd.DataFrame(inputs["data"], columns=self.columns) 15 | res = self.model.predict(data).tolist() 16 | return res 17 | -------------------------------------------------------------------------------- /docker_images/sklearn/app/pipelines/tabular_regression.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.tabular_classification import TabularClassificationPipeline 2 | 3 | 4 | class TabularRegressionPipeline(TabularClassificationPipeline): 5 | # The actual work done by the pipeline is identical 6 | pass 7 | -------------------------------------------------------------------------------- /docker_images/sklearn/app/pipelines/text_classification.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from app.pipelines.common import SklearnBasePipeline 4 | 5 | 6 | class TextClassificationPipeline(SklearnBasePipeline): 7 | def _get_output(self, inputs: str) -> List[List[Dict[str, float]]]: 8 | res = [] 9 | for i, c in enumerate(self.model.predict_proba([inputs]).tolist()[0]): 10 | res.append({"label": str(self.model.classes_[i]), "score": c}) 11 | # Wrap in an outer list: the API returns one list of label/score dicts per input. 12 | return [res] 13 | -------------------------------------------------------------------------------- /docker_images/sklearn/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/sklearn/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette>=0.14.2 2 | api-inference-community>=0.0.25 3 | huggingface_hub>=0.5.1 4 | scikit-learn 5 | joblib>=1.0.1 6 | # Dummy changes. 7 | -------------------------------------------------------------------------------- /docker_images/sklearn/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # uncomment to enable debugging 4 | # set -xe 5 | 6 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) 7 | cd $SCRIPT_DIR 8 | 9 | # we have to do this since we can't use `mamba run`, and we need the bash 10 | # functions to call `activate` 11 | source $(mamba info -q --base)/etc/profile.d/conda.sh 12 | source $(mamba info -q --base)/etc/profile.d/mamba.sh 13 | 14 | mamba env update --file sklearn-1.0.yml 15 | mamba env update --file sklearn-latest.yml 16 | 17 | # not doing mamba run ...
since it just wouldn't work and would use system's 18 | # python 19 | mamba activate api-inference-community-test-generator-sklearn-1-0 20 | python generate.py 1.0 21 | mamba deactivate 22 | 23 | mamba activate api-inference-community-test-generator-sklearn-latest 24 | python generate.py latest 25 | mamba deactivate 26 | -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-1.0-input.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": { 3 | "sepal length (cm)": [ 4 | 6.1, 5 | 5.7, 6 | 7.7, 7 | 6.0, 8 | 6.8, 9 | 5.4, 10 | 5.6, 11 | 6.9, 12 | 6.2, 13 | 5.8 14 | ], 15 | "sepal width (cm)": [ 16 | 2.8, 17 | 3.8, 18 | 2.6, 19 | 2.9, 20 | 2.8, 21 | 3.4, 22 | 2.9, 23 | 3.1, 24 | 2.2, 25 | 2.7 26 | ], 27 | "petal length (cm)": [ 28 | 4.7, 29 | 1.7, 30 | 6.9, 31 | 4.5, 32 | 4.8, 33 | 1.5, 34 | 3.6, 35 | 5.1, 36 | 4.5, 37 | 3.9 38 | ], 39 | "petal width (cm)": [ 40 | 1.2, 41 | 0.3, 42 | 2.3, 43 | 1.5, 44 | 1.4, 45 | 0.4, 46 | 1.3, 47 | 2.3, 48 | 1.5, 49 | 1.2 50 | ] 51 | } 52 | } -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-hist_gradient_boosting-1.0-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 1, 3 | 0, 4 | 2, 5 | 1, 6 | 1, 7 | 0, 8 | 1, 9 | 2, 10 | 1, 11 | 1 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-hist_gradient_boosting-latest-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 1, 3 | 0, 4 | 2, 5 | 1, 6 | 1, 7 | 0, 8 | 1, 9 | 2, 10 | 1, 11 | 1 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-latest-input.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": { 3 | "sepal length (cm)": [ 4 | 6.1, 5 | 5.7, 6 | 7.7, 7 | 6.0, 8 | 6.8, 9 | 5.4, 10 | 5.6, 11 | 6.9, 12 | 6.2, 13 | 5.8 14 | ], 15 | "sepal width (cm)": [ 16 | 2.8, 17 | 3.8, 18 | 2.6, 19 | 2.9, 20 | 2.8, 21 | 3.4, 22 | 2.9, 23 | 3.1, 24 | 2.2, 25 | 2.7 26 | ], 27 | "petal length (cm)": [ 28 | 4.7, 29 | 1.7, 30 | 6.9, 31 | 4.5, 32 | 4.8, 33 | 1.5, 34 | 3.6, 35 | 5.1, 36 | 4.5, 37 | 3.9 38 | ], 39 | "petal width (cm)": [ 40 | 1.2, 41 | 0.3, 42 | 2.3, 43 | 1.5, 44 | 1.4, 45 | 0.4, 46 | 1.3, 47 | 2.3, 48 | 1.5, 49 | 1.2 50 | ] 51 | } 52 | } -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-logistic_regression-1.0-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 1, 3 | 0, 4 | 2, 5 | 1, 6 | 1, 7 | 0, 8 | 1, 9 | 2, 10 | 1, 11 | 1 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/iris-logistic_regression-latest-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 1, 3 | 0, 4 | 2, 5 | 1, 6 | 1, 7 | 0, 8 | 1, 9 | 2, 10 | 1, 11 | 1 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/tabularregression-hist_gradient_boosting_regressor-1.0-output.json: -------------------------------------------------------------------------------- 1 | 
[ 2 | 128.767605088706, 3 | 213.12484287152625, 4 | 152.87415981711302, 5 | 271.367552554169, 6 | 109.00499923164844, 7 | 81.88059224780598, 8 | 238.4711759447084, 9 | 215.14159932904784, 10 | 134.42407401121258, 11 | 189.15096503239798 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/tabularregression-hist_gradient_boosting_regressor-latest-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 128.767605088706, 3 | 213.12484287152625, 4 | 152.87415981711302, 5 | 271.367552554169, 6 | 109.00499923164844, 7 | 81.88059224780598, 8 | 238.4711759447084, 9 | 215.14159932904784, 10 | 134.42407401121258, 11 | 189.15096503239798 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/tabularregression-linear_regression-1.0-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 139.54831330342856, 3 | 179.52030577879273, 4 | 134.04133297819817, 5 | 291.4119359771987, 6 | 123.78723656395928, 7 | 92.17357676591854, 8 | 258.2340970376254, 9 | 181.33895237832277, 10 | 90.22217861672894, 11 | 108.63143297584902 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/samples/tabularregression-linear_regression-latest-output.json: -------------------------------------------------------------------------------- 1 | [ 2 | 139.54755840379605, 3 | 179.51720835342783, 4 | 134.0387557189011, 5 | 291.4170292522083, 6 | 123.78965872239607, 7 | 92.17234650105041, 8 | 258.23238898921295, 9 | 181.3373205706072, 10 | 90.22411310941459, 11 | 108.63375858007925 12 | ] -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/sklearn-1.0.yml: -------------------------------------------------------------------------------- 1 | name: api-inference-community-test-generator-sklearn-1-0 2 | channels: 3 | - conda-forge 4 | - nodefaults 5 | dependencies: 6 | - scikit-learn=1.0.2 7 | - pandas 8 | - huggingface_hub 9 | - pip 10 | - pip: 11 | # if you're testing skops, you should install from github, and probably 12 | # a specific hash if your PR on the skops side is not merged. 13 | - git+https://github.com/skops-dev/skops.git 14 | -------------------------------------------------------------------------------- /docker_images/sklearn/tests/generators/sklearn-latest.yml: -------------------------------------------------------------------------------- 1 | name: api-inference-community-test-generator-sklearn-latest 2 | channels: 3 | - conda-forge 4 | - nodefaults 5 | dependencies: 6 | - scikit-learn 7 | - pandas 8 | - huggingface_hub 9 | - pip 10 | - pip: 11 | # if you're testing skops, you should install from github, and probably 12 | # a specific hash if your PR on the skops side is not merged. 
13 | - git+https://github.com/skops-dev/skops.git 14 | -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/sklearn/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/sklearn/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/sklearn/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/spacy/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/app/__init__.py -------------------------------------------------------------------------------- /docker_images/spacy/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from 
app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.sentence_similarity import SentenceSimilarityPipeline 4 | from app.pipelines.text_classification import TextClassificationPipeline 5 | from app.pipelines.token_classification import TokenClassificationPipeline 6 | -------------------------------------------------------------------------------- /docker_images/spacy/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/spacy/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/spacy/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | huggingface_hub==0.5.1 4 | requests==2.31.0 5 | -------------------------------------------------------------------------------- /docker_images/spacy/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/samples/sample1.webm 
-------------------------------------------------------------------------------- /docker_images/spacy/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/spacy/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/spacy/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/span_marker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="Tom Aarsen " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is 13 | # a HUGE slowdown because of the fork and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary on GPU environment docker. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another 22 | # purpose, making the TIMEOUT read by uvicorn impossible to use correctly, 23 | # so we rename it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as the 27 | # workers will be killed before they finish loading.
28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/span_marker/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/span_marker/app/__init__.py -------------------------------------------------------------------------------- /docker_images/span_marker/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException # isort:skip 2 | 3 | from app.pipelines.token_classification import TokenClassificationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/span_marker/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/span_marker/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/span_marker/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | huggingface_hub>=0.17.3 4 | span_marker>=1.4.0 -------------------------------------------------------------------------------- /docker_images/span_marker/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/span_marker/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/span_marker/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/speechbrain/app/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/app/__init__.py -------------------------------------------------------------------------------- /docker_images/speechbrain/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException  # isort:skip 2 | 3 | from app.pipelines.audio_classification import AudioClassificationPipeline 4 | from app.pipelines.audio_to_audio import AudioToAudioPipeline 5 | from app.pipelines.automatic_speech_recognition import ( 6 | AutomaticSpeechRecognitionPipeline, 7 | ) 8 | from app.pipelines.text2text_generation import TextToTextPipeline 9 | from app.pipelines.text_to_speech import TextToSpeechPipeline 10 | -------------------------------------------------------------------------------- /docker_images/speechbrain/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/speechbrain/app/pipelines/text2text_generation.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | from app.common import ModelType, get_type 4 | from app.pipelines import Pipeline 5 | from speechbrain.inference import GraphemeToPhoneme 6 | 7 | 8 | POSTPROCESSING = {ModelType.GRAPHEMETOPHONEME: lambda output: "-".join(output)} 9 | 10 | 11 | class TextToTextPipeline(Pipeline): 12 | def __init__(self, model_id: str): 13 | model_type = get_type(model_id) 14 | if model_type == ModelType.GRAPHEMETOPHONEME: 15 | self.model = GraphemeToPhoneme.from_hparams(source=model_id) 16 | else: 17 | raise ValueError(f"{model_type.value} is invalid for text-to-text") 18 | self.post_process = POSTPROCESSING.get(model_type, lambda output: output) 19 | 20 | def __call__(self, inputs: str) -> List[Dict[str, str]]: 21 | """ 22 | Args: 23 | inputs (:obj:`str`): 24 | The input text 25 | Return: 26 | A :obj:`list` containing a single :obj:`dict`: {"generated_text": the model output} 27 | """ 28 | output = self.model(inputs) 29 | output = self.post_process(output) 30 | return [{"generated_text": output}] 31 |
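A minimal usage sketch for the text-to-text pipeline above. Both the model id and the printed output are illustrative assumptions; get_type() must resolve the id to ModelType.GRAPHEMETOPHONEME for the constructor to accept it:

from app.pipelines import TextToTextPipeline

# Illustrative grapheme-to-phoneme checkpoint, not one pinned by this repo.
pipe = TextToTextPipeline("speechbrain/soundchoice-g2p")

out = pipe("Hello")
# POSTPROCESSING joins the phoneme sequence with dashes, so `out` looks
# roughly like [{"generated_text": "HH-AH-L-OW"}].
print(out)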
-------------------------------------------------------------------------------- /docker_images/speechbrain/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/speechbrain/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | # TODO: Replace with the correct tag once the core PR is merged 3 | api-inference-community==0.0.32 4 | huggingface_hub>=0.7 5 | transformers==4.30.0 6 | git+https://github.com/speechbrain/speechbrain@v1.0.0 7 | https://github.com/kpu/kenlm/archive/master.zip 8 | pygtrie 9 | #Dummy.
10 | -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/speechbrain/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/speechbrain/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/stanza/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM
tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="me " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is 13 | # a HUGE slowdown because of the fork and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV HUGGINGFACE_HUB_CACHE=/data 19 | 20 | # Necessary on GPU environment docker. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another 22 | # purpose, making the TIMEOUT read by uvicorn impossible to use correctly, 23 | # so we rename it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as the 27 | # workers will be killed before they finish loading. 28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/stanza/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/app/__init__.py -------------------------------------------------------------------------------- /docker_images/stanza/app/batch.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | 5 | from api_inference_community.batch import batch 6 | from app.main import get_pipeline 7 | 8 | 9 | DATASET_NAME = os.getenv("DATASET_NAME") 10 | DATASET_CONFIG = os.getenv("DATASET_CONFIG", None) 11 | DATASET_SPLIT = os.getenv("DATASET_SPLIT") 12 | DATASET_COLUMN = os.getenv("DATASET_COLUMN") 13 | USE_GPU = os.getenv("USE_GPU", "0").lower() in {"1", "true"} 14 | TOKEN = os.getenv("TOKEN") 15 | REPO_ID = os.getenv("REPO_ID") 16 | TASK = os.getenv("TASK") 17 | 18 | if __name__ == "__main__": 19 | batch( 20 | dataset_name=DATASET_NAME, 21 | dataset_config=DATASET_CONFIG, 22 | dataset_split=DATASET_SPLIT, 23 | dataset_column=DATASET_COLUMN, 24 | token=TOKEN, 25 | repo_id=REPO_ID, 26 | use_gpu=USE_GPU, 27 | pipeline=get_pipeline(), 28 | task=TASK, 29 | ) 30 | -------------------------------------------------------------------------------- /docker_images/stanza/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException  # isort:skip 2 | 3 | from app.pipelines.token_classification import TokenClassificationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/stanza/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any 3 | 4 | 5 | class Pipeline(ABC): 6 | @abstractmethod 7 | def __init__(self, model_id: str): 8 | raise NotImplementedError("Pipelines should implement an __init__ method") 9 | 10 | @abstractmethod 11 | def __call__(self, inputs: Any) -> Any: 12 | raise
NotImplementedError("Pipelines should implement a __call__ method") 13 | 14 | 15 | class PipelineException(Exception): 16 | pass 17 | -------------------------------------------------------------------------------- /docker_images/stanza/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 | -------------------------------------------------------------------------------- /docker_images/stanza/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.23 3 | huggingface_hub==0.5.1 4 | stanza==1.3.0 5 | -------------------------------------------------------------------------------- /docker_images/stanza/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/stanza/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/stanza/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/stanza/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 
12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", "."]) 24 | -------------------------------------------------------------------------------- /docker_images/timm/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiangolo/uvicorn-gunicorn:python3.8 2 | LABEL maintainer="me " 3 | 4 | # Add any system dependency here 5 | # RUN apt-get update -y && apt-get install libXXX -y 6 | 7 | COPY ./requirements.txt /app 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | COPY ./prestart.sh /app/ 10 | 11 | 12 | # Most DL models are quite large in terms of memory; using multiple workers is 13 | # a HUGE slowdown because of the fork and the GIL in Python. 14 | # Using multiple pods seems like a better default strategy. 15 | # Feel free to override if it does not make sense for your library. 16 | ARG max_workers=1 17 | ENV MAX_WORKERS=$max_workers 18 | ENV TORCH_HOME=/data/ 19 | 20 | # Necessary on GPU environment docker. 21 | # The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another 22 | # purpose, making the TIMEOUT read by uvicorn impossible to use correctly, 23 | # so we rename it to UVICORN_TIMEOUT. 24 | # UVICORN_TIMEOUT is a useful variable for very large models that take more 25 | # than 30s (the default) to load in memory. 26 | # If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as the 27 | # workers will be killed before they finish loading. 28 | RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py 29 | COPY ./app /app/app 30 | -------------------------------------------------------------------------------- /docker_images/timm/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/app/__init__.py -------------------------------------------------------------------------------- /docker_images/timm/app/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | from app.pipelines.base import Pipeline, PipelineException  # isort:skip 2 | 3 | from app.pipelines.image_classification import ImageClassificationPipeline 4 | -------------------------------------------------------------------------------- /docker_images/timm/app/pipelines/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Any, Optional 3 | 4 | 5 | class Pipeline(ABC): 6 | task: Optional[str] = None 7 | model_id: Optional[str] = None 8 | 9 | @abstractmethod 10 | def __init__(self, model_id: str): 11 | raise NotImplementedError("Pipelines should implement an __init__ method") 12 | 13 | @abstractmethod 14 | def __call__(self, inputs: Any) -> Any: 15 | raise NotImplementedError("Pipelines should implement a __call__ method") 16 | 17 | 18 | class PipelineException(Exception): 19 | pass 20 | -------------------------------------------------------------------------------- /docker_images/timm/prestart.sh: -------------------------------------------------------------------------------- 1 | python app/main.py 2 |
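The abstract Pipeline/PipelineException pair recurs in every docker image above; timm's copy adds optional task and model_id class attributes. A hedged sketch of a concrete subclass (EchoPipeline is hypothetical and exists in no image here):

from typing import Any

from app.pipelines import Pipeline, PipelineException


class EchoPipeline(Pipeline):
    def __init__(self, model_id: str):
        if not model_id:
            raise PipelineException("model_id must be a non-empty string")
        # Mirrors the optional class attribute on timm's base class.
        self.model_id = model_id

    def __call__(self, inputs: Any) -> Any:
        # A real pipeline would run a model here; this sketch just echoes.
        return {"model_id": self.model_id, "inputs": inputs}

Overriding both abstract methods is what makes the subclass instantiable; main.py can then construct it from the requested model id, e.g. EchoPipeline("some/model")("hi").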
-------------------------------------------------------------------------------- /docker_images/timm/requirements.txt: -------------------------------------------------------------------------------- 1 | starlette==0.27.0 2 | api-inference-community==0.0.32 3 | huggingface_hub>=0.11.1 4 | timm>=1.0.7 5 | #dummy 6 | -------------------------------------------------------------------------------- /docker_images/timm/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/__init__.py -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/samples/plane.jpg -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/samples/sample1.flac -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/sample1.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/samples/sample1.webm -------------------------------------------------------------------------------- /docker_images/timm/tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/docker_images/timm/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /docker_images/timm/tests/test_docker_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from unittest import TestCase 4 | 5 | 6 | class cd: 7 | """Context manager for changing the current working directory""" 8 | 9 | def __init__(self, newPath): 10 | self.newPath = os.path.expanduser(newPath) 11 | 12 | def __enter__(self): 13 | self.savedPath = os.getcwd() 14 | os.chdir(self.newPath) 15 | 16 | def __exit__(self, etype, value, traceback): 17 | os.chdir(self.savedPath) 18 | 19 | 20 | class DockerBuildTestCase(TestCase): 21 | def test_can_build_docker_image(self): 22 | with cd(os.path.dirname(os.path.dirname(__file__))): 23 | subprocess.check_output(["docker", "build", 
"."]) 24 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | starlette>=0.14.2 2 | numpy>=1.18.0 3 | pydantic>=2 4 | parameterized>=0.8.1 5 | pillow>=8.2.0 6 | huggingface_hub>=0.20.2 7 | datasets>=2.2 8 | psutil>=6.0.0 9 | pytest 10 | httpx 11 | uvicorn 12 | black 13 | isort 14 | flake8 15 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | default_section = FIRSTPARTY 3 | ensure_newline_before_comments = True 4 | force_grid_wrap = 0 5 | line_length = 88 6 | include_trailing_comma = True 7 | known_first_party = main 8 | lines_after_imports = 2 9 | multi_line_output = 3 10 | use_parentheses = True 11 | 12 | [flake8] 13 | ignore = E203, E501, E741, W503, W605 14 | max-line-length = 88 15 | per-file-ignores = __init__.py:F401 16 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | 4 | setup( 5 | name="api_inference_community", 6 | version="0.0.37", 7 | description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub", 8 | long_description=open("README.md", "r", encoding="utf-8").read(), 9 | long_description_content_type="text/markdown", 10 | url="http://github.com/huggingface/api-inference-community", 11 | author="Nicolas Patry", 12 | author_email="nicolas@huggingface.co", 13 | license="MIT", 14 | packages=["api_inference_community"], 15 | python_requires=">=3.6.0", 16 | zip_safe=False, 17 | install_requires=list(line for line in open("requirements.txt", "r")), 18 | extras_require={ 19 | "test": [ 20 | "httpx>=0.18", 21 | "Pillow>=8.2", 22 | "httpx>=0.18", 23 | "torch>=1.9.0", 24 | "pytest>=6.2", 25 | ], 26 | "quality": ["black==22.3.0", "isort", "flake8", "mypy"], 27 | }, 28 | ) 29 | -------------------------------------------------------------------------------- /tests/samples/malformed.flac: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/samples/plane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/tests/samples/plane.jpg -------------------------------------------------------------------------------- /tests/samples/plane2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/tests/samples/plane2.jpg -------------------------------------------------------------------------------- /tests/samples/sample1.flac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/tests/samples/sample1.flac -------------------------------------------------------------------------------- /tests/samples/sample1.webm: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/tests/samples/sample1.webm -------------------------------------------------------------------------------- /tests/samples/sample1_dual.ogg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/huggingface/api-inference-community/5c3dbdc3946c6032953205910f2312caccc32d17/tests/samples/sample1_dual.ogg -------------------------------------------------------------------------------- /tests/test_image.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest import TestCase 3 | 4 | import PIL 5 | from api_inference_community.validation import normalize_payload_image 6 | 7 | 8 | class ValidationTestCase(TestCase): 9 | def test_original_imagefile(self): 10 | dirname = os.path.dirname(os.path.abspath(__file__)) 11 | filename = os.path.join(dirname, "samples", "plane.jpg") 12 | with open(filename, "rb") as f: 13 | bpayload = f.read() 14 | 15 | payload, params = normalize_payload_image(bpayload) 16 | self.assertEqual(params, {}) 17 | self.assertTrue(isinstance(payload, PIL.Image.Image)) 18 | self.assertEqual(payload.size, (300, 200)) 19 | 20 | def test_secondary_file(self): 21 | dirname = os.path.dirname(os.path.abspath(__file__)) 22 | filename = os.path.join(dirname, "samples", "plane2.jpg") 23 | with open(filename, "rb") as f: 24 | bpayload = f.read() 25 | 26 | payload, params = normalize_payload_image(bpayload) 27 | self.assertEqual(params, {}) 28 | self.assertTrue(isinstance(payload, PIL.Image.Image)) 29 | self.assertEqual(payload.size, (2560, 1440)) 30 | --------------------------------------------------------------------------------
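A closing sketch of the validation helper exercised by test_image.py above, outside the test harness. It assumes only the behavior the tests show: normalize_payload_image turns raw request bytes into a PIL image plus a parameter dict (empty for a bare image payload):

import sys

from api_inference_community.validation import normalize_payload_image

# Raw bytes, exactly as an inference app receives them in a request body.
with open(sys.argv[1], "rb") as f:
    bpayload = f.read()

# Returns a decoded PIL.Image.Image and a dict of extra parameters.
image, params = normalize_payload_image(bpayload)
print(image.size, params)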