├── .gitignore ├── .vscode └── launch.json ├── MLServer ├── .dockerignore ├── .github │ ├── dependabot.yml │ ├── release.yml │ └── workflows │ │ ├── benchmark.yml │ │ ├── publish.yml │ │ ├── release-sc.yml │ │ ├── release.yml │ │ ├── security.yml │ │ └── tests.yml ├── .gitignore ├── .package.lock ├── .readthedocs.yaml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── OWNERS ├── OWNERS_ALIASES ├── README.md ├── RELEASE.md ├── benchmarking │ ├── Makefile │ ├── README.md │ ├── clone-models.sh │ ├── common │ │ ├── grpc.js │ │ ├── helpers.js │ │ └── rest.js │ ├── data │ │ ├── iris │ │ │ ├── grpc-requests.json │ │ │ └── rest-requests.json │ │ └── sum-model │ │ │ ├── grpc-requests.json │ │ │ └── rest-requests.json │ ├── generator.py │ ├── scenarios │ │ ├── inference-grpc.js │ │ ├── inference-rest.js │ │ └── mms.js │ └── testserver │ │ └── settings.json ├── docs │ ├── Makefile │ ├── _static │ │ └── css │ │ │ └── custom.css │ ├── assets │ │ ├── adaptive-batching.svg │ │ ├── architecture.svg │ │ ├── content-type.svg │ │ ├── input-codecs.svg │ │ ├── parallel-inference.svg │ │ └── request-codecs.svg │ ├── changelog.md │ ├── conf.py │ ├── examples │ │ ├── Makefile │ │ ├── alibi-detect │ │ │ ├── .gitignore │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── alibi-detector-artifacts │ │ │ │ └── .gitkeep │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── alibi-explain │ │ │ ├── data │ │ │ │ └── mnist_anchor_image │ │ │ │ │ ├── explainer.dill │ │ │ │ │ ├── meta.dill │ │ │ │ │ └── segmentation_fn.dill │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── conda │ │ │ ├── Makefile │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── environment.yml │ │ │ └── model-settings.json │ │ ├── content-type │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── model-settings.json │ │ │ └── runtime.py │ │ ├── custom-json │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── jsonmodels.py │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── custom │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── model-settings.json │ │ │ ├── models.py │ │ │ ├── numpyro-divorce.json │ │ │ ├── requirements.txt │ │ │ ├── seldondeployment.yaml │ │ │ └── settings.json │ │ ├── huggingface │ │ │ ├── .gitignore │ │ │ ├── README.ipynb │ │ │ └── README.md │ │ ├── index.md │ │ ├── kafka │ │ │ ├── .gitignore │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── inference-request.json │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── lightgbm │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── iris-lightgbm.bst │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── mlflow │ │ │ ├── .gitignore │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── model-settings.json │ │ │ └── src │ │ │ │ └── train.py │ │ ├── mllib │ │ │ ├── data │ │ │ │ ├── .part-00000-3b569ac9-e283-4220-ba6c-31088ed23cb8-c000.snappy.parquet.crc │ │ │ │ ├── _SUCCESS │ │ │ │ └── part-00000-3b569ac9-e283-4220-ba6c-31088ed23cb8-c000.snappy.parquet │ │ │ ├── metadata │ │ │ │ ├── .part-00000.crc │ │ │ │ ├── _SUCCESS │ │ │ │ └── part-00000 │ │ │ └── model-settings.json │ │ ├── mms │ │ │ ├── Makefile │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── agaricus.txt.test │ │ │ ├── agaricus.txt.train │ │ │ └── settings.json │ │ ├── model-repository │ │ │ ├── README.ipynb │ │ │ └── README.md │ │ ├── sklearn │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── inference-request.json │ │ │ ├── model-settings.json │ │ │ └── settings.json │ │ ├── tempo │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ 
└── model-settings.json │ │ └── xgboost │ │ │ ├── README.ipynb │ │ │ ├── README.md │ │ │ ├── agaricus.txt.test │ │ │ ├── agaricus.txt.train │ │ │ ├── model-settings.json │ │ │ ├── mushroom-xgboost.json │ │ │ └── settings.json │ ├── favicon.ico │ ├── index.md │ ├── reference │ │ ├── api │ │ │ ├── codecs.md │ │ │ ├── index.md │ │ │ ├── metrics.md │ │ │ ├── model.md │ │ │ └── types.md │ │ ├── cli.md │ │ ├── index.md │ │ ├── model-settings.md │ │ └── settings.md │ ├── references.bib │ ├── requirements.txt │ ├── runtimes │ │ ├── alibi-detect.md │ │ ├── alibi-explain.md │ │ ├── custom.md │ │ ├── huggingface.md │ │ ├── index.md │ │ ├── lightgbm.md │ │ ├── mlflow.md │ │ ├── mllib.md │ │ ├── sklearn.md │ │ └── xgboost.md │ └── user-guide │ │ ├── adaptive-batching.md │ │ ├── content-type.md │ │ ├── custom.md │ │ ├── deployment │ │ ├── index.md │ │ ├── kserve.md │ │ └── seldon-core.md │ │ ├── index.md │ │ ├── metrics.md │ │ ├── openapi.md │ │ └── parallel-inference.md ├── hack │ ├── activate-env.sh │ ├── build-env.sh │ ├── build-images.sh │ ├── build-saeid.sh │ ├── build-wheels.sh │ ├── generate-types.sh │ ├── generate_dotenv.py │ ├── templates │ │ └── BaseModel_root.jinja2 │ └── update-version.sh ├── licenses │ ├── license.txt │ ├── license_info.csv │ └── license_info.no_versions.csv ├── log │ └── .lock ├── mlserver │ ├── __init__.py │ ├── batch_processing.py │ ├── batching │ │ ├── __init__.py │ │ ├── adaptive.py │ │ ├── hooks.py │ │ ├── requests.py │ │ └── shape.py │ ├── cli │ │ ├── __init__.py │ │ ├── build.py │ │ ├── constants.py │ │ ├── init_project.py │ │ ├── main.py │ │ └── serve.py │ ├── cloudevents.py │ ├── codecs │ │ ├── __init__.py │ │ ├── base.py │ │ ├── base64.py │ │ ├── datetime.py │ │ ├── decorator.py │ │ ├── errors.py │ │ ├── lists.py │ │ ├── numpy.py │ │ ├── pandas.py │ │ ├── string.py │ │ └── utils.py │ ├── env.py │ ├── errors.py │ ├── grpc │ │ ├── __init__.py │ │ ├── converters.py │ │ ├── dataplane_pb2.py │ │ ├── dataplane_pb2.pyi │ │ ├── dataplane_pb2_grpc.py │ │ ├── interceptors.py │ │ ├── logging.py │ │ ├── model_repository.py │ │ ├── model_repository_pb2.py │ │ ├── model_repository_pb2.pyi │ │ ├── model_repository_pb2_grpc.py │ │ ├── server.py │ │ ├── servicers.py │ │ └── utils.py │ ├── handlers │ │ ├── __init__.py │ │ ├── custom.py │ │ ├── dataplane.py │ │ └── model_repository.py │ ├── kafka │ │ ├── __init__.py │ │ ├── errors.py │ │ ├── handlers.py │ │ ├── logging.py │ │ ├── message.py │ │ └── server.py │ ├── logging.py │ ├── metrics │ │ ├── __init__.py │ │ ├── context.py │ │ ├── errors.py │ │ ├── logging.py │ │ ├── prometheus.py │ │ ├── registry.py │ │ └── server.py │ ├── middleware.py │ ├── model.py │ ├── parallel │ │ ├── __init__.py │ │ ├── dispatcher.py │ │ ├── errors.py │ │ ├── logging.py │ │ ├── messages.py │ │ ├── model.py │ │ ├── pool.py │ │ ├── registry.py │ │ ├── utils.py │ │ └── worker.py │ ├── raw.py │ ├── registry.py │ ├── repository │ │ ├── __init__.py │ │ ├── factory.py │ │ ├── load.py │ │ └── repository.py │ ├── rest │ │ ├── __init__.py │ │ ├── app.py │ │ ├── endpoints.py │ │ ├── errors.py │ │ ├── logging.py │ │ ├── openapi.py │ │ ├── requests.py │ │ ├── responses.py │ │ ├── server.py │ │ └── utils.py │ ├── server.py │ ├── settings.py │ ├── types │ │ ├── __init__.py │ │ ├── base.py │ │ ├── dataplane.py │ │ └── model_repository.py │ ├── utils.py │ └── version.py ├── openapi │ ├── dataplane.json │ ├── dataplane.yaml │ ├── model_repository.json │ └── model_repository.yaml ├── proto │ ├── dataplane.proto │ └── model_repository.proto ├── requirements │ └── dev.txt 
├── runtimes │ ├── alibi-detect │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_alibi_detect │ │ │ ├── __init__.py │ │ │ ├── runtime.py │ │ │ └── version.py │ │ ├── requirements │ │ │ └── dev.txt │ │ ├── setup.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_drift_detector.py │ │ │ ├── test_outlier_detector.py │ │ │ ├── test_runtime.py │ │ │ └── testdata │ │ │ └── inference-request.json │ ├── alibi-explain │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_alibi_explain │ │ │ ├── __init__.py │ │ │ ├── alibi_dependency_reference.py │ │ │ ├── common.py │ │ │ ├── errors.py │ │ │ ├── explainers │ │ │ │ ├── __init__.py │ │ │ │ ├── black_box_runtime.py │ │ │ │ ├── integrated_gradients.py │ │ │ │ └── white_box_runtime.py │ │ │ ├── runtime.py │ │ │ └── version.py │ │ ├── requirements │ │ │ └── dev.txt │ │ ├── setup.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── helpers │ │ │ ├── __init__.py │ │ │ ├── metrics.py │ │ │ ├── run_async.py │ │ │ └── tf_model.py │ │ │ ├── test_alibi_runtime_base.py │ │ │ ├── test_black_box.py │ │ │ ├── test_common.py │ │ │ ├── test_integrated_gradients.py │ │ │ └── test_utils.py │ ├── huggingface │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_huggingface │ │ │ ├── __init__.py │ │ │ ├── codecs │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── conversation.py │ │ │ │ ├── image.py │ │ │ │ ├── json.py │ │ │ │ ├── jsonlist.py │ │ │ │ ├── numpylist.py │ │ │ │ ├── raw.py │ │ │ │ └── utils.py │ │ │ ├── common.py │ │ │ ├── errors.py │ │ │ ├── metadata.py │ │ │ ├── runtime.py │ │ │ ├── settings.py │ │ │ └── version.py │ │ ├── setup.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_codecs.py │ │ │ ├── test_codecs │ │ │ ├── __init__.py │ │ │ ├── test_base.py │ │ │ ├── test_conversation.py │ │ │ ├── test_image.py │ │ │ ├── test_json.py │ │ │ ├── test_jsonlist.py │ │ │ ├── test_numpylist.py │ │ │ └── test_raw.py │ │ │ ├── test_common.py │ │ │ ├── test_runtime.py │ │ │ ├── test_runtime_cases.py │ │ │ ├── test_tasks │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_audio_classification.py │ │ │ └── test_visual_question_anwsering.py │ │ │ ├── testdata │ │ │ ├── audio.mp3 │ │ │ ├── dogs.jpg │ │ │ ├── hancat.jpeg │ │ │ ├── image_base64.txt │ │ │ └── ugcat.jpeg │ │ │ └── utils.py │ ├── lightgbm │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_lightgbm │ │ │ ├── __init__.py │ │ │ ├── lightgbm.py │ │ │ └── version.py │ │ ├── setup.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_lightgbm.py │ │ │ └── testdata │ │ │ └── inference-request.json │ ├── mlflow │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_mlflow │ │ │ ├── __init__.py │ │ │ ├── codecs.py │ │ │ ├── metadata.py │ │ │ ├── runtime.py │ │ │ └── version.py │ │ ├── requirements │ │ │ └── dev.txt │ │ ├── setup.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── rest │ │ │ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_endpoints.py │ │ │ ├── test_invocations.py │ │ │ └── utils.py │ │ │ ├── test_codecs.py │ │ │ ├── test_metadata.py │ │ │ ├── test_runtime.py │ │ │ ├── testdata │ │ │ └── inference-request.json │ │ │ └── torch_fixtures.py │ ├── mllib │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_mllib │ │ │ ├── __init__.py │ │ │ ├── errors.py │ │ │ ├── mllib.py │ │ │ ├── utils.py │ │ │ └── version.py │ │ └── setup.py │ ├── sklearn │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_sklearn │ │ │ ├── __init__.py │ │ │ ├── sklearn.py │ │ │ └── version.py │ │ ├── setup.py │ │ └── tests │ │ 
│ ├── __init__.py │ │ │ ├── conftest.py │ │ │ ├── test_sklearn.py │ │ │ └── testdata │ │ │ └── inference-request.json │ └── xgboost │ │ ├── LICENSE │ │ ├── README.md │ │ ├── mlserver_xgboost │ │ ├── __init__.py │ │ ├── version.py │ │ └── xgboost.py │ │ ├── setup.py │ │ └── tests │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── test_xgboost.py │ │ └── testdata │ │ └── inference-request.json ├── setup.cfg ├── setup.py └── tests │ ├── __init__.py │ ├── batch_processing │ ├── __init__.py │ ├── conftest.py │ └── test_rest.py │ ├── batching │ ├── __init__.py │ ├── conftest.py │ ├── test_adaptive.py │ ├── test_hooks.py │ ├── test_requests.py │ └── test_shape.py │ ├── cli │ ├── __init__.py │ ├── conftest.py │ ├── test_build.py │ ├── test_build_cases.py │ ├── test_serve.py │ ├── test_start.py │ ├── test_start_cases.py │ └── test_version.py │ ├── codecs │ ├── __init__.py │ ├── test_base.py │ ├── test_base64.py │ ├── test_datetime.py │ ├── test_decorator.py │ ├── test_numpy.py │ ├── test_pandas.py │ ├── test_string.py │ └── test_utils.py │ ├── conftest.py │ ├── fixtures.py │ ├── grpc │ ├── __init__.py │ ├── conftest.py │ ├── test_codecs.py │ ├── test_converters.py │ ├── test_model_repository.py │ ├── test_servicers.py │ └── test_utils.py │ ├── handlers │ ├── __init__.py │ ├── conftest.py │ ├── test_custom.py │ ├── test_dataplane.py │ └── test_model_repository.py │ ├── kafka │ ├── __init__.py │ ├── conftest.py │ ├── test_handlers.py │ ├── test_server.py │ └── utils.py │ ├── metrics │ ├── __init__.py │ ├── conftest.py │ ├── test_context.py │ ├── test_custom.py │ ├── test_endpoint.py │ ├── test_endpoint_cases.py │ ├── test_grpc.py │ ├── test_prometheus.py │ ├── test_queue.py │ ├── test_registry.py │ ├── test_rest.py │ └── utils.py │ ├── parallel │ ├── __init__.py │ ├── conftest.py │ ├── test_errors.py │ ├── test_messages.py │ ├── test_model.py │ ├── test_pool.py │ ├── test_registry.py │ └── test_worker.py │ ├── repository │ ├── __init__.py │ ├── conftest.py │ ├── dummymdrepo.py │ ├── test_custom_md_repo.py │ ├── test_load.py │ └── test_repository.py │ ├── rest │ ├── __init__.py │ ├── conftest.py │ ├── test_codecs.py │ ├── test_custom.py │ ├── test_endpoints.py │ ├── test_openapi.py │ ├── test_server.py │ └── test_utils.py │ ├── test_cloudevents.py │ ├── test_env.py │ ├── test_logging.py │ ├── test_model.py │ ├── test_raw.py │ ├── test_registry.py │ ├── test_settings.py │ ├── test_types.py │ ├── test_utils.py │ ├── testdata │ ├── .test.env │ ├── batch_processing │ │ ├── invalid.txt │ │ ├── invalid_among_many.txt │ │ ├── many.txt │ │ ├── single.txt │ │ └── single_with_id.txt │ ├── env_models.py │ ├── environment.yml │ ├── grpc │ │ └── model-infer-request.json │ ├── inference-request-with-output.json │ ├── inference-request.json │ ├── inference-response.json │ ├── metadata-model-response.json │ ├── metadata-server-response.json │ ├── model-settings.json │ ├── requirements.txt │ ├── settings-custom-md-repo.json │ └── settings.json │ └── utils.py ├── README.md ├── data ├── configs │ ├── accuracies.yaml │ ├── final │ │ ├── sample-audio-qa.yaml │ │ ├── sample-audio-sent.yaml │ │ ├── sample-nlp.yaml │ │ ├── sample-sum-qa.yaml │ │ ├── video-1.yaml │ │ ├── video-10.yaml │ │ ├── video-11.yaml │ │ ├── video-12.yaml │ │ ├── video-13.yaml │ │ ├── video-15.yaml │ │ ├── video-16.yaml │ │ ├── video-17.yaml │ │ ├── video-18.yaml │ │ ├── video-2.yaml │ │ ├── video-20.yaml │ │ ├── video-3.yaml │ │ ├── video-5.yaml │ │ ├── video-6.yaml │ │ ├── video-7.yaml │ │ ├── video-8.yaml │ │ ├── video-mul-1.yaml │ │ ├── 
video-mul-10.yaml │ │ ├── video-mul-11.yaml │ │ ├── video-mul-12.yaml │ │ ├── video-mul-13.yaml │ │ ├── video-mul-15.yaml │ │ ├── video-mul-16.yaml │ │ ├── video-mul-17.yaml │ │ ├── video-mul-18.yaml │ │ ├── video-mul-2.yaml │ │ ├── video-mul-20.yaml │ │ ├── video-mul-3.yaml │ │ ├── video-mul-5.yaml │ │ ├── video-mul-6.yaml │ │ ├── video-mul-7.yaml │ │ └── video-mul-8.yaml │ ├── models-metadata.yaml │ ├── pipeline-simulation │ │ ├── audio-qa.yaml │ │ └── video.yaml │ └── profiling │ │ └── nodes │ │ ├── 1-config-audio.yaml │ │ ├── 1-config-qa.yaml │ │ ├── 2-config-audio.yaml │ │ ├── 2-config-sent.yaml │ │ ├── 3-config-li.yaml │ │ ├── 3-config-sum.yaml │ │ ├── 3-config-trans.yaml │ │ ├── 4-config-qa.yaml │ │ ├── 5-config-resnet-human.yaml │ │ ├── 5-config-yolo.yaml │ │ ├── 6-mlserver-mock.yaml │ │ ├── 7-intra-false-audio.yaml │ │ ├── 7-intra-false-qa.yaml │ │ ├── 7-intra-false-resnet-human.yaml │ │ ├── 7-intra-false-sent.yaml │ │ ├── 7-intra-false-sum.yaml │ │ ├── 7-intra-false-trans.yaml │ │ ├── 7-intra-false-yolo.yaml │ │ ├── 7-intra-true-audio.yaml │ │ ├── 7-intra-true-qa.yaml │ │ ├── 7-intra-true-resnet-human.yaml │ │ ├── 7-intra-true-sent.yaml │ │ ├── 7-intra-true-sum.yaml │ │ ├── 7-intra-true-trans.yaml │ │ ├── 7-intra-true-yolo.yaml │ │ └── throughput-test.yaml └── figures │ ├── .gitkeep │ ├── gorubi-scalibility.pdf │ ├── latency-cdf.pdf │ ├── metaseries-14-audio-qa.pdf │ ├── metaseries-14-audio-sent.pdf │ ├── metaseries-14-nlp.pdf │ ├── metaseries-14-sum-qa.pdf │ ├── metaseries-14-video.pdf │ ├── metaseries-18-audio-qa.pdf │ ├── metaseries-18-audio-sent.pdf │ ├── metaseries-18-nlp.pdf │ ├── metaseries-18-sum-qa.pdf │ ├── metaseries-18-video.pdf │ ├── metaseries-20-video.pdf │ ├── metaseries-21-video.pdf │ ├── objective-preferences-mult.pdf │ ├── objective-preferences.pdf │ ├── patterns.pdf │ ├── predictor-abelation-sla.pdf │ └── two_metrics.pdf ├── doc-figs ├── artifact-eval.png ├── mapping.png ├── paper-figure.png ├── pipelines-paper.png ├── revision-artifact-eval.png └── revision-paper-figure.png ├── experiments ├── __init__.py ├── pipeline-simulation │ ├── mock_tests.py │ ├── notebooks │ │ └── video-pipeline.ipynb │ └── static_search_space.py ├── profiling │ ├── __init__.py │ └── single-node │ │ ├── __init__.py │ │ ├── model_iter.py │ │ ├── notebooks │ │ ├── intera-test.ipynb │ │ ├── series-100-final-audio.ipynb │ │ ├── series-100-yolo.ipynb │ │ ├── series-200-throughput-test.ipynb │ │ ├── series-201-resnet.ipynb │ │ ├── series-70-yolo.ipynb │ │ ├── series-71-yolo-final.ipynb │ │ ├── series-72-resnet-final.ipynb │ │ ├── series-73-1-throughput-yolo.ipynb │ │ ├── series-73-2-throughput-resnet.ipynb │ │ ├── series-74-yolo.ipynb │ │ ├── series-75-resnet.ipynb │ │ ├── series-81-queuing-test.ipynb │ │ ├── series-85-final-audio.ipynb │ │ ├── series-86-final-sent.ipynb │ │ ├── series-91.ipynb │ │ ├── series-97-sum.ipynb │ │ ├── series-98-li.ipynb │ │ ├── series-99-trans.ipynb │ │ └── table-1.ipynb │ │ ├── report.md │ │ └── run.sh ├── runner │ ├── __init__.py │ ├── adaptation_runner.py │ ├── experiments_runner.py │ ├── notebooks │ │ ├── Jsys-reviewers-revision.ipynb │ │ ├── Jsys-reviewers.ipynb │ │ ├── paper-fig10-e2e-audio-sent.ipynb │ │ ├── paper-fig11-e2e-sum-qa.ipynb │ │ ├── paper-fig12-e2e-nlp.ipynb │ │ ├── paper-fig13-gurobi-decision-latency.ipynb │ │ ├── paper-fig14-objecitve-preference.ipynb │ │ ├── paper-fig15-comparison-cdf.ipynb │ │ ├── paper-fig16-predictor-abelation.ipynb │ │ ├── paper-fig2-movivation-latency-accuracy-throughput.ipynb │ │ ├── paper-fig7-patterns.ipynb │ │ ├── 
paper-fig8-e2e-video.ipynb │ │ ├── paper-fig9-e2e-audio-qa.ipynb │ │ ├── paper-revision-fig10-e2e-audio-sent.ipynb │ │ ├── paper-revision-fig11-e2e-sum-qa.ipynb │ │ ├── paper-revision-fig12-e2e-nlp.ipynb │ │ ├── paper-revision-fig14-objecitve-preference.ipynb │ │ ├── paper-revision-fig15-comparison-cdf.ipynb │ │ ├── paper-revision-fig8-e2e-video.ipynb │ │ └── paper-revision-fig9-e2e-audio-qa.ipynb │ ├── run-failed-experiment.sh │ ├── run-revision-failed-experiment.sh │ ├── run-revision.sh │ ├── run.sh │ ├── runner_script.py │ ├── simulation_runner.py │ ├── simulation_runner_testing_groubi.py │ ├── trace.ipynb │ └── trace.py └── utils │ ├── __init__.py │ ├── constants.py │ ├── drawing.py │ ├── logger.py │ ├── misc.py │ ├── obj.py │ ├── parser.py │ ├── pipeline_operations.py │ ├── prometheus.py │ ├── simulation_operations.py │ └── workload.py ├── infrastructure ├── README.md ├── automated.md ├── build.sh ├── hack │ ├── disable_firewall.sh │ ├── download_data.sh │ ├── gurobi.sh │ ├── jupyters.sh │ ├── kubernetes.sh │ ├── repos.sh │ ├── storage.sh │ ├── utilities.sh │ └── zsh.sh ├── istio-monitoring.yaml ├── manual-installation │ ├── chameleon-lease.md │ ├── custom-mlserver.md │ ├── guide-prometheus.md │ ├── guide-seldon.md │ ├── gurobi-installation.md │ ├── multi-node.md │ ├── setup-chameleon-k8s.md │ ├── setup-istio.md │ ├── setup-prometeus-monitoring.md │ ├── setup-seldon-core-installation.md │ ├── setup-storage.md │ └── test_installation.md └── manual.md ├── load_tester ├── .gitignore ├── README.md ├── barazmoon │ ├── __init__.py │ ├── main.py │ ├── mlserver.py │ └── twitter │ │ ├── __init__.py │ │ ├── loader.py │ │ ├── workload.tbz2 │ │ └── workload.txt ├── gitignore └── setup.py ├── models-to-minio ├── __init__.py ├── model-saver-resnet.py ├── model-saver-transformers.py ├── model-saver-yolo.py └── readme.md ├── num-parameters ├── huggingface.py ├── torchvision-models.py └── yolov5-yolo.py ├── optimizer ├── __init__.py ├── adapter.py ├── models.py ├── optimizer.py └── sim_adapter.py ├── pipelines ├── README.md ├── __init__.py ├── mlserver-centralized │ ├── audio-qa │ │ ├── README.md │ │ ├── __init__.py │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── audio │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── client-async-grpc.py │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── rq.py │ │ │ │ ├── settings.json │ │ │ │ └── test.py │ │ │ └── nlp-qa │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline.yaml │ │ │ └── stress.bash │ ├── audio-sent │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ ├── seldon-core-version │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ │ ├── audio │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── 
input-sample-shape.json │ │ │ │ │ ├── input-sample.json │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ │ └── nlp-sent │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample.txt │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ ├── pipeline.yaml │ │ │ └── stress.bash │ │ └── single-node.py │ ├── mock │ │ ├── README.md │ │ ├── __init__.py │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── client-async-grpc.py │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── node-one │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build.sh │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ └── node-two │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline.yaml │ │ │ └── stress.bash │ ├── nlp │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ ├── seldon-core-version │ │ │ ├── input-sample-long.txt │ │ │ ├── input-sample.txt │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ │ ├── nlp-li │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample-long.txt │ │ │ │ │ ├── input-sample-shape.json │ │ │ │ │ ├── input-sample.txt │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ │ ├── nlp-sum │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample-long.txt │ │ │ │ │ ├── input-sample-shape.json │ │ │ │ │ ├── input-sample.txt │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ ├── settings.json │ │ │ │ │ └── triton.py │ │ │ │ └── nlp-trans │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample-long.txt │ │ │ │ │ ├── input-sample-shape.json │ │ │ │ │ ├── input-sample.txt │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ 
├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ ├── pipeline-with-models.yaml │ │ │ └── pipeline.yaml │ │ └── single-node.py │ ├── sum-qa │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── input-sample-long.txt │ │ │ ├── input-sample.txt │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── nlp-qa │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── settings.json │ │ │ │ └── triton.py │ │ │ └── nlp-sum │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-long.txt │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ └── pipeline.yaml │ └── video │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ ├── input-sample-shape.json │ │ ├── input-sample.JPEG │ │ ├── load-test-grpc.py │ │ ├── nodes │ │ ├── resnet-human-debug │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build.sh │ │ │ ├── convertor.py │ │ │ ├── input-sample.JPEG │ │ │ ├── input-sample.npy │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── models.py │ │ │ ├── node-template.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── shape-fixer.py │ │ ├── resnet-human │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build-with-model.sh │ │ │ ├── build.sh │ │ │ ├── convertor.py │ │ │ ├── input-sample.JPEG │ │ │ ├── input-sample.npy │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── models.py │ │ │ ├── node-template-with-model.yaml │ │ │ ├── node-template.yaml │ │ │ ├── node-with-model.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── shape-fixer.py │ │ ├── yolo-debug │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build.sh │ │ │ ├── input-sample-shape.json │ │ │ ├── input-sample.JPEG │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── model_saver.py │ │ │ ├── node-template.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── yolo-model.py │ │ └── yolo │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build-with-model.sh │ │ │ ├── build.sh │ │ │ ├── input-sample-shape.json │ │ │ ├── input-sample.JPEG │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── model_saver.py │ │ │ ├── node-template-with-model.yaml │ │ │ ├── node-template.yaml │ │ │ ├── node-with-model.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── yolo-model.py │ │ └── pipeline.yaml ├── mlserver-final │ ├── Dockerfile │ ├── Dockerfile.dockerignore │ ├── README.md │ ├── __init__.py │ ├── audio-qa │ │ ├── README.md │ │ ├── __init__.py │ │ ├── 
notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── audio │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── client-async-grpc.py │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── rq.py │ │ │ │ ├── settings.json │ │ │ │ └── test.py │ │ │ └── nlp-qa │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline-router.yaml │ │ │ ├── pipeline-seldon.yaml │ │ │ └── stress.bash │ ├── audio-sent │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ ├── seldon-core-version │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ │ ├── audio │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample-shape.json │ │ │ │ │ ├── input-sample.json │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ │ └── nlp-sent │ │ │ │ │ ├── Dockerfile │ │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ │ ├── build-with-model.sh │ │ │ │ │ ├── build.sh │ │ │ │ │ ├── input-sample.txt │ │ │ │ │ ├── load-test-grpc.py │ │ │ │ │ ├── model-settings.json │ │ │ │ │ ├── models.py │ │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ │ ├── node-template.yaml │ │ │ │ │ ├── node-with-model.yaml │ │ │ │ │ ├── node.yaml │ │ │ │ │ ├── readme.md │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── settings.json │ │ │ ├── pipeline-router.yaml │ │ │ ├── pipeline-seldon.yaml │ │ │ └── stress.bash │ │ └── single-node.py │ ├── mock │ │ ├── README.md │ │ ├── __init__.py │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── node-one │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build.sh │ │ │ │ ├── client-async-grpc.py │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── settings.json │ │ │ │ └── test.py │ │ │ └── node-two │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline-router.yaml │ │ │ └── stress.bash │ ├── nlp │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── input-sample-long.txt │ │ │ ├── input-sample.txt │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── nlp-li │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ 
├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-long.txt │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── nlp-sum-startup-test │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-long.txt │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── settings.json │ │ │ │ └── triton.py │ │ │ ├── nlp-sum │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-long.txt │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── settings.json │ │ │ │ └── triton.py │ │ │ └── nlp-trans │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline-router.yaml │ │ │ └── pipeline-seldon.yaml │ ├── sum-qa │ │ ├── README.md │ │ ├── input-sample-short.txt │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ │ ├── input-sample-short.txt │ │ │ ├── input-sample.txt │ │ │ ├── load-test-grpc.py │ │ │ ├── nodes │ │ │ ├── nlp-qa │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ ├── settings.json │ │ │ │ └── triton.py │ │ │ └── nlp-sum │ │ │ │ ├── Dockerfile │ │ │ │ ├── Dockerfile.dockerignore │ │ │ │ ├── build-with-model.sh │ │ │ │ ├── build.sh │ │ │ │ ├── input-sample-shape.json │ │ │ │ ├── input-sample-short.txt │ │ │ │ ├── input-sample.txt │ │ │ │ ├── load-test-grpc.py │ │ │ │ ├── model-settings.json │ │ │ │ ├── models.py │ │ │ │ ├── node-template-with-model.yaml │ │ │ │ ├── node-template.yaml │ │ │ │ ├── node-with-model.yaml │ │ │ │ ├── node.yaml │ │ │ │ ├── readme.md │ │ │ │ ├── requirements.txt │ │ │ │ └── settings.json │ │ │ ├── pipeline-router.yaml │ │ │ └── pipeline-seldon.yaml │ └── video │ │ ├── README.md │ │ ├── notebook-version.ipynb │ │ └── seldon-core-version │ │ ├── input-sample-shape.json │ │ ├── input-sample.JPEG │ │ ├── load-test-grpc.py │ │ ├── nodes │ │ ├── resnet-human-debug │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ 
├── README.md │ │ │ ├── build.sh │ │ │ ├── convertor.py │ │ │ ├── input-sample.JPEG │ │ │ ├── input-sample.npy │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── models.py │ │ │ ├── node-template.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── shape-fixer.py │ │ ├── resnet-human │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build-with-model.sh │ │ │ ├── build.sh │ │ │ ├── convertor.py │ │ │ ├── input-sample.JPEG │ │ │ ├── input-sample.npy │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── models.py │ │ │ ├── node-template-with-model.yaml │ │ │ ├── node-template.yaml │ │ │ ├── node-with-model.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── shape-fixer.py │ │ ├── yolo-debug │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build.sh │ │ │ ├── input-sample-shape.json │ │ │ ├── input-sample.JPEG │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── node-template.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── yolo-model.py │ │ └── yolo │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile.dockerignore │ │ │ ├── README.md │ │ │ ├── build-with-model.sh │ │ │ ├── build.sh │ │ │ ├── input-sample-shape.json │ │ │ ├── input-sample.JPEG │ │ │ ├── load-test-grpc.py │ │ │ ├── model-settings.json │ │ │ ├── model_saver.py │ │ │ ├── node-template-with-model.yaml │ │ │ ├── node-template.yaml │ │ │ ├── node-with-model.yaml │ │ │ ├── node.yaml │ │ │ ├── requirements.txt │ │ │ ├── settings.json │ │ │ └── yolo-model.py │ │ ├── pipeline-router-debug.yaml │ │ ├── pipeline-router.yaml │ │ └── pipeline-seldon.yaml ├── queue │ ├── Dockerfile │ ├── Dockerfile.dockerignore │ ├── build.sh │ ├── client-async-grpc.py │ ├── load-test-grpc.py │ ├── model-settings.json │ ├── models.py │ ├── node-template.yaml │ ├── node.yaml │ ├── queue-size-change.py │ ├── readme.md │ ├── requirements.txt │ └── settings.json └── router │ ├── Dockerfile │ ├── Dockerfile.dockerignore │ ├── build.sh │ ├── client-async-grpc.py │ ├── load-test-grpc.py │ ├── model-settings.json │ ├── models.py │ ├── node-template.yaml │ ├── node.yaml │ ├── readme.md │ ├── requirements.txt │ ├── settings.json │ └── stand-alone.py ├── prediction-modules ├── evaluation.py └── lstm-module │ ├── __init__.py │ ├── test.py │ ├── train.py │ ├── workload.tbz2 │ └── workload.txt ├── recordings ├── experiment.yml └── log.yml ├── requirements.txt └── twitter-trace-preprocess ├── builder_req.py ├── data_loader.py ├── plotter.py ├── twitter-2018-04-25.tar.1 ├── workload_create.txt └── workload_delete.txt /MLServer/.dockerignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | *.pyc 8 | *.pyo 9 | *.pyd 10 | bin 11 | 12 | # Mac file system 13 | **/.DS_Store 14 | 15 | # Python dev 16 | __pycache__ 17 | .Python 18 | env 19 | pip-log.txt 20 | pip-delete-this-directory.txt 21 | .mypy_cache 22 | eggs/ 23 | .eggs/ 24 | *.egg-info/ 25 | ./pytest_cache 26 | .tox 27 | build/ 28 | dist/ 29 | venv/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /MLServer/.github/dependabot.yml: 
--------------------------------------------------------------------------------
version: 2

updates:
  - package-ecosystem: "pip"
    directory: "/"
    # Check PyPI for updates every week
    schedule:
      interval: "weekly"
    reviewers:
      - adriangonz

  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "weekly"
    reviewers:
      - adriangonz
-------------------------------------------------------------------------------- /MLServer/.github/release.yml: --------------------------------------------------------------------------------
changelog:
  exclude:
    authors:
      - dependabot
-------------------------------------------------------------------------------- /MLServer/.package.lock: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/.package.lock
-------------------------------------------------------------------------------- /MLServer/.readthedocs.yaml: --------------------------------------------------------------------------------
# .readthedocs.yaml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py

# Optionally build your docs in additional formats such as PDF
formats:
  - pdf

# Optionally set the version of Python and requirements required to build your docs
python:
  version: 3.8
  install:
    - method: pip
      path: .
    - requirements: docs/requirements.txt
-------------------------------------------------------------------------------- /MLServer/MANIFEST.in: --------------------------------------------------------------------------------
include openapi/*.json
-------------------------------------------------------------------------------- /MLServer/OWNERS: --------------------------------------------------------------------------------
approvers:
  - cliveseldon
  - adriangonz
  - RafalSkolasinski
reviewers:
  - cliveseldon
  - adriangonz
  - RafalSkolasinski
-------------------------------------------------------------------------------- /MLServer/OWNERS_ALIASES: --------------------------------------------------------------------------------
approvers:
  - cliveseldon
  - adriangonz
  - RafalSkolasinski
reviewers:
  - cliveseldon
  - adriangonz
  - RafalSkolasinski
-x "$$(command -v k6)" ]];\ 6 | then \ 7 | echo "k6 command not found!"; \ 8 | echo "To install k6, please check: "; \ 9 | echo " https://k6.io/docs/getting-started/installation/"; \ 10 | fi 11 | 12 | generate: 13 | python generator.py 14 | 15 | benchmark-rest: 16 | k6 run \ 17 | -e MLSERVER_HOST=0.0.0.0 \ 18 | -e MLSERVER_HTTP_PORT=8080 \ 19 | -e MLSERVER_GRPC_PORT=8081 \ 20 | scenarios/inference-rest.js 21 | 22 | benchmark-grpc: 23 | k6 run \ 24 | -e MLSERVER_HOST=localhost \ 25 | -e MLSERVER_HTTP_PORT=8080 \ 26 | -e MLSERVER_GRPC_PORT=8081 \ 27 | scenarios/inference-grpc.js 28 | -------------------------------------------------------------------------------- /MLServer/benchmarking/clone-models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | _copy() { 4 | local _src=$1 5 | local _newModelName=$2 6 | local _modelName=$(basename $_src) 7 | local _dst="$(dirname $_src)/$_newModelName" 8 | 9 | cp -r $_src $_dst 10 | sed -i "s/$_modelName/$_newModelName/" "$_dst/model-settings.json" 11 | } 12 | 13 | _main() { 14 | local _copies=10 15 | 16 | for _i in {1..$_copies}; do 17 | _copy "$PWD/testserver/models/iris" "iris-$_i" 18 | _copy "$PWD/testserver/models/sum-model" "sum-model-$_i" 19 | done 20 | } 21 | 22 | _main 23 | -------------------------------------------------------------------------------- /MLServer/benchmarking/common/helpers.js: -------------------------------------------------------------------------------- 1 | export function readTestData(name) { 2 | return { 3 | rest: JSON.parse(open(`../data/${name}/rest-requests.json`)), 4 | grpc: JSON.parse(open(`../data/${name}/grpc-requests.json`)), 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /MLServer/benchmarking/data/iris/grpc-requests.json: -------------------------------------------------------------------------------- 1 | {"model_name": "iris", "inputs": [{"name": "input-0", "shape": [1, 4], "datatype": "FP32", "contents": {"fp32_contents": [1, 2, 3, 4]}}]} 2 | -------------------------------------------------------------------------------- /MLServer/benchmarking/data/iris/rest-requests.json: -------------------------------------------------------------------------------- 1 | {"inputs": [{"name": "input-0", "shape": [1, 4], "datatype": "FP32", "data": [1, 2, 3, 4]}]} 2 | -------------------------------------------------------------------------------- /MLServer/benchmarking/testserver/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": false, 3 | "load_models_at_startup": false, 4 | "parallel_workers": 2, 5 | "metrics_endpoint": null 6 | } 7 | -------------------------------------------------------------------------------- /MLServer/docs/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | 2 | /* Hide first elem of nav bar */ 3 | .md-tabs__list > li:first-child { 4 | display: none; 5 | } 6 | 7 | dt { 8 | display: table; 9 | margin: 6px 0; 10 | margin-top: 6px; 11 | font-size: 90%; 12 | line-height: normal; 13 | background: #e7f2fa; 14 | color: #2980B9; 15 | border-top: solid 3px #6ab0de; 16 | padding: 6px; 17 | position: relative; 18 | } 19 | 20 | 21 | -------------------------------------------------------------------------------- /MLServer/docs/changelog.md: -------------------------------------------------------------------------------- 1 | ```{include} ../CHANGELOG.md 2 | :relative-docs: ./docs/ 3 | ``` 
-------------------------------------------------------------------------------- /MLServer/docs/_static/css/custom.css: --------------------------------------------------------------------------------
/* Hide first elem of nav bar */
.md-tabs__list > li:first-child {
  display: none;
}

dt {
  display: table;
  margin: 6px 0;
  margin-top: 6px;
  font-size: 90%;
  line-height: normal;
  background: #e7f2fa;
  color: #2980B9;
  border-top: solid 3px #6ab0de;
  padding: 6px;
  position: relative;
}
-------------------------------------------------------------------------------- /MLServer/docs/changelog.md: --------------------------------------------------------------------------------
```{include} ../CHANGELOG.md
:relative-docs: ./docs/
```
-------------------------------------------------------------------------------- /MLServer/docs/examples/Makefile: --------------------------------------------------------------------------------
.SUFFIXES: .ipynb .md

EXAMPLES := $(wildcard */*.ipynb)
EXAMPLES_MD=$(patsubst %.ipynb, %.md, $(EXAMPLES))

all: $(EXAMPLES_MD)

.ipynb.md:
	jupyter nbconvert \
		$< \
		--ClearOutputPreprocessor.enabled=True \
		--to markdown \
		--output $(notdir $@)

.PHONY: all
-------------------------------------------------------------------------------- /MLServer/docs/examples/alibi-detect/.gitignore: --------------------------------------------------------------------------------
alibi-detector-artifacts/*
!alibi-detector-artifacts/.gitkeep
-------------------------------------------------------------------------------- /MLServer/docs/examples/alibi-detect/alibi-detector-artifacts/.gitkeep: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/docs/examples/alibi-detect/alibi-detector-artifacts/.gitkeep
-------------------------------------------------------------------------------- /MLServer/docs/examples/alibi-detect/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "income-tabular-drift",
  "implementation": "mlserver_alibi_detect.AlibiDetectRuntime",
  "parameters": {
    "uri": "./alibi-detector-artifacts",
    "version": "v0.1.0",
    "extra": {
      "predict_parameters": {
        "drift_type": "feature"
      }
    }
  }
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/alibi-detect/settings.json: --------------------------------------------------------------------------------
{
  "debug": "true"
}
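Once served, the drift detector behaves like any other V2 model: a request carries a batch of rows and the response carries the drift metrics computed by alibi-detect. A hedged sketch of querying it (the feature count and values below are illustrative placeholders, not the income dataset's real schema):

```python
import requests

# Illustrative tabular batch; the real income detector expects its own schema.
rows = [
    [52, 4, 0, 2, 8, 4, 2, 0, 0, 0, 60, 9],
    [31, 2, 1, 5, 1, 3, 4, 1, 0, 0, 40, 9],
]

payload = {
    "inputs": [
        {
            "name": "predict",
            "shape": [len(rows), len(rows[0])],
            "datatype": "FP32",
            "data": [value for row in rows for value in row],
        }
    ]
}

response = requests.post(
    "http://localhost:8080/v2/models/income-tabular-drift/infer",
    json=payload,
)
# The output tensors wrap the detector's drift statistics (p-values, etc.).
print(response.json())
```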
"mlserver_alibi_explain.AlibiExplainRuntime", 4 | "parallel_workers": 0, 5 | "parameters": { 6 | "uri": "./data/mnist_anchor_image", 7 | "version": "v0.1.0", 8 | "extra": { 9 | "explainer_type": "anchor_image", 10 | "infer_uri": "http://localhost:42315/v2/models/test-pytorch-mnist/infer" 11 | } 12 | } 13 | } 14 | 15 | -------------------------------------------------------------------------------- /MLServer/docs/examples/alibi-explain/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "true" 3 | } 4 | -------------------------------------------------------------------------------- /MLServer/docs/examples/conda/Makefile: -------------------------------------------------------------------------------- 1 | old-sklearn.tar.gz: environment.yml 2 | conda env create --force -f environment.yml 3 | conda pack --force -n old-sklearn -o old-sklearn.tar.gz 4 | conda env remove -n old-sklearn 5 | 6 | -------------------------------------------------------------------------------- /MLServer/docs/examples/conda/environment.yml: -------------------------------------------------------------------------------- 1 | 2 | name: old-sklearn 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python == 3.8 7 | - scikit-learn == 0.24.2 8 | - joblib == 0.17.0 9 | - requests 10 | - pip 11 | - pip: 12 | - mlserver == 1.1.0 13 | - mlserver-sklearn == 1.1.0 14 | -------------------------------------------------------------------------------- /MLServer/docs/examples/conda/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mnist-svm", 3 | "implementation": "mlserver_sklearn.SKLearnModel" 4 | } 5 | -------------------------------------------------------------------------------- /MLServer/docs/examples/content-type/model-settings.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "name": "content-type-example", 4 | "implementation": "runtime.EchoRuntime", 5 | "inputs": [ 6 | { 7 | "name": "metadata-np", 8 | "datatype": "INT32", 9 | "shape": [2, 2], 10 | "parameters": { 11 | "content_type": "np" 12 | } 13 | }, 14 | { 15 | "name": "metadata-str", 16 | "datatype": "BYTES", 17 | "shape": [11], 18 | "parameters": { 19 | "content_type": "str" 20 | } 21 | } 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /MLServer/docs/examples/custom-json/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "json-hello-world", 3 | "implementation": "jsonmodels.JsonHelloWorldModel" 4 | } 5 | -------------------------------------------------------------------------------- /MLServer/docs/examples/custom-json/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "true" 3 | } 4 | -------------------------------------------------------------------------------- /MLServer/docs/examples/custom/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "numpyro-divorce", 3 | "implementation": "models.NumpyroModel", 4 | "parameters": { 5 | "uri": "./numpyro-divorce.json" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /MLServer/docs/examples/custom/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.22.4 2 | numpyro==0.8.0 3 | 
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom-json/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "json-hello-world",
  "implementation": "jsonmodels.JsonHelloWorldModel"
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom-json/settings.json: --------------------------------------------------------------------------------
{
  "debug": "true"
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "numpyro-divorce",
  "implementation": "models.NumpyroModel",
  "parameters": {
    "uri": "./numpyro-divorce.json"
  }
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom/requirements.txt: --------------------------------------------------------------------------------
numpy==1.22.4
numpyro==0.8.0
jax==0.2.24
jaxlib==0.3.7
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom/seldondeployment.yaml: --------------------------------------------------------------------------------
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
  name: numpyro-model
spec:
  protocol: v2
  predictors:
    - name: default
      graph:
        name: numpyro-divorce
        type: MODEL
      componentSpecs:
        - spec:
            containers:
              - name: numpyro-divorce
                image: my-custom-numpyro-server:0.1.0
-------------------------------------------------------------------------------- /MLServer/docs/examples/custom/settings.json: --------------------------------------------------------------------------------
{
  "debug": "true"
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/huggingface/.gitignore: --------------------------------------------------------------------------------
mlruns
-------------------------------------------------------------------------------- /MLServer/docs/examples/kafka/.gitignore: --------------------------------------------------------------------------------
kafka_*
-------------------------------------------------------------------------------- /MLServer/docs/examples/kafka/inference-request.json: --------------------------------------------------------------------------------
{
  "id": "request-0",
  "inputs": [
    {
      "name": "predict",
      "shape": [3],
      "datatype": "INT32",
      "data": [0.0, 0.0, 1.0, 11.0, 14.0, 15.0, 3.0, 0.0, 0.0, 1.0, 13.0, 16.0, 12.0,
               16.0, 8.0, 0.0, 0.0, 8.0, 16.0, 4.0, 6.0, 16.0, 5.0, 0.0, 0.0, 5.0,
               15.0, 11.0, 13.0, 14.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 16.0, 13.0, 0.0,
               0.0, 0.0, 0.0, 0.0, 13.0, 16.0, 16.0, 6.0, 0.0, 0.0, 0.0, 0.0, 16.0,
               16.0, 16.0, 7.0, 0.0, 0.0, 0.0, 0.0, 11.0, 13.0, 12.0, 1.0, 0.0]
    }
  ]
}
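With `kafka_enabled` switched on (see the `settings.json` below), MLServer also consumes inference requests from a Kafka topic and publishes responses to another. A hedged sketch of producing the request above, assuming a local broker, the `kafka-python` client, and MLServer's default topic and header names (`mlserver-input` and the `mlserver-model` routing header), all of which may differ per configuration:

```python
import json

from kafka import KafkaProducer  # pip install kafka-python

producer = KafkaProducer(
    bootstrap_servers="localhost:9092",
    value_serializer=lambda v: json.dumps(v).encode("utf-8"),
)

with open("inference-request.json") as f:
    inference_request = json.load(f)

# The target model is selected through a message header; both the topic
# and header names here are assumed defaults.
producer.send(
    "mlserver-input",
    value=inference_request,
    headers=[("mlserver-model", b"mnist-svm")],
)
producer.flush()
```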
-------------------------------------------------------------------------------- /MLServer/docs/examples/kafka/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "mnist-svm",
  "implementation": "mlserver_sklearn.SKLearnModel",
  "parameters": {
    "uri": "./mnist-svm.joblib",
    "version": "v0.1.0"
  }
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/kafka/settings.json: --------------------------------------------------------------------------------
{
  "debug": "true",
  "kafka_enabled": "true"
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/lightgbm/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "iris-lgb",
  "implementation": "mlserver_lightgbm.LightGBMModel",
  "parameters": {
    "uri": "./iris-lightgbm.bst",
    "version": "v0.1.0"
  }
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/lightgbm/settings.json: --------------------------------------------------------------------------------
{
  "debug": "true"
}
-------------------------------------------------------------------------------- /MLServer/docs/examples/mlflow/.gitignore: --------------------------------------------------------------------------------
mlruns
-------------------------------------------------------------------------------- /MLServer/docs/examples/mlflow/model-settings.json: --------------------------------------------------------------------------------
{
  "name": "wine-classifier",
  "implementation": "mlserver_mlflow.MLflowRuntime",
  "parameters": {
    "uri": "./mlruns/0/6cd26d62883641188b4f7616725ed838/artifacts/model"
  }
}
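The `uri` above points into an `mlruns` directory produced by the example's `src/train.py`. A hedged sketch of how such an artifact is typically logged with MLflow (the model and dataset here are illustrative stand-ins for the example's training script; the run ID in the path is generated by MLflow):

```python
import mlflow
import mlflow.sklearn
from sklearn.datasets import load_wine
from sklearn.linear_model import ElasticNet

X, y = load_wine(return_X_y=True)

with mlflow.start_run():
    model = ElasticNet(alpha=0.5, l1_ratio=0.5)
    model.fit(X, y)
    # Creates ./mlruns/<experiment>/<run_id>/artifacts/model, which is
    # the layout that model-settings.json points at.
    mlflow.sklearn.log_model(model, artifact_path="model")
```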
https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/docs/examples/mllib/metadata/.part-00000.crc -------------------------------------------------------------------------------- /MLServer/docs/examples/mllib/metadata/_SUCCESS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/docs/examples/mllib/metadata/_SUCCESS -------------------------------------------------------------------------------- /MLServer/docs/examples/mllib/metadata/part-00000: -------------------------------------------------------------------------------- 1 | {"class":"org.apache.spark.mllib.classification.LogisticRegressionModel","version":"1.0","numFeatures":692,"numClasses":2} 2 | -------------------------------------------------------------------------------- /MLServer/docs/examples/mllib/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "spark-mllib", 3 | "implementation": "mlserver_mllib.MLlibModel", 4 | "parameters": { 5 | "uri": "./", 6 | "format": "LogisticRegression", 7 | "version": "v0.1.0" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /MLServer/docs/examples/mms/Makefile: -------------------------------------------------------------------------------- 1 | upload: 2 | gsutil cp -r models/* gs://seldon-models/mlserver/mms 3 | gsutil cp -r models/mnist-svm gs://seldon-models/mlserver/mms-sklearn 4 | gsutil cp -r models/mushroom-xgboost gs://seldon-models/mlserver/mms-xgboost 5 | 6 | -------------------------------------------------------------------------------- /MLServer/docs/examples/mms/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "true" 3 | } 4 | -------------------------------------------------------------------------------- /MLServer/docs/examples/sklearn/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "request-0", 3 | "inputs": [ 4 | { 5 | "name": "predict", 6 | "shape": [1, 64], 7 | "datatype": "FP32", 8 | "data": [ 0.0, 0.0, 1.0, 11.0, 14.0, 15.0, 3.0, 0.0, 0.0, 1.0, 13.0, 16.0, 12.0, 9 | 16.0, 8.0, 0.0, 0.0, 8.0, 16.0, 4.0, 6.0, 16.0, 5.0, 0.0, 0.0, 5.0, 10 | 15.0, 11.0, 13.0, 14.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 16.0, 13.0, 0.0, 11 | 0.0, 0.0, 0.0, 0.0, 13.0, 16.0, 16.0, 6.0, 0.0, 0.0, 0.0, 0.0, 16.0, 12 | 16.0, 16.0, 7.0, 0.0, 0.0, 0.0, 0.0, 11.0, 13.0, 12.0, 1.0, 0.0]
13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /MLServer/docs/examples/sklearn/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mnist-svm", 3 | "implementation": "mlserver_sklearn.SKLearnModel", 4 | "parameters": { 5 | "uri": "./mnist-svm.joblib", 6 | "version": "v0.1.0" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /MLServer/docs/examples/sklearn/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "true" 3 | } 4 | -------------------------------------------------------------------------------- /MLServer/docs/examples/tempo/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "inference-pipeline", 3 | "implementation": "tempo.mlserver.InferenceRuntime", 4 | "parameters": { 5 | "uri": "./models/inference-pipeline" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /MLServer/docs/examples/xgboost/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mushroom-xgboost", 3 | "implementation": "mlserver_xgboost.XGBoostModel", 4 | "parameters": { 5 | "uri": "./mushroom-xgboost.json", 6 | "version": "v0.1.0" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /MLServer/docs/examples/xgboost/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "true" 3 | } 4 | -------------------------------------------------------------------------------- /MLServer/docs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/docs/favicon.ico -------------------------------------------------------------------------------- /MLServer/docs/index.md: -------------------------------------------------------------------------------- 1 | ```{toctree} 2 | :hidden: 3 | :titlesonly: 4 | User Guide <./user-guide/index> 5 | Inference Runtimes <./runtimes/index> 6 | Reference <./reference/index> 7 | ``` 8 | 9 | ```{toctree} 10 | :hidden: 11 | :titlesonly: 12 | :maxdepth: 1 13 | 14 | Examples <./examples/index> 15 | Changelog <./changelog> 16 | ``` 17 | 18 | ```{include} ../README.md 19 | :relative-docs: ./docs/ 20 | ``` 21 | -------------------------------------------------------------------------------- /MLServer/docs/reference/api/index.md: -------------------------------------------------------------------------------- 1 | # Python API 2 | 3 | MLServer can be installed as a Python package, which exposes a public 4 | framework that can be used to build [custom inference 5 | runtimes](../../user-guide/custom) and [codecs](../../user-guide/content-type). 6 | 7 | Below, you can find the main reference for the Python API exposed by the 8 | MLServer framework.
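As a hedged illustration of that public API (a sketch only, not part of the reference below; the `input-0` name and the example array are arbitrary, and it assumes `NumpyCodec` is exported from `mlserver.codecs` with the `encode_input` classmethod described in these docs), the built-in codecs can build a V2 inference request straight from a numpy array:

```python
import numpy as np

from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest

# Encode a numpy tensor into a V2 `RequestInput` using the built-in codec.
payload = np.array([[1, 2, 3]], dtype=np.int32)
request = InferenceRequest(inputs=[NumpyCodec.encode_input("input-0", payload)])

# Serialise with pydantic v1, as used throughout this codebase.
print(request.json())
```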
9 | 10 | ```{toctree} 11 | :titlesonly: 12 | 13 | ./model.md 14 | ./types.md 15 | ./codecs.md 16 | ./metrics.md 17 | ``` 18 | -------------------------------------------------------------------------------- /MLServer/docs/reference/api/metrics.md: -------------------------------------------------------------------------------- 1 | # Metrics 2 | 3 | The MLServer package exposes a set of methods that let you register and track 4 | custom metrics. 5 | This can be used within your own [custom inference 6 | runtimes](../../user-guide/custom). 7 | To learn more about how to expose custom metrics, check out the [metrics usage 8 | guide](../../user-guide/metrics). 9 | 10 | ```{eval-rst} 11 | .. automodule:: mlserver 12 | :members: register, log 13 | ``` 14 | -------------------------------------------------------------------------------- /MLServer/docs/reference/api/types.md: -------------------------------------------------------------------------------- 1 | # Types 2 | 3 | ```{eval-rst} 4 | .. automodule:: mlserver.types 5 | :members: 6 | ``` 7 | -------------------------------------------------------------------------------- /MLServer/docs/reference/cli.md: -------------------------------------------------------------------------------- 1 | # MLServer CLI 2 | 3 | The MLServer package includes an `mlserver` CLI designed to help with some of 4 | the common tasks involved in a model's lifecycle. 5 | Below, you can find the full list of supported subcommands. 6 | Note that you can also get a similar high-level outline at any time by running: 7 | 8 | ```bash 9 | mlserver --help 10 | ``` 11 | 12 | ## Commands 13 | 14 | ```{eval-rst} 15 | 16 | .. click:: mlserver.cli.main:root 17 | :prog: mlserver 18 | :nested: full 19 | ``` 20 | -------------------------------------------------------------------------------- /MLServer/docs/reference/index.md: -------------------------------------------------------------------------------- 1 | # Reference 2 | 3 | ```{toctree} 4 | :titlesonly: 5 | 6 | ./settings.md 7 | ./model-settings.md 8 | ./cli.md 9 | ./api/index.md 10 | ``` 11 | -------------------------------------------------------------------------------- /MLServer/docs/reference/settings.md: -------------------------------------------------------------------------------- 1 | # MLServer Settings 2 | 3 | MLServer can be configured through a `settings.json` file in the root folder 4 | from which MLServer is started. 5 | Note that these are server-wide settings (e.g. gRPC or HTTP port) which are 6 | separate from the [individual model settings](./model-settings). 7 | Alternatively, this configuration can also be passed through **environment 8 | variables** prefixed with `MLSERVER_` (e.g. `MLSERVER_GRPC_PORT`). 9 | 10 | ## Settings 11 | 12 | ```{eval-rst} 13 | 14 | ..
autopydantic_settings:: mlserver.settings.Settings 15 | ``` 16 | -------------------------------------------------------------------------------- /MLServer/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | Sphinx==5.3.0 2 | sphinx_material==0.0.35 3 | readthedocs-sphinx-search==0.2.0 4 | myst-parser==1.0.0 5 | sphinxcontrib-bibtex==2.5.0 6 | autodoc_pydantic==1.8.0 7 | sphinx-click==4.4.0 8 | sphinx_design==0.3.0 9 | sphinx-autobuild==2021.3.14 10 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/alibi-detect.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/alibi-detect/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/alibi-explain.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/alibi-explain/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/custom.md: -------------------------------------------------------------------------------- 1 | # Custom Inference Runtimes 2 | 3 | There may be cases where the [inference runtimes](./index) offered 4 | out-of-the-box by MLServer are not enough, or where you need **extra 5 | custom functionality** which is not included in MLServer (e.g. custom codecs). 6 | To cover these cases, MLServer lets you create custom runtimes very easily. 7 | 8 | To learn more about how you can write custom runtimes with MLServer, check out 9 | the [Custom Runtimes user guide](../user-guide/custom). 10 | Alternatively, you can also see this [end-to-end 11 | example](../examples/custom/README) which walks through the process of writing 12 | a custom runtime.
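As a minimal sketch of what such a runtime looks like (the `EchoRuntime` name and its pass-through logic are illustrative only, not an MLServer API), a custom runtime subclasses `MLModel` and implements the async `load` and `predict` hooks:

```python
from mlserver import MLModel
from mlserver.types import InferenceRequest, InferenceResponse, ResponseOutput


class EchoRuntime(MLModel):
    async def load(self) -> bool:
        # Any one-off initialisation (loading weights, etc.) would go here.
        self.ready = True
        return self.ready

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        # Echo every input tensor back as an output tensor.
        outputs = [
            ResponseOutput(
                name=inp.name,
                shape=inp.shape,
                datatype=inp.datatype,
                data=inp.data,
            )
            for inp in payload.inputs
        ]
        return InferenceResponse(model_name=self.name, outputs=outputs)
```

Pointing the `implementation` field of a `model-settings.json` at this class (e.g. `models.EchoRuntime`) would then be enough for MLServer to load and serve it.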
13 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/huggingface.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/huggingface/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/lightgbm.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/lightgbm/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/mlflow.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/mlflow/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/mllib.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/mllib/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/sklearn.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/sklearn/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/runtimes/xgboost.md: -------------------------------------------------------------------------------- 1 | ```{include} ../../runtimes/xgboost/README.md 2 | :relative-docs: ../../docs/ 3 | ``` 4 | -------------------------------------------------------------------------------- /MLServer/docs/user-guide/index.md: -------------------------------------------------------------------------------- 1 | # User Guide 2 | 3 | In this section you can learn more about the different features of MLServer and 4 | how to use them.
5 | 6 | ```{toctree} 7 | :titlesonly: 8 | 9 | ./content-type 10 | ./openapi 11 | ./parallel-inference 12 | ./adaptive-batching 13 | ./custom 14 | ./metrics 15 | ./deployment/index 16 | ``` 17 | -------------------------------------------------------------------------------- /MLServer/log/.lock: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/log/.lock -------------------------------------------------------------------------------- /MLServer/mlserver/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | from .server import MLServer 3 | from .model import MLModel 4 | from .settings import Settings, ModelSettings 5 | from .metrics import register, log 6 | 7 | __all__ = [ 8 | "__version__", 9 | "MLServer", 10 | "MLModel", 11 | "Settings", 12 | "ModelSettings", 13 | "register", 14 | "log", 15 | ] 16 | -------------------------------------------------------------------------------- /MLServer/mlserver/batching/__init__.py: -------------------------------------------------------------------------------- 1 | from .requests import BatchedRequests 2 | from .adaptive import AdaptiveBatcher 3 | from .hooks import load_batching 4 | 5 | __all__ = ["AdaptiveBatcher", "load_batching", "BatchedRequests"] 6 | -------------------------------------------------------------------------------- /MLServer/mlserver/cli/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import main 2 | 3 | __all__ = ["main"] 4 | -------------------------------------------------------------------------------- /MLServer/mlserver/cli/init_project.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from ..logging import logger 3 | 4 | 5 | def init_cookiecutter_project(template: str): 6 | rc = subprocess.call(["which", "cookiecutter"]) 7 | if rc == 0: 8 | cmd = ["cookiecutter", template] 9 | subprocess.run(cmd, check=True) 10 | else: 11 | logger.error( 12 | "The cookiecutter command is not found. 
\n\n" 13 | "Please install with 'pip install cookiecutter' and retry" 14 | ) 15 | -------------------------------------------------------------------------------- /MLServer/mlserver/grpc/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import GRPCServer 2 | 3 | __all__ = ["GRPCServer"] 4 | -------------------------------------------------------------------------------- /MLServer/mlserver/grpc/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | gRPCLoggerName = "mlserver.grpc" 4 | logger = logging.getLogger(gRPCLoggerName) 5 | -------------------------------------------------------------------------------- /MLServer/mlserver/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataplane import DataPlane 2 | from .model_repository import ModelRepositoryHandlers 3 | from .custom import get_custom_handlers, custom_handler 4 | 5 | __all__ = [ 6 | "DataPlane", 7 | "ModelRepositoryHandlers", 8 | "get_custom_handlers", 9 | "custom_handler", 10 | ] 11 | -------------------------------------------------------------------------------- /MLServer/mlserver/kafka/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import KafkaServer 2 | 3 | __all__ = ["KafkaServer"] 4 | -------------------------------------------------------------------------------- /MLServer/mlserver/kafka/errors.py: -------------------------------------------------------------------------------- 1 | from fastapi import status 2 | 3 | from ..errors import MLServerError 4 | 5 | 6 | class InvalidMessageHeaders(MLServerError): 7 | def __init__(self, missing_header: str): 8 | msg = ( 9 | f"Invalid Kafka message. Expected '{missing_header}' header not " 10 | "found in message." 
11 | ) 12 | super().__init__(msg, status.HTTP_422_UNPROCESSABLE_ENTITY) 13 | -------------------------------------------------------------------------------- /MLServer/mlserver/kafka/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | loggerName = "mlserver.kafka" 4 | logger = logging.getLogger(loggerName) 5 | -------------------------------------------------------------------------------- /MLServer/mlserver/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import MetricsServer 2 | from .prometheus import configure_metrics 3 | from .context import model_context, register, log 4 | from .registry import REGISTRY 5 | 6 | __all__ = [ 7 | "MetricsServer", 8 | "configure_metrics", 9 | "model_context", 10 | "register", 11 | "log", 12 | "REGISTRY", 13 | ] 14 | -------------------------------------------------------------------------------- /MLServer/mlserver/metrics/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | loggerName = "mlserver.metrics" 4 | logger = logging.getLogger(loggerName) 5 | -------------------------------------------------------------------------------- /MLServer/mlserver/parallel/__init__.py: -------------------------------------------------------------------------------- 1 | from .registry import InferencePoolRegistry 2 | from .utils import configure_inference_pool 3 | from .worker import Worker 4 | 5 | __all__ = ["InferencePoolRegistry", "configure_inference_pool", "Worker"] 6 | -------------------------------------------------------------------------------- /MLServer/mlserver/parallel/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | loggerName = "mlserver.parallel" 4 | logger = logging.getLogger(loggerName) 5 | -------------------------------------------------------------------------------- /MLServer/mlserver/repository/__init__.py: -------------------------------------------------------------------------------- 1 | from .repository import ( 2 | ModelRepository, 3 | SchemalessModelRepository, 4 | DEFAULT_MODEL_SETTINGS_FILENAME, 5 | ) 6 | 7 | from .factory import ModelRepositoryFactory 8 | 9 | __all__ = [ 10 | "ModelRepository", 11 | "SchemalessModelRepository", 12 | "DEFAULT_MODEL_SETTINGS_FILENAME", 13 | "ModelRepositoryFactory", 14 | ] 15 | -------------------------------------------------------------------------------- /MLServer/mlserver/rest/__init__.py: -------------------------------------------------------------------------------- 1 | from .server import RESTServer 2 | 3 | __all__ = ["RESTServer"] 4 | -------------------------------------------------------------------------------- /MLServer/mlserver/rest/errors.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from fastapi import Request 3 | from pydantic import BaseModel 4 | 5 | from .responses import Response 6 | from ..errors import MLServerError 7 | 8 | 9 | class APIErrorResponse(BaseModel): 10 | error: Optional[str] = None 11 | 12 | 13 | async def handle_mlserver_error(request: Request, exc: MLServerError) -> Response: 14 | err_res = APIErrorResponse(error=str(exc)) 15 | return Response(status_code=exc.status_code, content=err_res.dict()) 16 | 17 | 18 | _EXCEPTION_HANDLERS = {MLServerError: handle_mlserver_error} 19 | 
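For context on how the `_EXCEPTION_HANDLERS` mapping above is used: any `MLServerError` raised while handling a request is converted into an `APIErrorResponse` carrying the error's status code. A sketch of a custom error following the same pattern as `mlserver/kafka/errors.py` (the `ModelNotWarm` class is hypothetical, shown only to illustrate the pattern):

```python
from fastapi import status

from mlserver.errors import MLServerError


class ModelNotWarm(MLServerError):
    # Hypothetical error type; not part of MLServer itself.
    def __init__(self, model_name: str):
        msg = f"Model '{model_name}' has not been warmed up yet."
        super().__init__(msg, status.HTTP_503_SERVICE_UNAVAILABLE)
```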
-------------------------------------------------------------------------------- /MLServer/mlserver/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/mlserver_alibi_detect/__init__.py: -------------------------------------------------------------------------------- 1 | from .runtime import AlibiDetectRuntime 2 | 3 | __all__ = ["AlibiDetectRuntime"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/mlserver_alibi_detect/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/requirements/dev.txt: -------------------------------------------------------------------------------- 1 | # Force tests to use TF 2.10 - otherwise, it may fall back to TF 2.9 (due to the 2 | # incompatibility with `protobuf>=3.20.2`) 3 | tensorflow==2.10.1 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/alibi-detect/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/tests/test_runtime.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mlserver.codecs import CodecError 4 | from mlserver.types import RequestInput, InferenceRequest 5 | 6 | from mlserver_alibi_detect import AlibiDetectRuntime 7 | 8 | 9 | async def test_multiple_inputs_error( 10 | outlier_detector: AlibiDetectRuntime, 11 | inference_request: InferenceRequest, 12 | ): 13 | inference_request.inputs.append( 14 | RequestInput(name="input-1", shape=[1, 3], data=[[0, 1, 6]], datatype="FP32") 15 | ) 16 | 17 | with pytest.raises(CodecError): 18 | await outlier_detector.predict(inference_request) 19 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-detect/tests/testdata/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [1, 3], 6 | "datatype": "INT32", 7 | "data": [[1, 2, 3]] 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/README.md: -------------------------------------------------------------------------------- 1 | # Alibi-Explain runtime for MLServer 2 | 3 | This package provides an MLServer runtime compatible with Alibi-Explain.
4 | 5 | ## Usage 6 | 7 | You can install the runtime, alongside `mlserver`, as: 8 | 9 | ```bash 10 | pip install mlserver mlserver-alibi-explain 11 | ``` 12 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/mlserver_alibi_explain/__init__.py: -------------------------------------------------------------------------------- 1 | from .runtime import AlibiExplainRuntime 2 | 3 | __all__ = ["AlibiExplainRuntime"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/mlserver_alibi_explain/errors.py: -------------------------------------------------------------------------------- 1 | from mlserver.errors import MLServerError 2 | 3 | from typing import List, Union 4 | 5 | 6 | class RemoteInferenceError(MLServerError): 7 | def __init__(self, code: int, reason: str): 8 | super().__init__(f"Remote inference call failed with {code}, {reason}") 9 | 10 | 11 | class InvalidExplanationShape(MLServerError): 12 | def __init__(self, shape: Union[List[int], int]): 13 | super().__init__( 14 | f"Expected a single element, but multiple were returned {shape}" 15 | ) 16 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/mlserver_alibi_explain/explainers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/alibi-explain/mlserver_alibi_explain/explainers/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/mlserver_alibi_explain/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/requirements/dev.txt: -------------------------------------------------------------------------------- 1 | requests-mock==1.10.0 2 | types-requests==2.28.11.5 3 | -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/alibi-explain/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/tests/helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/alibi-explain/tests/helpers/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/alibi-explain/tests/helpers/metrics.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import CollectorRegistry 2 | 3 | 4 | def unregister_metrics(registry: CollectorRegistry): 5 | # NOTE: Since `REGISTRY` objects are usually global, this method is NOT 6 | # thread-safe!! 
7 | collectors = list(registry._collector_to_names.keys()) 8 | for collector in collectors: 9 | registry.unregister(collector) 10 | -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/mlserver_huggingface/__init__.py: -------------------------------------------------------------------------------- 1 | from .runtime import HuggingFaceRuntime 2 | 3 | __all__ = ["HuggingFaceRuntime"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/mlserver_huggingface/codecs/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import MultiInputRequestCodec, HuggingfaceRequestCodec 2 | from .image import PILImageCodec 3 | from .json import HuggingfaceSingleJSONCodec 4 | from .jsonlist import HuggingfaceListJSONCodec 5 | from .numpylist import NumpyListCodec 6 | from .conversation import HuggingfaceConversationCodec 7 | from .raw import RawCodec 8 | from .utils import EqualUtil 9 | 10 | __all__ = [ 11 | "MultiInputRequestCodec", 12 | "HuggingfaceRequestCodec", 13 | "PILImageCodec", 14 | "HuggingfaceSingleJSONCodec", 15 | "HuggingfaceListJSONCodec", 16 | "HuggingfaceConversationCodec", 17 | "NumpyListCodec", 18 | "RawCodec", 19 | "EqualUtil", 20 | ] 21 | -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/mlserver_huggingface/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/test_codecs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/test_codecs/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/test_tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/test_tasks/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/test_tasks/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from ..utils import file_path, file_bytescontent 3 | 4 | 5 | @pytest.fixture 6 | def audio_filepath(): 7 | return file_path("audio.mp3") 8 | 9 | 10 | @pytest.fixture 11 | def audio_filebytes(): 12 | return file_bytescontent("audio.mp3") 13 | -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/testdata/audio.mp3: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/testdata/audio.mp3 -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/testdata/dogs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/testdata/dogs.jpg -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/testdata/hancat.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/testdata/hancat.jpeg -------------------------------------------------------------------------------- /MLServer/runtimes/huggingface/tests/testdata/ugcat.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/huggingface/tests/testdata/ugcat.jpeg -------------------------------------------------------------------------------- /MLServer/runtimes/lightgbm/mlserver_lightgbm/__init__.py: -------------------------------------------------------------------------------- 1 | from .lightgbm import LightGBMModel 2 | 3 | __all__ = ["LightGBMModel"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/lightgbm/mlserver_lightgbm/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/lightgbm/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/lightgbm/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/lightgbm/tests/testdata/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [1, 3], 6 | "datatype": "INT32", 7 | "data": [[1, 2, 3]] 8 | } 9 | ], 10 | "outputs": [{ "name": "predict" }, { "name": "predict_proba" }] 11 | } 12 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/mlserver_mlflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .runtime import MLflowRuntime 2 | from .codecs import TensorDictCodec 3 | 4 | __all__ = ["MLflowRuntime", "TensorDictCodec"] 5 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/mlserver_mlflow/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/requirements/dev.txt: -------------------------------------------------------------------------------- 1 | torch==1.13.1 2 | pytorch-lightning==1.8.0.post1 3 | # Pin 
torchmetrics to a compatible version to avoid this issue: 4 | # https://github.com/PyTorchLightning/pytorch-lightning/issues/10233 5 | torchmetrics==0.10.2 6 | torchvision==0.14.1 7 | 8 | # Force local tests to use MLflow 2.x 9 | mlflow >= 2.0.0rc0 10 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/mlflow/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/tests/rest/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/mlflow/tests/rest/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/tests/rest/test_endpoints.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mlflow.version import VERSION 4 | 5 | 6 | @pytest.mark.parametrize("endpoint", ["/ping", "/health"]) 7 | async def test_ping(rest_client, endpoint: str): 8 | response = await rest_client.get(endpoint) 9 | 10 | assert response.status_code == 200 11 | assert response.json() == "\n" 12 | 13 | 14 | async def test_version(rest_client): 15 | response = await rest_client.get("/version") 16 | 17 | assert response.status_code == 200 18 | assert response.json() == VERSION 19 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/tests/rest/utils.py: -------------------------------------------------------------------------------- 1 | from prometheus_client import CollectorRegistry 2 | 3 | 4 | def unregister_metrics(registry: CollectorRegistry): 5 | # NOTE: Since `REGISTRY` objects are usually global, this method is NOT 6 | # thread-safe!! 7 | collectors = list(registry._collector_to_names.keys()) 8 | for collector in collectors: 9 | registry.unregister(collector) 10 | -------------------------------------------------------------------------------- /MLServer/runtimes/mlflow/tests/testdata/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "foo", 5 | "shape": [3], 6 | "datatype": "FP64", 7 | "data": [1, 2, 3] 8 | } 9 | ], 10 | "parameters": { 11 | "content_type": "dict" 12 | }, 13 | "outputs": [{ "name": "predict" }, { "name": "predict_proba" }] 14 | } 15 | -------------------------------------------------------------------------------- /MLServer/runtimes/mllib/README.md: -------------------------------------------------------------------------------- 1 | # Spark MLlib runtime for MLServer 2 | 3 | This package provides an MLServer runtime compatible with Spark MLlib. 4 | 5 | ## Usage 6 | 7 | You can install the runtime, alongside `mlserver`, as: 8 | 9 | ```bash 10 | pip install mlserver mlserver-mllib 11 | ``` 12 | 13 | For further information on how to use MLServer with Spark MLlib, you can check 14 | out the [MLServer repository](https://github.com/SeldonIO/MLServer).
15 | -------------------------------------------------------------------------------- /MLServer/runtimes/mllib/mlserver_mllib/__init__.py: -------------------------------------------------------------------------------- 1 | from .mllib import MLlibModel 2 | 3 | __all__ = ["MLlibModel"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/mllib/mlserver_mllib/errors.py: -------------------------------------------------------------------------------- 1 | from mlserver.errors import MLServerError 2 | from typing import Optional 3 | 4 | 5 | class InvalidMLlibFormat(MLServerError): 6 | def __init__(self, name: str, model_uri: Optional[str] = None): 7 | msg = f"Invalid MLlib format for model {name}" 8 | if model_uri: 9 | msg += f" ({model_uri})" 10 | 11 | super().__init__(msg) 12 | -------------------------------------------------------------------------------- /MLServer/runtimes/mllib/mlserver_mllib/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/sklearn/mlserver_sklearn/__init__.py: -------------------------------------------------------------------------------- 1 | from .sklearn import SKLearnModel 2 | 3 | __all__ = ["SKLearnModel"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/sklearn/mlserver_sklearn/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/sklearn/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/sklearn/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/sklearn/tests/testdata/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [3], 6 | "datatype": "INT32", 7 | "data": [1, 2, 3] 8 | } 9 | ], 10 | "outputs": [{ "name": "predict" }, { "name": "predict_proba" }] 11 | } 12 | -------------------------------------------------------------------------------- /MLServer/runtimes/xgboost/mlserver_xgboost/__init__.py: -------------------------------------------------------------------------------- 1 | from .xgboost import XGBoostModel 2 | 3 | __all__ = ["XGBoostModel"] 4 | -------------------------------------------------------------------------------- /MLServer/runtimes/xgboost/mlserver_xgboost/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.0.dev4" 2 | -------------------------------------------------------------------------------- /MLServer/runtimes/xgboost/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/runtimes/xgboost/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/runtimes/xgboost/tests/testdata/inference-request.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [1, 3], 6 | "datatype": "INT32", 7 | "data": [[1, 2, 3]] 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /MLServer/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/batch_processing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/batch_processing/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/batching/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/batching/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/cli/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/cli/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import docker 3 | 4 | from typing import Tuple 5 | from docker.client import DockerClient 6 | 7 | from ..utils import get_available_ports 8 | 9 | 10 | @pytest.fixture 11 | def docker_client() -> DockerClient: 12 | return docker.from_env() 13 | 14 | 15 | @pytest.fixture 16 | def free_ports() -> Tuple[int, int, int]: 17 | http_port, grpc_port, metrics_port = get_available_ports(3) 18 | return http_port, grpc_port, metrics_port 19 | -------------------------------------------------------------------------------- /MLServer/tests/cli/test_version.py: -------------------------------------------------------------------------------- 1 | from click.testing import CliRunner 2 | 3 | from mlserver.cli.main import root 4 | 5 | 6 | def test_version_does_not_error(): 7 | runner = CliRunner() 8 | 9 | result = runner.invoke(root, ["--version"]) 10 | 11 | assert result.exit_code == 0 12 | assert result.exception is None 13 | -------------------------------------------------------------------------------- /MLServer/tests/codecs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/codecs/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/grpc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/grpc/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/grpc/test_utils.py: 
-------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from typing import Dict, Tuple 4 | 5 | from mlserver.grpc.utils import to_metadata 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "headers, expected", 10 | [ 11 | ({"foo": "bar"}, (("foo", "bar"),)), 12 | ({"foo": "bar", "foo2": "bar2"}, (("foo", "bar"), ("foo2", "bar2"))), 13 | ({"foo": "bar", "X-Foo": "bar2"}, (("foo", "bar"), ("x-foo", "bar2"))), 14 | ], 15 | ) 16 | def test_to_metadata(headers: Dict[str, str], expected: Tuple[Tuple[str, str], ...]): 17 | metadata = to_metadata(headers) 18 | 19 | assert metadata == expected 20 | -------------------------------------------------------------------------------- /MLServer/tests/handlers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/handlers/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/handlers/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mlserver.handlers.custom import CustomHandler 4 | 5 | from ..fixtures import SumModel 6 | 7 | 8 | @pytest.fixture 9 | def custom_handler(sum_model: SumModel) -> CustomHandler: 10 | return CustomHandler(rest_path="/my-custom-endpoint") 11 | -------------------------------------------------------------------------------- /MLServer/tests/kafka/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/kafka/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/metrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/metrics/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/metrics/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mlserver.server import MLServer 4 | from mlserver.settings import Settings 5 | from mlserver.metrics.prometheus import PrometheusEndpoint 6 | 7 | from .utils import MetricsClient 8 | 9 | 10 | @pytest.fixture 11 | def prometheus_endpoint(settings: Settings) -> PrometheusEndpoint: 12 | return PrometheusEndpoint(settings) 13 | 14 | 15 | @pytest.fixture 16 | async def metrics_client(mlserver: MLServer, settings: Settings): 17 | http_server = f"{settings.host}:{settings.metrics_port}" 18 | client = MetricsClient(http_server, metrics_endpoint=settings.metrics_endpoint) 19 | 20 | yield client 21 | 22 | await client.close() 23 | -------------------------------------------------------------------------------- /MLServer/tests/metrics/test_endpoint_cases.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | 4 | def case_metrics() -> Optional[str]: 5 | return "/metrics" 6 | 7 | 8 | def case_prometheus() -> Optional[str]: 9 | return "/prometheus" 10 | 11 | 12 | def case_disabled() -> Optional[str]: 13 | return None 14 | -------------------------------------------------------------------------------- 
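The endpoint cases above parametrise where (or whether) the Prometheus metrics endpoint is exposed. For reference, custom metrics themselves are emitted through the `register` and `log` helpers exported by the top-level `mlserver` package (see `mlserver/__init__.py` earlier); a sketch, assuming the `register(name, description)` / `log(**metrics)` signatures documented for these helpers:

```python
from mlserver import MLModel, register, log
from mlserver.types import InferenceRequest, InferenceResponse


class InstrumentedModel(MLModel):
    # Illustrative runtime, not part of the test suite above.

    async def load(self) -> bool:
        # Register the custom metric once, at load time.
        register("my_custom_metric", "An example custom metric")
        self.ready = True
        return self.ready

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        # Record a value for the metric on every inference call.
        log(my_custom_metric=len(payload.inputs))
        return InferenceResponse(model_name=self.name, outputs=[])
```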
/MLServer/tests/parallel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/parallel/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/repository/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/repository/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/repository/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | 4 | from mlserver.settings import Settings 5 | 6 | from ..conftest import TESTDATA_PATH 7 | 8 | 9 | @pytest.fixture 10 | def settings() -> Settings: 11 | settings_path = os.path.join(TESTDATA_PATH, "settings-custom-md-repo.json") 12 | return Settings.parse_file(settings_path) 13 | -------------------------------------------------------------------------------- /MLServer/tests/repository/test_custom_md_repo.py: -------------------------------------------------------------------------------- 1 | from mlserver.settings import Settings 2 | 3 | from ..utils import RESTClient 4 | 5 | 6 | async def test_custom_model_repo_by_settings( 7 | rest_client: RESTClient, 8 | settings: Settings, 9 | ): 10 | await rest_client.wait_until_model_ready("sum-model") 11 | 12 | result = await rest_client.list_models() 13 | 14 | assert len(result) == 1 15 | -------------------------------------------------------------------------------- /MLServer/tests/rest/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/MLServer/tests/rest/__init__.py -------------------------------------------------------------------------------- /MLServer/tests/rest/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from fastapi import status 4 | from mlserver.rest.utils import to_status_code 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "flag,expected", [(True, status.HTTP_200_OK), (False, status.HTTP_400_BAD_REQUEST)] 9 | ) 10 | def test_to_status_code(flag: bool, expected: int): 11 | status_code = to_status_code(flag) 12 | assert status_code == expected 13 | -------------------------------------------------------------------------------- /MLServer/tests/test_logging.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mlserver.logging import logger, configure_logger 4 | from mlserver.settings import Settings 5 | 6 | 7 | @pytest.mark.parametrize("debug", [True, False]) 8 | def test_log_level_gets_persisted(debug: bool, settings: Settings, caplog): 9 | settings.debug = debug 10 | configure_logger(settings) 11 | 12 | test_log_message = "foo - bar - this is a test" 13 | logger.debug(test_log_message) 14 | 15 | if debug: 16 | assert test_log_message in caplog.text 17 | else: 18 | assert test_log_message not in caplog.text 19 | -------------------------------------------------------------------------------- /MLServer/tests/test_types.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 
import json 3 | 4 | from mlserver import types 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "data", 9 | [ 10 | [1, 2, 3], 11 | [1.0, 2.0, 3.0], 12 | [[1.0, 2.0, 3.0]], 13 | [34.5, 8.4], 14 | [True, False, True], 15 | ["one", "two", "three"], 16 | ], 17 | ) 18 | def test_tensor_data(data): 19 | raw = json.dumps(data) 20 | tensor_data = types.TensorData.parse_raw(raw) 21 | 22 | assert tensor_data.__root__ == data 23 | for tensor_elem, elem in zip(tensor_data, data): 24 | assert type(tensor_elem) == type(elem) 25 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/.test.env: -------------------------------------------------------------------------------- 1 | MLSERVER_ALLOW_ORIGIN_REGEX=".*" 2 | MLSERVER_DEBUG=1 3 | MLSERVER_HTTP_PORT=9999 4 | MLSERVER_MAX_AGE=999 5 | MLSERVER_MODEL_NAME="dummy-name" 6 | MLSERVER_MODEL_URI="dummy-uri" 7 | MLSERVER_MODEL_IMPLEMENTATION="mlserver.MLModel" 8 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/batch_processing/invalid.txt: -------------------------------------------------------------------------------- 1 | {"inputs":[{"name":"input-0","shape":[1,3],"data":[1,2,3]}]} 2 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/batch_processing/invalid_among_many.txt: -------------------------------------------------------------------------------- 1 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[1,0,0]}]} 2 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[2,0,0]}]} 3 | {"inputs":[{"name":"input-0","shape":[1,3],"data":[1,2,3]}]} 4 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[4,0,0]}]} 5 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[5,0,0]}]} 6 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[6,0,0]}]} 7 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/batch_processing/many.txt: -------------------------------------------------------------------------------- 1 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[1,0,0]}]} 2 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[2,0,0]}]} 3 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[3,0,0]}]} 4 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[4,0,0]}]} 5 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[5,0,0]}]} 6 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[6,0,0]}]} 7 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/batch_processing/single.txt: -------------------------------------------------------------------------------- 1 | {"inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[1,2,3]}]} 2 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/batch_processing/single_with_id.txt: -------------------------------------------------------------------------------- 1 | {"id": "my-test-id", "inputs":[{"name":"input-0","shape":[1,3],"datatype":"INT32","data":[1,2,3]}]} 2 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/environment.yml: -------------------------------------------------------------------------------- 1 | name: custom-runtime-environment 
2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python == 3.8 6 | - scikit-learn == 1.0.2 7 | - pip: 8 | - mlserver == 1.3.0.dev2 9 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/grpc/model-infer-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "model_name": "sum-model", 3 | "model_version": "v1.2.3", 4 | "inputs": [ 5 | { 6 | "name": "input-0", 7 | "datatype": "INT32", 8 | "shape": [1, 3], 9 | "contents": { "int_contents": [1, 2, 3] }, 10 | "parameters": { 11 | "content_type": { "string_param": "np" } 12 | } 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/inference-request-with-output.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [1, 3], 6 | "datatype": "INT32", 7 | "data": [1, 2, 3] 8 | } 9 | ], 10 | "outputs": [ 11 | { 12 | "name": "requested-output-0" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/inference-request.json: -------------------------------------------------------------------------------- 1 | { 2 | "inputs": [ 3 | { 4 | "name": "input-0", 5 | "shape": [1, 3], 6 | "datatype": "INT32", 7 | "data": [1, 2, 3] 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/inference-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "model_name": "sum-model", 3 | "id": "123", 4 | "outputs": [ 5 | { 6 | "name": "output-0", 7 | "shape": [1], 8 | "datatype": "FP32", 9 | "data": [21.0] 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/metadata-model-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sum-model", 3 | "versions": ["sum-model/v1.2.3"], 4 | "platform": "mlserver", 5 | "inputs": [ 6 | { 7 | "datatype": "FP32", 8 | "name": "input-0", 9 | "shape": [128] 10 | } 11 | ], 12 | "outputs": [ 13 | { 14 | "datatype": "FP32", 15 | "name": "output-0", 16 | "shape": [1] 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/metadata-server-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mlserver", 3 | "version": "v0.1.0", 4 | "extensions": ["metrics"] 5 | } 6 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sum-model", 3 | 4 | "implementation": "tests.fixtures.SumModel", 5 | 6 | "max_batch_size": 10, 7 | "max_batch_time": 0.4, 8 | 9 | "versions": ["sum-model/v1.2.3"], 10 | "platform": "mlserver", 11 | "inputs": [ 12 | { 13 | "datatype": "FP32", 14 | "name": "input-0", 15 | "shape": [128], 16 | "parameters": { 17 | "content_type": "np" 18 | } 19 | } 20 | ], 21 | "outputs": [ 22 | { 23 | "datatype": "FP32", 24 | "name": "output-0", 25 | "shape": [1] 26 | } 27 | ], 28 | 29 | "parameters": { 30 | "version": "v1.2.3" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- 
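Note how `model-settings.json` above also sets the adaptive-batching knobs (`max_batch_size`, `max_batch_time`). As a sketch of serving these test fixtures programmatically, assuming `MLServer.start` accepts a list of model settings as elsewhere in this codebase (the file paths are illustrative, relative to the repository root):

```python
import asyncio

from mlserver import MLServer, Settings, ModelSettings

# Parse the fixtures shown above (pydantic v1 `parse_file`, as used in
# tests/repository/conftest.py).
settings = Settings.parse_file("MLServer/tests/testdata/settings.json")
model_settings = ModelSettings.parse_file("MLServer/tests/testdata/model-settings.json")

# Boot the REST, gRPC and metrics servers, and load the model.
server = MLServer(settings)
asyncio.run(server.start([model_settings]))
```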
/MLServer/tests/testdata/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn 2 | -------------------------------------------------------------------------------- /MLServer/tests/testdata/settings-custom-md-repo.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": true, 3 | "host": "127.0.0.1", 4 | "http_port": 8087, 5 | "grpc_port": 8088, 6 | "metrics_port": 8089, 7 | "cors_settings": { 8 | "allow_origins": [ 9 | "*" 10 | ] 11 | }, 12 | "model_repository_implementation": "tests.repository.dummymdrepo.DummyModelRepository", 13 | "model_repository_implementation_args": { 14 | "files": [ 15 | "tests/testdata/model-settings.json" 16 | ] 17 | } 18 | } -------------------------------------------------------------------------------- /MLServer/tests/testdata/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": true, 3 | "host": "127.0.0.1", 4 | "parallel_workers": 2, 5 | "cors_settings": { 6 | "allow_origins": ["*"] 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /data/figures/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/.gitkeep -------------------------------------------------------------------------------- /data/figures/gorubi-scalibility.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/gorubi-scalibility.pdf -------------------------------------------------------------------------------- /data/figures/latency-cdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/latency-cdf.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-14-audio-qa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-14-audio-qa.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-14-audio-sent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-14-audio-sent.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-14-nlp.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-14-nlp.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-14-sum-qa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-14-sum-qa.pdf 
-------------------------------------------------------------------------------- /data/figures/metaseries-14-video.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-14-video.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-18-audio-qa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-18-audio-qa.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-18-audio-sent.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-18-audio-sent.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-18-nlp.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-18-nlp.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-18-sum-qa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-18-sum-qa.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-18-video.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-18-video.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-20-video.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-20-video.pdf -------------------------------------------------------------------------------- /data/figures/metaseries-21-video.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/metaseries-21-video.pdf -------------------------------------------------------------------------------- /data/figures/objective-preferences-mult.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/objective-preferences-mult.pdf -------------------------------------------------------------------------------- /data/figures/objective-preferences.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/objective-preferences.pdf -------------------------------------------------------------------------------- 
/data/figures/patterns.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/patterns.pdf -------------------------------------------------------------------------------- /data/figures/predictor-abelation-sla.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/predictor-abelation-sla.pdf -------------------------------------------------------------------------------- /data/figures/two_metrics.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/data/figures/two_metrics.pdf -------------------------------------------------------------------------------- /doc-figs/artifact-eval.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/artifact-eval.png -------------------------------------------------------------------------------- /doc-figs/mapping.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/mapping.png -------------------------------------------------------------------------------- /doc-figs/paper-figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/paper-figure.png -------------------------------------------------------------------------------- /doc-figs/pipelines-paper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/pipelines-paper.png -------------------------------------------------------------------------------- /doc-figs/revision-artifact-eval.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/revision-artifact-eval.png -------------------------------------------------------------------------------- /doc-figs/revision-paper-figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/doc-figs/revision-paper-figure.png -------------------------------------------------------------------------------- /experiments/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/experiments/__init__.py -------------------------------------------------------------------------------- /experiments/profiling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/experiments/profiling/__init__.py 
-------------------------------------------------------------------------------- /experiments/profiling/single-node/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/experiments/profiling/single-node/__init__.py -------------------------------------------------------------------------------- /experiments/runner/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/experiments/runner/__init__.py -------------------------------------------------------------------------------- /experiments/runner/run-failed-experiment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment_number=$1 4 | 5 | # Initialise conda for non-interactive shells before activating the env 6 | eval "$(conda shell.bash hook)" && conda activate central 7 | 8 | # Running Python script with the extracted experiment number 9 | python runner_script.py --config-name "video-$experiment_number" 10 | sleep 60 11 | 12 | # Drawing the results of the experiment 13 | jupyter nbconvert --execute --to notebook --inplace ~/ipa/experiments/runner/notebooks/Jsys-reviewers-revision.ipynb 14 | -------------------------------------------------------------------------------- /experiments/runner/run-revision-failed-experiment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment_number=$1 4 | 5 | # Initialise conda for non-interactive shells before activating the env 6 | eval "$(conda shell.bash hook)" && conda activate central 7 | 8 | # Running Python script with the extracted experiment number 9 | python runner_script.py --config-name "video-mul-$experiment_number" 10 | sleep 60 11 | 12 | # Drawing the results of the experiment 13 | jupyter nbconvert --execute --to notebook --inplace ~/ipa/experiments/runner/notebooks/Jsys-reviewers-revision.ipynb 14 | -------------------------------------------------------------------------------- /experiments/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/experiments/utils/__init__.py -------------------------------------------------------------------------------- /experiments/utils/misc.py: -------------------------------------------------------------------------------- 1 | import json 2 | import numpy as np 3 | 4 | # def convert_values_to_strings(dictionary): 5 | #     new_dict = {} 6 | #     for key, value in dictionary.items(): 7 | #         if isinstance(value, dict): 8 | #             new_value = convert_values_to_strings(value) 9 | #         else: 10 | #             new_value = str(value) 11 | #         new_dict[key] = new_value 12 | #     return new_dict 13 | 14 | 15 | class Int64Encoder(json.JSONEncoder): 16 |     # Convert numpy int64 values to plain ints so json can serialise them 17 |     def default(self, obj): 18 |         if isinstance(obj, np.int64): 19 |             return int(obj) 20 |         return super().default(obj) 21 | -------------------------------------------------------------------------------- /experiments/utils/obj.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | # set up object storage 5 | def setup_obj_store(): 6 |     os.system("sudo umount -l ~/my_mounting_point") 7 |     os.system("cc-cloudfuse mount ~/my_mounting_point") 8 | -------------------------------------------------------------------------------- /infrastructure/README.md: -------------------------------------------------------------------------------- 1 | # Infrastructure
2 | 3 | 1. Use the guide for [automated infrastructure installation](automated.md) (recommended for the reviewers) 4 | 2. In case of any errors, you can also use the guide for [manual infrastructure installation](manual.md) 5 | -------------------------------------------------------------------------------- /infrastructure/hack/disable_firewall.sh: -------------------------------------------------------------------------------- 1 | # Disable all firewalls 2 | function disable_firewalls() { 3 |     sudo iptables -F 4 |     sudo iptables -X 5 |     sudo iptables -t nat -F 6 |     sudo iptables -t nat -X 7 |     sudo iptables -t mangle -F 8 |     sudo iptables -t mangle -X 9 |     sudo iptables -P INPUT ACCEPT 10 |     sudo iptables -P FORWARD ACCEPT 11 |     sudo iptables -P OUTPUT ACCEPT 12 | 13 |     sudo systemctl stop firewalld 14 |     sudo systemctl disable firewalld 15 | 16 |     echo "Disabled all firewalls" 17 |     echo 18 | } 19 | 20 | disable_firewalls -------------------------------------------------------------------------------- /infrastructure/hack/zsh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | install_zsh() { 4 |     sudo apt update 5 |     sudo apt install -y zsh 6 |     sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" 7 |     git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions 8 |     sed -i 's/plugins=(git)/plugins=(git zsh-autosuggestions)/' ~/.zshrc 9 |     chsh -s "$(which zsh)" 10 |     exec zsh 11 | } 12 | 13 | install_zsh 14 | -------------------------------------------------------------------------------- /load_tester/README.md: -------------------------------------------------------------------------------- 1 | Load tester module of IPA for asynchronous load testing 2 | -------------------------------------------------------------------------------- /load_tester/barazmoon/__init__.py: -------------------------------------------------------------------------------- 1 | from barazmoon.main import BarAzmoonProcess 2 | from barazmoon.main import BarAzmoonAsyncRest 3 | from barazmoon.mlserver import MLServerProcess 4 | from barazmoon.mlserver import MLServerAsyncRest 5 | from barazmoon.mlserver import MLServerAsyncGrpc 6 | from barazmoon.main import Data 7 | -------------------------------------------------------------------------------- /load_tester/barazmoon/twitter/__init__.py: -------------------------------------------------------------------------------- 1 | from .loader import twitter_workload_generator 2 | -------------------------------------------------------------------------------- /load_tester/barazmoon/twitter/workload.tbz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/load_tester/barazmoon/twitter/workload.tbz2 -------------------------------------------------------------------------------- /load_tester/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | 5 | def read(): 6 |     return open(os.path.join(os.path.dirname(__file__), "README.md")).read() 7 | 8 | 9 | setup( 10 |     name="barazmoon", 11 |     version="0.0.1", 12 |     keywords=["load testing", "web service", "restful api"], 13 |     packages=find_packages("."), 14 |     long_description=read(), 15 |     install_requires=[ 16 |         "numpy>=1.19.2", 17 |         "aiohttp>=3.7.4", 18 |     ], 19 | ) 20 |
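Note: since `setup.py` packages the load tester as `barazmoon`, it can be installed in editable mode with `pip install -e load_tester` and driven from Python. A minimal sketch follows; the slice argument passed to `twitter_workload_generator` is hypothetical and should be checked against `barazmoon/twitter/loader.py`:

```python
# Hypothetical usage sketch of the barazmoon load-tester package.
# twitter_workload_generator is re-exported by barazmoon/twitter/__init__.py;
# the "0-60" slice argument is a guess at its interface, not documented.
from barazmoon.twitter import twitter_workload_generator

# Load the first minute of the bundled Twitter trace (workload.tbz2)
# as a requests-per-second series.
workload = twitter_workload_generator("0-60")
print(f"Loaded {len(workload)} seconds of load: {workload[:10]}")
```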
-------------------------------------------------------------------------------- /models-to-minio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/models-to-minio/__init__.py -------------------------------------------------------------------------------- /models-to-minio/readme.md: -------------------------------------------------------------------------------- 1 | Scripts for moving models to the MinIO object storage that is set up during the infrastructure setup 2 | -------------------------------------------------------------------------------- /num-parameters/yolov5-yolo.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # Load the YOLOv5 model from Torch Hub 4 | model = torch.hub.load("ultralytics/yolov5", "yolov5s") 5 | 6 | # Count the number of parameters 7 | num_params = sum(x.numel() for x in model.parameters()) 8 | 9 | print(f"Number of parameters in the YOLOv5s model: {num_params}") 10 | -------------------------------------------------------------------------------- /optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import Model, ResourceAllocation, Profile, Task, Pipeline 2 | from .optimizer import Optimizer 3 | from .adapter import Adapter 4 | from .sim_adapter import SimAdapter 5 | -------------------------------------------------------------------------------- /pipelines/README.md: -------------------------------------------------------------------------------- 1 | 1. Video Monitoring (1 Chameleon Cascadelake_r node) 2 | 2. Audio Question Answering (2 Chameleon Cascadelake_r nodes) 3 | 3. Audio Sentiment Analysis (2 Chameleon Cascadelake_r nodes) 4 | 4. Summarization Question Answering (3 Chameleon Cascadelake_r nodes) 5 | 5.
Natural Language Processing (6 Chameleon Cascadelake_r nodes) 6 | -------------------------------------------------------------------------------- /pipelines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/audio-qa/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=audio-qa-centralized:audio 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "audio", 3 |     "implementation": "models.GeneralAudio", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "audio", 9 |     "implementation": "models.GeneralAudio", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "audio", 21 |     "implementation": "models.GeneralAudio", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torchaudio==0.12.1 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/audio/test.py -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=audio-qa-centralized:nlpqa 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 |     "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "nlp-qa", 3 |     "implementation": "models.GeneralNLP", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "nlp-qa", 9 |     "implementation": "models.GeneralNLP", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "nlp-qa", 21 |     "implementation": "models.GeneralNLP", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.35.0 2 | sentencepiece==0.1.97 3 | torch==2.1.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/nodes/nlp-qa/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-qa/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 |     curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 |     -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 |     -H "Content-Type: application/json" 6 |     sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd
11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=audio-sent-centralized:audio 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 |     "data_shape": [1, 93680] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "audio", 3 |     "implementation": "models.GeneralAudio", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "audio", 9 |     "implementation": "models.GeneralAudio", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "audio", 21 |     "implementation": "models.GeneralAudio", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torchaudio==0.12.1 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/audio/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=audio-sent-centralized:nlpsent 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "nlp-sent", 3 |     "implementation": "models.GeneralNLP", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "nlp-sent", 9 |     "implementation": "models.GeneralNLP", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "nlp-sent", 21 |     "implementation": "models.GeneralNLP", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/nodes/nlp-sent/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/audio-sent/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 |     curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 |     -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 |     -H "Content-Type: application/json" 6 |     sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/mock/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs
and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=mock-centralized:node-one 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev15-slim/custom-1-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "node-one", 3 |     "implementation": "models.NodeOne", 4 |     "parameters": { 5 |         "uri": "./fakeuri" 6 |     } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "node-one", 9 |     "implementation": "models.NodeOne", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "node-one", 21 |     "implementation": "models.NodeOne", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/requirements.txt -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-one/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=mock-centralized:node-two 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev15-slim/custom-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 |     "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "node-two", 3 |     "implementation": "models.NodeTwo", 4 |     "parameters": { 5 |         "uri": "./fakeuri" 6 |     } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "node-two", 9 |     "implementation": "models.NodeTwo", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "node-two", 21 |     "implementation": "models.NodeTwo", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/requirements.txt -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/nodes/node-two/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/mock/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 |     curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 |     -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 |     -H "Content-Type: application/json" 6 |     sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et
coureur, Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=nlp-centralized:nlpli 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 |     "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et coureur, Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "nlp-li", 3 |     "implementation": "models.GeneralNLP", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "nlp-li", 9 |     "implementation": "models.GeneralNLP", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "nlp-li", 21 |     "implementation": "models.GeneralNLP", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-li/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=nlp-centralized:nlpsum 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 |     docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 |     docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 |     "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/input-sample.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010.
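Note: the `models.GeneralNLP` runtime referenced by these `model-settings.json` files is not part of this listing. The sketch below shows the general shape such an MLServer custom runtime takes, with a default HuggingFace summarization pipeline standing in for the repo's actual model loading:

```python
# Sketch of an MLServer custom runtime in the spirit of models.GeneralNLP;
# the summarization task is a stand-in, not the repo's actual code.
from mlserver import MLModel
from mlserver.codecs import StringCodec
from mlserver.types import InferenceRequest, InferenceResponse
from transformers import pipeline


class GeneralNLP(MLModel):
    async def load(self) -> bool:
        # model-settings.json's "uri" parameter would normally select the
        # model; a default summarization pipeline is used here instead.
        self._pipe = pipeline("summarization")
        return True

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        texts = StringCodec.decode_input(payload.inputs[0])
        summaries = [out["summary_text"] for out in self._pipe(texts)]
        return InferenceResponse(
            model_name=self.name,
            outputs=[StringCodec.encode_output("output-0", summaries)],
        )
```

When `max_batch_size`/`max_batch_time` are set, MLServer's adaptive batching merges concurrent requests before they reach `predict`, which is why the readmes distinguish the two settings variants.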
-------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "name": "nlp-sum", 3 |     "implementation": "models.GeneralNLP", 4 |     "max_batch_size": 1, 5 |     "parameters": { 6 |         "uri": "./fakeuri" 7 |     } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, e.g.: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 |     "name": "nlp-sum", 9 |     "implementation": "models.GeneralNLP", 10 |     "max_batch_size": 5, 11 |     "max_batch_time": 1, 12 |     "parameters": { 13 |         "uri": "./fakeuri" 14 |     } 15 | } 16 | ``` 17 | If you are instead building the model into an MLServer Docker image, use the following (remove the batching variables): 18 | ```json 19 | { 20 |     "name": "nlp-sum", 21 |     "implementation": "models.GeneralNLP", 22 |     "parameters": { 23 |         "uri": "./fakeuri" 24 |     } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.35.0 2 | sentencepiece==0.1.97 3 | torch==2.1.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-sum/settings.json: -------------------------------------------------------------------------------- 1 | { 2 |     "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 |     sdghafouri) 3 | IMAGE_NAME=nlp-centralized:nlptrans 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et coureur, Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-trans", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/nlp/seldon-core-version/nodes/nlp-trans/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/input-sample.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=sum-qa-centralized:nlpqa 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . 
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-qa", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-qa", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-qa", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.35.0 2 | sentencepiece==0.1.97 3 | torch==2.1.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-qa/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 |
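The readmes in these nodes start MLServer with `mlserver start .`; a quick way to smoke-test a node served that way is to POST a V2 inference request against MLServer's default REST port. A minimal sketch, assuming the server listens on localhost:8080 and the model accepts a single BYTES tensor (the input name `text` is hypothetical, not taken from this repo):

```python
# smoke_test.py -- minimal sketch, not part of the repo.
# Assumes MLServer's default REST port (8080) and a model named "nlp-qa"
# that accepts one BYTES input; the input name "text" is hypothetical.
import requests

payload = {
    "inputs": [
        {
            "name": "text",
            "shape": [1],
            "datatype": "BYTES",
            "data": ["mister quilter is the apostle of the middle classes"],
        }
    ]
}

# V2 dataplane inference endpoint: /v2/models/<model-name>/infer
resp = requests.post(
    "http://localhost:8080/v2/models/nlp-qa/infer", json=payload, timeout=60
)
resp.raise_for_status()
print(resp.json()["outputs"])
```

With adaptive batching enabled (`max_batch_size: 5`, `max_batch_time: 1`), several such concurrent requests may be grouped into a single `predict()` call on the server side.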
-------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=sum-qa-centralized:nlpsum 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/input-sample.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-sum", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-sum", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-sum", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.35.0 2 | sentencepiece==0.1.97 3 | torch==2.1.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/sum-qa/seldon-core-version/nodes/nlp-sum/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/README.md: -------------------------------------------------------------------------------- 1 | Two-node pipeline 2 | 3 | List of available models per node 4 | Yolo: node 1 5 | source: https://github.com/ultralytics/yolov5 6 | 7 | List: 8 | 9 | yolov5n 10 | yolov5s 11 | yolov5m 12 | yolov5l 13 | yolov5x 14 | yolov5n6 15 | yolov5s6 16 | yolov5m6 17 | yolov5l6 18 | yolov5x6 19 | 20 | Resnet: node 2 21 | source: https://github.com/rwightman/pytorch-image-models 22 | 23 | List: 24 | 25 | resnet18 26 | resnet34 27 | resnet50 28 | resnet101 29 | resnet152 30 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | --------------------------------------------------------------------------------
/pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-centralized:resnet-human-debug 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-1-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/convertor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from PIL import Image 4 | import pathlib 5 | 6 | path = pathlib.Path(__file__).parent.resolve() 7 | image_name = "input-sample.npy" 8 | file_path = os.path.join(path, image_name) 9 | 10 | array = np.load(file_path) 11 | im = Image.fromarray(array) 12 | im.save("input-sample.JPEG") 13 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/input-sample.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/input-sample.npy -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "resnet-human", 3 | "implementation": "models.ResnetHuman", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.11.0 2 | torchvision==0.12.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human-debug/shape-fixer.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | dir = os.path.dirname(__file__) 5 | file_path = os.path.join(dir, "input-sample-multiple.txt") 6 | 7 | saved_file_path = os.path.join(dir, "input-sample.txt") 8 | 9 | with open(file_path, "r") as json_file: 10 | output = json.load(json_file) 11 | 12 | output["output"]["person"] = [output["output"]["person"][0]] 13 | 14 | with open(saved_file_path, "w") as json_write: 15 | json.dump(output, json_write) 16 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-centralized:resnet-human 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/convertor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from PIL import Image 4 | import pathlib 5 | 6 | path = pathlib.Path(__file__).parent.resolve() 7 | image_name = "input-sample.npy" 8 | file_path = os.path.join(path, image_name) 9 | 10 | array = np.load(file_path) 11 | im = Image.fromarray(array) 12 | im.save("input-sample.JPEG") 13 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/input-sample.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/input-sample.npy -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/model-settings.json:
-------------------------------------------------------------------------------- 1 | { 2 | "name": "resnet-human", 3 | "implementation": "models.ResnetHuman", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.11.0 2 | torchvision==0.12.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/resnet-human/shape-fixer.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | dir = os.path.dirname(__file__) 5 | file_path = os.path.join(dir, "input-sample-multiple.txt") 6 | 7 | saved_file_path = os.path.join(dir, "input-sample.txt") 8 | 9 | with open(file_path, "r") as json_file: 10 | output = json.load(json_file) 11 | 12 | output["output"]["person"] = [output["output"]["person"][0]] 13 | 14 | with open(saved_file_path, "w") as json_write: 15 | json.dump(output, json_write) 16 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-centralized:yolo-debug 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-1-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . 
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "yolo", 3 | "implementation": "yolo-model.Yolo", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/model_saver.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | 4 | 5 | par_dir = "./yolov5_torchhub" 6 | model_name = "yolov5m" 7 | torch.hub.set_dir(par_dir) 8 | model = torch.hub.load("ultralytics/yolov5", model_name) 9 | loc = f"/mnt/myshareddir/torchhub/{model_name}" 10 | 11 | dirs = os.listdir(par_dir) 12 | for d in dirs: 13 | if os.path.isdir(f"{par_dir}/{d}"): 14 | os.system(f"sudo mkdir {loc}") 15 | os.system(f"sudo mv {par_dir}/{d}/* {loc}") 16 | os.system(f"rm -rf {par_dir}") 17 | 18 | os.system(f"sudo mv {model_name}.pt {loc}") 19 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python==4.5.5.64 2 | matplotlib>=3.2.2 3 | PyYAML>=5.3.1 4 | scipy>=1.4.1 5 | gitpython>=3.1.30 6 | seaborn>=0.11.0 7 | psutil 8 | torch==1.11.0 9 | torchvision==0.12.0 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo-debug/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | 
.coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-centralized:yolo 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i '/^USER 1000/i \ 6 | RUN microdnf update -y && microdnf install -y git' Dockerfile 7 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 8 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 9 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 10 | for REPO in ${REPOS[@]} 11 | do 12 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 13 | docker push $REPO/$IMAGE_NAME 14 | done -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "yolo", 3 | "implementation": "yolo-model.Yolo", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/model_saver.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | 4 | 5 | par_dir = "./yolov5_torchhub" 6 | model_name = "yolov5m" 7 | torch.hub.set_dir(par_dir) 8 | model = torch.hub.load("ultralytics/yolov5", model_name) 9 | loc = f"/mnt/myshareddir/torchhub/{model_name}" 10 | 11 | dirs = os.listdir(par_dir) 12 | for d in dirs: 13 | if os.path.isdir(f"{par_dir}/{d}"): 14 | os.system(f"sudo mkdir {loc}") 15 | os.system(f"sudo mv {par_dir}/{d}/* {loc}") 16 | os.system(f"rm -rf {par_dir}") 17 | 18 | os.system(f"sudo mv {model_name}.pt {loc}") 19 | -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python==4.8.0.74 2 | matplotlib>=3.2.2 3 | PyYAML>=5.3.1 4 | scipy>=1.4.1 5 | gitpython>=3.1.30 6 | seaborn>=0.11.0 7 | psutil 8 | torch==1.13.1 9 | torchvision==0.14.1 10 | ultralytics==8.0.147 -------------------------------------------------------------------------------- /pipelines/mlserver-centralized/video/seldon-core-version/nodes/yolo/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } 
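`model_saver.py` above downloads `yolov5m` via `torch.hub` and moves both the hub checkout and the `.pt` weights into a shared directory, so serving pods can load the model without network access. A sketch of the matching offline load, assuming the directory layout that `model_saver.py` produces:

```python
# offline_load.py -- sketch, assuming the layout produced by model_saver.py:
# /mnt/myshareddir/torchhub/yolov5m holds the yolov5 repo checkout
# (including its hubconf.py) plus the yolov5m.pt weights.
import torch

HUB_DIR = "/mnt/myshareddir/torchhub/yolov5m"

# source="local" makes torch.hub treat HUB_DIR as the repo itself, so
# nothing is fetched from GitHub; "custom" is yolov5's hubconf entrypoint
# for loading arbitrary .pt weights.
model = torch.hub.load(HUB_DIR, "custom", path=f"{HUB_DIR}/yolov5m.pt", source="local")
model.eval()
```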
-------------------------------------------------------------------------------- /pipelines/mlserver-final/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/audio-qa/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=audio-qa-final:audio 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . 
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "audio", 3 | "implementation": "models.GeneralAudio", 4 | "max_batch_size": 1, 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "audio", 9 | "implementation": "models.GeneralAudio", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "audio", 21 | "implementation": "models.GeneralAudio", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torchaudio==0.12.1 -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/audio/test.py -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 |
IMAGE_NAME=audio-qa-final:nlpqa 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-qa", 3 | "implementation": "models.GeneralNLP", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-qa", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-qa", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/nodes/nlp-qa/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-qa/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 | curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 | -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 | -H "Content-Type: application/json" 6 | sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/Dockerfile.dockerignore:
-------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=audio-sent-final:audio 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1, 93680] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "audio", 3 | "implementation": "models.GeneralAudio", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "audio", 9 | "implementation": "models.GeneralAudio", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "audio", 21 | "implementation": "models.GeneralAudio", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torchaudio==0.12.1 -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/audio/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=audio-sent-final:nlpsent 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-sent", 3 | "implementation": "models.GeneralNLP", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-sent", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-sent", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/nodes/nlp-sent/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/audio-sent/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 | curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 | -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 | -H "Content-Type: application/json" 6 | sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/mock/__init__.py -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 |
*.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=mock-final:node-one 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev15-slim/custom-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "node-one", 3 | "implementation": "models.NodeOne", 4 | "max_batch_size": 5, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "node-one", 9 | "implementation": "models.NodeOne", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "node-one", 21 | "implementation": "models.NodeOne", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/requirements.txt -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/mock/seldon-core-version/nodes/node-one/test.py -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=mock-final:node-two 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev15-slim/custom-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "node-two", 3 | "implementation": "models.NodeTwo", 4 | "max_batch_size": 5, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "node-two", 9 | "implementation": "models.NodeTwo", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "node-two", 21 | "implementation": "models.NodeTwo", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/requirements.txt -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/nodes/node-two/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/mock/seldon-core-version/stress.bash: -------------------------------------------------------------------------------- 1 | while true 2 | do 3 | curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}'\ 4 | -X POST http://localhost:32000/seldon/seldon/linear-pipeline-separate-pods/api/v1.0/predictions\ 5 | -H "Content-Type: application/json" 6 | sleep 0.05 7 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et coureur,
Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=nlp-final:nlpli 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et coureur, Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-li", 3 | "implementation": "models.GeneralNLP", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/readme.md: -------------------------------------------------------------------------------- 1 | If you are running MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-li", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building MLServer into a Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-li", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-li/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=nlp-final-test:nlpsum 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/input-sample.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010.
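Every `model-settings.json` in these pipelines points `implementation` at a custom runtime such as `models.GeneralNLP`, whose source is not included in this section. For orientation, a minimal sketch of what such an MLServer runtime could look like; the transformers task and the output tensor name are illustrative assumptions, not the repo's actual code:

```python
# models.py -- hypothetical sketch of a runtime like models.GeneralNLP;
# the repo's real implementation is not shown in this section.
from mlserver import MLModel
from mlserver.codecs import StringCodec
from mlserver.types import InferenceRequest, InferenceResponse
from transformers import pipeline


class GeneralNLP(MLModel):
    async def load(self) -> bool:
        # The real runtime presumably derives the task/checkpoint from its
        # settings; "summarization" is just an illustrative choice here.
        self._pipe = pipeline("summarization")
        return True

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        # With adaptive batching enabled, payload may carry several texts
        # that MLServer grouped into one request.
        texts = StringCodec.decode_input(payload.inputs[0])
        summaries = [out["summary_text"] for out in self._pipe(texts)]
        return InferenceResponse(
            model_name=self.name,
            outputs=[StringCodec.encode_output("output", summaries)],
        )
```

Under this scheme the `"uri": "./fakeuri"` parameter appears to be a placeholder: the runtime seems to load its weights from the framework cache or a mounted path rather than from the model URI.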
-------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-sum", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum-startup-test/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=nlp-final:nlpsum 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/input-sample.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010. 
-------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-sum", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-sum", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-sum", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-sum/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=nlp-final:nlptrans 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build .
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/input-sample.txt: -------------------------------------------------------------------------------- 1 | Après des décennies en tant que pratiquant d'arts martiaux et coureur, Wes a "trouvé" le yoga en 2010. -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-trans", 3 | "implementation": "models.GeneralNLP", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-trans", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-trans", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/nlp/seldon-core-version/nodes/nlp-trans/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/input-sample-short.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010. 2 | He came to appreciate that its breadth and depth provide a wonderful ballast to stabilize the body and mind in the fast and technology-oriented lifestyle of today; 3 | yoga is an antidote to stress and a path to a better understanding of oneself and others. -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/input-sample-short.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010.
2 | He came to appreciate that its breadth and depth provide a wonderful ballast to stabilize the body and mind in the fast and technology-oriented lifestyle of today; 3 | yoga is an antidote to stress and a path to a better understanding of oneself and others. -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=sum-qa-final:nlpqa 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/input-sample.txt: -------------------------------------------------------------------------------- 1 | mister quilter is the apostle of the middle classes and we are glad to welcome his gospel -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-qa", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-qa", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-qa", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-qa/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=sum-qa-final:nlpsum 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [1] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/input-sample-short.txt: -------------------------------------------------------------------------------- 1 | After decades as a martial arts practitioner and runner, Wes \"found\" yoga in 2010. 2 | He came to appreciate that its breadth and depth provide a wonderful ballast to stabilize the body and mind in the fast and technology-oriented lifestyle of today; 3 | yoga is an antidote to stress and a path to a better understanding of oneself and others.
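Several of these nodes enable MLServer's adaptive batching: up to `max_batch_size` queued requests are merged if they arrive within `max_batch_time` seconds. A rough sketch, under the same local-endpoint and tensor-name assumptions as the earlier example, of firing concurrent requests so batching can actually kick in:

```python
import concurrent.futures

import requests

# Assumed local V2 endpoint; "nlp-qa" comes from model-settings.json above.
URL = "http://localhost:8080/v2/models/nlp-qa/infer"

def infer(text: str) -> dict:
    payload = {
        "inputs": [
            {"name": "text_inputs", "shape": [1], "datatype": "BYTES", "data": [text]}
        ]
    }
    return requests.post(URL, json=payload).json()

texts = [f"sample question {i}" for i in range(10)]

# Requests that land within max_batch_time are merged, up to max_batch_size.
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
    results = list(pool.map(infer, texts))
print(f"received {len(results)} responses")
```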
-------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nlp-sum", 3 | "implementation": "models.GeneralNLP", 4 | "max_batch_size": 1, 5 | "max_batch_time": 1, 6 | "parameters": { 7 | "uri": "./fakeuri" 8 | } 9 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start . 4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "nlp-sum", 9 | "implementation": "models.GeneralNLP", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "nlp-sum", 21 | "implementation": "models.GeneralNLP", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers==4.21.1 2 | sentencepiece==0.1.97 3 | torch==1.11.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/sum-qa/seldon-core-version/nodes/nlp-sum/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/README.md: -------------------------------------------------------------------------------- 1 | Two-node pipeline 2 | 3 | List of available models per node: 4 | Yolo: node 1 5 | source: https://github.com/ultralytics/yolov5 6 | 7 | List: 8 | 9 | yolov5n 10 | yolov5s 11 | yolov5m 12 | yolov5l 13 | yolov5x 14 | yolov5n6 15 | yolov5s6 16 | yolov5m6 17 | yolov5l6 18 | yolov5x6 19 | 20 | Resnet: node 2 21 | source: https://github.com/rwightman/pytorch-image-models 22 | 23 | List: 24 | 25 | resnet18 26 | resnet34 27 | resnet50 28 | resnet101 29 | resnet152 30 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for
programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-final:resnet-human-debug 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/convertor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from PIL import Image 4 | import pathlib 5 | 6 | path = pathlib.Path(__file__).parent.resolve() 7 | image_name = "input-sample.npy" 8 | file_path = os.path.join(path, image_name) 9 | 10 | array = np.load(file_path) 11 | im = Image.fromarray(array) 12 | im.save("input-sample.JPEG") 13 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/input-sample.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/input-sample.npy -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "resnet-human", 3 | "implementation": "models.ResnetHuman", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.11.0 2 | torchvision==0.12.0 --------------------------------------------------------------------------------
/pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human-debug/shape-fixer.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | dir = os.path.dirname(__file__) 5 | file_path = os.path.join(dir, "input-sample-multiple.txt") 6 | 7 | saved_file_path = os.path.join(dir, "input-sample.txt") 8 | 9 | with open(file_path, "r") as json_file: 10 | output = json.load(json_file) 11 | 12 | output["output"]["person"] = [output["output"]["person"][0]] 13 | 14 | with open(saved_file_path, "w") as json_write: 15 | json.dump(output, json_write) 16 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-final:resnet-human 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . 
--tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/convertor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | from PIL import Image 4 | import pathlib 5 | 6 | path = pathlib.Path(__file__).parent.resolve() 7 | image_name = "input-sample.npy" 8 | file_path = os.path.join(path, image_name) 9 | 10 | array = np.load(file_path) 11 | im = Image.fromarray(array) 12 | im.save("input-sample.JPEG") 13 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/input-sample.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/input-sample.npy -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "resnet-human", 3 | "implementation": "models.ResnetHuman", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/requirements.txt: -------------------------------------------------------------------------------- 1 | torch==1.11.0 2 | torchvision==0.12.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/resnet-human/shape-fixer.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | dir = os.path.dirname(__file__) 5 | file_path = os.path.join(dir, "input-sample-multiple.txt") 6 | 7 | saved_file_path = os.path.join(dir, "input-sample.txt") 8 | 9 | with open(file_path, "r") as json_file: 10 | output = json.load(json_file) 11 | 12 | output["output"]["person"] = [output["output"]["person"][0]] 13 | 14 | with open(saved_file_path, "w") as json_write: 15 | json.dump(output, json_write) 16 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs
and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-final:yolo-debug 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "yolo", 3 | "implementation": "yolo-model.Yolo", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python==4.5.5.64 2 | matplotlib>=3.2.2 3 | PyYAML>=5.3.1 4 | scipy>=1.4.1 5 | gitpython>=3.1.30 6 | seaborn>=0.11.0 7 | psutil 8 | torch==1.11.0 9 | torchvision==0.12.0 -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo-debug/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | 
pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=video-final:yolo 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i '/^USER 1000/i \ 6 | RUN microdnf update -y && microdnf install -y git' Dockerfile 7 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 8 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 9 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 10 | for REPO in ${REPOS[@]} 11 | do 12 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 13 | docker push $REPO/$IMAGE_NAME 14 | done -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/input-sample-shape.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_shape": [253, 400, 3] 3 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/input-sample.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/input-sample.JPEG -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "yolo", 3 | "implementation": "yolo-model.Yolo", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/model_saver.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | 4 | 5 | par_dir = "./yolov5_torchhub" 6 | model_name = "yolov5m" 7 | torch.hub.set_dir(par_dir) 8 | model = torch.hub.load("ultralytics/yolov5", model_name) 9 | loc = f"/mnt/myshareddir/torchhub/{model_name}" 10 | 11 | dirs = os.listdir(par_dir) 12 | for d in dirs: 13 | if os.path.isdir(f"{par_dir}/{d}"): 14 | os.system(f"sudo mkdir {loc}") 15 | os.system(f"sudo mv {par_dir}/{d}/* {loc}") 16 | os.system(f"rm -rf {par_dir}") 17 | 18 | os.system(f"sudo mv {model_name}.pt {loc}") 19 | -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python==4.8.0.74 2 | matplotlib>=3.2.2 3 | PyYAML>=5.3.1 4 | scipy>=1.4.1 5 | gitpython>=3.1.30 6 | seaborn>=0.11.0 7 | psutil 8 | torch==1.13.1 9 | torchvision==0.14.1 10 | ultralytics==8.0.147 -------------------------------------------------------------------------------- /pipelines/mlserver-final/video/seldon-core-version/nodes/yolo/settings.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | } -------------------------------------------------------------------------------- /pipelines/queue/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/queue/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=queue:queue 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-2-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/queue/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "queue", 3 | "implementation": "models.Queue", 4 | "parameters": { 5 | "uri": "./fakeuri" 6 | } 7 | } -------------------------------------------------------------------------------- /pipelines/queue/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "queue", 9 | "implementation": "models.Queue", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "queue", 21 | "implementation": "models.Queue", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/queue/requirements.txt: -------------------------------------------------------------------------------- 1 | charset-normalizer==2.0 -------------------------------------------------------------------------------- /pipelines/queue/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | 4 | } -------------------------------------------------------------------------------- /pipelines/router/Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | *.pyc 9 | *.pyo 10 | *.pyd 11 | bin 12 | 13 | # Mac file system 14 | **/.DS_Store 15 | 16 | # Python dev 17 | __pycache__ 18 | .Python 19 | env 20 | pip-log.txt 21 | pip-delete-this-directory.txt 22 | .mypy_cache 23 | eggs/ 24 | .eggs/ 25 | *.egg-info/ 26 | ./pytest_cache 27 | .tox 28 | build/ 29 | dist/ 30 | 31 | # Notebook Checkpoints 32 | .ipynb_checkpoints 33 | 34 | .coverage 35 | .coverage.* 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | *,cover 40 | *.log 41 | .git 42 | -------------------------------------------------------------------------------- /pipelines/router/build.sh: -------------------------------------------------------------------------------- 1 | REPOS=( 2 | sdghafouri) 3 | IMAGE_NAME=router:router 4 | mlserver dockerfile --include-dockerignore . 5 | sed -i 's/seldonio/sdghafouri/g' Dockerfile 6 | sed -i 's/1.3.0.dev4-slim/custom-1-slim/g' Dockerfile 7 | DOCKER_BUILDKIT=1 docker build . --tag=$IMAGE_NAME 8 | for REPO in ${REPOS[@]} 9 | do 10 | docker tag $IMAGE_NAME $REPO/$IMAGE_NAME 11 | docker push $REPO/$IMAGE_NAME 12 | done -------------------------------------------------------------------------------- /pipelines/router/model-settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "router", 3 | "max_batch_size": 1, 4 | "implementation": "models.Router", 5 | "parameters": { 6 | "uri": "./fakeuri" 7 | } 8 | } -------------------------------------------------------------------------------- /pipelines/router/readme.md: -------------------------------------------------------------------------------- 1 | If you are serving the model with MLServer through the command line, like: 2 | ``` 3 | mlserver start .
4 | ``` 5 | then use the following `model-settings.json` (include the batching variables): 6 | ```json 7 | { 8 | "name": "router", 9 | "implementation": "models.Router", 10 | "max_batch_size": 5, 11 | "max_batch_time": 1, 12 | "parameters": { 13 | "uri": "./fakeuri" 14 | } 15 | } 16 | ``` 17 | If you are building the MLServer Docker image instead, use the following (remove the batching variables): 18 | ```json 19 | { 20 | "name": "router", 21 | "implementation": "models.Router", 22 | "parameters": { 23 | "uri": "./fakeuri" 24 | } 25 | } 26 | ``` -------------------------------------------------------------------------------- /pipelines/router/requirements.txt: -------------------------------------------------------------------------------- 1 | charset-normalizer==2.0 -------------------------------------------------------------------------------- /pipelines/router/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "debug": "false" 3 | 4 | } -------------------------------------------------------------------------------- /prediction-modules/lstm-module/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/prediction-modules/lstm-module/__init__.py -------------------------------------------------------------------------------- /prediction-modules/lstm-module/workload.tbz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/prediction-modules/lstm-module/workload.tbz2 -------------------------------------------------------------------------------- /twitter-trace-preprocess/builder_req.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | def build_workload(): 5 | with open("data.json") as json_file: 6 | data = json.load(json_file) 7 | 8 | times = [0 for i in range(3600 * 24)] 9 | 10 | for key in data.keys(): 11 | time = key.split()[1] 12 | hour, minute, second = map(int, time.split(":")) 13 | times[hour * 3600 + minute * 60 + second] = data[key] 14 | 15 | file = open("workload.txt", "w") 16 | 17 | # Saving the array in a text file 18 | content = str(times) 19 | file.write(content) 20 | file.close() 21 | -------------------------------------------------------------------------------- /twitter-trace-preprocess/twitter-2018-04-25.tar.1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/reconfigurable-ml-pipeline/ipa/e1f08dde84e2bb721b2c78ad7ef651134abf5380/twitter-trace-preprocess/twitter-2018-04-25.tar.1 --------------------------------------------------------------------------------
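builder_req.py serializes the per-second request counts as a Python list literal, so the trace can be read straight back; a small sketch, assuming the workload.txt format produced above:

```python
import ast

# workload.txt holds one Python-style list literal with 86400 per-second request counts.
with open("workload.txt") as f:
    workload = ast.literal_eval(f.read())

assert len(workload) == 3600 * 24
print(f"total requests: {sum(workload)}, peak rps: {max(workload)}")
```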