├── datasources ├── __init__.py ├── requirements.txt ├── README.md └── avro_inference.py ├── examples ├── __init__.py ├── HCOPD_Avro_format │ ├── __init__.py │ ├── requirements.txt │ ├── label_scheme.avsc │ ├── data_scheme.avsc │ ├── HCOPD_data_stream_producer.py │ └── HCOPD_inference.py ├── MNIST_RAW_format │ ├── requirements.txt │ ├── mnist_dataset_inference_example_v2.py │ ├── mnist_dataset_training_example.py │ ├── mnist_dataset_inference_example.py │ ├── mnist_dataset_unsupervised_training_example.py │ ├── visualization.json │ └── mnist_dataset_online_training_example.py ├── FEDERATED_MNIST_RAW_format │ ├── requirements.txt │ ├── mnist_sample_input_format.json │ ├── mnist_dataset_federated_training_example.py │ └── mnist_dataset_unsupervised_federated_training_example.py ├── MLGPARK_STREAM_RAW_format │ ├── requirements.txt │ ├── MLGPARK_dataset_training_example.py │ ├── README.md │ └── MLGPARK_dataset_inference_example.py ├── VGG16_CIFAR10_RAW_format │ ├── requirements.txt │ ├── vgg16_training_example.py │ └── vgg16_inference_example.py ├── EUROSAT_RAW_format │ ├── requirements.txt │ ├── eurosat_dataset_training_example.py │ ├── eurosat_dataset_inference_example.py │ └── README.md └── SO2SAT_RAW_format │ ├── requirements.txt │ ├── so2sat_dataset_training_example.py │ └── so2sat_dataset_inference_example.py ├── backend ├── automl │ ├── __init__.py │ ├── tests.py │ ├── apps.py │ ├── routing.py │ ├── admin.py │ └── urls.py ├── autoweb │ ├── __init__.py │ ├── contracts │ │ └── token │ │ │ └── ERC20 │ │ │ ├── IERC20Metadata.sol │ │ │ └── Context.sol │ ├── urls.py │ ├── wsgi.py │ └── asgi.py ├── models │ ├── __init__.py │ ├── pre │ │ └── __init__.py │ └── trained │ │ └── __init__.py ├── nginx │ ├── Dockerfile │ └── nginx.conf ├── start.sh ├── requirements.txt ├── Dockerfile └── manage.py ├── frontend ├── src │ ├── assets │ │ ├── .gitkeep │ │ ├── env.js │ │ └── env.template.js │ ├── app │ │ ├── inference-view │ │ │ ├── inference-view.component.css │ │ │ └── inference-view.component.spec.ts │ │ ├── datasource-dialog │ │ │ ├── datasource-dialog.component.css │ │ │ ├── datasource-dialog.component.ts │ │ │ ├── datasource-dialog.component.html │ │ │ └── datasource-dialog.component.spec.ts │ │ ├── plot-view │ │ │ ├── plot-view.component.css │ │ │ ├── plot-view.component.spec.ts │ │ │ └── plot-view.component.html │ │ ├── confirm-dialog │ │ │ ├── confirm-dialog.component.css │ │ │ ├── confirm-dialog.component.html │ │ │ ├── confirm-dialog.component.ts │ │ │ └── confirm-dialog.component.spec.ts │ │ ├── configuration-view │ │ │ ├── configuration-view.component.css │ │ │ ├── configuration-view.component.spec.ts │ │ │ └── configuration-view.component.html │ │ ├── shared │ │ │ ├── max-char.pipe.spec.ts │ │ │ ├── configuration.model.ts │ │ │ ├── max-char.pipe.ts │ │ │ ├── ml.model.ts │ │ │ ├── inference.model.ts │ │ │ ├── filter.ts │ │ │ └── deployment.model.ts │ │ ├── datasource-list │ │ │ ├── datasource-list.component.css │ │ │ └── datasource-list.component.spec.ts │ │ ├── layout │ │ │ ├── sidenav-list │ │ │ │ ├── sidenav-list.component.css │ │ │ │ ├── sidenav-list.component.ts │ │ │ │ ├── sidenav-list.component.spec.ts │ │ │ │ └── sidenav-list.component.html │ │ │ └── header │ │ │ │ ├── header.component.ts │ │ │ │ ├── header.component.css │ │ │ │ ├── header.component.spec.ts │ │ │ │ └── header.component.html │ │ ├── app.component.ts │ │ ├── model-view │ │ │ ├── model-view.component.css │ │ │ └── model-view.component.spec.ts │ │ ├── deployment-view │ │ │ ├── deployment-view.component.css │ │ │ └── 
deployment-view.component.spec.ts │ │ ├── model-list │ │ │ ├── model-list.component.css │ │ │ └── model-list.component.spec.ts │ │ ├── inference-list │ │ │ ├── inference-list.component.css │ │ │ └── inference-list.component.spec.ts │ │ ├── app.component.css │ │ ├── services │ │ │ ├── model.service.spec.ts │ │ │ ├── deployment.service.spec.ts │ │ │ ├── configuration.service.spec.ts │ │ │ ├── visualization-ws.service.spec.ts │ │ │ ├── datasource.service.ts │ │ │ ├── inference.service.ts │ │ │ ├── deployment.service.ts │ │ │ ├── model.service.ts │ │ │ ├── visualization-ws.service.ts │ │ │ ├── configuration.service.ts │ │ │ └── result.service.ts │ │ ├── app.component.html │ │ ├── result-list │ │ │ ├── result-list.component.css │ │ │ └── result-list.component.spec.ts │ │ ├── visualization │ │ │ ├── visualization.component.spec.ts │ │ │ └── visualization.component.css │ │ ├── deployment-list │ │ │ ├── deployment-list.component.spec.ts │ │ │ └── deployment-list.component.css │ │ ├── app.component.spec.ts │ │ ├── configuration-list │ │ │ ├── configuration-list.component.spec.ts │ │ │ ├── configuration-list.component.css │ │ │ └── configuration-list.component.ts │ │ └── app-routing.module.ts │ ├── favicon.ico │ ├── environments │ │ ├── environment.prod.ts │ │ └── environment.ts │ ├── main.ts │ ├── test.ts │ ├── index.html │ └── styles.css ├── .dockerignore ├── e2e │ ├── tsconfig.json │ ├── src │ │ ├── app.po.ts │ │ └── app.e2e-spec.ts │ └── protractor.conf.js ├── tsconfig.app.json ├── .editorconfig ├── start.sh ├── tsconfig.spec.json ├── Dockerfile ├── browserslist ├── tsconfig.json ├── nginx-custom.conf ├── karma.conf.js ├── package.json └── tslint.json ├── kustomize ├── local │ ├── .gitignore │ ├── resources │ │ ├── namespace.yaml │ │ ├── kafka-service.yaml │ │ └── kafka-deployment.yaml │ └── kustomization.yaml ├── base │ ├── resources │ │ ├── frontend-service.yaml │ │ ├── backend-service.yaml │ │ ├── tf-executor-service.yaml │ │ ├── pth-executor-service.yaml │ │ ├── tf-executor-deployment.yaml │ │ ├── pth-executor-deployment.yaml │ │ ├── role.yaml │ │ ├── frontend-deployment.yaml │ │ ├── kafka-control-logger-deployment.yaml │ │ └── kafkaml-configmap.yaml │ └── kustomization.yaml ├── v1.0-gpu │ └── kustomization.yaml ├── v1.1-gpu │ └── kustomization.yaml ├── v1.3-gpu │ └── kustomization.yaml ├── master-gpu │ └── kustomization.yaml ├── v1.0 │ └── kustomization.yaml ├── v1.1 │ └── kustomization.yaml ├── v1.3 │ └── kustomization.yaml └── master │ └── kustomization.yaml ├── federated-module ├── federated_backend │ ├── automl │ │ ├── __init__.py │ │ ├── apps.py │ │ ├── admin.py │ │ ├── urls.py │ │ ├── serializers.py │ │ └── models.py │ ├── models │ │ ├── __init__.py │ │ ├── pre │ │ │ └── __init__.py │ │ └── trained │ │ │ └── __init__.py │ ├── autoweb │ │ ├── __init__.py │ │ ├── asgi.py │ │ ├── wsgi.py │ │ └── urls.py │ ├── requirements.txt │ ├── start.sh │ ├── Dockerfile │ └── manage.py ├── federated_model_training │ ├── pytorch │ │ └── .gitkeep │ └── tensorflow │ │ ├── start.sh │ │ ├── config.py │ │ ├── requirements.txt │ │ ├── Dockerfile │ │ ├── federated_training.py │ │ ├── federated_singleClassicTraining.py │ │ ├── federated_distributedClassicTraining.py │ │ ├── federated_singleIncrementalTraining.py │ │ └── federated_distributedIncrementalTraining.py ├── federated_data_control_logger │ ├── requirements.txt │ ├── federated_data_control_logger.sh │ ├── Dockerfile │ └── README.md ├── federated_model_control_logger │ ├── requirements.txt │ ├── federated_model_control_logger.sh │ ├── Dockerfile │ └── 
README.md └── kustomize │ ├── base │ ├── resources │ │ ├── federated-backend-service.yaml │ │ ├── federated-kafkaml-configmap.yaml │ │ ├── role.yaml │ │ ├── federated-data-control-logger.yaml │ │ └── federated-model-control-logger.yaml │ └── kustomization.yaml │ ├── v1.1-gpu │ └── kustomization.yaml │ ├── master-gpu │ └── kustomization.yaml │ ├── v1.1 │ └── kustomization.yaml │ └── master │ └── kustomization.yaml ├── kafka_control_logger ├── requirements.txt ├── start.sh ├── Dockerfile └── README.md ├── model_training ├── pytorch │ ├── start.sh │ ├── config.py │ ├── requirements.txt │ ├── Dockerfile │ ├── utils.py │ └── README.md └── tensorflow │ ├── start.sh │ ├── config.py │ ├── requirements.txt │ ├── Dockerfile │ └── README.md ├── model_inference ├── pytorch │ ├── config.py │ ├── start.sh │ ├── requirements.txt │ ├── Dockerfile │ └── README.md └── tensorflow │ ├── start.sh │ ├── config.py │ ├── requirements.txt │ ├── Dockerfile │ └── README.md ├── images ├── pipeline_.png ├── regression.png ├── create-model.png ├── classification.png ├── training-metrics.png ├── training-results.png ├── deploy-configuration.png ├── create-distributed-model.png ├── distributed-deploy-inference.png ├── distributed-training-metrics.png ├── distributed-training-results.png ├── deploy-federated-configuration.png ├── configure-distributed-deployment.png ├── create-distributed-configuration.png ├── deploy-distributed-configuration.png ├── deploy-unsupervised-configuration.png ├── deploy-incremental-configuration-1.png └── deploy-incremental-configuration-2.png ├── mlcode_executor ├── pthexecutor │ ├── start.sh │ ├── requirements.txt │ ├── Dockerfile │ └── README.md └── tfexecutor │ ├── start.sh │ ├── requirements.txt │ ├── Dockerfile │ └── README.md ├── .github └── workflows │ ├── backend.yml │ ├── frontend.yml │ ├── kafka_control_logger.yml │ ├── federated_backend.yml │ ├── federated_data_control_logger.yml │ ├── federated_model_control_logger.yml │ ├── tfexecutor.yml │ ├── pthexecutor.yml │ ├── tensorflow_model_training.yml │ ├── tensorflow_model_inference.yml │ ├── pytorch_model_training.yml │ ├── pytorch_model_inference.yml │ ├── federated_tensorflow_model_training.yml │ └── build.yml └── LICENSE /datasources/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/automl/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/autoweb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/models/pre/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /backend/models/trained/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kustomize/local/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /federated-module/federated_backend/automl/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /federated-module/federated_backend/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /federated-module/federated_backend/autoweb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /federated-module/federated_backend/models/pre/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /federated-module/federated_model_training/pytorch/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka_control_logger/requirements.txt: -------------------------------------------------------------------------------- 1 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /federated-module/federated_backend/models/trained/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/app/inference-view/inference-view.component.css: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/app/datasource-dialog/datasource-dialog.component.css: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /datasources/requirements.txt: -------------------------------------------------------------------------------- 1 | kafka-python==2.0.2 2 | avro-python3==1.9.2.1 -------------------------------------------------------------------------------- /kafka_control_logger/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python logger.py 3 | -------------------------------------------------------------------------------- /model_training/pytorch/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python training.py 3 | -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/requirements.txt: 
-------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /federated-module/federated_data_control_logger/requirements.txt: -------------------------------------------------------------------------------- 1 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /model_inference/pytorch/config.py: -------------------------------------------------------------------------------- 1 | # Global configuration file 2 | 3 | DEBUG = False -------------------------------------------------------------------------------- /model_inference/pytorch/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python inference.py 3 | -------------------------------------------------------------------------------- /model_inference/tensorflow/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python inference.py 3 | -------------------------------------------------------------------------------- /model_training/tensorflow/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python training.py 3 | -------------------------------------------------------------------------------- /model_inference/tensorflow/config.py: -------------------------------------------------------------------------------- 1 | # Global configuration file 2 | 3 | DEBUG = False -------------------------------------------------------------------------------- /examples/FEDERATED_MNIST_RAW_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /examples/MLGPARK_STREAM_RAW_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /examples/VGG16_CIFAR10_RAW_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | kafka-python==2.0.2 -------------------------------------------------------------------------------- /frontend/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | e2e/ 3 | dist/ 4 | Dockerfile 5 | .dockerignore 6 | -------------------------------------------------------------------------------- /model_inference/tensorflow/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow-io==0.23.1 2 | confluent-kafka==1.8.2 -------------------------------------------------------------------------------- /backend/automl/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 
4 | -------------------------------------------------------------------------------- /images/pipeline_.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/pipeline_.png
-------------------------------------------------------------------------------- /images/regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/regression.png
-------------------------------------------------------------------------------- /model_training/pytorch/config.py: -------------------------------------------------------------------------------- 1 | # Auto-model-training global configuration file 2 | 3 | DEBUG = False
-------------------------------------------------------------------------------- /frontend/src/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/frontend/src/favicon.ico
-------------------------------------------------------------------------------- /images/create-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/create-model.png
-------------------------------------------------------------------------------- /model_training/tensorflow/config.py: -------------------------------------------------------------------------------- 1 | # Auto-model-training global configuration file 2 | 3 | DEBUG = False
-------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | kafka-python==2.0.2 3 | avro-python3==1.9.2.1
-------------------------------------------------------------------------------- /federated-module/federated_model_control_logger/requirements.txt: -------------------------------------------------------------------------------- 1 | kafka-python==2.0.2 2 | confluent-kafka==1.5.0
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python federated_training.py
-------------------------------------------------------------------------------- /images/classification.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/classification.png
-------------------------------------------------------------------------------- /images/training-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/training-metrics.png
-------------------------------------------------------------------------------- /images/training-results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/training-results.png
-------------------------------------------------------------------------------- /mlcode_executor/pthexecutor/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | gunicorn app:app --bind 0.0.0.0:8002 --timeout 0 3 |
-------------------------------------------------------------------------------- /mlcode_executor/tfexecutor/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | gunicorn app:app --bind 0.0.0.0:8001 --timeout 0 3 |
-------------------------------------------------------------------------------- /examples/EUROSAT_RAW_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | tensorflow-datasets==4.5.2 3 | kafka-python==2.0.2
-------------------------------------------------------------------------------- /examples/SO2SAT_RAW_format/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow==2.7.0 2 | tensorflow-datasets==4.5.2 3 | kafka-python==2.0.2
-------------------------------------------------------------------------------- /images/deploy-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-configuration.png
-------------------------------------------------------------------------------- /backend/automl/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class AutomlConfig(AppConfig): 5 | name = 'automl' 6 |
-------------------------------------------------------------------------------- /backend/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.17.4-alpine 2 | 3 | RUN rm /etc/nginx/conf.d/default.conf 4 | COPY nginx.conf /etc/nginx/conf.d
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/config.py: -------------------------------------------------------------------------------- 1 | # Auto-model-training global configuration file 2 | 3 | DEBUG = False
-------------------------------------------------------------------------------- /images/create-distributed-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/create-distributed-model.png
-------------------------------------------------------------------------------- /images/distributed-deploy-inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/distributed-deploy-inference.png
-------------------------------------------------------------------------------- /images/distributed-training-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/distributed-training-metrics.png
-------------------------------------------------------------------------------- /images/distributed-training-results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/distributed-training-results.png
-------------------------------------------------------------------------------- /mlcode_executor/tfexecutor/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==2.0.2 2 |
gunicorn==20.0.4 3 | tensorflow-datasets==4.4.0 4 | protobuf==3.20.0 5 | -------------------------------------------------------------------------------- /federated-module/federated_data_control_logger/federated_data_control_logger.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python federated_data_control_logger.py -------------------------------------------------------------------------------- /images/deploy-federated-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-federated-configuration.png -------------------------------------------------------------------------------- /federated-module/federated_model_control_logger/federated_model_control_logger.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python federated_model_control_logger.py -------------------------------------------------------------------------------- /images/configure-distributed-deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/configure-distributed-deployment.png -------------------------------------------------------------------------------- /images/create-distributed-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/create-distributed-configuration.png -------------------------------------------------------------------------------- /images/deploy-distributed-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-distributed-configuration.png -------------------------------------------------------------------------------- /images/deploy-unsupervised-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-unsupervised-configuration.png -------------------------------------------------------------------------------- /kustomize/local/resources/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kafkaml 5 | labels: 6 | name: kafkaml -------------------------------------------------------------------------------- /federated-module/federated_backend/automl/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | class AutomlConfig(AppConfig): 4 | name = 'automl' 5 | -------------------------------------------------------------------------------- /images/deploy-incremental-configuration-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-incremental-configuration-1.png -------------------------------------------------------------------------------- /images/deploy-incremental-configuration-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ertis-research/kafka-ml/HEAD/images/deploy-incremental-configuration-2.png 
-------------------------------------------------------------------------------- /frontend/src/app/plot-view/plot-view.component.css: -------------------------------------------------------------------------------- 1 | .mat-form-field { 2 | width: 50%; 3 | } 4 | 5 | .img_container{ 6 | width: 75%; 7 | } 8 | 9 | .container { 10 | width: 100%; 11 | } -------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/label_scheme.avsc: -------------------------------------------------------------------------------- 1 | {"namespace": "label.ertis.uma.es", 2 | "type": "record", 3 | "name": "HCOPD", 4 | "fields": [ 5 | {"name": "diagnosis", "type": "boolean"} 6 | ] 7 | } -------------------------------------------------------------------------------- /frontend/src/app/confirm-dialog/confirm-dialog.component.css: -------------------------------------------------------------------------------- 1 | mat-dialog-content { 2 | font-family: Roboto, "Helvetica Neue", sans-serif; 3 | } 4 | 5 | mat-dialog-actions { 6 | margin-top: .7rem; 7 | } -------------------------------------------------------------------------------- /model_training/tensorflow/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow-io==0.23.1 2 | kafka-python==2.0.2 3 | scikit-learn==1.0.2 4 | seaborn==0.11.2 5 | confluent-kafka==1.8.2 6 | py-solc-x==2.0.2 7 | web3==5.28.0 8 | protobuf==3.20.3 -------------------------------------------------------------------------------- /backend/automl/routing.py: -------------------------------------------------------------------------------- 1 | # chat/routing.py 2 | from django.urls import re_path 3 | 4 | from . import websockets 5 | 6 | websocket_urlpatterns = [ 7 | re_path(r'ws/$', websockets.KafkaWSConsumer.as_asgi()), 8 | ] 9 | -------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/requirements.txt: -------------------------------------------------------------------------------- 1 | tensorflow-io==0.23.1 2 | kafka-python==2.0.2 3 | scikit-learn==1.0.2 4 | seaborn==0.11.2 5 | confluent-kafka==1.8.2 6 | web3==5.28.0 7 | protobuf==3.20.3 -------------------------------------------------------------------------------- /federated-module/federated_backend/automl/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | from automl.models import Datasource, ModelSource 4 | 5 | admin.site.register(ModelSource) 6 | admin.site.register(Datasource) 7 | -------------------------------------------------------------------------------- /frontend/src/assets/env.js: -------------------------------------------------------------------------------- 1 | (function(window) { 2 | window["env"] = window["env"] || {}; 3 | 4 | window["env"]["API_SERVER_URL"] = "http://localhost:8000"; 5 | window["env"]["ENABLE_FEDML_BLOCKCHAIN"] = "1"; 6 | })(this); -------------------------------------------------------------------------------- /frontend/src/environments/environment.prod.ts: -------------------------------------------------------------------------------- 1 | export const environment = { 2 | production: true, 3 | baseUrl: window["env"]["API_SERVER_URL"], 4 | enableFederatedBlockchain: window["env"]["ENABLE_FEDML_BLOCKCHAIN"] === '1' 5 | }; 6 | -------------------------------------------------------------------------------- 
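The HCOPD Avro schemes above pair with the kafka-python and avro-python3 pins from datasources/requirements.txt. A minimal sketch of serializing one record against label_scheme.avsc and producing it to Kafka — the broker address and topic name are assumptions, and the repository's actual producer logic lives in HCOPD_data_stream_producer.py:

import io

import avro.schema
from avro.io import BinaryEncoder, DatumWriter
from kafka import KafkaProducer

# Parse the scheme shipped with the example; the record below must match its fields.
schema = avro.schema.Parse(open("label_scheme.avsc").read())
writer = DatumWriter(schema)

buf = io.BytesIO()
writer.write({"diagnosis": True}, BinaryEncoder(buf))  # plain Avro binary encoding

producer = KafkaProducer(bootstrap_servers="localhost:9094")  # assumed broker address
producer.send("hcopd-labels", buf.getvalue())                 # assumed topic name
producer.flush()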
/federated-module/federated_backend/requirements.txt: -------------------------------------------------------------------------------- 1 | django==3.2.13 2 | djangorestframework==3.11.0 3 | django-cors-headers==3.2.1 4 | django-model-utils==4.0.0 5 | kubernetes==11.0.0 6 | gunicorn==20.0.4 7 | confluent-kafka==1.8.2 8 | daphne==3.0.2 9 | -------------------------------------------------------------------------------- /backend/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python manage.py makemigrations --noinput 3 | python manage.py migrate --run-syncdb 4 | # gunicorn autoweb.wsgi:application --bind 0.0.0.0:8000 --timeout 0 5 | daphne -b 0.0.0.0 -p 8000 -t 0 autoweb.asgi:application -------------------------------------------------------------------------------- /frontend/src/app/configuration-view/configuration-view.component.css: -------------------------------------------------------------------------------- 1 | .create-model-form { 2 | min-width: 150px; 3 | max-width: 500px; 4 | width: 100%; 5 | } 6 | 7 | .mat-card { 8 | min-width: 150px; 9 | max-width: 500px; 10 | } -------------------------------------------------------------------------------- /kustomize/base/resources/frontend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: frontend 5 | spec: 6 | selector: 7 | name: pod-frontend 8 | type: LoadBalancer 9 | ports: 10 | - port: 80 11 | protocol: TCP 12 | -------------------------------------------------------------------------------- /frontend/src/app/shared/max-char.pipe.spec.ts: -------------------------------------------------------------------------------- 1 | import { MaxCharPipe } from './max-char.pipe'; 2 | 3 | describe('MaxCharPipe', () => { 4 | it('create an instance', () => { 5 | const pipe = new MaxCharPipe(); 6 | expect(pipe).toBeTruthy(); 7 | }); 8 | }); 9 | -------------------------------------------------------------------------------- /frontend/src/app/shared/configuration.model.ts: -------------------------------------------------------------------------------- 1 | export class SimpleModel { 2 | id: number; 3 | name: string; 4 | } 5 | 6 | export class Configuration { 7 | id: number; 8 | name: string; 9 | description: string; 10 | ml_models : SimpleModel[]; 11 | } -------------------------------------------------------------------------------- /federated-module/federated_backend/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python manage.py makemigrations --noinput 3 | python manage.py migrate --run-syncdb 4 | gunicorn autoweb.wsgi:application --bind 0.0.0.0:8085 --timeout 0 5 | # daphne -b 0.0.0.0 -p 8085 -t 0 autoweb.asgi:application -------------------------------------------------------------------------------- /frontend/src/app/datasource-list/datasource-list.component.css: -------------------------------------------------------------------------------- 1 | 2 | .mat-column-view { 3 | flex: 0 0 70px !important; 4 | white-space: unset !important; 5 | } 6 | 7 | .mat-column-deploy { 8 | flex: 0 0 60px !important; 9 | white-space: unset !important; 10 | } -------------------------------------------------------------------------------- /kustomize/base/resources/backend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | 
name: backend 5 | spec: 6 | selector: 7 | name: pod-backend 8 | type: LoadBalancer 9 | ports: 10 | - protocol: "TCP" 11 | port: 8000 12 | targetPort: 8000 -------------------------------------------------------------------------------- /frontend/src/app/layout/sidenav-list/sidenav-list.component.css: -------------------------------------------------------------------------------- 1 | a { 2 | text-decoration: none; 3 | color: white; 4 | } 5 | 6 | a:hover, a:active{ 7 | color: lightgray; 8 | } 9 | 10 | .nav-caption{ 11 | display: inline-block; 12 | padding-left: 6px; 13 | } -------------------------------------------------------------------------------- /backend/requirements.txt: -------------------------------------------------------------------------------- 1 | django==3.2.13 2 | djangorestframework==3.11.0 3 | django-cors-headers==3.2.1 4 | django-model-utils==4.0.0 5 | kubernetes==11.0.0 6 | gunicorn==20.0.4 7 | confluent-kafka==1.8.2 8 | channels==3.0.4 9 | daphne==3.0.2 10 | py-solc-x==2.0.2 11 | web3==5.28.0 -------------------------------------------------------------------------------- /frontend/src/assets/env.template.js: -------------------------------------------------------------------------------- 1 | (function(window) { 2 | window.env = window.env || {}; 3 | 4 | // Environment variables 5 | window["env"]["API_SERVER_URL"] = "${BACKEND_URL}"; 6 | window["env"]["ENABLE_FEDML_BLOCKCHAIN"] = "${ENABLE_FEDML_BLOCKCHAIN}"; 7 | })(this); -------------------------------------------------------------------------------- /kustomize/base/resources/tf-executor-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: tfexecutor 5 | spec: 6 | selector: 7 | name: pod-tfexecutor 8 | type: LoadBalancer 9 | ports: 10 | - protocol: "TCP" 11 | port: 8001 12 | targetPort: 8001 -------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/data_scheme.avsc: -------------------------------------------------------------------------------- 1 | {"namespace": "data.ertis.uma.es", 2 | "type": "record", 3 | "name": "HCOPD", 4 | "fields": [ 5 | {"name": "gender", "type": "float"}, 6 | {"name": "age", "type": "float"}, 7 | {"name": "smoking", "type": "float"} 8 | ] 9 | } -------------------------------------------------------------------------------- /frontend/src/app/app.component.ts: -------------------------------------------------------------------------------- 1 | import { Component } from '@angular/core'; 2 | 3 | @Component({ 4 | selector: 'app-root', 5 | templateUrl: './app.component.html', 6 | styleUrls: ['./app.component.css'] 7 | }) 8 | export class AppComponent { 9 | title = 'autofront'; 10 | } 11 | -------------------------------------------------------------------------------- /kustomize/base/resources/pth-executor-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pthexecutor 5 | spec: 6 | selector: 7 | name: pod-pthexecutor 8 | type: LoadBalancer 9 | ports: 10 | - protocol: "TCP" 11 | port: 8002 12 | targetPort: 8002 -------------------------------------------------------------------------------- /frontend/e2e/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "../out-tsc/e2e", 5 | "module": "commonjs", 6 | "target": "es5", 7 
| "types": [ 8 | "jasmine", 9 | "jasminewd2", 10 | "node" 11 | ] 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /frontend/tsconfig.app.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./out-tsc/app", 5 | "types": [] 6 | }, 7 | "files": [ 8 | "src/main.ts", 9 | "src/polyfills.ts" 10 | ], 11 | "include": [ 12 | "src/**/*.d.ts" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /federated-module/kustomize/base/resources/federated-backend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: federated-backend 5 | spec: 6 | selector: 7 | name: pod-federated-backend 8 | type: LoadBalancer 9 | ports: 10 | - protocol: "TCP" 11 | port: 8085 12 | targetPort: 8085 -------------------------------------------------------------------------------- /mlcode_executor/pthexecutor/requirements.txt: -------------------------------------------------------------------------------- 1 | -f https://download.pytorch.org/whl/torch_stable.html 2 | 3 | # GPU Accelerated 4 | # torchvision==0.11.1+cu113 5 | 6 | # CPU Only 7 | torch==1.10.0+cpu 8 | torchvision==0.11.1+cpu 9 | 10 | torchinfo==1.6.3 11 | pytorch-ignite==0.4.7 12 | Flask==2.0.2 13 | gunicorn==20.0.4 -------------------------------------------------------------------------------- /frontend/src/app/model-view/model-view.component.css: -------------------------------------------------------------------------------- 1 | .create-model-form { 2 | min-width: 150px; 3 | max-width: 500px; 4 | width: 100%; 5 | } 6 | 7 | .mat-card { 8 | min-width: 150px; 9 | max-width: 500px; 10 | } 11 | 12 | .mat-radio-button ~ .mat-radio-button { 13 | margin-left: 16px; 14 | } -------------------------------------------------------------------------------- /frontend/src/app/deployment-view/deployment-view.component.css: -------------------------------------------------------------------------------- 1 | .create-deployment-form { 2 | min-width: 150px; 3 | max-width: 500px; 4 | width: 100%; 5 | } 6 | 7 | .mat-radio-button ~ .mat-radio-button { 8 | margin-left: 16px; 9 | } 10 | 11 | .mat-checkbox ~ .mat-checkbox { 12 | margin-left: 16px; 13 | } -------------------------------------------------------------------------------- /frontend/src/app/model-list/model-list.component.css: -------------------------------------------------------------------------------- 1 | 2 | .mat-column-view { 3 | flex: 0 0 70px !important; 4 | white-space: unset !important; 5 | } 6 | 7 | .mat-column-delete { 8 | flex: 0 0 70px !important; 9 | white-space: unset !important; 10 | } 11 | 12 | .mat-row { 13 | max-height: 30px; 14 | } -------------------------------------------------------------------------------- /frontend/src/app/shared/max-char.pipe.ts: -------------------------------------------------------------------------------- 1 | import { Pipe, PipeTransform } from '@angular/core'; 2 | 3 | @Pipe({ 4 | name: 'maxChar' 5 | }) 6 | export class MaxCharPipe implements PipeTransform { 7 | 8 | transform(value: string, limit: number): string { 9 | return value.substr(0, limit).trim() + '...'; 10 | } 11 | 12 | } 13 | -------------------------------------------------------------------------------- /frontend/src/app/shared/ml.model.ts: 
/frontend/src/app/shared/ml.model.ts: -------------------------------------------------------------------------------- 1 | export class SimpleModel { 2 | id: number; 3 | name: string; 4 | } 5 | 6 | export class MLModel { 7 | id: number; 8 | name: string; 9 | description: string; 10 | imports: string; 11 | code: string; 12 | distributed: boolean; 13 | father: SimpleModel; 14 | framework: string; 15 | }
-------------------------------------------------------------------------------- /model_inference/pytorch/requirements.txt: -------------------------------------------------------------------------------- 1 | -f https://download.pytorch.org/whl/torch_stable.html 2 | 3 | # GPU Accelerated 4 | # torchvision==0.11.1+cu113 5 | 6 | # CPU Only 7 | torch==1.10.0+cpu 8 | torchvision==0.11.1+cpu 9 | 10 | torchinfo==1.6.3 11 | pytorch-ignite==0.4.7 12 | avro==1.11.0 13 | confluent-kafka==1.8.2 14 | requests==2.27.1
-------------------------------------------------------------------------------- /backend/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | upstream django { 2 | server web:8000; 3 | } 4 | 5 | server { 6 | 7 | listen 80; 8 | 9 | location / { 10 | proxy_pass http://django; 11 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 12 | proxy_set_header Host $host; 13 | proxy_redirect off; 14 | } 15 | 16 | }
-------------------------------------------------------------------------------- /federated-module/federated_backend/automl/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | from django.views.generic import TemplateView 3 | from automl.views import DatasourceList, ModelFromControlLogger 4 | 5 | 6 | urlpatterns = [ 7 | path('federated-datasources/', DatasourceList.as_view()), 8 | path('model-control-logger/', ModelFromControlLogger.as_view()) 9 | ]
-------------------------------------------------------------------------------- /frontend/.editorconfig: -------------------------------------------------------------------------------- 1 | # Editor configuration, see https://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | charset = utf-8 6 | indent_style = space 7 | indent_size = 2 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | 11 | [*.ts] 12 | quote_type = single 13 | 14 | [*.md] 15 | max_line_length = off 16 | trim_trailing_whitespace = false 17 |
-------------------------------------------------------------------------------- /frontend/e2e/src/app.po.ts: -------------------------------------------------------------------------------- 1 | import { browser, by, element } from 'protractor'; 2 | 3 | export class AppPage { 4 | navigateTo(): Promise<unknown> { 5 | return browser.get(browser.baseUrl) as Promise<unknown>; 6 | } 7 | 8 | getTitleText(): Promise<string> { 9 | return element(by.css('app-root .content span')).getText() as Promise<string>; 10 | } 11 | } 12 |
-------------------------------------------------------------------------------- /frontend/start.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | export BACKEND_PROXY_URL=${BACKEND_PROXY_URL:-"http://localhost:80"} 4 | export BACKEND_URL=${BACKEND_URL:-"/api"} 5 | 6 | envsubst < /usr/share/nginx/html/assets/env.template.js > /usr/share/nginx/html/assets/env.js 7 | envsubst '$BACKEND_PROXY_URL' < /default.template.conf > /etc/nginx/conf.d/default.conf 8 | exec nginx -g 'daemon off;' 9 |
--------------------------------------------------------------------------------
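The federated automl/urls.py above maps the federated backend's REST routes; per its start.sh and Service manifest the app listens on port 8085. A hedged sketch of calling the federated-datasources endpoint with requests — the payload field names are illustrative guesses, the authoritative schema is in automl/serializers.py:

import requests

# Hypothetical payload; these field names are assumptions, not taken from the serializer.
payload = {
    "topic": "mnist-fed",
    "input_format": "RAW",
}

resp = requests.post(
    "http://localhost:8085/federated-datasources/",  # assumes a local port-forward
    json=payload,
    timeout=10,
)
resp.raise_for_status()
print(resp.status_code)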
/frontend/tsconfig.spec.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "./tsconfig.json", 3 | "compilerOptions": { 4 | "outDir": "./out-tsc/spec", 5 | "types": [ 6 | "jasmine", 7 | "node" 8 | ] 9 | }, 10 | "files": [ 11 | "src/test.ts", 12 | "src/polyfills.ts" 13 | ], 14 | "include": [ 15 | "src/**/*.spec.ts", 16 | "src/**/*.d.ts" 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /model_training/pytorch/requirements.txt: -------------------------------------------------------------------------------- 1 | -f https://download.pytorch.org/whl/torch_stable.html 2 | 3 | # GPU Accelerated 4 | # torchvision==0.11.1+cu113 5 | 6 | # CPU Only 7 | torch==1.10.0+cpu 8 | torchvision==0.11.1+cpu 9 | 10 | torchinfo==1.6.3 11 | pytorch-ignite==0.4.7 12 | avro==1.11.0 13 | kafka-python==2.0.2 14 | scikit-learn==1.0.2 15 | seaborn==0.11.2 16 | requests==2.27.1 -------------------------------------------------------------------------------- /frontend/src/app/inference-list/inference-list.component.css: -------------------------------------------------------------------------------- 1 | .mat-column-manage { 2 | flex: 0 0 50px !important; 3 | white-space: unset !important; 4 | } 5 | 6 | .mat-column-status { 7 | flex: 0 0 50px !important; 8 | white-space: unset !important; 9 | } 10 | 11 | .stopped{ 12 | color: rgb(233, 48, 48); 13 | } 14 | 15 | .deployed{ 16 | color: green; 17 | } -------------------------------------------------------------------------------- /kustomize/local/resources/kafka-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: kafka 6 | name: kafka 7 | spec: 8 | ports: 9 | - name: "9094" 10 | port: 9094 11 | targetPort: 9094 12 | - name: "9092" 13 | port: 9092 14 | targetPort: 9092 15 | selector: 16 | name: kafka 17 | type: LoadBalancer 18 | status: 19 | loadBalancer: {} -------------------------------------------------------------------------------- /frontend/src/app/app.component.css: -------------------------------------------------------------------------------- 1 | @import "~@angular/material/prebuilt-themes/indigo-pink.css"; 2 | 3 | .fill-remaining-space { 4 | flex: 1 1 auto; 5 | } 6 | md-icon{ 7 | font-family: 'Material Icons' !important; 8 | } 9 | 10 | mat-sidenav-container, mat-sidenav-content, mat-sidenav { 11 | height: 100%; 12 | } 13 | 14 | mat-sidenav { 15 | width: 250px; 16 | } 17 | 18 | main { 19 | padding: 10px; 20 | } -------------------------------------------------------------------------------- /backend/automl/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | # Register your models here. 
4 | 5 | from automl.models import MLModel, Configuration, Deployment, TrainingResult, Datasource, Inference 6 | 7 | 8 | admin.site.register(MLModel) 9 | admin.site.register(Configuration) 10 | admin.site.register(Deployment) 11 | admin.site.register(TrainingResult) 12 | admin.site.register(Datasource) 13 | admin.site.register(Inference)
-------------------------------------------------------------------------------- /frontend/src/main.ts: -------------------------------------------------------------------------------- 1 | import { enableProdMode } from '@angular/core'; 2 | import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; 3 | 4 | import { AppModule } from './app/app.module'; 5 | import { environment } from './environments/environment'; 6 | 7 | if (environment.production) { 8 | enableProdMode(); 9 | } 10 | 11 | platformBrowserDynamic().bootstrapModule(AppModule) 12 | .catch(err => console.error(err)); 13 |
-------------------------------------------------------------------------------- /frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14 as angular-builder 2 | COPY package.json *lock* /build/ 3 | WORKDIR /build 4 | RUN npm install 5 | COPY browserslist *.json *.js /build/ 6 | COPY src /build/src 7 | RUN npx ng build --prod 8 | 9 | FROM nginx:1.18-alpine 10 | COPY --from=angular-builder /build/dist/autofront /usr/share/nginx/html 11 | COPY ./nginx-custom.conf /default.template.conf 12 | COPY --chown=777 start.sh /start.sh 13 | CMD ["sh", "/start.sh"] 14 |
--------------------------------------------------------------------------------
/frontend/src/app/confirm-dialog/confirm-dialog.component.html: -------------------------------------------------------------------------------- 1 | <h1 mat-dialog-title>Are you sure?</h1> 2 | 3 | <div mat-dialog-content> 4 | You will remove {{ data.title }} 5 | </div> 6 | 7 | <div mat-dialog-actions> 8 | <button mat-button [mat-dialog-close]="false">No</button> 9 | <button mat-button [mat-dialog-close]="true" cdkFocusInitial>Yes</button> 10 | </div> 11 |
-------------------------------------------------------------------------------- /kustomize/local/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - resources/namespace.yaml 3 | - resources/kafka-deployment.yaml 4 | - resources/kafka-service.yaml 5 | - "../v1.0" 6 | 7 | namespace: kafkaml 8 | 9 | configMapGenerator: 10 | - name: kafkaml-configmap 11 | behavior: merge 12 | literals: 13 | - brokers=kafka:9092 14 | images: 15 | - name: kafka 16 | newName: docker.io/bitnami/kafka 17 | newTag: "3.4.0-debian-11-r22"
-------------------------------------------------------------------------------- /frontend/src/app/services/model.service.spec.ts: -------------------------------------------------------------------------------- 1 | import { TestBed } from '@angular/core/testing'; 2 | 3 | import { ModelService } from './model.service'; 4 | 5 | describe('ModelService', () => { 6 | let service: ModelService; 7 | 8 | beforeEach(() => { 9 | TestBed.configureTestingModule({}); 10 | service = TestBed.inject(ModelService); 11 | }); 12 | 13 | it('should be created', () => { 14 | expect(service).toBeTruthy(); 15 | }); 16 | }); 17 |
--------------------------------------------------------------------------------
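The local overlay above merges brokers=kafka:9092 into kafkaml-configmap, which the deployments can surface to pods as an environment variable. A sketch of a Python component picking that value up — the variable name BOOTSTRAP_SERVERS and the topic are assumptions, not names taken from the manifests:

import os

from kafka import KafkaConsumer

# Fall back to the in-cluster default from the configmap literal above.
brokers = os.environ.get("BOOTSTRAP_SERVERS", "kafka:9092").split(",")

consumer = KafkaConsumer("control", bootstrap_servers=brokers)  # assumed topic name
for message in consumer:
    print(message.topic, message.value[:32])
    break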
/frontend/src/app/app.component.html: -------------------------------------------------------------------------------- 1 | <mat-sidenav-container> 2 | <mat-sidenav #sidenav role="navigation"> 3 | <app-sidenav-list></app-sidenav-list> 4 | </mat-sidenav> 5 | <mat-sidenav-content> 6 | <layout-header (sidenavToggle)="sidenav.toggle()"></layout-header> 7 | <main> 8 | <router-outlet></router-outlet> 9 | </main> 10 | </mat-sidenav-content> 11 | </mat-sidenav-container> 12 |
13 | 14 | -------------------------------------------------------------------------------- /frontend/src/app/shared/inference.model.ts: -------------------------------------------------------------------------------- 1 | export class Inference { 2 | replicas: number; 3 | model_result: number; 4 | input_format: string; 5 | input_config: string; 6 | input_topic: string; 7 | output_topic: string; 8 | token: string; 9 | external_host: string; 10 | input_kafka_broker: string; 11 | output_kafka_broker: string; 12 | upper_kafka_broker: string; 13 | limit: number; 14 | output_upper: string; 15 | gpumem: number; 16 | } -------------------------------------------------------------------------------- /kafka_control_logger/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM python:3.8.6 3 | 4 | # set work directory 5 | WORKDIR /usr/src/app 6 | 7 | # set environment variables 8 | ENV PYTHONDONTWRITEBYTECODE 1 9 | ENV PYTHONUNBUFFERED 1 10 | 11 | # install dependencies 12 | COPY ./requirements.txt /usr/src/app/requirements.txt 13 | RUN pip install --no-cache-dir -r requirements.txt 14 | 15 | # copy project 16 | COPY . /usr/src/app/ 17 | 18 | RUN chmod +x ./start.sh 19 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /.github/workflows/backend.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - backend/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-backend: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: backend 18 | dockerfile: backend/Dockerfile 19 | name: kafka-ml-backend 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /frontend/browserslist: -------------------------------------------------------------------------------- 1 | # This file is used by the build system to adjust CSS and JS output to support the specified browsers below. 2 | # For additional information regarding the format and rule options, please see: 3 | # https://github.com/browserslist/browserslist#queries 4 | 5 | # You can see what browsers were selected by your queries by running: 6 | # npx browserslist 7 | 8 | > 0.5% 9 | last 2 versions 10 | Firefox ESR 11 | not dead 12 | not IE 9-11 # For IE 9-11 support, remove 'not'. 
-------------------------------------------------------------------------------- /.github/workflows/frontend.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - frontend/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-frontend: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: frontend 18 | dockerfile: frontend/Dockerfile 19 | name: kafka-ml-frontend 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /frontend/src/app/services/deployment.service.spec.ts: -------------------------------------------------------------------------------- 1 | import { TestBed } from '@angular/core/testing'; 2 | 3 | import { DeploymentService } from './deployment.service'; 4 | 5 | describe('DeploymentService', () => { 6 | let service: DeploymentService; 7 | 8 | beforeEach(() => { 9 | TestBed.configureTestingModule({}); 10 | service = TestBed.inject(DeploymentService); 11 | }); 12 | 13 | it('should be created', () => { 14 | expect(service).toBeTruthy(); 15 | }); 16 | }); 17 | -------------------------------------------------------------------------------- /federated-module/federated_backend/autoweb/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for autoweb project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 15 | 16 | application = get_asgi_application() -------------------------------------------------------------------------------- /federated-module/federated_backend/autoweb/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for autoweb project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /federated-module/kustomize/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | buildMetadata: [managedByLabel, originAnnotations, transformerAnnotations] 5 | 6 | resources: 7 | - resources/role.yaml 8 | - resources/federated-kafkaml-configmap.yaml 9 | - resources/federated-backend-deployment.yaml 10 | - resources/federated-backend-service.yaml 11 | - resources/federated-data-control-logger.yaml 12 | - resources/federated-model-control-logger.yaml -------------------------------------------------------------------------------- /frontend/src/app/services/configuration.service.spec.ts: -------------------------------------------------------------------------------- 1 | import { TestBed } from '@angular/core/testing'; 2 | 3 | import { ConfigurationService } from './configuration.service'; 4 | 5 | describe('ConfigurationService', () => { 6 | let service: ConfigurationService; 7 | 8 | beforeEach(() => { 9 | TestBed.configureTestingModule({}); 10 | service = TestBed.inject(ConfigurationService); 11 | }); 12 | 13 | it('should be created', () => { 14 | expect(service).toBeTruthy(); 15 | }); 16 | }); 17 | -------------------------------------------------------------------------------- /examples/FEDERATED_MNIST_RAW_format/mnist_sample_input_format.json: -------------------------------------------------------------------------------- 1 | { 2 | "features": { 3 | "label": { 4 | "num_classes": 10, 5 | "names": [ 6 | "0", 7 | "1", 8 | "2", 9 | "3", 10 | "4", 11 | "5", 12 | "6", 13 | "7", 14 | "8", 15 | "9" 16 | ] 17 | } 18 | }, 19 | "supervised_keys": { 20 | "input": "image", 21 | "output": "label" 22 | } 23 | } -------------------------------------------------------------------------------- /frontend/src/app/services/visualization-ws.service.spec.ts: -------------------------------------------------------------------------------- 1 | import { TestBed } from '@angular/core/testing'; 2 | 3 | import { VisualizationWsService } from './visualization-ws.service'; 4 | 5 | describe('VisualizationWsService', () => { 6 | let service: VisualizationWsService; 7 | 8 | beforeEach(() => { 9 | TestBed.configureTestingModule({}); 10 | service = TestBed.inject(VisualizationWsService); 11 | }); 12 | 13 | it('should be created', () => { 14 | expect(service).toBeTruthy(); 15 | }); 16 | }); 17 | -------------------------------------------------------------------------------- /federated-module/kustomize/v1.1-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../v1.1" 3 | 4 | configMapGenerator: 5 | - name: federated-kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - federated.tensorflow.training.image=ertis/federated-kafka-ml-tensorflow_model_training-gpu:v1.1 9 | - federated.pytorch.training.image=ertis/federated-kafka-ml-pytorch_model_training-gpu:v1.1 # Non existing 10 | # - kml.cloud.brokers=kafka-cloud.broker:9092 11 | # - federated.data.brokers=kafka-federated.broker:9092 12 | 
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG TFTAG=2.7.0 2 | FROM tensorflow/tensorflow:${TFTAG} 3 | 4 | # set work directory 5 | WORKDIR /usr/src/app 6 | 7 | # set environment variables 8 | ENV PYTHONDONTWRITEBYTECODE 1 9 | ENV PYTHONUNBUFFERED 1 10 | 11 | # install dependencies 12 | RUN pip install --upgrade pip 13 | COPY ./requirements.txt /usr/src/app/requirements.txt 14 | RUN pip install -r requirements.txt 15 | 16 | # copy project 17 | COPY . /usr/src/app/ 18 | 19 | RUN chmod +x ./start.sh 20 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /frontend/src/app/confirm-dialog/confirm-dialog.component.ts: -------------------------------------------------------------------------------- 1 | import { Component, Inject } from '@angular/core'; 2 | import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog'; 3 | 4 | @Component({ 5 | selector: 'app-confirm-dialog', 6 | templateUrl: './confirm-dialog.component.html', 7 | styleUrls: ['./confirm-dialog.component.css'] 8 | }) 9 | export class ConfirmDialogComponent { 10 | 11 | constructor( public dialogRef: MatDialogRef<ConfirmDialogComponent>, 12 | @Inject(MAT_DIALOG_DATA) public data: any) {} 13 | 14 | } 15 | -------------------------------------------------------------------------------- /frontend/src/app/shared/filter.ts: -------------------------------------------------------------------------------- 1 | import {Pipe, PipeTransform} from '@angular/core'; 2 | 3 | @Pipe({ 4 | name: 'datafilter' 5 | }) 6 | export class DataFilterPipe implements PipeTransform { 7 | transform(items: any[], searchText: string): any[] { 8 | if(!items) return []; 9 | if(!searchText) return items; 10 | searchText = searchText.toLowerCase(); 11 | 12 | return items.filter(item => { 13 | return Object.keys(item).find(key => String(item[key]).toLowerCase().includes(searchText)); 14 | }); 15 | } 16 | } -------------------------------------------------------------------------------- /.github/workflows/kafka_control_logger.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - kafka_control_logger/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-control-logger: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: kafka_control_logger 18 | dockerfile: kafka_control_logger/Dockerfile 19 | name: kafka-ml-kafka_control_logger 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /federated-module/federated_data_control_logger/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM python:3.8.6 3 | 4 | # set work directory 5 | WORKDIR /usr/src/app 6 | 7 | # set environment variables 8 | ENV PYTHONDONTWRITEBYTECODE 1 9 | ENV PYTHONUNBUFFERED 1 10 | 11 | # install dependencies 12 | COPY ./requirements.txt /usr/src/app/requirements.txt 13 | RUN pip install --no-cache-dir -r requirements.txt 14 | 15 | # copy project 16 | COPY . 
/usr/src/app/ 17 | 18 | RUN chmod +x ./federated_data_control_logger.sh 19 | CMD ["./federated_data_control_logger.sh"] 20 | -------------------------------------------------------------------------------- /frontend/src/app/layout/header/header.component.ts: -------------------------------------------------------------------------------- 1 | import { Component, OnInit, Output, EventEmitter } from '@angular/core'; 2 | 3 | @Component({ 4 | selector: 'layout-header', 5 | templateUrl: './header.component.html', 6 | styleUrls: ['./header.component.css'] 7 | }) 8 | export class HeaderComponent implements OnInit { 9 | 10 | @Output() public sidenavToggle = new EventEmitter(); 11 | 12 | constructor() { } 13 | 14 | ngOnInit() { 15 | } 16 | 17 | public onToggleSidenav = () => { 18 | this.sidenavToggle.emit(); 19 | } 20 | 21 | 22 | } 23 | -------------------------------------------------------------------------------- /backend/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM python:3.8.6 3 | # FROM python:3.7.7-slim-stretch # for Raspberry 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . /usr/src/app/ 19 | 20 | EXPOSE 8000 21 | 22 | RUN chmod +x ./start.sh 23 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /federated-module/federated_model_control_logger/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM python:3.8.6 3 | 4 | # set work directory 5 | WORKDIR /usr/src/app 6 | 7 | # set environment variables 8 | ENV PYTHONDONTWRITEBYTECODE 1 9 | ENV PYTHONUNBUFFERED 1 10 | 11 | # install dependencies 12 | COPY ./requirements.txt /usr/src/app/requirements.txt 13 | RUN pip install --no-cache-dir -r requirements.txt 14 | 15 | # copy project 16 | COPY . 
/usr/src/app/ 17 | 18 | RUN chmod +x ./federated_model_control_logger.sh 19 | CMD ["./federated_model_control_logger.sh"] 20 | -------------------------------------------------------------------------------- /frontend/src/app/layout/sidenav-list/sidenav-list.component.ts: -------------------------------------------------------------------------------- 1 | import { Component, OnInit, Output, EventEmitter } from '@angular/core'; 2 | 3 | @Component({ 4 | selector: 'app-sidenav-list', 5 | templateUrl: './sidenav-list.component.html', 6 | styleUrls: ['./sidenav-list.component.css'] 7 | }) 8 | export class SidenavListComponent implements OnInit { 9 | @Output() sidenavClose = new EventEmitter(); 10 | 11 | constructor() { } 12 | 13 | ngOnInit() { 14 | } 15 | 16 | public onSidenavClose = () => { 17 | this.sidenavClose.emit(); 18 | } 19 | 20 | } -------------------------------------------------------------------------------- /model_inference/tensorflow/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg TFTAG=2.7.0-gpu for GPU version 2 | ARG TFTAG=2.7.0 3 | FROM tensorflow/tensorflow:${TFTAG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . /usr/src/app/ 19 | 20 | RUN chmod +x ./start.sh 21 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /model_training/tensorflow/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg TFTAG=2.7.0-gpu for GPU version 2 | ARG TFTAG=2.7.0 3 | FROM tensorflow/tensorflow:${TFTAG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . 
/usr/src/app/ 19 | 20 | RUN chmod +x ./start.sh 21 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/mnist_dataset_inference_example_v2.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import logging 3 | from kafka import KafkaProducer, KafkaConsumer 4 | 5 | logging.basicConfig(level=logging.INFO) 6 | 7 | UPPER_TOPIC = 'minst-upper' 8 | BOOTSTRAP_SERVERS = '127.0.0.1:9094' 9 | 10 | upper_consumer = KafkaConsumer(UPPER_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 11 | """Creates an upper consumer to receive the predictions""" 12 | 13 | print('\n') 14 | 15 | print('Upper consumer: ') 16 | for msg in upper_consumer: 17 | print(msg.value.decode()) -------------------------------------------------------------------------------- /federated-module/kustomize/master-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../master" 3 | 4 | # namespace: kafkaml 5 | 6 | configMapGenerator: 7 | - name: federated-kafkaml-configmap 8 | behavior: merge 9 | literals: 10 | - federated.tensorflow.training.image=ertis/federated-kafka-ml-tensorflow_model_training-gpu:master 11 | - federated.pytorch.training.image=ertis/federated-kafka-ml-pytorch_model_training-gpu:master # Non existing 12 | # - kml.cloud.brokers=kafka-cloud.broker:9092 13 | # - federated.data.brokers=kafka-federated.broker:9092 14 | -------------------------------------------------------------------------------- /kustomize/base/resources/tf-executor-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: tfexecutor 6 | name: tfexecutor 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | name: pod-tfexecutor 12 | template: 13 | metadata: 14 | labels: 15 | name: pod-tfexecutor 16 | name: tfexecutor 17 | spec: 18 | containers: 19 | - name: tfexecutor 20 | image: kafka-ml-tfexecutor 21 | ports: 22 | - containerPort: 8001 23 | imagePullPolicy: Always 24 | -------------------------------------------------------------------------------- /mlcode_executor/tfexecutor/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg TFTAG=2.7.0-gpu for GPU version 2 | ARG TFTAG=2.7.0 3 | FROM tensorflow/tensorflow:${TFTAG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . 
/usr/src/app/ 19 | 20 | EXPOSE 8001 21 | 22 | RUN chmod +x ./start.sh 23 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /model_inference/pytorch/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime for GPU version 2 | ARG BASEIMG=python:3.8.6 3 | FROM ${BASEIMG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . /usr/src/app/ 19 | 20 | RUN chmod +x ./start.sh 21 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /model_training/pytorch/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime for GPU version 2 | ARG BASEIMG=python:3.8.6 3 | FROM ${BASEIMG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . /usr/src/app/ 19 | 20 | RUN chmod +x ./start.sh 21 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /.github/workflows/federated_backend.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - federated-module/federated_backend/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-federated-backend: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: federated-module/federated_backend 18 | dockerfile: federated-module/federated_backend/Dockerfile 19 | name: federated-kafka-ml-backend 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /federated-module/federated_backend/Dockerfile: -------------------------------------------------------------------------------- 1 | # pull official base image 2 | FROM python:3.8.6 3 | # FROM python:3.7.7-slim-stretch # for Raspberry 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . 
/usr/src/app/ 19 | 20 | EXPOSE 8000 21 | 22 | RUN chmod +x ./start.sh 23 | CMD ["./start.sh"] 24 | -------------------------------------------------------------------------------- /kustomize/base/resources/pth-executor-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pthexecutor 6 | name: pthexecutor 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | name: pod-pthexecutor 12 | template: 13 | metadata: 14 | labels: 15 | name: pod-pthexecutor 16 | name: pthexecutor 17 | spec: 18 | containers: 19 | - name: pthexecutor 20 | image: kafka-ml-pthexecutor 21 | ports: 22 | - containerPort: 8002 23 | imagePullPolicy: Always 24 | -------------------------------------------------------------------------------- /frontend/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compileOnSave": false, 3 | "compilerOptions": { 4 | "baseUrl": "./", 5 | "outDir": "./dist/out-tsc", 6 | "sourceMap": true, 7 | "declaration": false, 8 | "downlevelIteration": true, 9 | "experimentalDecorators": true, 10 | "module": "esnext", 11 | "moduleResolution": "node", 12 | "importHelpers": true, 13 | "target": "es2015", 14 | "lib": [ 15 | "es2018", 16 | "dom" 17 | ] 18 | }, 19 | "angularCompilerOptions": { 20 | "fullTemplateTypeCheck": true, 21 | "strictInjectionParameters": true 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /mlcode_executor/pthexecutor/Dockerfile: -------------------------------------------------------------------------------- 1 | # --build-arg BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime for GPU version 2 | ARG BASEIMG=python:3.8.6 3 | FROM ${BASEIMG} 4 | 5 | # set work directory 6 | WORKDIR /usr/src/app 7 | 8 | # set environment variables 9 | ENV PYTHONDONTWRITEBYTECODE 1 10 | ENV PYTHONUNBUFFERED 1 11 | 12 | # install dependencies 13 | RUN pip install --upgrade pip 14 | COPY ./requirements.txt /usr/src/app/requirements.txt 15 | RUN pip install -r requirements.txt 16 | 17 | # copy project 18 | COPY . 
/usr/src/app/ 19 | 20 | EXPOSE 8002 21 | 22 | RUN chmod +x ./start.sh 23 | CMD ["./start.sh"] -------------------------------------------------------------------------------- /frontend/src/app/result-list/result-list.component.css: -------------------------------------------------------------------------------- 1 | 2 | .mat-form-field { 3 | font-size: 14px; 4 | width: 80%; 5 | } 6 | 7 | .mat-column-inference, .mat-column-chart { 8 | flex: 0 0 60px !important; 9 | white-space: unset !important; 10 | } 11 | 12 | .mat-column-manage, .mat-column-download, .mat-column-status { 13 | flex: 0 0 50px !important; 14 | white-space: unset !important; 15 | } 16 | 17 | .created { 18 | color: grey; 19 | } 20 | 21 | .deployed { 22 | color: #3F51B5; 23 | } 24 | 25 | .stopped { 26 | color: rgb(233, 48, 48); 27 | } 28 | 29 | .finished { 30 | color: green; 31 | } -------------------------------------------------------------------------------- /.github/workflows/federated_data_control_logger.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - federated-module/federated_data_control_logger/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-federated-data-control-logger: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: federated-module/federated_data_control_logger 18 | dockerfile: federated-module/federated_data_control_logger/Dockerfile 19 | name: federated-kafka-ml-data_control_logger 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /frontend/src/app/datasource-dialog/datasource-dialog.component.ts: -------------------------------------------------------------------------------- 1 | import { Component, Inject } from '@angular/core'; 2 | import { MatDialogRef, MAT_DIALOG_DATA } from '@angular/material/dialog'; 3 | 4 | @Component({ 5 | selector: 'app-datasource-dialog', 6 | templateUrl: './datasource-dialog.component.html', 7 | styleUrls: ['./datasource-dialog.component.css'] 8 | }) 9 | export class DatasourceDialogComponent { 10 | 11 | deployment: Number; 12 | 13 | constructor( public dialogRef: MatDialogRef<DatasourceDialogComponent>, 14 | @Inject(MAT_DIALOG_DATA) public data: any) {} 15 | 16 | onNoClick(): void { 17 | this.dialogRef.close(); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /.github/workflows/federated_model_control_logger.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - federated-module/federated_model_control_logger/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-federated-model-control-logger: 15 | uses: ./.github/workflows/build.yml 16 | with: 17 | context: federated-module/federated_model_control_logger 18 | dockerfile: federated-module/federated_model_control_logger/Dockerfile 19 | name: federated-kafka-ml-model_control_logger 20 | platforms: linux/amd64 21 | secrets: inherit 22 | -------------------------------------------------------------------------------- /federated-module/kustomize/base/resources/federated-kafkaml-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: federated-kafkaml-configmap 5 | data: 6 | federated.data.control.topic: 
FEDERATED_DATA_CONTROL_TOPIC 7 | federated.model.control.topic: FEDERATED_MODEL_CONTROL_TOPIC 8 | federated.backend.url: federated-backend:8085 9 | backend.allowedhosts: 127.0.0.1,localhost,federated-backend 10 | 11 | # kml.cloud.brokers: cloud-kafka-broker:9092 12 | # federated.data.brokers: data-kafka-broker:9092 13 | # debug: "0" 14 | 15 | # Blockchain configuration 16 | fedml.blockchain.wallet-address: "0x0" 17 | fedml.blockchain.wallet-key: asdf -------------------------------------------------------------------------------- /kustomize/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | buildMetadata: [managedByLabel, originAnnotations, transformerAnnotations] 5 | 6 | resources: 7 | - resources/role.yaml 8 | - resources/kafkaml-configmap.yaml 9 | - resources/backend-deployment.yaml 10 | - resources/backend-service.yaml 11 | - resources/frontend-deployment.yaml 12 | - resources/frontend-service.yaml 13 | - resources/kafka-control-logger-deployment.yaml 14 | - resources/pth-executor-deployment.yaml 15 | - resources/pth-executor-service.yaml 16 | - resources/tf-executor-deployment.yaml 17 | - resources/tf-executor-service.yaml 18 | -------------------------------------------------------------------------------- /frontend/src/app/layout/header/header.component.css: -------------------------------------------------------------------------------- 1 | a { 2 | text-decoration: none; 3 | color: white; 4 | } 5 | 6 | a:hover, a:active{ 7 | color: lightgray; 8 | } 9 | .navigation-items{ 10 | list-style-type: none; 11 | padding: 10px; 12 | margin: 0; 13 | } 14 | 15 | @media screen and (max-width: 600px) { 16 | .navigation-items { 17 | display: none; 18 | } 19 | } 20 | 21 | .navigation-items li { 22 | display: inline-block; 23 | } 24 | 25 | mat-toolbar{ 26 | border-radius: 3px; 27 | } 28 | 29 | li a { 30 | text-align: center; 31 | padding: 10px; 32 | } 33 | 34 | .mat-toolbar-row, .mat-toolbar-single-row { 35 | height: 55px; 36 | } -------------------------------------------------------------------------------- /kustomize/v1.0-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../v1.0" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training-gpu:v1.0 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference-gpu:v1.0 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training-gpu:v1.0 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference-gpu:v1.0 12 | 13 | images: 14 | - name: ertis/kafka-ml-pthexecutor 15 | newName: ertis/kafka-ml-pthexecutor-gpu 16 | - name: ertis/kafka-ml-tfexecutor 17 | newName: ertis/kafka-ml-tfexecutor-gpu 18 | -------------------------------------------------------------------------------- /kustomize/v1.1-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../v1.1" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training-gpu:v1.1 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference-gpu:v1.1 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training-gpu:v1.1 11 | - 
pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference-gpu:v1.1 12 | 13 | images: 14 | - name: ertis/kafka-ml-pthexecutor 15 | newName: ertis/kafka-ml-pthexecutor-gpu 16 | - name: ertis/kafka-ml-tfexecutor 17 | newName: ertis/kafka-ml-tfexecutor-gpu 18 | -------------------------------------------------------------------------------- /kustomize/v1.3-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../v1.3" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training-gpu:v1.3 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference-gpu:v1.3 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training-gpu:v1.3 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference-gpu:v1.3 12 | 13 | images: 14 | - name: ertis/kafka-ml-pthexecutor 15 | newName: ertis/kafka-ml-pthexecutor-gpu 16 | - name: ertis/kafka-ml-tfexecutor 17 | newName: ertis/kafka-ml-tfexecutor-gpu 18 | -------------------------------------------------------------------------------- /kustomize/master-gpu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../master" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training-gpu:master 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference-gpu:master 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training-gpu:master 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference-gpu:master 12 | 13 | images: 14 | - name: ertis/kafka-ml-pthexecutor 15 | newName: ertis/kafka-ml-pthexecutor-gpu 16 | - name: ertis/kafka-ml-tfexecutor 17 | newName: ertis/kafka-ml-tfexecutor-gpu 18 | -------------------------------------------------------------------------------- /backend/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 9 | 10 | try: 11 | from django.core.management import execute_from_command_line 12 | except ImportError as exc: 13 | raise ImportError( 14 | "Couldn't import Django. Are you sure it's installed and " 15 | "available on your PYTHONPATH environment variable? Did you " 16 | "forget to activate a virtual environment?" 
17 | ) from exc 18 | execute_from_command_line(sys.argv) 19 | 20 | 21 | if __name__ == '__main__': 22 | main() 23 | -------------------------------------------------------------------------------- /frontend/src/app/services/datasource.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | 4 | import { environment } from '../../environments/environment'; 5 | 6 | @Injectable({ 7 | providedIn: 'root' 8 | }) 9 | 10 | export class DatasourceService { 11 | 12 | baseUrl = environment.baseUrl; 13 | 14 | constructor(private httpClient: HttpClient) { } 15 | 16 | url = this.baseUrl + '/datasources/'; 17 | 18 | getDatasources(){ 19 | return this.httpClient.get(this.url); 20 | } 21 | 22 | deployDatasource(data: JSON){ 23 | const url = this.url + 'kafka'; 24 | return this.httpClient.post(url, data); 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /federated-module/federated_backend/automl/serializers.py: -------------------------------------------------------------------------------- 1 | from rest_framework import serializers 2 | from automl.models import ModelSource, Datasource 3 | 4 | class ModelSourceSerializer(serializers.ModelSerializer): 5 | class Meta: 6 | model = ModelSource 7 | fields = ['federated_string_id', 'data_restriction', 'min_data', 'input_shape', 'output_shape', 'framework', 'time', 'distributed', 'blockchain'] 8 | 9 | 10 | class DatasourceSerializer(serializers.ModelSerializer): 11 | class Meta: 12 | model = Datasource 13 | fields = ['incremental', 'topic', 'unsupervised_topic', 'input_format', 'input_config', 'description', 'dataset_restrictions', 14 | 'validation_rate', 'test_rate', 'total_msg', 'time'] -------------------------------------------------------------------------------- /federated-module/federated_backend/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 
16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /frontend/e2e/src/app.e2e-spec.ts: -------------------------------------------------------------------------------- 1 | import { AppPage } from './app.po'; 2 | import { browser, logging } from 'protractor'; 3 | 4 | describe('workspace-project App', () => { 5 | let page: AppPage; 6 | 7 | beforeEach(() => { 8 | page = new AppPage(); 9 | }); 10 | 11 | it('should display welcome message', () => { 12 | page.navigateTo(); 13 | expect(page.getTitleText()).toEqual('autofront app is running!'); 14 | }); 15 | 16 | afterEach(async () => { 17 | // Assert that there are no errors emitted from the browser 18 | const logs = await browser.manage().logs().get(logging.Type.BROWSER); 19 | expect(logs).not.toContain(jasmine.objectContaining({ 20 | level: logging.Level.SEVERE, 21 | } as logging.Entry)); 22 | }); 23 | }); 24 | -------------------------------------------------------------------------------- /kustomize/base/resources/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kafkaml 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | kind: Role 8 | metadata: 9 | name: kafkaml-role 10 | rules: 11 | - apiGroups: ["", "apps", "batch"] 12 | resources: 13 | - deployments 14 | - jobs 15 | - pods 16 | - replicasets 17 | - services 18 | - replicationcontrollers 19 | verbs: ["create", "get", "list", "delete", "watch"] 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: RoleBinding 23 | metadata: 24 | name: kafkaml-rolebinding 25 | roleRef: 26 | apiGroup: rbac.authorization.k8s.io 27 | kind: Role 28 | name: kafkaml-role 29 | subjects: 30 | - kind: ServiceAccount 31 | name: kafkaml 32 | -------------------------------------------------------------------------------- /frontend/src/app/layout/header/header.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { HeaderComponent } from './header.component'; 4 | 5 | describe('HeaderComponent', () => { 6 | let component: HeaderComponent; 7 | let fixture: ComponentFixture<HeaderComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ HeaderComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(HeaderComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /.github/workflows/tfexecutor.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - mlcode_executor/tfexecutor/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-tfexecutor: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-tfexecutor 19 | - name: kafka-ml-tfexecutor-gpu 20 | build-args: "TFTAG=2.7.0-gpu" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: mlcode_executor/tfexecutor 24 | dockerfile: mlcode_executor/tfexecutor/Dockerfile 25 | name: ${{ matrix.name }} 26 | 
platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /frontend/src/app/plot-view/plot-view.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { PlotViewComponent } from './plot-view.component'; 4 | 5 | describe('PlotViewComponent', () => { 6 | let component: PlotViewComponent; 7 | let fixture: ComponentFixture<PlotViewComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ PlotViewComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(PlotViewComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/datasource-dialog/datasource-dialog.component.html: -------------------------------------------------------------------------------- 1 | 2 | <h1 mat-dialog-title>Send datasource to deployment</h1> 3 | 4 | <div mat-dialog-content> 5 | Please, make sure that the timestamp of the datasource is less than KAFKA_LOG_RETENTION_DAYS. 6 | Otherwise, the datasource could have been deleted from Kafka 7 | </div> 8 | 9 | <mat-form-field> 10 | <input matInput type="number" placeholder="Deployment ID" 11 | [(ngModel)]="deployment"> 12 | </mat-form-field> 13 | 14 | <div mat-dialog-actions> 15 | <button mat-button (click)="onNoClick()">Cancel</button> 16 | <button mat-button [mat-dialog-close]="deployment" cdkFocusInitial>Send</button> 17 | </div>
-------------------------------------------------------------------------------- /examples/EUROSAT_RAW_format/eurosat_dataset_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | import tensorflow as tf 7 | import tensorflow_datasets as tfds 8 | import logging 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | 12 | eurosat = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 13 | description='eurosat dataset', validation_rate=0.1, test_rate=0.1) 14 | 15 | ds = tfds.load('eurosat', as_supervised=True, shuffle_files=True, data_dir='datasets/eurosat') 16 | ds['train'] = ds['train'].shuffle(buffer_size=1000) 17 | 18 | for image, label in ds['train']: 19 | eurosat.send(data=image.numpy(), label=label.numpy()) 20 | 21 | eurosat.close() -------------------------------------------------------------------------------- /frontend/src/app/model-list/model-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { ModelListComponent } from './model-list.component'; 4 | 5 | describe('ModelListComponent', () => { 6 | let component: ModelListComponent; 7 | let fixture: ComponentFixture<ModelListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ ModelListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(ModelListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/model-view/model-view.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { ModelViewComponent } from './model-view.component'; 4 | 5 | describe('ModelViewComponent', () => { 6 | let component: ModelViewComponent; 7 | let fixture: ComponentFixture<ModelViewComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ ModelViewComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(ModelViewComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/result-list/result-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { ResultListComponent } from './result-list.component'; 4 | 5 | describe('ResultListComponent', () => { 6 | let component: ResultListComponent; 7 | let fixture: ComponentFixture<ResultListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ ResultListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = 
TestBed.createComponent(ResultListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /examples/MLGPARK_STREAM_RAW_format/MLGPARK_dataset_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | 7 | import numpy as np 8 | import pandas as pd 9 | 10 | import logging 11 | logging.basicConfig(level=logging.INFO) 12 | 13 | 14 | df = pd.read_csv('MLGPARK_Dataset.csv') 15 | 16 | target = "free_places" 17 | features = df.drop(columns="free_places").to_numpy() 18 | labels = np.ravel(df[target]) 19 | 20 | print(features) 21 | 22 | 23 | mlgpark = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 24 | description='MLGPARK dataset', validation_rate=0.1, test_rate=0.1) 25 | 26 | for (x, y) in zip(features, labels): 27 | mlgpark.send(data=x, label=y) 28 | 29 | mlgpark.close() -------------------------------------------------------------------------------- /frontend/src/app/layout/sidenav-list/sidenav-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { SidenavListComponent } from './sidenav-list.component'; 4 | 5 | describe('SidenavListComponent', () => { 6 | let component: SidenavListComponent; 7 | let fixture: ComponentFixture<SidenavListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ SidenavListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(SidenavListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /.github/workflows/pthexecutor.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - mlcode_executor/pthexecutor/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-pthexecutor: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-pthexecutor 19 | - name: kafka-ml-pthexecutor-gpu 20 | build-args: "BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: mlcode_executor/pthexecutor 24 | dockerfile: mlcode_executor/pthexecutor/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /frontend/src/app/visualization/visualization.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { VisualizationComponent } from './visualization.component'; 4 | 5 | describe('VisualizationComponent', () => { 6 | let component: VisualizationComponent; 7 | let fixture: ComponentFixture<VisualizationComponent>; 8 | 9 | 
beforeEach(async () => { 10 | await TestBed.configureTestingModule({ 11 | declarations: [ VisualizationComponent ] 12 | }) 13 | .compileComponents(); 14 | }); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(VisualizationComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /.github/workflows/tensorflow_model_training.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - model_training/tensorflow/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-tf-model-training: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-tensorflow_model_training 19 | - name: kafka-ml-tensorflow_model_training-gpu 20 | build-args: "TFTAG=2.7.0-gpu" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: model_training/tensorflow 24 | dockerfile: model_training/tensorflow/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /federated-module/kustomize/base/resources/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: federated-kafkaml 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | kind: Role 8 | metadata: 9 | name: federated-kafkaml-role 10 | rules: 11 | - apiGroups: ["", "apps", "batch"] 12 | resources: 13 | - deployments 14 | - jobs 15 | - pods 16 | - replicasets 17 | - services 18 | - replicationcontrollers 19 | verbs: ["create", "get", "list", "delete", "watch"] 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: RoleBinding 23 | metadata: 24 | name: federated-kafkaml-rolebinding 25 | roleRef: 26 | apiGroup: rbac.authorization.k8s.io 27 | kind: Role 28 | name: federated-kafkaml-role 29 | subjects: 30 | - kind: ServiceAccount 31 | name: federated-kafkaml -------------------------------------------------------------------------------- /frontend/src/app/confirm-dialog/confirm-dialog.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { ConfirmDialogComponent } from './confirm-dialog.component'; 4 | 5 | describe('ConfirmDialogComponent', () => { 6 | let component: ConfirmDialogComponent; 7 | let fixture: ComponentFixture<ConfirmDialogComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ ConfirmDialogComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(ConfirmDialogComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/inference-list/inference-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { 
InferenceListComponent } from './inference-list.component'; 4 | 5 | describe('InferenceListComponent', () => { 6 | let component: InferenceListComponent; 7 | let fixture: ComponentFixture<InferenceListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ InferenceListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(InferenceListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/inference-view/inference-view.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { InferenceViewComponent } from './inference-view.component'; 4 | 5 | describe('InferenceViewComponent', () => { 6 | let component: InferenceViewComponent; 7 | let fixture: ComponentFixture<InferenceViewComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ InferenceViewComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(InferenceViewComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/environments/environment.ts: -------------------------------------------------------------------------------- 1 | // This file can be replaced during build by using the `fileReplacements` array. 2 | // `ng build --prod` replaces `environment.ts` with `environment.prod.ts`. 3 | // The list of file replacements can be found in `angular.json`. 4 | 5 | export const environment = { 6 | production: false, 7 | baseUrl: 'http://localhost:8000', 8 | enableFederatedBlockchain: false 9 | }; 10 | 11 | /* 12 | * For easier debugging in development mode, you can import the following file 13 | * to ignore zone related error stack frames such as `zone.run`, `zoneDelegate.invokeTask`. 14 | * 15 | * This import should be commented out in production mode because it will have a negative impact 16 | * on performance if an error is thrown. 17 | */ 18 | // import 'zone.js/dist/zone-error'; // Included with Angular CLI. 
19 | -------------------------------------------------------------------------------- /.github/workflows/tensorflow_model_inference.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - model_inference/tensorflow/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-tf-model-inference: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-tensorflow_model_inference 19 | - name: kafka-ml-tensorflow_model_inference-gpu 20 | build-args: "TFTAG=2.7.0-gpu" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: model_inference/tensorflow 24 | dockerfile: model_inference/tensorflow/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /backend/autoweb/contracts/token/ERC20/IERC20Metadata.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // OpenZeppelin Contracts v4.4.1 (token/ERC20/extensions/IERC20Metadata.sol) 3 | 4 | pragma solidity ^0.8.0; 5 | 6 | import "./IERC20.sol"; 7 | 8 | /** 9 | * @dev Interface for the optional metadata functions from the ERC20 standard. 10 | * 11 | * _Available since v4.1._ 12 | */ 13 | interface IERC20Metadata is IERC20 { 14 | /** 15 | * @dev Returns the name of the token. 16 | */ 17 | function name() external view returns (string memory); 18 | 19 | /** 20 | * @dev Returns the symbol of the token. 21 | */ 22 | function symbol() external view returns (string memory); 23 | 24 | /** 25 | * @dev Returns the decimals places of the token. 26 | */ 27 | function decimals() external view returns (uint8); 28 | } -------------------------------------------------------------------------------- /datasources/README.md: -------------------------------------------------------------------------------- 1 | # Data sources 2 | 3 | This folder contains the clients that can be used to send data streams to Apache Kafka in Kafka-ML using the Avro and RAW formats. 4 | 5 | To use the following clients, you should install the Python libraries by running `python -m pip install -r requirements.txt`. - `avro_sink.py` client to send training data streams using Avro. This client requires the definition of two Avro schemes, one for the label and one for the data. - `raw_sink.py` client to send training data streams using the RAW format. - `sink.py` base file used by the Avro and RAW sinks. - `avro_inference.py` client to send inference data streams using Avro. This client requires the definition of an Avro scheme for the data. - `federated_raw_sink.py` client to send training data streams using the RAW format to federated learning-prepared topics. 
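For example, a minimal RAW-format producer follows the pattern below. This is a sketch mirroring the scripts under `examples/`; the broker address, topic, `deployment_id` and the random data are assumptions, so use the values Kafka-ML shows for your own deployment:

```python
import numpy as np

from datasources.raw_sink import RawSink

# Hypothetical values: replace the broker, topic and deployment_id with your own.
# NB: the keyword really is spelled 'boostrap_servers' in this sink API.
sink = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1,
               description='example dataset', validation_rate=0.1, test_rate=0.1)

# Send one training sample (data plus label) per message.
for x, y in zip(np.random.rand(10, 4), np.random.randint(0, 2, size=10)):
    sink.send(data=x, label=y)

sink.close()  # flush pending messages before exiting
```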
-------------------------------------------------------------------------------- /frontend/src/app/datasource-list/datasource-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { DatasourceListComponent } from './datasource-list.component'; 4 | 5 | describe('DatasourceListComponent', () => { 6 | let component: DatasourceListComponent; 7 | let fixture: ComponentFixture<DatasourceListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ DatasourceListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(DatasourceListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/deployment-list/deployment-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { DeploymentListComponent } from './deployment-list.component'; 4 | 5 | describe('DeploymentListComponent', () => { 6 | let component: DeploymentListComponent; 7 | let fixture: ComponentFixture<DeploymentListComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ DeploymentListComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(DeploymentListComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/app/deployment-view/deployment-view.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { DeploymentViewComponent } from './deployment-view.component'; 4 | 5 | describe('DeploymentViewComponent', () => { 6 | let component: DeploymentViewComponent; 7 | let fixture: ComponentFixture<DeploymentViewComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ DeploymentViewComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(DeploymentViewComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /.github/workflows/pytorch_model_training.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - model_training/pytorch/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-pth-model-training: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-pytorch_model_training 19 | - name: kafka-ml-pytorch_model_training-gpu 20 | build-args: "BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime" 21 | uses: ./.github/workflows/build.yml 22 | 
with: 23 | context: model_training/pytorch 24 | dockerfile: model_training/pytorch/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /frontend/src/app/services/inference.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | import { environment } from '../../environments/environment'; 4 | 5 | @Injectable({ 6 | providedIn: 'root' 7 | }) 8 | 9 | export class InferenceService { 10 | 11 | baseUrl = environment.baseUrl; 12 | 13 | constructor(private httpClient: HttpClient) { } 14 | 15 | url = this.baseUrl + '/inferences/'; 16 | 17 | getInferences(){ 18 | return this.httpClient.get(this.url); 19 | } 20 | 21 | deleteInference(id: number){ 22 | const url = `${this.url}${id}`; 23 | return this.httpClient.delete(url); 24 | } 25 | 26 | stopInference(id: number){ 27 | const url = `${this.url}${id}`; 28 | return this.httpClient.post(url, null); 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /.github/workflows/pytorch_model_inference.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - model_inference/pytorch/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-pth-model-inference: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: kafka-ml-pytorch_model_inference 19 | - name: kafka-ml-pytorch_model_inference-gpu 20 | build-args: "BASEIMG=pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: model_inference/pytorch 24 | dockerfile: model_inference/pytorch/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /frontend/src/app/datasource-dialog/datasource-dialog.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { DatasourceDialogComponent } from './datasource-dialog.component'; 4 | 5 | describe('DatasourceDialogComponent', () => { 6 | let component: DatasourceDialogComponent; 7 | let fixture: ComponentFixture<DatasourceDialogComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ DatasourceDialogComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(DatasourceDialogComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/test.ts: -------------------------------------------------------------------------------- 1 | // This file is required by karma.conf.js and loads recursively all the .spec and framework files 2 | 3 | import 'zone.js/dist/zone-testing'; 4 | import { getTestBed } from '@angular/core/testing'; 5 | import { 6 | BrowserDynamicTestingModule, 7 | platformBrowserDynamicTesting 8 | } from 
'@angular/platform-browser-dynamic/testing'; 9 | 10 | declare const require: { 11 | context(path: string, deep?: boolean, filter?: RegExp): { 12 | keys(): string[]; 13 | <T>(id: string): T; 14 | }; 15 | }; 16 | 17 | // First, initialize the Angular testing environment. 18 | getTestBed().initTestEnvironment( 19 | BrowserDynamicTestingModule, 20 | platformBrowserDynamicTesting() 21 | ); 22 | // Then we find all the tests. 23 | const context = require.context('./', true, /\.spec\.ts$/); 24 | // And load the modules. 25 | context.keys().map(context); 26 | -------------------------------------------------------------------------------- /frontend/src/app/configuration-view/configuration-view.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 2 | 3 | import { ConfigurationViewComponent } from './configuration-view.component'; 4 | 5 | describe('ConfigurationViewComponent', () => { 6 | let component: ConfigurationViewComponent; 7 | let fixture: ComponentFixture<ConfigurationViewComponent>; 8 | 9 | beforeEach(async(() => { 10 | TestBed.configureTestingModule({ 11 | declarations: [ ConfigurationViewComponent ] 12 | }) 13 | .compileComponents(); 14 | })); 15 | 16 | beforeEach(() => { 17 | fixture = TestBed.createComponent(ConfigurationViewComponent); 18 | component = fixture.componentInstance; 19 | fixture.detectChanges(); 20 | }); 21 | 22 | it('should create', () => { 23 | expect(component).toBeTruthy(); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /frontend/src/index.html: -------------------------------------------------------------------------------- 1 | <!doctype html> 2 | <html lang="en"> 3 | 4 | <head> 5 | <title>Kafka-ML: connecting the data stream with ML/AI</title> 6 | <meta charset="utf-8"> 7 | <base href="/"> 8 | <meta name="viewport" content="width=device-width, initial-scale=1"> 9 | <link rel="icon" type="image/x-icon" href="favicon.ico"> 10 | <link rel="preconnect" href="https://fonts.gstatic.com"> 11 | <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500&display=swap" rel="stylesheet"> 12 | <link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet"> 13 | <script src="assets/env.js"></script> 14 | </head> 15 | <body class="mat-typography"> 16 | <app-root>Loading...</app-root> 17 | </body> 18 | </html> -------------------------------------------------------------------------------- /mlcode_executor/pthexecutor/README.md: -------------------------------------------------------------------------------- 1 | # PyTorch Executor 2 | 3 | This project provides the PyTorch code Executor for Kafka-ML. It has been implemented using the Python web framework [Flask](https://flask.palletsprojects.com/en/2.0.x/) version 2.0.2. This project requires Python 3.7+. 4 | 5 | The main file here is `app.py`. It implements the RESTful API through Views, and each View can implement one or more HTTP methods (e.g., GET, POST), as the sketch below shows. 6 | 7 | ## Installation for local development 8 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 9 | 10 | ## Running server 11 | 12 | Run `gunicorn app:app --bind 0.0.0.0:8002 --timeout 0` to run the development server. You can change the IP and port when running the back-end. 13 | 14 | Note that if you change the IP or port in development mode, you should also change the reference in the backend deployment. 
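As an illustration of the View pattern only (a minimal sketch — the route, view name and payload are hypothetical, not the real API of this module's `app.py`), a Flask view serving GET and POST looks like:

```python
from flask import Flask, jsonify, request
from flask.views import MethodView

app = Flask(__name__)

class EchoView(MethodView):
    """Hypothetical view illustrating how one View serves several HTTP methods."""

    def get(self):
        # Health-style GET: report that the executor is reachable.
        return jsonify({'status': 'ok'})

    def post(self):
        # Echo back the JSON payload received in the POST body.
        payload = request.get_json(force=True)
        return jsonify({'received': payload}), 200

app.add_url_rule('/echo', view_func=EchoView.as_view('echo'))
```

Such an app is served with the same command as above, `gunicorn app:app --bind 0.0.0.0:8002 --timeout 0`.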
15 | -------------------------------------------------------------------------------- /federated-module/kustomize/v1.1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | configMapGenerator: 5 | - name: federated-kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - federated.tensorflow.training.image=ertis/federated-kafka-ml-tensorflow_model_training:v1.1 9 | - federated.pytorch.training.image=ertis/federated-kafka-ml-pytorch_model_training:v1.1 # Non existing 10 | # - kml.cloud.brokers=kafka-cloud.broker:9092 11 | # - federated.data.brokers=kafka-federated.broker:9092 12 | 13 | images: 14 | - name: federated-kafka-ml-backend 15 | newName: ertis/federated-kafka-ml-backend 16 | newTag: v1.1 17 | - name: federated-kafka-ml-data_control_logger 18 | newName: ertis/federated-kafka-ml-data_control_logger 19 | newTag: v1.1 20 | - name: federated-kafka-ml-model_control_logger 21 | newName: ertis/federated-kafka-ml-model_control_logger 22 | newTag: v1.1 -------------------------------------------------------------------------------- /mlcode_executor/tfexecutor/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow Executor 2 | 3 | This project provides the TensorFlow code Executor for Kafka-ML. It has been implemented using the Python web framework [Flask](https://flask.palletsprojects.com/en/2.0.x/) version 2.0.2. This project requires Python 3.5-3.8. 4 | 5 | The file `app.py` is the important file here. It has implemented the RESTful API implementation through Views. A View can implement some HTTP methods (e.g., GET, POST). 6 | 7 | ## Installation for local development 8 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 9 | 10 | ## Running server 11 | 12 | Run `gunicorn app:app --bind 0.0.0.0:8001 --timeout 0` for running the development server. You can change the IP and port when running the back-end. 13 | 14 | Note that if you change the IP or port in development mode, you should also change the reference in the backend deployment. 
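As a concrete sketch of the port caveat above (the `tfexecutor.url` key referenced here is the one defined in `kustomize/base/resources/kafkaml-configmap.yaml`; the alternative port is illustrative):

```bash
# Default development server on port 8001
gunicorn app:app --bind 0.0.0.0:8001 --timeout 0

# If you bind elsewhere, e.g. port 9001, the backend must be pointed
# at the new address as well (tfexecutor.url: http://tfexecutor:9001/)
gunicorn app:app --bind 0.0.0.0:9001 --timeout 0
```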
15 | -------------------------------------------------------------------------------- /.github/workflows/federated_tensorflow_model_training.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | paths: 4 | - federated-module/federated_model_training/tensorflow/**/* 5 | branches: 6 | - "master" 7 | - "main" 8 | - "latest" 9 | - "dev*" 10 | release: 11 | types: 12 | - created 13 | jobs: 14 | build-tf-model-training: 15 | strategy: 16 | matrix: 17 | include: 18 | - name: federated-kafka-ml-tensorflow_model_training 19 | - name: federated-kafka-ml-tensorflow_model_training-gpu 20 | build-args: "TFTAG=2.7.0-gpu" 21 | uses: ./.github/workflows/build.yml 22 | with: 23 | context: federated-module/federated_model_training/tensorflow 24 | dockerfile: federated-module/federated_model_training/tensorflow/Dockerfile 25 | name: ${{ matrix.name }} 26 | platforms: linux/amd64 27 | build-args: ${{ matrix.build-args }} 28 | secrets: inherit 29 | -------------------------------------------------------------------------------- /federated-module/kustomize/master/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | # namespace: kafkaml 5 | 6 | configMapGenerator: 7 | - name: federated-kafkaml-configmap 8 | behavior: merge 9 | literals: 10 | - federated.tensorflow.training.image=ertis/federated-kafka-ml-tensorflow_model_training:master 11 | - federated.pytorch.training.image=ertis/federated-kafka-ml-pytorch_model_training:master # Non existing 12 | # - kml.cloud.brokers=kafka-cloud.broker:9092 13 | # - federated.data.brokers=kafka-federated.broker:9092 14 | 15 | images: 16 | - name: federated-kafka-ml-backend 17 | newName: ertis/federated-kafka-ml-backend 18 | newTag: master 19 | - name: federated-kafka-ml-data_control_logger 20 | newName: ertis/federated-kafka-ml-data_control_logger 21 | newTag: master 22 | - name: federated-kafka-ml-model_control_logger 23 | newName: ertis/federated-kafka-ml-model_control_logger 24 | newTag: master -------------------------------------------------------------------------------- /frontend/e2e/protractor.conf.js: -------------------------------------------------------------------------------- 1 | // @ts-check 2 | // Protractor configuration file, see link for more information 3 | // https://github.com/angular/protractor/blob/master/lib/config.ts 4 | 5 | const { SpecReporter } = require('jasmine-spec-reporter'); 6 | 7 | /** 8 | * @type { import("protractor").Config } 9 | */ 10 | exports.config = { 11 | allScriptsTimeout: 11000, 12 | specs: [ 13 | './src/**/*.e2e-spec.ts' 14 | ], 15 | capabilities: { 16 | browserName: 'chrome' 17 | }, 18 | directConnect: true, 19 | baseUrl: 'http://localhost:4200/', 20 | framework: 'jasmine', 21 | jasmineNodeOpts: { 22 | showColors: true, 23 | defaultTimeoutInterval: 30000, 24 | print: function() {} 25 | }, 26 | onPrepare() { 27 | require('ts-node').register({ 28 | project: require('path').join(__dirname, './tsconfig.json') 29 | }); 30 | jasmine.getEnv().addReporter(new SpecReporter({ spec: { displayStacktrace: true } })); 31 | } 32 | }; -------------------------------------------------------------------------------- /backend/autoweb/contracts/token/ERC20/Context.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | // OpenZeppelin Contracts v4.4.1 (utils/Context.sol) 3 | 4 | pragma solidity ^0.8.0; 5 | 6 | /** 7 | * @dev Provides 
information about the current execution context, including the 8 | * sender of the transaction and its data. While these are generally available 9 | * via msg.sender and msg.data, they should not be accessed in such a direct 10 | * manner, since when dealing with meta-transactions the account sending and 11 | * paying for execution may not be the actual sender (as far as an application 12 | * is concerned). 13 | * 14 | * This contract is only required for intermediate, library-like contracts. 15 | */ 16 | abstract contract Context { 17 | function _msgSender() internal view virtual returns (address) { 18 | return msg.sender; 19 | } 20 | 21 | function _msgData() internal view virtual returns (bytes calldata) { 22 | return msg.data; 23 | } 24 | } -------------------------------------------------------------------------------- /examples/SO2SAT_RAW_format/so2sat_dataset_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | import tensorflow as tf 7 | import tensorflow_datasets as tfds 8 | import logging 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | 12 | so2sat = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 13 | description='so2sat dataset', validation_rate=0.1, test_rate=0.1) 14 | 15 | ds = tfds.load('so2sat', as_supervised=True, shuffle_files=True, data_dir='datasets/so2sat') 16 | 17 | ds['train'] = ds['train'].shuffle(buffer_size=1000) 18 | ds['validation'] = ds['validation'].shuffle(buffer_size=1000) 19 | 20 | for image, label in ds['train']: 21 | so2sat.send(data=image.numpy(), label=label.numpy()) 22 | 23 | for image, label in ds['validation']: 24 | so2sat.send(data=image.numpy(), label=label.numpy()) 25 | 26 | so2sat.close() -------------------------------------------------------------------------------- /frontend/nginx-custom.conf: -------------------------------------------------------------------------------- 1 | # Expires map 2 | map $sent_http_content_type $expires { 3 | default off; 4 | text/html epoch; 5 | text/css max; 6 | application/json epoch; 7 | application/javascript max; 8 | ~image/ max; 9 | } 10 | 11 | server { 12 | listen 80; 13 | 14 | location /api/ws/ { 15 | rewrite /api/(.*) /$1 break; 16 | proxy_pass ${BACKEND_PROXY_URL}; 17 | proxy_http_version 1.1; 18 | proxy_set_header Upgrade $http_upgrade; 19 | proxy_set_header Connection "Upgrade"; 20 | proxy_set_header Host $host; 21 | } 22 | 23 | location /api { 24 | rewrite /api/(.*) /$1 break; 25 | proxy_pass ${BACKEND_PROXY_URL}; 26 | } 27 | 28 | location / { 29 | root /usr/share/nginx/html; 30 | index index.html index.htm; 31 | try_files $uri $uri/ /index.html =404; 32 | } 33 | expires $expires; 34 | gzip on; 35 | } -------------------------------------------------------------------------------- /examples/FEDERATED_MNIST_RAW_format/mnist_dataset_federated_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.federated_raw_sink import FederatedRawSink 6 | import tensorflow as tf 7 | import logging 8 | import json 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | 12 | with open('mnist_sample_input_format.json') as json_file: 13 | data_res = json.load(json_file) 14 | 15 | mnist = 
FederatedRawSink(boostrap_servers='localhost:9094', topic='mnist_fed', deployment_id=1, description='Mnist dataset', 16 | dataset_restrictions=json.dumps(data_res), validation_rate=0.1, test_rate=0, control_topic='FEDERATED_DATA_CONTROL_TOPIC') 17 | 18 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 19 | 20 | for (x, y) in zip(x_train, y_train): 21 | mnist.send(data=x, label=y) 22 | 23 | for (x, y) in zip(x_test, y_test): 24 | mnist.send(data=x, label=y) 25 | 26 | mnist.close() -------------------------------------------------------------------------------- /kustomize/v1.0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training:v1.0 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference:v1.0 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training:v1.0 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference:v1.0 12 | 13 | images: 14 | - name: kafka-ml-backend 15 | newName: ertis/kafka-ml-backend 16 | newTag: v1.0 17 | - name: kafka-ml-frontend 18 | newName: ertis/kafka-ml-frontend 19 | newTag: v1.0 20 | - name: kafka-ml-kafka_control_logger 21 | newName: ertis/kafka-ml-kafka_control_logger 22 | newTag: v1.0 23 | - name: kafka-ml-pthexecutor 24 | newName: ertis/kafka-ml-pthexecutor 25 | newTag: v1.0 26 | - name: kafka-ml-tfexecutor 27 | newName: ertis/kafka-ml-tfexecutor 28 | newTag: v1.0 29 | -------------------------------------------------------------------------------- /kustomize/v1.1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training:v1.1 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference:v1.1 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training:v1.1 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference:v1.1 12 | 13 | images: 14 | - name: kafka-ml-backend 15 | newName: ertis/kafka-ml-backend 16 | newTag: v1.1 17 | - name: kafka-ml-frontend 18 | newName: ertis/kafka-ml-frontend 19 | newTag: v1.1 20 | - name: kafka-ml-kafka_control_logger 21 | newName: ertis/kafka-ml-kafka_control_logger 22 | newTag: v1.1 23 | - name: kafka-ml-pthexecutor 24 | newName: ertis/kafka-ml-pthexecutor 25 | newTag: v1.1 26 | - name: kafka-ml-tfexecutor 27 | newName: ertis/kafka-ml-tfexecutor 28 | newTag: v1.1 29 | -------------------------------------------------------------------------------- /kustomize/v1.3/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training:v1.3 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference:v1.3 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training:v1.3 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference:v1.3 12 | 13 | images: 14 | - name: kafka-ml-backend 15 | newName: ertis/kafka-ml-backend 16 | newTag: v1.3 17 | - name: kafka-ml-frontend 18 | newName: ertis/kafka-ml-frontend 19 | 
newTag: v1.3 20 | - name: kafka-ml-kafka_control_logger 21 | newName: ertis/kafka-ml-kafka_control_logger 22 | newTag: v1.3 23 | - name: kafka-ml-pthexecutor 24 | newName: ertis/kafka-ml-pthexecutor 25 | newTag: v1.3 26 | - name: kafka-ml-tfexecutor 27 | newName: ertis/kafka-ml-tfexecutor 28 | newTag: v1.3 29 | -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/mnist_dataset_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | import tensorflow as tf 7 | import logging 8 | 9 | logging.basicConfig(level=logging.INFO) 10 | 11 | # mnist = RawSink(boostrap_servers='127.0.0.1:9094', topic='automl', deployment_id=1, 12 | # description='Mnist dataset', validation_rate=0.1, test_rate=0.1, 13 | # data_type='uint8', label_type='uint8', data_reshape='28 28') 14 | 15 | mnist = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 16 | description='Mnist dataset', validation_rate=0.1, test_rate=0.1) 17 | 18 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 19 | print("train: ", (x_train.shape, y_train.shape)) 20 | 21 | for (x, y) in zip(x_train, y_train): 22 | mnist.send(data=x, label=y) 23 | 24 | for (x, y) in zip(x_test, y_test): 25 | mnist.send(data=x, label=y) 26 | 27 | mnist.close() -------------------------------------------------------------------------------- /kustomize/base/resources/frontend-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: frontend 6 | name: frontend 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | name: pod-frontend 12 | template: 13 | metadata: 14 | labels: 15 | name: pod-frontend 16 | name: frontend 17 | spec: 18 | containers: 19 | - image: kafka-ml-frontend 20 | name: frontend 21 | ports: 22 | - containerPort: 80 23 | imagePullPolicy: Always 24 | env: 25 | - name: BACKEND_PROXY_URL 26 | valueFrom: 27 | configMapKeyRef: 28 | name: kafkaml-configmap 29 | key: backend.url 30 | - name: ENABLE_FEDML_BLOCKCHAIN 31 | valueFrom: 32 | configMapKeyRef: 33 | name: kafkaml-configmap 34 | key: fedml.blockchain.enable 35 | optional: true 36 | -------------------------------------------------------------------------------- /kustomize/master/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - "../base" 3 | 4 | configMapGenerator: 5 | - name: kafkaml-configmap 6 | behavior: merge 7 | literals: 8 | - tensorflow.training.image=ertis/kafka-ml-tensorflow_model_training:master 9 | - tensorflow.inference.image=ertis/kafka-ml-tensorflow_model_inference:master 10 | - pytorch.training.image=ertis/kafka-ml-pytorch_model_training:master 11 | - pytorch.inference.image=ertis/kafka-ml-pytorch_model_inference:master 12 | 13 | images: 14 | - name: kafka-ml-backend 15 | newName: ertis/kafka-ml-backend 16 | newTag: master 17 | - name: kafka-ml-frontend 18 | newName: ertis/kafka-ml-frontend 19 | newTag: master 20 | - name: kafka-ml-kafka_control_logger 21 | newName: ertis/kafka-ml-kafka_control_logger 22 | newTag: master 23 | - name: kafka-ml-pthexecutor 24 | newName: ertis/kafka-ml-pthexecutor 25 | newTag: master 26 | - name: kafka-ml-tfexecutor 27 | newName: ertis/kafka-ml-tfexecutor 28 | 
newTag: master 29 | -------------------------------------------------------------------------------- /examples/VGG16_CIFAR10_RAW_format/vgg16_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | import tensorflow as tf 7 | import logging 8 | 9 | logging.basicConfig(level=logging.INFO) 10 | 11 | # vgg16 = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 12 | # description='Cifar10 dataset', validation_rate=0.1, test_rate=0.1, 13 | # data_type='uint8', label_type='uint8', data_reshape='32 32 3') 14 | 15 | vgg16 = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 16 | description='Cifar10 dataset', validation_rate=0.1, test_rate=0.1) 17 | 18 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() 19 | print("train: ", (x_train.shape, y_train.shape)) 20 | 21 | for (x, y) in zip(x_train, y_train): 22 | vgg16.send(data=x, label=y) 23 | 24 | for (x, y) in zip(x_test, y_test): 25 | vgg16.send(data=x, label=y) 26 | 27 | vgg16.close() -------------------------------------------------------------------------------- /backend/autoweb/urls.py: -------------------------------------------------------------------------------- 1 | """autoweb URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.0/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.conf import settings 18 | from django.urls import path, include 19 | from django.conf.urls.static import static 20 | 21 | urlpatterns = [ 22 | path('', include('automl.urls')), 23 | path('admin/', admin.site.urls), 24 | ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 25 | -------------------------------------------------------------------------------- /frontend/src/app/services/deployment.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | import { environment } from '../../environments/environment'; 4 | import {Deployment} from '../shared/deployment.model' 5 | 6 | @Injectable({ 7 | providedIn: 'root' 8 | }) 9 | 10 | export class DeploymentService { 11 | 12 | baseUrl = environment.baseUrl; 13 | 14 | constructor(private httpClient: HttpClient) { } 15 | 16 | url = this.baseUrl + '/deployments/'; 17 | 18 | getDeployments(){ 19 | return this.httpClient.get(this.url); 20 | } 21 | 22 | getDeploymentConfigurationID(id: number){ 23 | const url = `${this.url}${id}` 24 | return this.httpClient.get(url); 25 | } 26 | 27 | deleteDeployment(id: number){ 28 | const url = `${this.url}${id}` 29 | return this.httpClient.delete(url); 30 | } 31 | 32 | deploy(deployment: Deployment){ 33 | return this.httpClient.post(this.url, deployment); 34 | } 35 | 36 | 37 | 38 | } 39 | -------------------------------------------------------------------------------- /federated-module/federated_backend/autoweb/urls.py: -------------------------------------------------------------------------------- 1 | """autoweb URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.0/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.conf import settings 18 | from django.urls import path, include 19 | from django.conf.urls.static import static 20 | 21 | urlpatterns = [ 22 | path('', include('automl.urls')), 23 | path('admin/', admin.site.urls), 24 | ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 25 | -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/mnist_dataset_inference_example.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import logging 3 | from kafka import KafkaProducer, KafkaConsumer 4 | 5 | logging.basicConfig(level=logging.INFO) 6 | 7 | INPUT_TOPIC = 'minst-in' 8 | OUTPUT_TOPIC = 'minst-out' 9 | BOOTSTRAP_SERVERS= '127.0.0.1:9094' 10 | ITEMS_TO_PREDICT = 10 11 | 12 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 13 | print("Datasize minst: ", x_test.shape) 14 | 15 | producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS) 16 | """Creates a producer to send the values to predict""" 17 | for i in range (0, ITEMS_TO_PREDICT): 18 | producer.send(INPUT_TOPIC, x_test[i].tobytes()) 19 | """Sends the value to predict to Kafka""" 20 | producer.flush() 21 | producer.close() 22 | 23 | output_consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 24 | """Creates an output consumer to receive the predictions""" 25 | 26 | print('\n') 27 | 28 | print('Output consumer: ') 29 | for msg in output_consumer: 30 | print (msg.value.decode()) -------------------------------------------------------------------------------- /examples/VGG16_CIFAR10_RAW_format/vgg16_inference_example.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import logging 3 | from kafka import KafkaProducer, KafkaConsumer 4 | 5 | logging.basicConfig(level=logging.INFO) 6 | 7 | INPUT_TOPIC = 'vgg-in' 8 | OUTPUT_TOPIC = 'vgg-out' 9 | BOOTSTRAP_SERVERS= 'localhost:9094' 10 | ITEMS_TO_PREDICT = 10 11 | 12 | (x_train, y_train ), ( x_test, y_test ) = tf.keras.datasets.cifar10.load_data() 13 | y_train = tf.keras.utils.to_categorical( y_train ) 14 | y_test = tf.keras.utils.to_categorical( y_test ) 15 | 16 | 17 | producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS) 18 | """Creates a producer to send the values to predict""" 19 | 20 | for i in range (0, ITEMS_TO_PREDICT): 21 | producer.send(INPUT_TOPIC, x_test[i].tobytes()) 22 | """ Sends the value to predict to Kafka""" 23 | producer.flush() 24 | producer.close() 25 | 26 | consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 27 | """Creates a consumer to receive the predictions""" 28 | 29 | for msg in consumer: 30 | print (msg.value.decode()) 31 | 32 | -------------------------------------------------------------------------------- /kafka_control_logger/README.md: -------------------------------------------------------------------------------- 1 | # Kafka Control Logger 2 | 3 | This module contains the Kafka control logger that consumes control Kafka-ML messages to send them to the Back-end. That's all. These messages will be used in the Back-end to reuse the data streams and send them to other deployed training tasks. 4 | 5 | A brief introduction of its files: 6 | - File `logger.py` main file of this module. 
7 | 8 | ## Installation for local development 9 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 10 | 11 | Once installed, you have to set each one of the environment vars below to execute this task. For instance, you can run `export CONTROL_TOPIC=control` to export the `CONTROL_TOPIC` var with the value `control`. Once configured all the vars, execute `python logger.py` to execute this task. 12 | 13 | ## Environments vars received 14 | 15 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka. 16 | - **BACKEND**: hostname and port of the Back-end (e.g., localhost:8000). 17 | - **CONTROL_TOPIC**: name of the Kafka control topic. -------------------------------------------------------------------------------- /frontend/src/app/shared/deployment.model.ts: -------------------------------------------------------------------------------- 1 | export class Deployment { 2 | // Classic Deployment Settings 3 | batch: number; 4 | tf_kwargs_fit: string; 5 | tf_kwargs_val: string; 6 | pth_kwargs_fit: string; 7 | pth_kwargs_val: string; 8 | conf_mat_settings: boolean; 9 | configuration: number; 10 | gpumem: number; 11 | // Distributed Deployment Settings 12 | optimizer: string; 13 | learning_rate: number; 14 | loss: string; 15 | metrics: string; 16 | // Incremental Deployment Settings 17 | incremental: boolean; 18 | indefinite: boolean; 19 | stream_timeout: number; 20 | monitoring_metric: string; 21 | change: string; 22 | improvement: number; 23 | // Unsupervised Deployment Settings 24 | unsupervised: boolean; 25 | unsupervised_rounds: number; 26 | confidence: number; 27 | // Federated Deployment Settings 28 | federated: boolean; 29 | agg_rounds: number; 30 | min_data: number; 31 | agg_strategy: string; 32 | data_restriction: string; 33 | // Federated Blockchain Deployment Settings 34 | blockchain: boolean; 35 | } -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/mnist_dataset_unsupervised_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.raw_sink import RawSink 6 | import tensorflow as tf 7 | import logging 8 | 9 | logging.basicConfig(level=logging.INFO) 10 | 11 | mnist = RawSink(boostrap_servers='localhost:9094', topic='automl', deployment_id=1, 12 | description='Mnist dataset', validation_rate=0.1, test_rate=0.1, unsupervised_topic='unsupervised_automl') 13 | 14 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 15 | print("train: ", (x_train.shape, y_train.shape)) 16 | 17 | x_train_supervised = x_train[:9999] 18 | y_train_supervised = y_train[:9999] 19 | 20 | x_train_unsupervised = x_train[10000:] 21 | 22 | # Training data with labels 23 | for (x, y) in zip(x_train_supervised, y_train_supervised): 24 | mnist.send(data=x, label=y) 25 | 26 | # Training data without labels 27 | for x in x_train_unsupervised: 28 | mnist.unsupervised_send(data=x) 29 | 30 | for (x, y) in zip(x_test, y_test): 31 | mnist.send(data=x, label=y) 32 | 33 | mnist.close() -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 ERTIS Research Group 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this 
software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /model_training/pytorch/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from ignite.metrics import * 4 | import os 5 | import urllib 6 | import time 7 | import logging 8 | import requests 9 | import json 10 | import numpy as np 11 | import torchvision.models as models 12 | 13 | from torch.utils import data 14 | from config import * 15 | 16 | def download_model(model_url, retries, sleep_time): 17 | """Downloads the model from the URL received and saves it in the filesystem 18 | Args: 19 | model_url(str): URL of the model 20 | """ 21 | finished = False 22 | retry = 0 23 | while not finished and retry < retries: 24 | try: 25 | 26 | datatowrite = requests.get(model_url).content.decode("utf-8") 27 | 28 | print(datatowrite) 29 | 30 | exec(datatowrite, None, globals()) 31 | 32 | if DEBUG: 33 | print(model) 34 | 35 | finished = True 36 | logging.info("Downloaded file model from server!") 37 | 38 | return model 39 | except Exception as e: 40 | retry +=1 41 | logging.error("Error getting the model from backend [%s]", str(e)) 42 | time.sleep(sleep_time) -------------------------------------------------------------------------------- /examples/EUROSAT_RAW_format/eurosat_dataset_inference_example.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import tensorflow_datasets as tfds 3 | import logging 4 | from kafka import KafkaProducer, KafkaConsumer 5 | 6 | logging.basicConfig(level=logging.INFO) 7 | 8 | INPUT_TOPIC = 'eurosat-in' 9 | OUTPUT_TOPIC = 'eurosat-out' 10 | BOOTSTRAP_SERVERS= '127.0.0.1:9094' 11 | ITEMS_TO_PREDICT = 10 12 | 13 | eurosat = tfds.load('eurosat', as_supervised=True, shuffle_files=True, 14 | split=[f"train[:{ITEMS_TO_PREDICT}]"], data_dir='datasets/eurosat') 15 | 16 | producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS) 17 | """Creates a producer to send the values to predict""" 18 | 19 | for image, _ in eurosat[0]: 20 | producer.send(INPUT_TOPIC, image.numpy().tobytes()) 21 | """Sends the value to predict to Kafka""" 22 | producer.flush() 23 | producer.close() 24 | 25 | output_consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 26 | """Creates an output consumer to receive the predictions""" 27 | 28 | print('\n') 29 | 30 | print('Output consumer: ') 31 | for msg in output_consumer: 32 | print 
(msg.value.decode()) -------------------------------------------------------------------------------- /examples/SO2SAT_RAW_format/so2sat_dataset_inference_example.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import tensorflow_datasets as tfds 3 | import logging 4 | from kafka import KafkaProducer, KafkaConsumer 5 | 6 | logging.basicConfig(level=logging.INFO) 7 | 8 | INPUT_TOPIC = 'so2sat-in' 9 | OUTPUT_TOPIC = 'so2sat-out' 10 | BOOTSTRAP_SERVERS= '127.0.0.1:9094' 11 | ITEMS_TO_PREDICT = 10 12 | 13 | so2sat = tfds.load('so2sat', as_supervised=True, shuffle_files=True, 14 | split=[f"validation[:{ITEMS_TO_PREDICT}]"], data_dir='datasets/so2sat') 15 | 16 | producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS) 17 | """Creates a producer to send the values to predict""" 18 | 19 | for image, _ in so2sat[0]: 20 | producer.send(INPUT_TOPIC, image.numpy().tobytes()) 21 | """Sends the value to predict to Kafka""" 22 | producer.flush() 23 | producer.close() 24 | 25 | output_consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 26 | """Creates an output consumer to receive the predictions""" 27 | 28 | print('\n') 29 | 30 | print('Output consumer: ') 31 | for msg in output_consumer: 32 | print (msg.value.decode()) -------------------------------------------------------------------------------- /frontend/karma.conf.js: -------------------------------------------------------------------------------- 1 | // Karma configuration file, see link for more information 2 | // https://karma-runner.github.io/1.0/config/configuration-file.html 3 | 4 | module.exports = function (config) { 5 | config.set({ 6 | basePath: '', 7 | frameworks: ['jasmine', '@angular-devkit/build-angular'], 8 | plugins: [ 9 | require('karma-jasmine'), 10 | require('karma-chrome-launcher'), 11 | require('karma-jasmine-html-reporter'), 12 | require('karma-coverage-istanbul-reporter'), 13 | require('@angular-devkit/build-angular/plugins/karma') 14 | ], 15 | client: { 16 | clearContext: false // leave Jasmine Spec Runner output visible in browser 17 | }, 18 | coverageIstanbulReporter: { 19 | dir: require('path').join(__dirname, './coverage/autofront'), 20 | reports: ['html', 'lcovonly', 'text-summary'], 21 | fixWebpackSourcePaths: true 22 | }, 23 | reporters: ['progress', 'kjhtml'], 24 | port: 9876, 25 | colors: true, 26 | logLevel: config.LOG_INFO, 27 | autoWatch: true, 28 | browsers: ['Chrome'], 29 | singleRun: false, 30 | restartOnFileChange: true 31 | }); 32 | }; 33 | -------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/HCOPD_data_stream_producer.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import sys 4 | sys.path.append(sys.path[0] + "/../..") 5 | 6 | from datasources.avro_sink import AvroSink 7 | 8 | import pandas as pd 9 | 10 | from sklearn import preprocessing 11 | 12 | import logging 13 | 14 | logging.basicConfig(level=logging.INFO) 15 | 16 | copd_data = pd.read_csv('HCOPD_Dataset.csv') 17 | """Reads the HCOPD dataset""" 18 | 19 | copd_data_columns = copd_data.columns 20 | 21 | features = pd.DataFrame(preprocessing.scale(copd_data[copd_data_columns[copd_data_columns != 'Diagnosis']])) 22 | """All columns except Diagnosis""" 23 | 24 | diagnosis = copd_data['Diagnosis'] 25 | """Diagnosis column""" 26 | 27 | hcopd = AvroSink(boostrap_servers='127.0.0.1:9094', topic='hcopd', deployment_id=1, 28 | 
data_scheme_filename='data_scheme.avsc', label_scheme_filename='label_scheme.avsc', 29 | description='COPD dataset', validation_rate=0.1, test_rate=0.1) 30 | 31 | for i in range (0, copd_data.shape[0]): 32 | data = {"gender": features[0][i], "age": features[1][i], "smoking": features[2][i]} 33 | label = {"diagnosis": bool(diagnosis[i])} 34 | hcopd.send_avro(data, label) 35 | 36 | hcopd.close() -------------------------------------------------------------------------------- /federated-module/federated_data_control_logger/README.md: -------------------------------------------------------------------------------- 1 | # Federated Kafka Data Control Logger 2 | 3 | This module contains the Federated Kafka data control logger that consumes control Kafka-ML federated messages to send them to the federated backend. That's all. These messages will be used in the federated backend to reuse the data streams and send them to other deployed training tasks. 4 | 5 | A brief introduction of its files: 6 | - File `federated_data_control_logger.py` main file of this module. 7 | 8 | ## Installation for local development 9 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 10 | 11 | Once installed, you have to set each one of the environment vars below to execute this task. For instance, you can run `export CONTROL_TOPIC=control` to export the `CONTROL_TOPIC` var with the value `control`. Once configured all the vars, execute `python federated_data_control_logger.py` to execute this task. 12 | 13 | ## Environments vars received 14 | 15 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka. 16 | - **BACKEND**: hostname and port of the Back-end (e.g., localhost:8000). 17 | - **CONTROL_TOPIC**: name of the Kafka control topic. 
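Putting the steps above together, a minimal local run of this module (broker and backend addresses are illustrative placeholders) could be:

```bash
# Federated Kafka brokers and the federated backend to report to
export BOOTSTRAP_SERVERS=localhost:9094
export BACKEND=localhost:8000
export CONTROL_TOPIC=control

# Start consuming federated data control messages
python federated_data_control_logger.py
```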
-------------------------------------------------------------------------------- /kustomize/base/resources/kafka-control-logger-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: kafka-control-logger 6 | name: kafka-control-logger 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: kafka-control-logger 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: kafka-control-logger 18 | name: kafka-control-logger 19 | spec: 20 | containers: 21 | - image: kafka-ml-kafka_control_logger 22 | name: kafka-control-logger 23 | imagePullPolicy: Always 24 | env: 25 | - name: BOOTSTRAP_SERVERS 26 | valueFrom: 27 | configMapKeyRef: 28 | name: kafkaml-configmap 29 | key: brokers 30 | - name: CONTROL_TOPIC 31 | valueFrom: 32 | configMapKeyRef: 33 | name: kafkaml-configmap 34 | key: control.topic 35 | - name: BACKEND 36 | valueFrom: 37 | configMapKeyRef: 38 | name: kafkaml-configmap 39 | key: backend.address 40 | -------------------------------------------------------------------------------- /frontend/src/app/app.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { TestBed, async } from '@angular/core/testing'; 2 | import { RouterTestingModule } from '@angular/router/testing'; 3 | import { AppComponent } from './app.component'; 4 | 5 | describe('AppComponent', () => { 6 | beforeEach(async(() => { 7 | TestBed.configureTestingModule({ 8 | imports: [ 9 | RouterTestingModule 10 | ], 11 | declarations: [ 12 | AppComponent 13 | ], 14 | }).compileComponents(); 15 | })); 16 | 17 | it('should create the app', () => { 18 | const fixture = TestBed.createComponent(AppComponent); 19 | const app = fixture.componentInstance; 20 | expect(app).toBeTruthy(); 21 | }); 22 | 23 | it(`should have as title 'autofront'`, () => { 24 | const fixture = TestBed.createComponent(AppComponent); 25 | const app = fixture.componentInstance; 26 | expect(app.title).toEqual('autofront'); 27 | }); 28 | 29 | it('should render title', () => { 30 | const fixture = TestBed.createComponent(AppComponent); 31 | fixture.detectChanges(); 32 | const compiled = fixture.nativeElement; 33 | expect(compiled.querySelector('.content span').textContent).toContain('autofront app is running!'); 34 | }); 35 | }); 36 | -------------------------------------------------------------------------------- /federated-module/federated_model_control_logger/README.md: -------------------------------------------------------------------------------- 1 | # Federated Kafka Model Control Logger 2 | 3 | This module contains the Federated Kafka model control logger that consumes model control Kafka-ML messages to send them to the federated backend. That's all. These messages will be used in the federated backend to check if the models are suitable to be deployed in the federated learning environment given the received data streams. 4 | 5 | A brief introduction of its files: 6 | - File `federated_model_control_logger.py` main file of this module. 7 | 8 | ## Installation for local development 9 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 10 | 11 | Once installed, you have to set each one of the environment vars below to execute this task. For instance, you can run `export CONTROL_TOPIC=control` to export the `CONTROL_TOPIC` var with the value `control`. 
Once configured all the vars, execute `python federated_model_control_logger.py` to execute this task. 12 | 13 | ## Environments vars received 14 | 15 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka. 16 | - **BACKEND**: hostname and port of the Back-end (e.g., localhost:8000). 17 | - **CONTROL_TOPIC**: name of the Kafka control topic. -------------------------------------------------------------------------------- /federated-module/kustomize/base/resources/federated-data-control-logger.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: federated-data-control-logger 6 | name: federated-data-control-logger 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: federated-data-control-logger 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: federated-data-control-logger 18 | name: federated-data-control-logger 19 | spec: 20 | containers: 21 | - image: federated-kafka-ml-data_control_logger 22 | name: federated-data-control-logger 23 | imagePullPolicy: Always 24 | env: 25 | - name: BOOTSTRAP_SERVERS 26 | valueFrom: 27 | configMapKeyRef: 28 | name: federated-kafkaml-configmap 29 | key: federated.data.brokers 30 | - name: CONTROL_TOPIC 31 | valueFrom: 32 | configMapKeyRef: 33 | name: federated-kafkaml-configmap 34 | key: federated.data.control.topic 35 | - name: BACKEND 36 | valueFrom: 37 | configMapKeyRef: 38 | name: federated-kafkaml-configmap 39 | key: federated.backend.url -------------------------------------------------------------------------------- /federated-module/kustomize/base/resources/federated-model-control-logger.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: federated-model-control-logger 6 | name: federated-model-control-logger 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: federated-model-control-logger 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: federated-model-control-logger 18 | name: federated-model-control-logger 19 | spec: 20 | containers: 21 | - image: federated-kafka-ml-model_control_logger 22 | name: federated-model-control-logger 23 | imagePullPolicy: Always 24 | env: 25 | - name: BOOTSTRAP_SERVERS 26 | valueFrom: 27 | configMapKeyRef: 28 | name: federated-kafkaml-configmap 29 | key: kml.cloud.brokers 30 | - name: CONTROL_TOPIC 31 | valueFrom: 32 | configMapKeyRef: 33 | name: federated-kafkaml-configmap 34 | key: federated.model.control.topic 35 | - name: BACKEND 36 | valueFrom: 37 | configMapKeyRef: 38 | name: federated-kafkaml-configmap 39 | key: federated.backend.url -------------------------------------------------------------------------------- /frontend/src/app/visualization/visualization.component.css: -------------------------------------------------------------------------------- 1 | 2 | .open-close-container { 3 | border: 1px solid #dddddd; 4 | margin-top: 1em; 5 | padding: 20px 20px 0px 20px; 6 | color: #000000; 7 | font-weight: bold; 8 | font-size: 20px; 9 | height: '200px'; 10 | } 11 | 12 | mat-form-field.mat-form-field { 13 | width: 100%; 14 | } 15 | 16 | .center { 17 | text-align: center; 18 | position: absolute; 19 | display: flex; 20 | justify-content: center; 21 | align-items: center; 22 | color:black; 23 | height: 200px; 24 | font-size:40px; 25 | width: 75%; 26 | } 27 | 28 
| .example-chip-list { 29 | width: 100%; 30 | } 31 | table { 32 | width: 100%; 33 | table-layout: fixed; 34 | } 35 | 36 | .tr { 37 | width: 100%; 38 | } 39 | 40 | .mat-cell{ 41 | padding-top: 10px; 42 | } 43 | 44 | .mat-column-one { 45 | flex: none; 46 | width: 300px; 47 | text-align: center; 48 | } 49 | .mat-column-two { 50 | 51 | flex: none; 52 | width: 25% !important; 53 | text-align: center; 54 | } 55 | .mat-column-three { 56 | flex: none; 57 | width: 25% !important; 58 | text-align: center; 59 | } 60 | .mat-column-four { 61 | flex: none; 62 | width: 25% !important; 63 | text-align: center; 64 | } 65 | 66 | tr.mat-header-row { 67 | height: 0px; 68 | } -------------------------------------------------------------------------------- /kustomize/local/resources/kafka-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: kafka 6 | name: kafka 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | name: kafka 12 | template: 13 | metadata: 14 | labels: 15 | name: kafka 16 | name: kafka 17 | spec: 18 | containers: 19 | - image: kafka 20 | name: kafka 21 | ports: 22 | - containerPort: 9092 23 | - containerPort: 9094 24 | imagePullPolicy: Always 25 | env: 26 | - name: ALLOW_PLAINTEXT_LISTENER 27 | value: "yes" 28 | - name: KAFKA_CFG_LISTENERS 29 | value: PLAINTEXT://:9092,PLAINTEXT_HOST://0.0.0.0:9094,CONTROLLER://:9093 30 | - name: KAFKA_CFG_ADVERTISED_LISTENERS 31 | value: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:9094 32 | - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP 33 | value: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,CONTROLLER:PLAINTEXT 34 | - name: KAFKA_CFG_INTER_BROKER_LISTENER_NAME 35 | value: PLAINTEXT 36 | - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES 37 | value: CONTROLLER 38 | - name: KAFKA_ENABLE_KRAFT 39 | value: "true" -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/visualization.json: -------------------------------------------------------------------------------- 1 | { 2 | "average_updated": false, 3 | "average_window": 10000, 4 | "type": "regression", 5 | "labels":[ 6 | { 7 | "id": 0, 8 | "color": "#fff100", 9 | "label": "Zero" 10 | }, 11 | { 12 | "id": 1, 13 | "color": "#ff8c00", 14 | "label": "One" 15 | }, 16 | { 17 | "id": 2, 18 | "color": "#e81123", 19 | "label": "Two" 20 | }, 21 | { 22 | "id": 3, 23 | "color": "#ec008c", 24 | "label": "Three" 25 | }, 26 | { 27 | "id": 4, 28 | "color": "#68217a", 29 | "label": "Four" 30 | }, 31 | { 32 | "id": 5, 33 | "color": "#00188f", 34 | "label": "Five" 35 | }, 36 | { 37 | "id": 6, 38 | "color": "#00bcf2", 39 | "label": "Six" 40 | }, 41 | { 42 | "id": 7, 43 | "color": "#00b294", 44 | "label": "Seven" 45 | }, 46 | { 47 | "id": 8, 48 | "color": "#009e49", 49 | "label": "Eight" 50 | }, 51 | { 52 | "id": 9, 53 | "color": "#bad80a", 54 | "label": "Nine" 55 | } 56 | 57 | ] 58 | } -------------------------------------------------------------------------------- /examples/FEDERATED_MNIST_RAW_format/mnist_dataset_unsupervised_federated_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.federated_raw_sink import FederatedRawSink 6 | import tensorflow as tf 7 | import logging 8 | import json 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | 12 | with 
open('mnist_sample_input_format.json') as json_file: 13 | data_res = json.load(json_file) 14 | 15 | mnist = FederatedRawSink(boostrap_servers='localhost:9094', topic='mnist_fed', deployment_id=1, description='Mnist dataset', 16 | dataset_restrictions=json.dumps(data_res), validation_rate=0.1, test_rate=0, control_topic='FEDERATED_DATA_CONTROL_TOPIC', 17 | unsupervised_topic='unsupervised_automl') 18 | 19 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 20 | 21 | x_train_supervised = x_train[:9999] 22 | y_train_supervised = y_train[:9999] 23 | 24 | x_train_unsupervised = x_train[10000:] 25 | 26 | # Training data with labels 27 | for (x, y) in zip(x_train_supervised, y_train_supervised): 28 | mnist.send(data=x, label=y) 29 | 30 | # Training data without labels 31 | for x in x_train_unsupervised: 32 | mnist.unsupervised_send(data=x) 33 | 34 | for (x, y) in zip(x_test, y_test): 35 | mnist.send(data=x, label=y) 36 | 37 | mnist.close() -------------------------------------------------------------------------------- /frontend/src/app/deployment-list/deployment-list.component.css: -------------------------------------------------------------------------------- 1 | .dashboard-card { 2 | max-height: 400px; 3 | overflow-y: scroll; 4 | width: 100%; 5 | top: 0; 6 | left: 0; 7 | right: 0; 8 | bottom: 0; 9 | position: absolute; 10 | margin: 20px; 11 | } 12 | 13 | .more-button { 14 | position: absolute; 15 | top: 5px; 16 | right: 10px; 17 | } 18 | 19 | .dashboard-card-content { 20 | text-align: left; 21 | margin: 1rem; 22 | } 23 | 24 | .h3-content, .mat-raised-button { 25 | text-align: left; 26 | margin-bottom: 0.5rem; 27 | } 28 | 29 | .header h1{ 30 | display: inline; 31 | padding-top: .6rem; 32 | font-weight: 400; 33 | font-size: 30px; 34 | } 35 | .column .mat-raised-button{ 36 | margin-bottom: 0.5rem; 37 | margin-top: 0.5rem; 38 | } 39 | 40 | .created{ 41 | background: grey; 42 | color: white; 43 | 44 | } 45 | 46 | .deployed{ 47 | background: #3F51B5; 48 | color: white; 49 | } 50 | 51 | .stopped{ 52 | background: rgb(233, 48, 48); 53 | color: white; 54 | } 55 | 56 | .finished{ 57 | background: green; 58 | color: white; 59 | } 60 | 61 | .filter-input { 62 | min-height: 64px; 63 | padding: 8px 24px 0; 64 | margin-top: 20px; 65 | background-color: #ededed; 66 | } 67 | 68 | .mat-form-field { 69 | font-size: 14px; 70 | width: 90%; 71 | } 72 | 73 | h6{ 74 | font-weight:normal 75 | } 76 | -------------------------------------------------------------------------------- /frontend/src/app/layout/header/header.component.html: -------------------------------------------------------------------------------- 1 | 2 |
[header.component.html: the Angular Material toolbar markup was stripped during extraction; only the toolbar title text "Kafka-ML" (original line 8) is recoverable from the residue]
-------------------------------------------------------------------------------- /frontend/src/app/layout/sidenav-list/sidenav-list.component.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | dashboard Home 4 | 5 | 6 | code Models 7 | 8 | 9 | settingsConfigurations 10 | 11 | 12 | launchDeployments 13 | 14 | 15 | playlist_add_checkTraining 16 | 17 | 18 | cachedInference 19 | 20 | 21 | bookDatasources 22 | 23 | 24 | timelineVisualization 25 | 26 | 27 | -------------------------------------------------------------------------------- /kustomize/base/resources/kafkaml-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kafkaml-configmap 5 | data: 6 | # brokers: 192.168.43.7:32001 7 | control.topic: KAFKA_ML_CONTROL_TOPIC 8 | 9 | federated.modelloggertopic: FEDERATED_MODEL_CONTROL_TOPIC 10 | 11 | frontend.url: http://localhost 12 | backend.url: http://backend:8000 13 | backend.address: backend:8000 # TODO replace this 14 | backend.allowedhosts: 127.0.0.1,localhost,backend 15 | tfexecutor.url: http://tfexecutor:8001/ 16 | pthexecutor.url: http://pthexecutor:8002/ 17 | 18 | # tensorflow.training.image: ertis/kafka-ml-tensorflow_model_training:master 19 | # tensorflow.inference.image: ertis/kafka-ml-tensorflow_model_inference:master 20 | # pytorch.training.image: ertis/kafka-ml-pytorch_model_training:master 21 | # pytorch.inference.image: ertis/kafka-ml-pytorch_model_inference:master 22 | 23 | # debug: "0" 24 | 25 | # BlockChain-based Federated Learning Configuration 26 | fedml.blockchain.enable: "0" 27 | fedml.blockchain.rpc-url: http://blockchain:8000 28 | fedml.blockchain.chain-id: "1337" 29 | fedml.blockchain.network-id: "1999" 30 | fedml.blockchain.wallet-address: "0x0" 31 | fedml.blockchain.wallet-key: ffff 32 | fedml.blockchain.blockscout-link: "0" 33 | fedml.blockchain.blockscout-url: http://blockscout:8000 34 | fedml.blockchain.token-name: KafkaML-FedToken 35 | fedml.blockchain.token-symbol: KFKMLA 36 | -------------------------------------------------------------------------------- /examples/MNIST_RAW_format/mnist_dataset_online_training_example.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(sys.path[0] + "/../..") 3 | """To allow importing datasources""" 4 | 5 | from datasources.online_raw_sink import OnlineRawSink 6 | import tensorflow as tf 7 | from time import sleep 8 | import logging 9 | 10 | logging.basicConfig(level=logging.INFO) 11 | 12 | mnist = OnlineRawSink(boostrap_servers='127.0.0.1:9094', topic='automl', deployment_id=1, 13 | description='Mnist dataset', validation_rate=0.1) 14 | 15 | (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() 16 | print("train: ", (x_train.shape, y_train.shape)) 17 | 18 | x_train_1 = x_train[:19999] 19 | y_train_1 = y_train[:19999] 20 | 21 | x_train_2 = x_train[20000:39999] 22 | y_train_2 = y_train[20000:39999] 23 | 24 | x_train_3 = x_train[40000:] 25 | y_train_3 = y_train[40000:] 26 | 27 | logging.info("Sending first part of the data...") 28 | 29 | for (x, y) in zip(x_train_1, y_train_1): 30 | mnist.send(data=x, label=y) 31 | 32 | logging.info("Waiting 30 seconds...") 33 | 34 | sleep(30) 35 | 36 | logging.info("Sending second part of the data...") 37 | 38 | for (x, y) in zip(x_train_2, y_train_2): 39 | mnist.send(data=x, label=y) 40 | 41 | logging.info("Waiting 30 seconds...") 42 | 43 | sleep(30) 44 
| 45 | logging.info("Sending third part of the data...") 46 | 47 | for (x, y) in zip(x_train_3, y_train_3): 48 | mnist.send(data=x, label=y) 49 | 50 | mnist.online_close() -------------------------------------------------------------------------------- /backend/autoweb/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for autoweb project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | import json 12 | 13 | from django.core.wsgi import get_wsgi_application 14 | 15 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 16 | 17 | if os.environ.get('ENABLE_FEDML_BLOCKCHAIN') == '1': 18 | from autoweb import create_blockchain_token 19 | try: 20 | token_address, abi = create_blockchain_token.create_token( 21 | token_name=os.environ.get('FEDML_BLOCKCHAIN_TOKEN_NAME', "KafkaML Token"), 22 | token_symbol=os.environ.get('FEDML_BLOCKCHAIN_TOKEN_SYMBOL', "KML"), 23 | rpc_url=os.environ.get('FEDML_BLOCKCHAIN_RPC_URL', "http://localhost:8545"), 24 | chain_id=os.environ.get('FEDML_BLOCKCHAIN_CHAIN_ID', 1337), 25 | solc_version="0.8.6", 26 | wallet_address=os.environ.get('FEDML_BLOCKCHAIN_WALLET_ADDRESS', None), 27 | wallet_key=os.environ.get('FEDML_BLOCKCHAIN_WALLET_KEY', None) 28 | ) 29 | 30 | os.environ['FEDML_BLOCKCHAIN_TOKEN_ADDRESS'] = token_address 31 | os.environ['FEDML_BLOCKCHAIN_ABI'] = json.dumps(abi) 32 | 33 | except Exception as e: 34 | print(f"Error creating blockchain token. Some parameters may be missing: {e}") 35 | raise e 36 | 37 | application = get_wsgi_application() 38 | -------------------------------------------------------------------------------- /frontend/src/app/configuration-list/configuration-list.component.spec.ts: -------------------------------------------------------------------------------- 1 | import { LayoutModule } from '@angular/cdk/layout'; 2 | import { NoopAnimationsModule } from '@angular/platform-browser/animations'; 3 | import { async, ComponentFixture, TestBed } from '@angular/core/testing'; 4 | import { MatButtonModule } from '@angular/material/button'; 5 | import { MatCardModule } from '@angular/material/card'; 6 | import { MatGridListModule } from '@angular/material/grid-list'; 7 | import { MatIconModule } from '@angular/material/icon'; 8 | import { MatMenuModule } from '@angular/material/menu'; 9 | 10 | import { ConfigurationListComponent } from './configuration-list.component'; 11 | 12 | describe('ConfigurationListComponent', () => { 13 | let component: ConfigurationListComponent; 14 | let fixture: ComponentFixture; 15 | 16 | beforeEach(async(() => { 17 | TestBed.configureTestingModule({ 18 | declarations: [ConfigurationListComponent], 19 | imports: [ 20 | NoopAnimationsModule, 21 | LayoutModule, 22 | MatButtonModule, 23 | MatCardModule, 24 | MatGridListModule, 25 | MatIconModule, 26 | MatMenuModule, 27 | ] 28 | }).compileComponents(); 29 | })); 30 | 31 | beforeEach(() => { 32 | fixture = TestBed.createComponent(ConfigurationListComponent); 33 | component = fixture.componentInstance; 34 | fixture.detectChanges(); 35 | }); 36 | 37 | it('should compile', () => { 38 | expect(component).toBeTruthy(); 39 | }); 40 | }); 41 | -------------------------------------------------------------------------------- /frontend/src/app/services/model.service.ts: 
-------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | 4 | import { environment } from '../../environments/environment'; 5 | 6 | @Injectable({ 7 | providedIn: 'root' 8 | }) 9 | 10 | export class ModelService { 11 | 12 | baseUrl = environment.baseUrl; 13 | 14 | constructor(private httpClient: HttpClient) { } 15 | 16 | url = this.baseUrl + '/models/'; 17 | 18 | getModels(){ 19 | return this.httpClient.get(this.url); 20 | } 21 | 22 | getDistributedModels(){ 23 | const url = `${this.url}distributed`; 24 | return this.httpClient.get(url); 25 | } 26 | 27 | getFatherModels(){ 28 | const url = `${this.url}fathers`; 29 | return this.httpClient.get(url); 30 | } 31 | 32 | createModel(data: JSON){ 33 | return this.httpClient.post(this.url, data); 34 | } 35 | 36 | getModel(id: number){ 37 | const url = `${this.url}${id}`; 38 | return this.httpClient.get(url); 39 | } 40 | 41 | getModelResultID(id: number) { 42 | const url = `${this.url}result/${id}`; 43 | return this.httpClient.get(url); 44 | } 45 | 46 | deleteModel(id: number){ 47 | const url = `${this.url}${id}`; 48 | return this.httpClient.delete(url); 49 | } 50 | 51 | editModel(id: number, data: JSON){ 52 | const url = `${this.url}${id}`; 53 | return this.httpClient.put(url, data); 54 | } 55 | 56 | } 57 |
-------------------------------------------------------------------------------- /frontend/src/app/services/visualization-ws.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { environment } from '../../environments/environment'; 3 | import { Observable } from 'rxjs'; 4 | 5 | @Injectable({ 6 | providedIn: 'root' 7 | }) 8 | export class VisualizationWsService { 9 | baseUrl = environment.baseUrl; 10 | url: URL; 11 | 12 | constructor() { 13 | this.url = new URL(this.baseUrl + '/ws/', window.location.href); 14 | if (this.url.protocol === 'https:') { 15 | this.url.protocol = 'wss:'; 16 | } else { 17 | this.url.protocol = 'ws:'; 18 | } 19 | } 20 | 21 | ws: WebSocket; 22 | socketIsOpen = 1; 23 | connected = true; 24 | 25 | createObservableSocket(): Observable<any> { 26 | this.ws = new WebSocket(this.url.toString()); 27 | return new Observable<any>( 28 | observer => { 29 | 30 | this.ws.onmessage = (event) => 31 | observer.next(event.data); 32 | 33 | this.ws.onerror = (event) => observer.error(event); 34 | 35 | this.ws.onclose = (event) => observer.complete(); 36 | 37 | return () => 38 | this.ws.close(1000, "The user disconnected"); 39 | } 40 | ); 41 | } 42 | 43 | sendMessage(topic: string, isClassification: boolean): boolean { 44 | if (this.ws.readyState === this.socketIsOpen) { 45 | const jsonData = {"topic": topic, "classification": isClassification}; 46 | this.ws.send(JSON.stringify(jsonData)); 47 | return true; 48 | } else { 49 | return false; 50 | } 51 | } 52 | } 53 |
-------------------------------------------------------------------------------- /examples/HCOPD_Avro_format/HCOPD_inference.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import sys 4 | sys.path.append(sys.path[0] + "/../..") 5 | 6 | from datasources.avro_inference import AvroInference 7 | 8 | import pandas as pd 9 | 10 | from kafka import KafkaConsumer 11 | 12 | from sklearn import preprocessing 13 | 14 | import logging 15 | 16 | INPUT_TOPIC = 'hcopd-in' 17 | OUTPUT_TOPIC = 'hcopd-out' 18 | BOOTSTRAP_SERVERS=
'127.0.0.1:9094' 19 | ITEMS_TO_PREDICT = 10 20 | 21 | logging.basicConfig(level=logging.INFO) 22 | 23 | copd_data = pd.read_csv('HCOPD_Dataset.csv') 24 | """Reads the HCOPD dataset""" 25 | 26 | copd_data_columns = copd_data.columns 27 | 28 | features = pd.DataFrame(preprocessing.scale(copd_data[copd_data_columns[copd_data_columns != 'Diagnosis']])) 29 | """All columns except Diagnosis""" 30 | 31 | diagnosis = copd_data['Diagnosis'] 32 | """Diagnosis column""" 33 | 34 | hcopd = AvroInference(boostrap_servers=BOOTSTRAP_SERVERS, topic=INPUT_TOPIC, 35 | data_scheme_filename='data_scheme.avsc') 36 | """Creates an Avro inference""" 37 | 38 | for i in range(0, ITEMS_TO_PREDICT): 39 | data = {"gender": features[0][i], "age": features[1][i], "smoking": features[2][i]} 40 | hcopd.send(data) 41 | """ Sends the value to predict to Kafka""" 42 | logging.info("Data sent for prediction") 43 | hcopd.close() 44 | 45 | consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 46 | """Creates a consumer to receive the predictions""" 47 | 48 | logging.info("Waiting for predictions") 49 | for msg in consumer: 50 | print(msg.value.decode())
-------------------------------------------------------------------------------- /frontend/src/styles.css: -------------------------------------------------------------------------------- 1 | /* You can add global styles to this file, and also import other style files */ 2 | @import "~material-design-icons/iconfont/material-icons.css"; 3 | 4 | html, body { height: 100%; } 5 | body { margin: 0; font-family: Roboto, "Helvetica Neue", sans-serif; } 6 | 7 | body { 8 | margin: 0; 9 | } 10 | 11 | 12 | h1, h2, h3, h4, h5, h6, p { 13 | margin: 0; 14 | } 15 | 16 | .container { 17 | max-width: 1240px; 18 | margin: 0 auto; 19 | margin-top: 1.5rem; 20 | } 21 | 22 | .spacer { 23 | flex: 1 1 auto; 24 | } 25 | 26 | .mat-card { 27 | min-width: 150px; 28 | max-width: 500px; 29 | } 30 | 31 | 32 | .mat-table { 33 | overflow: auto; 34 | max-height: 500px; 35 | } 36 | 37 | .header { 38 | display: flex; 39 | margin-bottom: 1.5rem; 40 | padding: 0 0.4rem; 41 | font-family: Roboto, "Helvetica Neue", sans-serif; 42 | } 43 | .header h1{ 44 | display: inline; 45 | padding-top: .6rem; 46 | font-weight: 400; 47 | font-size: 30px; 48 | } 49 | 50 | .table-header { 51 | min-height: 64px; 52 | padding: 8px 24px 0; 53 | background-color: #ededed; 54 | } 55 | .mat-form-field { 56 | font-size: 14px; 57 | width: 80%; 58 | } 59 | 60 | textarea { 61 | resize: none; 62 | } 63 | 64 | .row-input { 65 | padding-top: 1.25rem; 66 | padding-bottom: 1rem; 67 | } 68 | 69 | .row-buttons { 70 | display: flex; 71 | padding: 1rem 0; 72 | } 73 | 74 | .form-btn { 75 | margin-left: 0.5rem; 76 | } 77 | 78 | .full-width { 79 | width: 100%; 80 | }
-------------------------------------------------------------------------------- /frontend/src/app/services/configuration.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | 4 | import { environment } from '../../environments/environment'; 5 | 6 | @Injectable({ 7 | providedIn: 'root' 8 | }) 9 | 10 | export class ConfigurationService { 11 | 12 | baseUrl = environment.baseUrl; 13 | 14 | constructor(private httpClient: HttpClient) { } 15 | 16 | url = this.baseUrl + '/configurations/'; 17 | 18 | getConfigurations(){ 19 | return this.httpClient.get(this.url); 20 | } 21 | 22 | createConfiguration(data:
JSON){ 23 | return this.httpClient.post(this.url, data); 24 | } 25 | 26 | getConfiguration(id: number){ 27 | const url = `${this.url}${id}`; 28 | return this.httpClient.get(url); 29 | } 30 | 31 | deleteConfiguration(id: number){ 32 | const url = `${this.url}${id}`; 33 | return this.httpClient.delete(url); 34 | } 35 | 36 | editConfiguration(id: number, data: JSON){ 37 | const url = `${this.url}${id}`; 38 | return this.httpClient.put(url, data); 39 | } 40 | 41 | frameworksConfigUrl = this.baseUrl + '/frameworksInConfiguration/'; 42 | 43 | getFrameworksUsedInConfiguration(id: number){ 44 | const url = `${this.frameworksConfigUrl}${id}`; 45 | return this.httpClient.get(url); 46 | } 47 | 48 | distributedConfiguration = this.baseUrl + '/distributedConfiguration/'; 49 | 50 | getDistributedConfiguration(id: number){ 51 | const url = `${this.distributedConfiguration}${id}`; 52 | return this.httpClient.get(url); 53 | } 54 | }
-------------------------------------------------------------------------------- /model_inference/pytorch/README.md: -------------------------------------------------------------------------------- 1 | # Model inference 2 | 3 | This module contains the inference task that will be executed when an inference Job for a PyTorch ML model is launched in Kafka-ML through Kubernetes. Once deployed, this task waits for data in the configured input topic and sends the predictions to the configured output topic. 4 | 5 | A brief introduction of its files: 6 | - File `inference.py`: main file of this module, executed when the inference Job is launched. 7 | - File `config.py`: debug configuration. 8 | - File `utils.py`: common functions used by other files. 9 | 10 | ## Installation for local development 11 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 12 | 13 | Once installed, you have to set each one of the environment vars below to execute the inference task. For instance, you can run `export INPUT_TOPIC=ertis-input` to export the `INPUT_TOPIC` var with the value `ertis-input`. Once all the vars are configured, execute `python inference.py` to launch the inference task. 14 | 15 | ## Environment vars received 16 | 17 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka 18 | - **MODEL_ARCH_URL**: URL for downloading the model architecture from the Back-end. 19 | - **MODEL_URL**: URL for downloading the model's trained weights from the Back-end. 20 | - **INPUT_FORMAT**: input format used for decoding. 21 | - **INPUT_CONFIG**: input format configuration used for decoding. 22 | - **INPUT_TOPIC**: Kafka input topic to receive data streams. 23 | - **OUTPUT_TOPIC**: Kafka output topic to send the predictions. 24 | - **GROUP_ID**: Kafka consumer group name, used mainly when running several container replicas. 25 |
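For local development, the same variables can also be set from Python before launching the task. A minimal sketch with hypothetical values; the URLs, topics, and group name below are placeholders chosen for illustration, not values taken from the repository:

```python
import os

# Hypothetical values for a local run; one entry per variable listed above.
os.environ["BOOTSTRAP_SERVERS"] = "localhost:9094"
os.environ["MODEL_ARCH_URL"] = "http://localhost:8000/placeholder/model_arch"  # placeholder URL
os.environ["MODEL_URL"] = "http://localhost:8000/placeholder/model"            # placeholder URL
os.environ["INPUT_FORMAT"] = "RAW"
os.environ["INPUT_CONFIG"] = "{}"  # format-specific configuration, left empty here
os.environ["INPUT_TOPIC"] = "ertis-input"
os.environ["OUTPUT_TOPIC"] = "ertis-output"
os.environ["GROUP_ID"] = "inference-group"

# With these set in the environment, `python inference.py` can then be executed.
```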
-------------------------------------------------------------------------------- /model_inference/tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # Model inference 2 | 3 | This module contains the inference task that will be executed when a TensorFlow inference Job is launched in Kafka-ML through Kubernetes. Once deployed, this task waits for data in the configured input topic and sends the predictions to the configured output topic. 4 | 5 | A brief introduction of its files: 6 | - File `inference.py`: main file of this module, executed when the inference Job is launched. 7 | - File `decoders.py`: decoders (RAW, Avro, JSON, TELEGRAF_STR_JSON) used to decode data streams. 8 | - File `config.py`: debug configuration. 9 | - File `utils.py`: common functions used by other files. 10 | 11 | ## Installation for local development 12 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 13 | 14 | Once installed, you have to set each one of the environment vars below to execute the inference task. For instance, you can run `export INPUT_TOPIC=ertis-input` to export the `INPUT_TOPIC` var with the value `ertis-input`. Once all the vars are configured, execute `python inference.py` to launch the inference task. 15 | 16 | ## Environment vars received 17 | 18 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka 19 | - **MODEL_URL**: URL for downloading the trained model from the Back-end. 20 | - **MODEL_ARCH_URL**: Unused in TensorFlow. 21 | - **INPUT_FORMAT**: input format used for decoding. 22 | - **INPUT_CONFIG**: input format configuration used for decoding. 23 | - **INPUT_TOPIC**: Kafka input topic to receive data streams. 24 | - **OUTPUT_TOPIC**: Kafka output topic to send the predictions. 25 | - **GROUP_ID**: Kafka consumer group name, used mainly when running several container replicas. 26 |
-------------------------------------------------------------------------------- /examples/MLGPARK_STREAM_RAW_format/README.md: -------------------------------------------------------------------------------- 1 | # Malaga Parking Occupancy prediction 2 | 3 | The following TensorFlow deep learning model has been used in Kafka-ML for this example using the Malaga Parking Occupancy dataset: 4 | 5 | ``` 6 | model = tf.keras.models.Sequential([ 7 | tf.keras.layers.Dense(256, activation='relu', input_shape=(6,)), 8 | tf.keras.layers.Dense(128, activation='relu'), 9 | tf.keras.layers.Dense(1) 10 | ]) 11 | model.compile(optimizer='sgd', 12 | loss='mae', 13 | metrics=['mae', 'mse']) 14 | ``` 15 | The batch_size used is 16 and the training configuration is (epochs=25, shuffle=True).
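Outside Kafka-ML, these settings correspond to a plain Keras call. A sketch only, with hypothetical in-memory arrays x_train/y_train standing in for the Kafka data stream:

```python
import numpy as np
import tensorflow as tf

# Hypothetical stand-ins for the streamed data: 6 features per sample
# (poiID plus day, month, year, hour, minute), one occupancy target.
x_train = np.random.rand(1000, 6).astype(np.float32)
y_train = np.random.rand(1000, 1).astype(np.float32)

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(6,)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='sgd', loss='mae', metrics=['mae', 'mse'])

# The front-end batch size and training configuration map directly to fit():
model.fit(x_train, y_train, batch_size=16, epochs=25, shuffle=True)
```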
16 | 17 | In the PyTorch case, the following deep learning model has been used in Kafka-ML for the Malaga Parking Occupancy dataset example: 18 | 19 | ``` 20 | class NeuralNetwork(nn.Module): 21 | def __init__(self): 22 | super(NeuralNetwork, self).__init__() 23 | self.linear_relu_stack = nn.Sequential( 24 | nn.Linear(6, 256), 25 | nn.ReLU(), 26 | nn.Linear(256, 128), 27 | nn.ReLU(), 28 | nn.Linear(128, 1), 29 | ) 30 | 31 | def forward(self, x): 32 | return self.linear_relu_stack(x) 33 | 34 | def loss_fn(self): 35 | return nn.L1Loss() 36 | 37 | def optimizer(self): 38 | return torch.optim.SGD(model.parameters(), lr=1e-3) 39 | 40 | def metrics(self): 41 | val_metrics = { 42 | "mse": MeanSquaredError(), 43 | "loss": Loss(self.loss_fn()) 44 | } 45 | return val_metrics 46 | 47 | model = NeuralNetwork() 48 | ``` 49 | The batch_size used is 16 and the training configuration is (max_epochs=25, shuffle=True).
-------------------------------------------------------------------------------- /frontend/src/app/configuration-list/configuration-list.component.css: -------------------------------------------------------------------------------- 1 | .dashboard-card { 2 | overflow-y: scroll; 3 | min-height: 400px; 4 | width: 90%; 5 | top: 0; 6 | left: 0; 7 | right: 0; 8 | bottom: 0; 9 | position: absolute; 10 | margin: 30px; 11 | } 12 | .more-button { 13 | position: absolute; 14 | top: 5px; 15 | right: 10px; 16 | } 17 | 18 | .dashboard-card-content { 19 | text-align: left; 20 | margin: 1rem; 21 | height: 400px; 22 | width: 100% 23 | } 24 | 25 | .header { 26 | display: flex; 27 | margin-bottom: 1.5rem; 28 | padding: 0 0.4rem; 29 | font-family: Roboto, "Helvetica Neue", sans-serif; 30 | } 31 | 32 | h6{ 33 | font-weight:normal 34 | } 35 | 36 | .header h1{ 37 | display: inline; 38 | padding-top: .6rem; 39 | font-weight: 400; 40 | font-size: 30px; 41 | } 42 | 43 | .mat-table { 44 | overflow: auto; 45 | width: 50%; 46 | padding-bottom: 20px; 47 | } 48 | th.mat-header-cell{ 49 | 50 | height: 24px; 51 | padding-top: 10px; 52 | } 53 | td.mat-cell, td.mat-footer-cell { 54 | 55 | padding-top: 7px; 56 | padding-bottom: 5px; 57 | border-bottom-width: 1px; 58 | border-bottom-style: solid; 59 | 60 | } 61 | 62 | .row { 63 | display: flex; 64 | } 65 | 66 | .column { 67 | flex: 50%; 68 | } 69 | 70 | .column .mat-raised-button{ 71 | margin-bottom: 0.5rem; 72 | margin-top: 0.5rem; 73 | } 74 | 75 | 76 | .deployed{ 77 | background: #3F51B5; 78 | color: white; 79 | } 80 | 81 | .filter-input { 82 | min-height: 64px; 83 | padding: 8px 24px 0; 84 | margin-top: 20px; 85 | background-color: #ededed; 86 | } 87 | 88 | .mat-form-field { 89 | font-size: 14px; 90 | width: 90%; 91 | } 92 | 93 | .mat-card{ 94 | padding: 0; 95 | }
-------------------------------------------------------------------------------- /datasources/avro_inference.py: -------------------------------------------------------------------------------- 1 | 2 | from .sink import KafkaMLSink 3 | import avro.schema 4 | import io 5 | from avro.io import DatumWriter, BinaryEncoder 6 | from kafka import KafkaProducer 7 | 8 | class AvroInference(): 9 | """Class representing a sink of Avro inference data to Apache Kafka. 10 | 11 | Args: 12 | boostrap_servers (str): List of Kafka brokers 13 | topic (str): Kafka topic 14 | data_scheme_filename (str): Filename of the AVRO scheme for training data 15 | group_id (str): Group ID of the Kafka consumer. Defaults to sink 16 | 17 | """ 18 | 19 | def __init__(self, boostrap_servers, topic, 20 | data_scheme_filename, group_id='sink'): 21 | 22 | self.boostrap_servers = boostrap_servers 23 | self.topic = topic 24 | 25 | self.data_scheme_filename = data_scheme_filename 26 | 27 | with open(self.data_scheme_filename, "r") as schema_file: self.data_schema = schema_file.read() 28 | 29 | self.avro_data_schema = avro.schema.Parse(self.data_schema) 30 | self.data_writer = DatumWriter(self.avro_data_schema) 31 | 32 | self.data_io = io.BytesIO() 33 | self.data_encoder = BinaryEncoder(self.data_io) 34 | self.__producer = KafkaProducer( 35 | bootstrap_servers=self.boostrap_servers 36 | ) 37 | 38 | def send(self, data): 39 | 40 | self.data_writer.write(data, self.data_encoder) 41 | data_bytes = self.data_io.getvalue() 42 | 43 | self.__producer.send(self.topic, data_bytes) 44 | 45 | self.data_io.seek(0) 46 | self.data_io.truncate(0) 47 | """Cleans data buffer""" 48 | 49 | def close(self): 50 | self.__producer.flush() 51 | self.__producer.close()
-------------------------------------------------------------------------------- /frontend/src/app/configuration-view/configuration-view.component.html: -------------------------------------------------------------------------------- [Angular template: markup was stripped during extraction. Recoverable content: a form titled "Create Configuration" (or "Edit Configuration" when editing) with a required-field validation message ("This field is required"), an "ML Models" selection table listing ID {{model.id}} and {{model.name}}, and "Go Back"/submit actions.]
-------------------------------------------------------------------------------- /frontend/src/app/plot-view/plot-view.component.html: -------------------------------------------------------------------------------- [Angular template: markup was stripped during extraction. Recoverable content: a "Training result {{resultID}} Metrics charts" view with a "Select Metrics" selector listing {{metric}} options, the chart area, and a "Confusion Matrix Image" section.]
-------------------------------------------------------------------------------- /examples/MLGPARK_STREAM_RAW_format/MLGPARK_dataset_inference_example.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | import urllib3 4 | import datetime 5 | import numpy as np 6 | from kafka import KafkaProducer, KafkaConsumer 7 | 8 | logging.basicConfig(level=logging.INFO) 9 | 10 | INPUT_TOPIC = 'mlgpark-in' 11 | OUTPUT_TOPIC = 'mlgpark-out' 12 | BOOTSTRAP_SERVERS = 'localhost:9094' 13 | url = 'https://datosabiertos.malaga.eu/api/3/action/datastore_search?resource_id=0dcf7abd-26b4-42c8-af19-4992f1ee60c6' 14 | 15 | producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS) 16 | """Creates a producer to send the values to predict""" 17 | 18 | ### Get data from the API 19 | http = urllib3.PoolManager() 20 | response = http.request('GET', url) 21 | data = json.loads(response.data.decode('utf-8')) 22 | 23 | for x in data['result']['records']: 24 | input_data = [int(x['poiID'])] 25 | 26 | if x['fechahora_ultima_actualizacion'] == 'None': 27 | date = datetime.datetime.now(datetime.timezone.utc).strftime("%d %m %Y %H %M").split(' ') 28 | else: 29 | date = datetime.datetime.strptime(x['fechahora_ultima_actualizacion'], '%Y-%m-%d %H:%M:%S UTC').strftime("%d %m %Y %H %M").split(' ') 30 | 31 | input_data += [np.int64(i) for i in date] 32 | input_data = np.array(input_data) 33 | 34 | # Send data 35 | producer.send(INPUT_TOPIC, input_data.tobytes()) 36 | 37 | producer.flush() 38 | producer.close() 39 | 40 | output_consumer = KafkaConsumer(OUTPUT_TOPIC, bootstrap_servers=BOOTSTRAP_SERVERS, group_id="output_group") 41 | 42 | print('Output consumer: ') 43 | for msg in output_consumer: 44 | print(msg.value.decode()) 45 | output_consumer.close()
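On the consuming side, each RAW message sent above is just the bytes of a 6-element int64 vector. A minimal decoding sketch (a hypothetical consumer, not a file from the repository):

```python
import numpy as np

# A message from this example is six int64 values serialized with tobytes():
# [poiID, day, month, year, hour, minute].
raw_message = np.array([42, 17, 5, 2023, 13, 30], dtype=np.int64).tobytes()

# np.frombuffer reverses the serialization without copying the buffer.
features = np.frombuffer(raw_message, dtype=np.int64)
print(features)  # [  42   17    5 2023   13   30]
```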
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/federated_training.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Antonio J. Chaves' 2 | 3 | import os 4 | import traceback 5 | 6 | from decoders import * 7 | from utils import * 8 | 9 | 10 | from classic_federated_training import classicFederatedTraining 11 | from blockchain_federated_training import blockchainFederatedTraining 12 | 13 | 14 | from federated_singleClassicTraining import SingleClassicTraining 15 | from federated_singleIncrementalTraining import SingleIncrementalTraining 16 | from federated_distributedClassicTraining import DistributedClassicTraining 17 | from federated_distributedIncrementalTraining import DistributedIncrementalTraining 18 | from federated_blockchainSingleClassicTraining import BlockchainSingleClassicTraining 19 | 20 | if __name__ == '__main__': 21 | try: 22 | configure_logging() 23 | """Configures the logging""" 24 | 25 | select_gpu() 26 | """Configures the GPU""" 27 | 28 | case = int(os.environ.get('CASE')) if os.environ.get('CASE') else 1 29 | 30 | if case == FEDERATED_NOT_DISTRIBUTED_NOT_INCREMENTAL: 31 | classicFederatedTraining(SingleClassicTraining()) 32 | elif case == FEDERATED_NOT_DISTRIBUTED_INCREMENTAL: 33 | classicFederatedTraining(SingleIncrementalTraining()) 34 | elif case == FEDERATED_DISTRIBUTED_NOT_INCREMENTAL: 35 | classicFederatedTraining(DistributedClassicTraining()) 36 | elif case == FEDERATED_DISTRIBUTED_INCREMENTAL: 37 | classicFederatedTraining(DistributedIncrementalTraining()) 38 | elif case == BLOCKCHAIN_FEDERATED_NOT_DISTRIBUTED_NOT_INCREMENTAL: 39 | blockchainFederatedTraining(BlockchainSingleClassicTraining()) 40 | else: 41 | raise ValueError(case) 42 | 43 | except Exception as e: 44 | traceback.print_exc() 45 | logging.error("Error in main [%s]. Service will be restarted.", str(e))
-------------------------------------------------------------------------------- /frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "autofront", 3 | "version": "0.0.0", 4 | "scripts": { 5 | "ng": "ng", 6 | "start": "ng serve", 7 | "build": "ng build", 8 | "test": "ng test", 9 | "lint": "ng lint", 10 | "e2e": "ng e2e" 11 | }, 12 | "private": true, 13 | "dependencies": { 14 | "@angular/animations": "~9.1.0-next.3", 15 | "@angular/cdk": "^9.1.1", 16 | "@angular/common": "~9.1.0-next.3", 17 | "@angular/compiler": "~9.1.0-next.3", 18 | "@angular/core": "~9.1.0-next.3", 19 | "@angular/forms": "~9.1.0-next.3", 20 | "@angular/material": "^9.1.1", 21 | "@angular/platform-browser": "~9.1.0-next.3", 22 | "@angular/platform-browser-dynamic": "~9.1.0-next.3", 23 | "@angular/router": "~9.1.0-next.3", 24 | "@swimlane/ngx-charts": "^18.0.1", 25 | "core-js-compat": "^3.20.2", 26 | "material-design-icons": "^3.0.1", 27 | "rxjs": "~6.5.4", 28 | "tslib": "^1.10.0", 29 | "zone.js": "~0.10.2", 30 | "ws": "^7.5.3" 31 | }, 32 | "devDependencies": { 33 | "@angular-devkit/build-angular": "~0.901.0-next.2", 34 | "@angular/cli": "~9.1.0-next.2", 35 | "@angular/compiler-cli": "~9.1.0-next.3", 36 | "@angular/language-service": "~9.1.0-next.3", 37 | "@babel/preset-env": "^7.14.7", 38 | "@babel/core": "^7.14.6", 39 | "@types/node": "^12.11.1", 40 | "@types/jasmine": "~3.5.0", 41 | "@types/jasminewd2": "~2.0.3", 42 | "codelyzer": "^5.1.2", 43 | "jasmine-core": "~3.5.0", 44 | "jasmine-spec-reporter": "~4.2.1", 45 | "karma": "~4.4.1", 46 | "karma-chrome-launcher": "~3.1.0", 47 | "karma-coverage-istanbul-reporter": "~2.1.0", 48 | "karma-jasmine": "~3.0.1", 49 | "karma-jasmine-html-reporter": "^1.4.2", 50 | "protractor": "~5.4.3", 51 | "ts-node": "~8.3.0", 52 | "tslint": "~5.18.0", 53 |
"typescript": "~3.7.5" 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /backend/autoweb/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for autoweb project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | import json 12 | 13 | from django.core.asgi import get_asgi_application 14 | from channels.routing import ProtocolTypeRouter, URLRouter 15 | from channels.auth import AuthMiddlewareStack 16 | import automl.routing 17 | 18 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autoweb.settings') 19 | 20 | if os.environ.get('ENABLE_FEDML_BLOCKCHAIN') == '1': 21 | from autoweb import create_blockchain_token 22 | 23 | try: 24 | token_address, abi = create_blockchain_token.create_token( 25 | token_name=os.environ.get('FEDML_BLOCKCHAIN_TOKEN_NAME', "KafkaML Token"), 26 | token_symbol=os.environ.get('FEDML_BLOCKCHAIN_TOKEN_SYMBOL', "KML"), 27 | rpc_url=os.environ.get('FEDML_BLOCKCHAIN_RPC_URL', "http://localhost:8545"), 28 | chain_id=int(os.environ.get('FEDML_BLOCKCHAIN_CHAIN_ID', 1337)), 29 | solc_version="0.8.6", 30 | wallet_address=os.environ.get('FEDML_BLOCKCHAIN_WALLET_ADDRESS', None), 31 | wallet_key=os.environ.get('FEDML_BLOCKCHAIN_WALLET_KEY', None) 32 | ) 33 | 34 | os.environ['FEDML_BLOCKCHAIN_TOKEN_ADDRESS'] = token_address 35 | os.environ['FEDML_BLOCKCHAIN_ABI'] = json.dumps(abi) 36 | 37 | except Exception as e: 38 | print(f"Error creating blockchain token. Some parameters may be missing: {e}") 39 | raise e 40 | 41 | 42 | application = ProtocolTypeRouter({ 43 | "http": get_asgi_application(), 44 | "websocket": AuthMiddlewareStack( 45 | URLRouter( 46 | automl.routing.websocket_urlpatterns 47 | ) 48 | ), 49 | }) 50 | 51 | -------------------------------------------------------------------------------- /federated-module/federated_backend/automl/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | from django.utils.timezone import now 3 | from django.conf import settings 4 | from model_utils import Choices 5 | from model_utils.fields import StatusField 6 | 7 | class ModelSource(models.Model): 8 | """Machine learning model to be trained in the system""" 9 | federated_string_id = models.TextField() 10 | 11 | input_shape = models.TextField() 12 | output_shape = models.TextField() 13 | 14 | data_restriction = models.JSONField() 15 | min_data = models.IntegerField() 16 | 17 | framework = models.TextField(default='tf') 18 | distributed = models.BooleanField(default=False) 19 | blockchain = models.JSONField(default={}) 20 | 21 | time = models.DateTimeField(default=now, editable=False) 22 | 23 | class Meta(object): 24 | ordering = ('-time', ) 25 | 26 | 27 | class Datasource(models.Model): 28 | """Datasource used for training a deployed model""" 29 | 30 | INPUT_FORMAT = Choices('RAW', 'AVRO') 31 | """Sets its default value to the first item in the STATUS choices:""" 32 | input_format = StatusField(choices_name='INPUT_FORMAT') 33 | input_config = models.TextField(blank=True) 34 | 35 | incremental = models.BooleanField(default=False) 36 | 37 | topic = models.TextField() 38 | unsupervised_topic = models.TextField(blank=True, null=True) 39 | 40 | total_msg = models.IntegerField(blank=True, null=True) 41 | validation_rate = 
models.DecimalField(max_digits=7, decimal_places=6, blank=True, null=True) 42 | test_rate = models.DecimalField(max_digits=7, decimal_places=6, blank=True, null=True) 43 | 44 | description = models.TextField(blank=True) 45 | dataset_restrictions = models.JSONField(default=dict, blank=True, null=True) 46 | 47 | time = models.DateTimeField() 48 | 49 | class Meta(object): 50 | ordering = ('-time', )
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/federated_singleClassicTraining.py: -------------------------------------------------------------------------------- 1 | from federated_mainTraining import MainTraining 2 | 3 | class SingleClassicTraining(MainTraining): 4 | """Class for single models training 5 | 6 | Attributes: 7 | kml_cloud_bootstrap_server (str): Kafka bootstrap server for the KML Cloud 8 | data_bootstrap_server (str): Kafka bootstrap server for data 9 | federated_model_id (str): Federated model ID 10 | input_data_topic (str): Input data topic 11 | input_format (str): Input data format 12 | input_config (dict): Input data configuration 13 | validation_rate (float): Validation rate 14 | total_msg (int): Total number of messages 15 | """ 16 | 17 | def __init__(self): 18 | """Loads the environment information""" 19 | 20 | super().__init__() 21 | 22 | def get_data(self, training_settings): 23 | """Gets the data from Kafka""" 24 | 25 | return super().get_kafka_dataset(training_settings) 26 | 27 | def get_unsupervised_data(self, training_settings): 28 | """Gets the unsupervised data from Kafka""" 29 | 30 | return super().get_unsupervised_kafka_dataset(training_settings) 31 | 32 | def load_model(self, message): 33 | """Downloads the model and loads it""" 34 | 35 | return super().load_model(message) 36 | 37 | def train(self, model, training_settings): 38 | """Trains the model""" 39 | return super().train_classic_model(model, training_settings) 40 | 41 | def unsupervised_train(self, model, training_settings): 42 | """Trains the model in unsupervised mode""" 43 | 44 | return super().train_classic_semi_supervised_model(model, training_settings) 45 | 46 | def save_metrics(self, model_trained): 47 | """Saves the metrics of the model""" 48 | 49 | return super().save_metrics(model_trained)
-------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/federated_distributedClassicTraining.py: -------------------------------------------------------------------------------- 1 | from federated_mainTraining import MainTraining 2 | 3 | class DistributedClassicTraining(MainTraining): 4 | """Class for distributed models training 5 | 6 | Attributes: 7 | kml_cloud_bootstrap_server (str): Kafka bootstrap server for the KML Cloud 8 | data_bootstrap_server (str): Kafka bootstrap server for data 9 | federated_model_id (str): Federated model ID 10 | input_data_topic (str): Input data topic 11 | input_format (str): Input data format 12 | input_config (dict): Input data configuration 13 | validation_rate (float): Validation rate 14 | total_msg (int): Total number of messages 15 | """ 16 | 17 | def __init__(self): 18 | """Loads the environment information""" 19 | 20 | super().__init__() 21 | 22 | def get_data(self, training_settings): 23 | """Gets the data from Kafka""" 24 | 25 | return super().get_kafka_dataset(training_settings) 26 | 27 | def get_unsupervised_data(self, training_settings): 28 | """Gets the unsupervised data from Kafka""" 29 | 30 | return
super().get_unsupervised_kafka_dataset(training_settings) 31 | 32 | def load_model(self, message): 33 | """Downloads the model and loads it""" 34 | 35 | return super().load_model(message) 36 | 37 | def train(self, model, training_settings): 38 | """Trains the model""" 39 | 40 | return super().train_classic_model(model, training_settings) 41 | 42 | def unsupervised_train(self, model, training_settings): 43 | """Trains the model in unsupervised mode""" 44 | 45 | return super().train_classic_semi_supervised_model(model, training_settings) 46 | 47 | def save_metrics(self, model_trained): 48 | """Saves the metrics of the model""" 49 | 50 | return super().save_metrics(model_trained) -------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/federated_singleIncrementalTraining.py: -------------------------------------------------------------------------------- 1 | from federated_mainTraining import MainTraining 2 | 3 | class SingleIncrementalTraining(MainTraining): 4 | """Class for single models incremental training 5 | 6 | Attributes: 7 | kml_cloud_bootstrap_server (str): Kafka bootstrap server for the KML Cloud 8 | data_bootstrap_server (str): Kafka bootstrap server for data 9 | federated_model_id (str): Federated model ID 10 | input_data_topic (str): Input data topic 11 | input_format (str): Input data format 12 | input_config (dict): Input data configuration 13 | validation_rate (float): Validation rate 14 | total_msg (int): Total number of messages 15 | """ 16 | 17 | def __init__(self): 18 | """Loads the environment information""" 19 | 20 | super().__init__() 21 | 22 | def get_data(self, training_settings): 23 | """Gets the data from Kafka""" 24 | 25 | return super().get_online_kafka_dataset(training_settings) 26 | 27 | def get_unsupervised_data(self, training_settings): 28 | """Gets the unsupervised data from Kafka""" 29 | 30 | return super().get_online_unsupervised_kafka_dataset(training_settings) 31 | 32 | def load_model(self, message): 33 | """Downloads the model and loads it""" 34 | 35 | return super().load_model(message) 36 | 37 | def train(self, model, training_settings): 38 | """Trains the model""" 39 | 40 | return super().train_incremental_model(model, training_settings) 41 | 42 | def unsupervised_train(self, model, training_settings): 43 | """Trains the model in unsupervised mode""" 44 | 45 | return super().train_incremental_semi_supervised_model(model, training_settings) 46 | 47 | def save_metrics(self, model_trained): 48 | """Saves the metrics of the model""" 49 | 50 | return super().save_metrics(model_trained) -------------------------------------------------------------------------------- /federated-module/federated_model_training/tensorflow/federated_distributedIncrementalTraining.py: -------------------------------------------------------------------------------- 1 | from federated_mainTraining import MainTraining 2 | 3 | class DistributedIncrementalTraining(MainTraining): 4 | """Class for distributed models incremental training 5 | 6 | Attributes: 7 | kml_cloud_bootstrap_server (str): Kafka bootstrap server for the KML Cloud 8 | data_bootstrap_server (str): Kafka bootstrap server for data 9 | federated_model_id (str): Federated model ID 10 | input_data_topic (str): Input data topic 11 | input_format (str): Input data format 12 | input_config (dict): Input data configuration 13 | validation_rate (float): Validation rate 14 | total_msg (int): Total number of messages 15 | """ 16 | 17 | def 
__init__(self): 18 | """Loads the environment information""" 19 | 20 | super().__init__() 21 | 22 | def get_data(self, training_settings): 23 | """Gets the data from Kafka""" 24 | 25 | return super().get_online_kafka_dataset(training_settings) 26 | 27 | def get_unsupervised_data(self, training_settings): 28 | """Gets the unsupervised data from Kafka""" 29 | 30 | return super().get_online_unsupervised_kafka_dataset(training_settings) 31 | 32 | def load_model(self, message): 33 | """Downloads the model and loads it""" 34 | 35 | return super().load_model(message) 36 | 37 | def train(self, model, training_settings): 38 | """Trains the model""" 39 | 40 | return super().train_incremental_model(model, training_settings) 41 | 42 | def unsupervised_train(self, model, training_settings): 43 | """Trains the model in unsupervised mode""" 44 | 45 | return super().train_incremental_semi_supervised_model(model, training_settings) 46 | 47 | def save_metrics(self, model_trained): 48 | """Saves the metrics of the model""" 49 | 50 | return super().save_metrics(model_trained)
-------------------------------------------------------------------------------- /model_training/pytorch/README.md: -------------------------------------------------------------------------------- 1 | # Model training 2 | 3 | This module contains the training task that will be executed when a PyTorch training Job is launched in Kafka-ML through Kubernetes. Once deployed, this task waits until a control message containing the configured `deployment_id` is received. Once the control message and the corresponding data stream have been received, the PyTorch model downloaded from the Back-end is trained, and the trained model, together with the training (and optionally validation) results, is sent back to the Back-end. 4 | 5 | A brief introduction of its files: 6 | - File `training.py`: main file of this module, executed when the training Job is launched. 7 | - File `config.py`: debug configuration. 8 | - File `utils.py`: common functions used by other files. 9 | 10 | ## Installation for local development 11 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 12 | 13 | Once installed, you have to set each one of the environment vars below to execute the training task. For instance, you can run `export BOOTSTRAP_SERVERS=localhost:9094` to export the `BOOTSTRAP_SERVERS` var with the value `localhost:9094`. Once all the vars are configured, execute `python training.py` to launch the training task. 14 | 15 | ## Environment vars received 16 | 17 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka 18 | - **RESULT_URL**: URL for downloading the untrained model from the Back-end (GET request). This URL is the same for updating the training results (POST request). 19 | - **RESULT_ID**: Result ID of the model 20 | - **CONTROL_TOPIC**: name of the Kafka control topic used in Kafka-ML 21 | - **DEPLOYMENT_ID**: deployment ID of the configuration to match with the control messages received 22 | - **BATCH**: Batch size used for training and configured in the Front-end 23 | - **KWARGS_FIT**: JSON with the arguments used for training and configured in the Front-end 24 | - **KWARGS_VAL**: JSON with the arguments used for validation and configured in the Front-end 25 |
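As an illustration of the two JSON variables, the values below mirror the MLGPARK PyTorch configuration (max_epochs=25, shuffle=True). The exact keys accepted depend on what was entered in the Front-end, so treat this as a sketch only:

```python
import json
import os

# Hypothetical local setup; each key matches one variable from the list above.
os.environ["BOOTSTRAP_SERVERS"] = "localhost:9094"
os.environ["BATCH"] = "16"
os.environ["KWARGS_FIT"] = json.dumps({"max_epochs": 25, "shuffle": True})
os.environ["KWARGS_VAL"] = json.dumps({})  # no extra validation arguments here
```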
-------------------------------------------------------------------------------- /frontend/src/app/services/result.service.ts: -------------------------------------------------------------------------------- 1 | import { Injectable } from '@angular/core'; 2 | import { HttpClient } from '@angular/common/http'; 3 | import { environment } from '../../environments/environment'; 4 | import {Inference} from '../shared/inference.model' 5 | 6 | @Injectable({ 7 | providedIn: 'root' 8 | }) 9 | 10 | export class ResultService { 11 | 12 | baseUrl = environment.baseUrl; 13 | 14 | constructor(private httpClient: HttpClient) { } 15 | 16 | url = this.baseUrl + '/results/'; 17 | results_deployment_url = this.baseUrl + '/deployments/results/'; 18 | getResults(){ 19 | return this.httpClient.get(this.url); 20 | } 21 | 22 | getResultsDeploymentID(id: number){ 23 | const url = `${this.results_deployment_url}${id}`; 24 | return this.httpClient.get(url); 25 | } 26 | 27 | deleteResult(id: number){ 28 | const url = `${this.url}${id}`; 29 | return this.httpClient.delete(url); 30 | } 31 | 32 | getTrainedModel(id: number){ 33 | const url = `${this.url}model/${id}`; 34 | return this.httpClient.get(url, {responseType: "blob", observe: 'response'}); 35 | //return this.httpClient.get(url, {responseType: "blob"}); 36 | } 37 | 38 | getConfusionMatrix(id: number){ 39 | const url = `${this.url}confusion_matrix/${id}`; 40 | return this.httpClient.get(url, {responseType: "blob"}); 41 | } 42 | 43 | getInferenceInfo(id: number){ 44 | const url = `${this.url}inference/${id}`; 45 | return this.httpClient.get(url); 46 | } 47 | 48 | getChartInfo(id: number){ 49 | const url = `${this.url}chart/${id}`; 50 | return this.httpClient.get(url); 51 | } 52 | 53 | deployInference(id: number, inference: Inference){ 54 | const url = `${this.url}inference/${id}`; 55 | return this.httpClient.post(url, inference); 56 | } 57 | 58 | stopTraining(id: number){ 59 | const url = `${this.url}stop/${id}`; 60 | return this.httpClient.post(url, null); 61 | } 62 | 63 | } 64 |
-------------------------------------------------------------------------------- /model_training/tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # Model training 2 | 3 | This module contains the training task that will be executed when a TensorFlow training Job is launched in Kafka-ML through Kubernetes. Once deployed, this task waits until a control message containing the configured `deployment_id` is received. Once the control message and the corresponding data stream have been received, the TensorFlow model downloaded from the Back-end is trained, and the trained model, together with the training (and optionally validation) results, is sent back to the Back-end. 4 | 5 | A brief introduction of its files: 6 | - File `training.py`: main file of this module, executed when the training Job is launched. 7 | - File `decoders.py`: decoders (RAW, Avro, JSON, TELEGRAF_STR_JSON) used to decode data streams. 8 | - File `config.py`: debug configuration.
9 | - File `utils.py`: common functions used by other files. 10 | 11 | ## Installation for local development 12 | Run `python -m pip install -r requirements.txt` to install the dependencies used by this module. 13 | 14 | Once installed, you have to set each one of the environment vars below to execute the training task. For instance, you can run `export BOOTSTRAP_SERVERS=localhost:9094` to export the `BOOTSTRAP_SERVERS` var with the value `localhost:9094`. Once all the vars are configured, execute `python training.py` to launch the training task. 15 | 16 | ## Environment vars received 17 | 18 | - **BOOTSTRAP_SERVERS**: list of brokers for the connection to Apache Kafka 19 | - **RESULT_URL**: URL for downloading the untrained model from the Back-end (GET request). This URL is the same for updating the training results (POST request). 20 | - **RESULT_ID**: Result ID of the model 21 | - **CONTROL_TOPIC**: name of the Kafka control topic used in Kafka-ML 22 | - **DEPLOYMENT_ID**: deployment ID of the configuration to match with the control messages received 23 | - **BATCH**: Batch size used for training and configured in the Front-end 24 | - **KWARGS_FIT**: JSON with the arguments used for training and configured in the Front-end 25 | - **KWARGS_VAL**: JSON with the arguments used for validation and configured in the Front-end 26 |
-------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # Based on https://github.com/ertis-research/fission-environments/blob/ecabe4ece71be5b12c8f3087bcbeae2062b239e2/.github/workflows/build.yml 2 | on: 3 | workflow_call: 4 | inputs: 5 | context: 6 | required: true 7 | type: string 8 | dockerfile: 9 | required: true 10 | type: string 11 | name: 12 | required: true 13 | type: string 14 | platforms: 15 | required: false 16 | type: string 17 | default: "linux/amd64" 18 | build-args: 19 | required: false 20 | type: string 21 | 22 | env: 23 | # REGISTRY: ghcr.io 24 | USERNAME: ertis 25 | IMAGE_NAME: ${{ inputs.name }} 26 | jobs: 27 | build-and-push-image: 28 | runs-on: ubuntu-latest 29 | permissions: 30 | contents: read 31 | packages: write 32 | steps: 33 | - name: Checkout repository 34 | uses: actions/checkout@v3 35 | - name: Log in to the Container registry 36 | uses: docker/login-action@v2 37 | with: 38 | # registry: ${{ env.REGISTRY }} 39 | username: ${{ env.USERNAME }} 40 | password: ${{ secrets.DOCKERHUB_KEY }} 41 | - # Add support for more platforms with QEMU (optional) 42 | # https://github.com/docker/setup-qemu-action 43 | name: Set up QEMU 44 | uses: docker/setup-qemu-action@v2 45 | - name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@v2 47 | - name: Extract metadata (tags, labels) for Docker 48 | id: meta 49 | uses: docker/metadata-action@v4 50 | with: 51 | images: ${{ env.USERNAME }}/${{ env.IMAGE_NAME }} 52 | - name: Build and push Docker image 53 | uses: docker/build-push-action@v4 54 | with: 55 | context: ${{ inputs.context }} 56 | file: ${{ inputs.dockerfile }} 57 | push: true 58 | tags: ${{ steps.meta.outputs.tags }} 59 | labels: ${{ steps.meta.outputs.labels }} 60 | platforms: ${{ inputs.platforms }} 61 | cache-from: type=gha 62 | cache-to: type=gha,mode=max 63 | build-args: ${{ inputs.build-args }} 64 |
-------------------------------------------------------------------------------- /backend/automl/urls.py: -------------------------------------------------------------------------------- 1 | from
django.urls import path 2 | from django.views.generic import TemplateView 3 | from automl.views import ModelList, ModelID, DeploymentList, TrainingResultID, ConfigurationList, ConfigurationID, DatasourceToKafka, ConfigurationUsedFrameworks, DistributedConfiguration 4 | from automl.views import DeploymentsConfigurationID, TrainingResultList, DeploymentResultID, DownloadTrainedModel, DatasourceList, TrainingResultGetMetrics, DownloadConfussionMatrix 5 | from automl.views import InferenceResultID, InferenceList, InferenceStopDelete, TrainingResultStop, DistributedModelList, FatherModelList, ModelResultID, TrainingResultMetricsID 6 | 7 | urlpatterns = [ 8 | path('configurations/', ConfigurationList.as_view()), 9 | path('configurations/<int:pk>', ConfigurationID.as_view()), 10 | path('frameworksInConfiguration/<int:pk>', ConfigurationUsedFrameworks.as_view()), 11 | path('distributedConfiguration/<int:pk>', DistributedConfiguration.as_view()), 12 | path('datasources/', DatasourceList.as_view()), 13 | path('datasources/kafka', DatasourceToKafka.as_view()), 14 | path('deployments/', DeploymentList.as_view()), 15 | path('deployments/<int:pk>', DeploymentsConfigurationID.as_view()), 16 | path('deployments/results/<int:pk>', DeploymentResultID.as_view()), 17 | path('inferences/', InferenceList.as_view()), 18 | path('inferences/<int:pk>', InferenceStopDelete.as_view()), 19 | path('models/', ModelList.as_view()), 20 | path('models/<int:pk>', ModelID.as_view()), 21 | path('models/result/<int:pk>', ModelResultID.as_view()), 22 | path('models/distributed', DistributedModelList.as_view()), 23 | path('models/fathers', FatherModelList.as_view()), 24 | path('results/', TrainingResultList.as_view()), 25 | path('results/<int:pk>', TrainingResultID.as_view()), 26 | path('results/stop/<int:pk>', TrainingResultStop.as_view()), 27 | path('results/inference/<int:pk>', InferenceResultID.as_view()), 28 | path('results/model/<int:pk>', DownloadTrainedModel.as_view()), 29 | path('results/confusion_matrix/<int:pk>', DownloadConfussionMatrix.as_view()), 30 | path('results/chart/<int:pk>', TrainingResultGetMetrics.as_view()), 31 | path('results_metrics/<int:pk>', TrainingResultMetricsID.as_view()), 32 | ]
-------------------------------------------------------------------------------- /frontend/src/app/configuration-list/configuration-list.component.ts: -------------------------------------------------------------------------------- 1 | import { Component } from '@angular/core'; 2 | import { ConfigurationService } from '../services/configuration.service'; 3 | import { MatDialog } from '@angular/material/dialog'; 4 | import { ConfirmDialogComponent } from '../confirm-dialog/confirm-dialog.component'; 5 | import {MatSnackBar} from '@angular/material/snack-bar'; 6 | 7 | @Component({ 8 | selector: 'app-configuration-list', 9 | templateUrl: './configuration-list.component.html', 10 | styleUrls: ['./configuration-list.component.css'] 11 | }) 12 | export class ConfigurationListComponent { 13 | configurations = null; 14 | modelDisplayedColumns = ['id', 'name']; 15 | filtered_data: string = ''; 16 | 17 | constructor(private configurationService: ConfigurationService, 18 | public dialog: MatDialog, 19 | private snackbar: MatSnackBar) { } 20 | 21 | ngOnInit(): void { 22 | 23 | this.configurationService.getConfigurations().subscribe((data: JSON[])=>{ 24 | this.configurations=data; 25 | }, 26 | (err)=>{ 27 | this.snackbar.open('Error connecting with the server', '', { 28 | duration: 3000 29 | }); 30 | }); 31 | 32 | } 33 | 34 | confirm(id: number) { 35 | const dialogRef = this.dialog.open(ConfirmDialogComponent, { 36 | width: '300px', 37 | data: { title:
'Configuration '+id } 38 | }); 39 | 40 | dialogRef.afterClosed().subscribe(result => { 41 | if (result) { 42 | this.delete(id); 43 | } 44 | }); 45 | } 46 | 47 | delete(id: number) { 48 | this.configurationService.deleteConfiguration(id).subscribe( 49 | ()=>{ 50 | this.snackbar.open('Configuration deleted', '', { 51 | duration: 3000 52 | }); 53 | this.updateData(id); 54 | },(err)=>{ 55 | this.snackbar.open('Error deleting the configuration: '+err.error, '', { 56 | duration: 4000 57 | }); 58 | }, 59 | 60 | ); 61 | } 62 | 63 | updateData (id: number) { 64 | const itemIndex = this.configurations.findIndex(obj => obj['id'] === id); 65 | this.configurations.splice(itemIndex, 1); 66 | } 67 | 68 | applyFilter(value: string) { 69 | // TODO: apply filter 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /frontend/tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tslint:recommended", 3 | "rules": { 4 | "array-type": false, 5 | "arrow-parens": false, 6 | "deprecation": { 7 | "severity": "warning" 8 | }, 9 | "component-class-suffix": true, 10 | "contextual-lifecycle": true, 11 | "directive-class-suffix": true, 12 | "directive-selector": [ 13 | true, 14 | "attribute", 15 | "app", 16 | "camelCase" 17 | ], 18 | "component-selector": [ 19 | true, 20 | "element", 21 | "app", 22 | "kebab-case" 23 | ], 24 | "import-blacklist": [ 25 | true, 26 | "rxjs/Rx" 27 | ], 28 | "interface-name": false, 29 | "max-classes-per-file": false, 30 | "max-line-length": [ 31 | true, 32 | 140 33 | ], 34 | "member-access": false, 35 | "member-ordering": [ 36 | true, 37 | { 38 | "order": [ 39 | "static-field", 40 | "instance-field", 41 | "static-method", 42 | "instance-method" 43 | ] 44 | } 45 | ], 46 | "no-consecutive-blank-lines": false, 47 | "no-console": [ 48 | true, 49 | "debug", 50 | "info", 51 | "time", 52 | "timeEnd", 53 | "trace" 54 | ], 55 | "no-empty": false, 56 | "no-inferrable-types": [ 57 | true, 58 | "ignore-params" 59 | ], 60 | "no-non-null-assertion": true, 61 | "no-redundant-jsdoc": true, 62 | "no-switch-case-fall-through": true, 63 | "no-var-requires": false, 64 | "object-literal-key-quotes": [ 65 | true, 66 | "as-needed" 67 | ], 68 | "object-literal-sort-keys": false, 69 | "ordered-imports": false, 70 | "quotemark": [ 71 | true, 72 | "single" 73 | ], 74 | "trailing-comma": false, 75 | "no-conflicting-lifecycle": true, 76 | "no-host-metadata-property": true, 77 | "no-input-rename": true, 78 | "no-inputs-metadata-property": true, 79 | "no-output-native": true, 80 | "no-output-on-prefix": true, 81 | "no-output-rename": true, 82 | "no-outputs-metadata-property": true, 83 | "template-banana-in-box": true, 84 | "template-no-negated-async": true, 85 | "use-lifecycle-interface": true, 86 | "use-pipe-transform-interface": true 87 | }, 88 | "rulesDirectory": [ 89 | "codelyzer" 90 | ] 91 | } -------------------------------------------------------------------------------- /frontend/src/app/app-routing.module.ts: -------------------------------------------------------------------------------- 1 | import { NgModule } from '@angular/core'; 2 | import { Routes, RouterModule } from '@angular/router'; 3 | import { ModelViewComponent } from './model-view/model-view.component'; 4 | import { ModelListComponent } from './model-list/model-list.component'; 5 | import { ConfigurationListComponent } from './configuration-list/configuration-list.component'; 6 | import { ConfigurationViewComponent } from 
'./configuration-view/configuration-view.component.ts'; 7 | import { DeploymentListComponent } from './deployment-list/deployment-list.component'; 8 | import { DeploymentViewComponent } from './deployment-view/deployment-view.component'; 9 | import { ResultListComponent } from './result-list/result-list.component'; 10 | import { DatasourceListComponent } from './datasource-list/datasource-list.component'; 11 | import { InferenceViewComponent } from './inference-view/inference-view.component'; 12 | import { InferenceListComponent } from './inference-list/inference-list.component'; 13 | import { PlotViewComponent } from './plot-view/plot-view.component'; 14 | import { VisualizationComponent } from './visualization/visualization.component'; 15 | 16 | const routes: Routes = [ 17 | {path: 'configuration-create', component: ConfigurationViewComponent}, 18 | {path: 'configurations', component: ConfigurationListComponent}, 19 | {path: 'configuration/:id', component: ConfigurationViewComponent}, 20 | {path: 'datasources', component: DatasourceListComponent}, 21 | {path: 'deploy/:id', component: DeploymentViewComponent}, 22 | {path: 'inferences', component: InferenceListComponent}, 23 | {path: 'model-create', component: ModelViewComponent}, 24 | {path: 'deployments', component: DeploymentListComponent}, 25 | {path: 'deployments/:id', component: DeploymentListComponent}, 26 | {path: 'models', component: ModelListComponent}, 27 | {path: 'model/:id', component: ModelViewComponent}, 28 | {path: 'results', component: ResultListComponent}, 29 | {path: 'results/:id', component: ResultListComponent}, 30 | {path: 'results/inference/:id', component: InferenceViewComponent}, 31 | {path: 'results/chart/:id', component: PlotViewComponent}, 32 | {path: 'visualization', component: VisualizationComponent} 33 | ]; 34 | 35 | @NgModule({ 36 | imports: [RouterModule.forRoot(routes)], 37 | exports: [RouterModule] 38 | }) 39 | export class AppRoutingModule { } 40 |
-------------------------------------------------------------------------------- /examples/EUROSAT_RAW_format/README.md: -------------------------------------------------------------------------------- 1 | # EUROSAT and VGG16 2 | 3 | The following VGG16 TensorFlow deep learning model has been used in Kafka-ML for this example with the EUROSAT dataset: 4 | 5 | ``` 6 | base_model = tf.keras.applications.VGG16(include_top=False, weights='imagenet', input_shape=(64,64,3)) 7 | x = tf.keras.layers.Flatten()(base_model.output) 8 | x = tf.keras.layers.Dense(1000, activation='relu')(x) 9 | x = tf.keras.layers.Dense(512, activation='relu')(x) 10 | x = tf.keras.layers.Dense(128, activation='relu')(x) 11 | predictions = tf.keras.layers.Dense(10, activation = 'softmax')(x) 12 | 13 | model = tf.keras.Model(inputs = base_model.input, outputs = predictions) 14 | 15 | model.compile(optimizer=tf.keras.optimizers.SGD(0.001), 16 | loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 17 | metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) 18 | ``` 19 | The batch_size used is 256 and the training configuration is (epochs=50, shuffle=True).
20 | 21 | In the PyTorch case, the following VGG16 deep learning model has been used in Kafka-ML for the EUROSAT dataset example: 22 | 23 | ``` 24 | class VGG16(nn.Module): 25 | def __init__(self): 26 | super(VGG16, self).__init__() 27 | self.pretrained = models.vgg16(pretrained=True) 28 | self.flatten = nn.Flatten() 29 | self.linear_relu_stack = nn.Sequential( 30 | nn.Linear(1000, 512), 31 | nn.ReLU(), 32 | nn.Linear(512, 256), 33 | nn.ReLU(), 34 | nn.Linear(256, 128), 35 | nn.ReLU(), 36 | nn.Linear(128, 10), 37 | nn.Softmax() 38 | ) 39 | 40 | def forward(self, x): 41 | x = self.pretrained(x) 42 | x = self.flatten(x) 43 | output = self.linear_relu_stack(x) 44 | return output 45 | 46 | def loss_fn(self): 47 | return nn.CrossEntropyLoss() 48 | 49 | def optimizer(self): 50 | return torch.optim.SGD(model.parameters(), lr=1e-3) 51 | 52 | def metrics(self): 53 | val_metrics = { 54 | "accuracy": Accuracy(), 55 | "loss": Loss(self.loss_fn()) 56 | } 57 | return val_metrics 58 | 59 | model = VGG16() 60 | ``` 61 | The batch_size used is 256 and the training configuration is (max_epochs=50, shuffle=True). --------------------------------------------------------------------------------
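For reference, a RAW-format EUROSAT sample is simply the bytes of a 64x64x3 image tensor, matching the input_shape above. A minimal sketch of the serialization round trip with hypothetical data (not a file from the repository):

```python
import numpy as np

# Hypothetical 64x64 RGB sample, matching the model's input_shape=(64, 64, 3).
image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)

raw_message = image.tobytes()  # what a RAW-format producer would send to Kafka
restored = np.frombuffer(raw_message, dtype=np.uint8).reshape(64, 64, 3)
assert (restored == image).all()
```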