├── .github
│   └── workflows
│       └── build.yaml
├── .gitignore
├── Power-consumption-keras
│   ├── .dockerignore
│   ├── .gitignore
│   ├── Dockerfile
│   ├── client
│   │   ├── data.py
│   │   ├── fedn.yaml
│   │   ├── model.py
│   │   ├── python_env.yaml
│   │   ├── python_env_macosx.yaml
│   │   ├── train.py
│   │   └── validate.py
│   ├── requirements-macos.txt
│   └── requirements.txt
├── Power-consumption-pytorch
│   ├── .dockerignore
│   ├── .gitignore
│   ├── Dockerfile
│   ├── client
│   │   ├── data.py
│   │   ├── fedn.yaml
│   │   ├── model.py
│   │   ├── python_env.yaml
│   │   ├── train.py
│   │   └── validate.py
│   └── requirements.txt
├── README.md
├── banner.png
└── overview.png

--------------------------------------------------------------------------------
/.github/workflows/build.yaml:
--------------------------------------------------------------------------------
name: Build on push/release

on:
  workflow_dispatch:
  push:
    branches:
      - main
  release:
    types: [published]

jobs:

  build_image:
    name: Build client image
    runs-on: ubuntu-20.04
    outputs:
      image: ${{ steps.meta.outputs.json }}
    permissions:
      packages: write
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: |
            ghcr.io/scaleoutsystems/power-consumption
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha

      - name: Buildx driver
        run: |
          docker buildx create --use --driver=docker-container

      - name: Log in to GitHub Docker Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push client image
        uses: docker/build-push-action@v4
        with:
          context: ./
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
data
*.npz
*.tgz
*.tar.gz
.power-consumption-keras
client.yaml

--------------------------------------------------------------------------------
/Power-consumption-keras/.dockerignore:
--------------------------------------------------------------------------------
data
seed.npz
*.tgz
*.tar.gz

--------------------------------------------------------------------------------
/Power-consumption-keras/.gitignore:
--------------------------------------------------------------------------------
data
*.npz
*.tgz
*.tar.gz
.power-consumption-keras
client.yaml
--------------------------------------------------------------------------------
/Power-consumption-keras/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10.6-slim as base
LABEL maintainer="salman@scaleoutsystems.com"
WORKDIR /app
COPY requirements.txt .
RUN apt-get update \
    && apt-get install --no-install-recommends -y git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
    && pip install --no-cache-dir -r requirements.txt


FROM python:3.10.6-slim as build
COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
COPY --from=base /usr/local/bin/fedn /usr/local/bin/
WORKDIR /app

--------------------------------------------------------------------------------
/Power-consumption-keras/client/data.py:
--------------------------------------------------------------------------------
import os
import shutil

import numpy as np

dir_path = os.path.dirname(os.path.realpath(__file__))
abs_path = os.path.abspath(dir_path)


def copy_files(source_dir, destination_dir):
    # Get a list of all files in the source directory
    files = os.listdir(source_dir)

    # Iterate through the files and copy them to the destination directory
    for file_name in files:
        source_file = os.path.join(source_dir, file_name)
        destination_file = os.path.join(destination_dir, file_name)
        shutil.copy2(source_file, destination_file)
        print(f"Copied {file_name} to {destination_file}")


def get_data(out_dir='data'):
    # Make dir if necessary
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    print('dir_path: ', dir_path)
    parent_dir = os.path.abspath(os.path.join(dir_path, os.pardir))
    print('parent_dir: ', parent_dir)

    source_dir = parent_dir + '/data'
    destination_dir = dir_path + '/data'

    # Make sure the destination directory exists before copying
    os.makedirs(destination_dir, exist_ok=True)

    copy_files(source_dir, destination_dir)


def load_data(data_path=None, is_train=True):
    """ Load data from disk.

    :param data_path: Path to data file.
    :type data_path: str
    :param is_train: Whether to load training or test data.
    :type is_train: bool
    :return: Tuple of data and labels.
    :rtype: tuple
    """
    if data_path is None:
        data_path = os.environ.get("FEDN_DATA_PATH", abs_path + '/data/power.npz')

    data = np.load(data_path)

    if is_train:
        X = data['x_train']
        y = data['y_train']
    else:
        X = data['x_test']
        y = data['y_test']

    # Normalize
    X = X / 255

    return X, y


if __name__ == '__main__':
    # Prepare data if not already done
    if not os.path.exists(abs_path + '/data'):
        print('Note: The data directory does not exist. Loading the data...')
        get_data()
    else:
        print('Good to go, the data directory exists.')
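Each client can point `load_data` at its own data partition through the `FEDN_DATA_PATH` environment variable, which the function falls back on when no explicit path is given. A minimal sketch (the `/tmp` path is hypothetical; run from the `client/` directory):

```python
# Override the default <client>/data/power.npz location (path is hypothetical).
import os
os.environ['FEDN_DATA_PATH'] = '/tmp/power.npz'

from data import load_data

x_train, y_train = load_data()
print(x_train.shape, y_train.shape)  # e.g. (N, 4) features and (N,) targets
```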
--------------------------------------------------------------------------------
/Power-consumption-keras/client/fedn.yaml:
--------------------------------------------------------------------------------
python_env: python_env.yaml
entry_points:
  build:
    command: python model.py
  startup:
    command: python data.py
  train:
    command: python train.py
  validate:
    command: python validate.py

--------------------------------------------------------------------------------
/Power-consumption-keras/client/model.py:
--------------------------------------------------------------------------------
import tensorflow as tf

from fedn.utils.helpers.helpers import get_helper

HELPER_MODULE = 'numpyhelper'
helper = get_helper(HELPER_MODULE)


def compile_model():
    """ Compile the Keras model.

    :return: The compiled model.
    :rtype: tf.keras.Model
    """
    opt = tf.keras.optimizers.SGD(learning_rate=0.0001)
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(64, input_dim=4, activation="relu"))
    model.add(tf.keras.layers.Dense(32, activation="relu"))
    model.add(tf.keras.layers.Dense(1, activation="linear"))
    model.compile(loss="mse", optimizer=opt, metrics=['mae'])

    return model


def init_seed(out_path='seed.npz'):
    """ Initialize seed model and save it to file.

    :param out_path: The path to save the seed model to.
    :type out_path: str
    """
    weights = compile_model().get_weights()
    helper.save(weights, out_path)


if __name__ == "__main__":
    init_seed('../seed.npz')

--------------------------------------------------------------------------------
/Power-consumption-keras/client/python_env.yaml:
--------------------------------------------------------------------------------
name: power-consumption-keras
build_dependencies:
  - pip
  - setuptools
  - wheel==0.37.1
dependencies:
  - tensorflow==2.13.1
  - fire==0.3.1
  - fedn==0.9.0

--------------------------------------------------------------------------------
/Power-consumption-keras/client/python_env_macosx.yaml:
--------------------------------------------------------------------------------
name: power-consumption-keras
build_dependencies:
  - pip
  - setuptools
  - wheel==0.37.1
dependencies:
  - tensorflow-macos
  - tensorflow-metal
  - fire==0.3.1
  - fedn==0.9.0b2
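A quick local smoke test of the model definition and the seed round-trip, assuming it is run from the `client/` directory with the environment above installed (the `/tmp` path is hypothetical):

```python
# Build the model, write a seed, reload its weights, and run a forward pass.
import numpy as np
from model import compile_model, init_seed, helper

init_seed('/tmp/seed.npz')                # hypothetical output path
model = compile_model()
model.set_weights(helper.load('/tmp/seed.npz'))
print(model.predict(np.zeros((1, 4))))    # 4 input features -> 1 output
```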
--------------------------------------------------------------------------------
/Power-consumption-keras/client/train.py:
--------------------------------------------------------------------------------
import sys

from data import load_data
from model import compile_model

from fedn.utils.helpers.helpers import get_helper, save_metadata

HELPER_MODULE = 'numpyhelper'
helper = get_helper(HELPER_MODULE)


def train(in_model_path, out_model_path, data_path=None, batch_size=32, epochs=1):
    """ Complete a model update.

    Load model parameters from in_model_path (managed by the FEDn client),
    perform a model update, and write updated parameters
    to out_model_path (picked up by the FEDn client).

    :param in_model_path: The path to the input model.
    :type in_model_path: str
    :param out_model_path: The path to save the output model to.
    :type out_model_path: str
    :param data_path: The path to the data file.
    :type data_path: str
    :param batch_size: The batch size to use.
    :type batch_size: int
    :param epochs: The number of epochs to train.
    :type epochs: int
    """
    # Load data
    x_train, y_train = load_data(data_path)

    # Load model
    model = compile_model()
    weights = helper.load(in_model_path)
    model.set_weights(weights)

    # Train
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)

    # Metadata needed for aggregation server side
    metadata = {
        # num_examples are mandatory
        'num_examples': len(x_train),
        'batch_size': batch_size,
        'epochs': epochs,
    }

    # Save JSON metadata file (mandatory)
    save_metadata(metadata, out_model_path)

    # Save model update (mandatory)
    weights = model.get_weights()
    helper.save(weights, out_model_path)


if __name__ == "__main__":
    train(sys.argv[1], sys.argv[2])
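For a rough picture of what the FEDn client does with this entry point: it materializes the incoming global model to a file, invokes `train`, and picks up the written model update plus the metadata JSON. A hedged local simulation (paths are hypothetical, and it assumes the dataset has been staged in `client/data/`):

```python
# Simulate one local model update outside FEDn.
from model import init_seed
from train import train

init_seed('/tmp/in_model.npz')                      # stand-in for the global model
train('/tmp/in_model.npz', '/tmp/out_model.npz', epochs=1)
```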
--------------------------------------------------------------------------------
/Power-consumption-keras/client/validate.py:
--------------------------------------------------------------------------------
import sys

from data import load_data
from model import compile_model

from fedn.utils.helpers.helpers import get_helper, save_metrics

HELPER_MODULE = 'numpyhelper'
helper = get_helper(HELPER_MODULE)


def validate(in_model_path, out_json_path, data_path=None):
    """ Validate model.

    :param in_model_path: The path to the input model.
    :type in_model_path: str
    :param out_json_path: The path to save the output JSON to.
    :type out_json_path: str
    :param data_path: The path to the data file.
    :type data_path: str
    """
    # Load data
    x_train, y_train = load_data(data_path)
    x_test, y_test = load_data(data_path, is_train=False)

    # Load model
    model = compile_model()
    weights = helper.load(in_model_path)
    model.set_weights(weights)

    # Evaluate
    model_score_train = model.evaluate(x_train, y_train)
    model_score_test = model.evaluate(x_test, y_test)

    # JSON schema
    report = {
        "training_mse": model_score_train[0],
        "test_mse": model_score_test[0]
    }

    # Save JSON
    save_metrics(report, out_json_path)


if __name__ == "__main__":
    validate(sys.argv[1], sys.argv[2])

--------------------------------------------------------------------------------
/Power-consumption-keras/requirements-macos.txt:
--------------------------------------------------------------------------------
tensorflow-macos
tensorflow-metal
fire==0.3.1
docker==5.0.2

--------------------------------------------------------------------------------
/Power-consumption-keras/requirements.txt:
--------------------------------------------------------------------------------
tensorflow==2.13.1
fire==0.3.1
docker==6.1.1
fedn==0.9.0

--------------------------------------------------------------------------------
/Power-consumption-pytorch/.dockerignore:
--------------------------------------------------------------------------------
data
seed.npz
*.tgz
*.tar.gz

--------------------------------------------------------------------------------
/Power-consumption-pytorch/.gitignore:
--------------------------------------------------------------------------------
data
*.npz
*.tgz
*.tar.gz
.power-consumption-pytorch
client.yaml
--------------------------------------------------------------------------------
/Power-consumption-pytorch/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10.6-slim as base
LABEL maintainer="salman@scaleoutsystems.com"
WORKDIR /app
COPY requirements.txt .
RUN apt-get update \
    && apt-get install --no-install-recommends -y git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
    && pip install --no-cache-dir -r requirements.txt


FROM python:3.10.6-slim as build
COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
COPY --from=base /usr/local/bin/fedn /usr/local/bin/
WORKDIR /app

--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/data.py:
--------------------------------------------------------------------------------
import os
import shutil
from math import floor

import torch
import torchvision
import numpy as np

dir_path = os.path.dirname(os.path.realpath(__file__))
abs_path = os.path.abspath(dir_path)


def copy_files(source_dir, destination_dir):
    # Get a list of all files in the source directory
    files = os.listdir(source_dir)

    # Iterate through the files and copy them to the destination directory
    for file_name in files:
        source_file = os.path.join(source_dir, file_name)
        destination_file = os.path.join(destination_dir, file_name)
        shutil.copy2(source_file, destination_file)
        print(f"Copied {file_name} to {destination_file}")


def get_data(out_dir='data'):
    # Make dir if necessary
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    print('dir_path: ', dir_path)
    parent_dir = os.path.abspath(os.path.join(dir_path, os.pardir))
    print('parent_dir: ', parent_dir)

    source_dir = parent_dir + '/data'
    destination_dir = dir_path + '/data'

    # Make sure the destination directory exists before copying
    os.makedirs(destination_dir, exist_ok=True)

    copy_files(source_dir, destination_dir)


def load_data(data_path=None, is_train=True):
    """ Load data from disk.

    :param data_path: Path to data file.
    :type data_path: str
    :param is_train: Whether to load training or test data.
    :type is_train: bool
    :return: Tuple of data and labels.
    :rtype: tuple
    """
    if data_path is None:
        data_path = os.environ.get("FEDN_DATA_PATH", abs_path + '/data/power.npz')

    data = np.load(data_path)

    if is_train:
        X = data['x_train']
        y = data['y_train']
    else:
        X = data['x_test']
        y = data['y_test']

    # Normalize
    X = X / 255

    return X, y


def splitset(dataset, parts):
    # Split a dataset into even, contiguous parts
    n = dataset.shape[0]
    local_n = floor(n / parts)
    result = []
    for i in range(parts):
        result.append(dataset[i * local_n: (i + 1) * local_n])
    return result


def split(out_dir='data'):
    # Note: this helper still targets the MNIST dataset (a leftover from the
    # template this example was derived from) and is not called by the
    # power-consumption entry points.
    n_splits = int(os.environ.get("FEDN_NUM_DATA_SPLITS", 2))

    # Make dir
    os.makedirs(f'{out_dir}/clients', exist_ok=True)

    # Load and convert to dict
    train_data = torchvision.datasets.MNIST(
        root=f'{out_dir}/train', transform=torchvision.transforms.ToTensor(), train=True)
    test_data = torchvision.datasets.MNIST(
        root=f'{out_dir}/test', transform=torchvision.transforms.ToTensor(), train=False)
    data = {
        'x_train': splitset(train_data.data, n_splits),
        'y_train': splitset(train_data.targets, n_splits),
        'x_test': splitset(test_data.data, n_splits),
        'y_test': splitset(test_data.targets, n_splits),
    }

    # Make splits
    for i in range(n_splits):
        subdir = f'{out_dir}/clients/{str(i+1)}'
        if not os.path.exists(subdir):
            os.mkdir(subdir)
        torch.save({
            'x_train': data['x_train'][i],
            'y_train': data['y_train'][i],
            'x_test': data['x_test'][i],
            'y_test': data['y_test'][i],
        },
            f'{subdir}/mnist.pt')


if __name__ == '__main__':
    # Prepare data if not already done
    if not os.path.exists(abs_path + '/data'):
        print('Note: The data directory does not exist. Loading the data...')
        get_data()
    else:
        print('Good to go, the data directory exists.')
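Since the `split()` helper above still targets MNIST, here is a hedged sketch of how `splitset()` could instead partition `power.npz` across clients; the per-client file layout is an assumption, not part of the example:

```python
# Partition power.npz into per-client .npz files using splitset().
import os
import numpy as np
from data import splitset

data = np.load('data/power.npz')
n_splits = 2
for i in range(n_splits):
    subdir = f'data/clients/{i + 1}'
    os.makedirs(subdir, exist_ok=True)
    np.savez(f'{subdir}/power.npz',
             **{key: splitset(data[key], n_splits)[i]
                for key in ('x_train', 'y_train', 'x_test', 'y_test')})
```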
--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/fedn.yaml:
--------------------------------------------------------------------------------
python_env: python_env.yaml
entry_points:
  build:
    command: python model.py
  startup:
    command: python data.py
  train:
    command: python train.py
  validate:
    command: python validate.py

--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/model.py:
--------------------------------------------------------------------------------
import collections

import torch
from torch.nn import Linear, ReLU
from torch.nn.init import kaiming_uniform_, xavier_uniform_

from fedn.utils.helpers.helpers import get_helper

HELPER_MODULE = 'numpyhelper'
helper = get_helper(HELPER_MODULE)


def compile_model():
    """ Compile the pytorch model.

    :return: The compiled model.
    :rtype: torch.nn.Module
    """
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()

            self.hidden1 = Linear(4, 64)
            kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
            self.act1 = ReLU()

            self.hidden2 = Linear(64, 32)
            kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
            self.act2 = ReLU()

            self.hidden3 = Linear(32, 1)
            xavier_uniform_(self.hidden3.weight)

        def forward(self, x):
            # input to first hidden layer
            x = self.hidden1(x)
            x = self.act1(x)

            # second hidden layer
            x = self.hidden2(x)
            x = self.act2(x)

            # third hidden layer and linear output
            x = self.hidden3(x)

            return x

    # Return model
    return Net()


def save_parameters(model, out_path):
    """ Save model parameters to file.

    :param model: The model to serialize.
    :type model: torch.nn.Module
    :param out_path: The path to save to.
    :type out_path: str
    """
    parameters_np = [val.cpu().numpy() for _, val in model.state_dict().items()]
    helper.save(parameters_np, out_path)


def load_parameters(model_path):
    """ Load model parameters from file and populate model.

    :param model_path: The path to load from.
    :type model_path: str
    :return: The loaded model.
    :rtype: torch.nn.Module
    """
    model = compile_model()
    parameters_np = helper.load(model_path)

    params_dict = zip(model.state_dict().keys(), parameters_np)
    state_dict = collections.OrderedDict({key: torch.tensor(x) for key, x in params_dict})
    model.load_state_dict(state_dict, strict=True)
    return model


def init_seed(out_path='seed.npz'):
    """ Initialize seed model and save it to file.

    :param out_path: The path to save the seed model to.
    :type out_path: str
    """
    # Init and save
    model = compile_model()
    save_parameters(model, out_path)


if __name__ == "__main__":
    init_seed('../seed.npz')
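A small sanity check that `save_parameters`/`load_parameters` round-trip exactly, which is what the numpyhelper serialization relies on (run from the `client/` directory; the `/tmp` path is hypothetical):

```python
# Verify that serialized parameters reload intact into a fresh model.
import numpy as np
from model import compile_model, save_parameters, load_parameters

model = compile_model()
save_parameters(model, '/tmp/params.npz')
restored = load_parameters('/tmp/params.npz')
for a, b in zip(model.state_dict().values(), restored.state_dict().values()):
    assert np.allclose(a.numpy(), b.numpy())
print('round-trip OK')
```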
--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/python_env.yaml:
--------------------------------------------------------------------------------
name: power-consumption-pytorch
build_dependencies:
  - pip
  - setuptools
  - wheel
dependencies:
  - torch==2.2.1
  - torchvision==0.17.1
  - fedn
  - numpy==1.26.4

--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/train.py:
--------------------------------------------------------------------------------
import math
import os
import sys

import numpy as np
import torch
from data import load_data
from model import load_parameters, save_parameters

from fedn.utils.helpers.helpers import save_metadata

dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(dir_path))


def train(in_model_path, out_model_path, data_path=None, batch_size=32, epochs=1, lr=0.01):
    """ Complete a model update.

    Load model parameters from in_model_path (managed by the FEDn client),
    perform a model update, and write updated parameters
    to out_model_path (picked up by the FEDn client).

    :param in_model_path: The path to the input model.
    :type in_model_path: str
    :param out_model_path: The path to save the output model to.
    :type out_model_path: str
    :param data_path: The path to the data file.
    :type data_path: str
    :param batch_size: The batch size to use.
    :type batch_size: int
    :param epochs: The number of epochs to train.
    :type epochs: int
    :param lr: The learning rate to use.
    :type lr: float
    """
    # Load data
    x_train, y_train = load_data(data_path)

    # Load parameters and initialize model
    model = load_parameters(in_model_path)

    # Train
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    n_batches = int(math.ceil(len(x_train) / batch_size))
    criterion = torch.nn.L1Loss()

    for e in range(epochs):  # epoch loop
        for b in range(n_batches):  # batch loop
            # Retrieve current batch as float32 tensors; targets are expanded
            # to shape (batch, 1) to match the model output
            batch_x = torch.from_numpy(x_train[b * batch_size:(b + 1) * batch_size]).float()
            batch_y = torch.from_numpy(np.expand_dims(y_train[b * batch_size:(b + 1) * batch_size], -1)).float()

            # Train on batch
            optimizer.zero_grad()
            outputs = model(batch_x)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()
            # Log
            if b % 100 == 0:
                print(
                    f"Epoch {e}/{epochs-1} | Batch: {b}/{n_batches-1} | Loss: {loss.item()}")

    # Metadata needed for aggregation server side
    metadata = {
        # num_examples are mandatory
        'num_examples': len(x_train),
        'batch_size': batch_size,
        'epochs': epochs,
        'lr': lr
    }

    # Save JSON metadata file (mandatory)
    save_metadata(metadata, out_model_path)

    # Save model update (mandatory)
    save_parameters(model, out_model_path)


if __name__ == "__main__":
    train(sys.argv[1], sys.argv[2])
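The manual slicing in the batch loop works fine for this small dataset; an equivalent, more idiomatic alternative (a sketch, not what the example ships) is to wrap the arrays in a `TensorDataset` and let a `DataLoader` handle batching and shuffling:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

def make_loader(x_train, y_train, batch_size=32):
    # Targets get a trailing dim so they match the (batch, 1) model output.
    dataset = TensorDataset(torch.from_numpy(x_train).float(),
                            torch.from_numpy(y_train).float().unsqueeze(-1))
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)
```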
--------------------------------------------------------------------------------
/Power-consumption-pytorch/client/validate.py:
--------------------------------------------------------------------------------
import os
import sys

import numpy as np
import torch
from data import load_data
from model import load_parameters

from fedn.utils.helpers.helpers import save_metrics

dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(dir_path))


def validate(in_model_path, out_json_path, data_path=None):
    """ Validate model.

    :param in_model_path: The path to the input model.
    :type in_model_path: str
    :param out_json_path: The path to save the output JSON to.
    :type out_json_path: str
    :param data_path: The path to the data file.
    :type data_path: str
    """
    # Load data
    x_train, y_train = load_data(data_path)
    x_test, y_test = load_data(data_path, is_train=False)

    # Load model
    model = load_parameters(in_model_path)
    model.eval()

    # Evaluate
    criterion_mae = torch.nn.L1Loss()
    criterion_mse = torch.nn.MSELoss()
    with torch.no_grad():
        x_train_t = torch.from_numpy(x_train).float()
        train_out = model(x_train_t)
        y_train_t = torch.from_numpy(np.expand_dims(y_train, -1)).float()

        training_loss_mae = criterion_mae(train_out, y_train_t)
        training_loss_mse = criterion_mse(train_out, y_train_t)

        x_test_t = torch.from_numpy(x_test).float()
        test_out = model(x_test_t)
        y_test_t = torch.from_numpy(np.expand_dims(y_test, -1)).float()

        test_loss_mae = criterion_mae(test_out, y_test_t)
        test_loss_mse = criterion_mse(test_out, y_test_t)

    # JSON schema
    report = {
        "test_mae": test_loss_mae.item(),
        "test_mse": test_loss_mse.item(),
        "training_mae": training_loss_mae.item(),
        "training_mse": training_loss_mse.item(),
    }

    # Save JSON
    save_metrics(report, out_json_path)


if __name__ == "__main__":
    validate(sys.argv[1], sys.argv[2])
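Why the targets are expanded to shape `(N, 1)` before computing the losses: against the model's `(N, 1)` output, a flat `(N,)` target would broadcast to an `(N, N)` matrix and yield a misleading loss (PyTorch emits a `UserWarning` for this). A tiny illustration:

```python
import torch

pred = torch.zeros(4, 1)                 # model-style (N, 1) output
target = torch.arange(4.0)               # flat (N,) target
mse = torch.nn.MSELoss()
print(mse(pred, target.unsqueeze(-1)))   # correct per-sample mean: tensor(3.5000)
```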
--------------------------------------------------------------------------------
/Power-consumption-pytorch/requirements.txt:
--------------------------------------------------------------------------------
torch==2.2.1
torchvision==0.17.1
fedn==0.9.0

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Power consumption prediction for data centers (TensorFlow/Keras and PyTorch)

![Power Consumption Example.](banner.png)

This is an example of a neural network regression model in a federated setting. Time series data from two data centers in Sweden and Finland is used to predict the relationship between CPU and network usage and power consumption. The tutorial is based on the following article, which has more background information on the use case:

- Towards Smart e-Infrastructures, A Community Driven Approach Based on Real Datasets
  https://ieeexplore.ieee.org/document/9289758

The model in this example is a simplified version of the model used in the article, aimed at reducing the compute requirements on the client side. A typical laptop or workstation should be capable of handling multiple clients. A partition of the dataset needed for this example is publicly accessible:

```sh
wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1r_dlOEZAnCLhRjY1qFwlRAkeB4PvhgAU' -O power.npz
```

To access the complete dataset, please get in touch with Scaleout staff. The following figure illustrates the overall concept of the example.

![Overview figure highlighting input parameters and expected output.](overview.png)


In case you have any questions, feel free to contact us.
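To sanity-check the downloaded file before wiring up clients, the array names in `power.npz` match what both clients' `data.py` implementations load:

```python
import numpy as np

data = np.load('power.npz')
print(data.files)                        # expect: x_train, y_train, x_test, y_test
print(data['x_train'].shape, data['y_train'].shape)
```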
## Prerequisites, when running clients in Docker containers

- [Python 3.8, 3.9, 3.10 or 3.11](https://www.python.org/downloads)


Creating the compute package and seed model
-------------------------------------------

Install fedn:

```sh
pip install fedn
```

Clone this repository, then navigate into this directory:

```sh
git clone https://github.com/scaleoutsystems/power-consumption-tutorial.git
```

For the PyTorch example:

```sh
cd power-consumption-tutorial/Power-consumption-pytorch
```

Or for the Keras example:

```sh
cd power-consumption-tutorial/Power-consumption-keras
```

Create the compute package:

```sh
fedn package create --path client
```

This should create a file 'package.tgz' in the project folder.

Next, generate a seed model (the first model in a global model trail):

```sh
fedn run build --path client
```

This will create a seed model called 'seed.npz' in the root of the project. This step will take a few minutes, depending on hardware and internet connection (it builds a virtualenv).

Download the dataset:

For Linux and macOS:

```sh
mkdir -p power-consumption-tutorial/Power-consumption-pytorch/data
wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1r_dlOEZAnCLhRjY1qFwlRAkeB4PvhgAU' -O power-consumption-tutorial/Power-consumption-pytorch/data/power.npz
```

For Windows users, download the dataset using your preferred browser or tool by following the link below.

`https://docs.google.com/uc?export=download&id=1r_dlOEZAnCLhRjY1qFwlRAkeB4PvhgAU`

Using FEDn Studio
-----------------

Follow the guide here to set up your FEDn Studio project and learn how to connect clients (using token authentication): [Studio guide](https://fedn.readthedocs.io/en/stable/studio.html). On the step "Upload Files", upload 'package.tgz' and 'seed.npz' created above.

Connecting clients:

```sh
export FEDN_PACKAGE_EXTRACT_DIR=package
export FEDN_DATA_PATH=./data/power.npz
```

```sh
fedn client start -in client.yaml --secure=True --force-ssl
```


Connecting clients using Docker:

For convenience, there is a Docker image hosted on ghcr.io with fedn preinstalled. To start a client using Docker:

```sh
docker run \
  -v $PWD/client.yaml:/app/client.yaml \
  -v $PWD/data/power.npz:/app/data/power.npz \
  -e FEDN_PACKAGE_EXTRACT_DIR=package \
  -e FEDN_DATA_PATH=/app/data/power.npz \
  ghcr.io/scaleoutsystems/fedn/fedn:0.9.0 run client -in client.yaml --force-ssl --secure=True
```

Now, on the Sessions tab in Studio, click "Start session" to initiate the training rounds.
For more details about the FEDn SDK or FEDn Studio, see the following [link](https://fedn.readthedocs.io/en/stable/introduction.html).

--------------------------------------------------------------------------------
/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scaleoutsystems/power-consumption-tutorial/9188b0ff6c74560d5e766b7ede45413e690f7ff6/banner.png

--------------------------------------------------------------------------------
/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scaleoutsystems/power-consumption-tutorial/9188b0ff6c74560d5e766b7ede45413e690f7ff6/overview.png