├── .gitattributes ├── .gitignore ├── FedUtils ├── fed │ ├── client.py │ ├── fedavg.py │ ├── fedavg_sgd.py │ ├── fedcurv.py │ ├── fedprox.py │ ├── fedreg.py │ ├── scaffold.py │ └── server.py └── models │ ├── CT │ ├── cnn.py │ ├── cnn_parallel.py │ ├── densenet.py │ └── densenet_norm_.py │ ├── cifar10 │ └── resnet9.py │ ├── cifar100 │ └── resnet9.py │ ├── emnist │ └── cnn.py │ ├── landmark │ ├── densenet.py │ └── densenet_.py │ ├── mnist │ └── cnn.py │ ├── shakespeare │ └── LSTM.py │ ├── transformer │ └── model.py │ └── utils.py ├── README.md ├── config_example.py ├── fixup └── cifar │ ├── cifar_train.py │ ├── models │ ├── __init__.py │ ├── fixup_resnet_cifar.py │ └── resnet_cifar.py │ └── utils.py ├── framework.png ├── main.py ├── requirements.txt └── tasks ├── CT ├── FedAvg_cnn_e20_lr5e4 │ ├── config.py │ └── train.log ├── FedAvg_e20_lr1e3 │ ├── config.py │ └── train.log ├── FedCurv_cnn_e20_lr1e3_g3 │ ├── config.py │ └── train.log ├── FedCurv_e20_lr1e3_g4 │ ├── config.py │ └── train.log ├── FedProx_cnn_e20_lr5e4_g1 │ ├── config.py │ └── train.log ├── FedProx_e20_lr1e3_g10 │ ├── config.py │ └── train.log ├── FedReg │ ├── config.py │ └── train.log ├── FedReg_cnn │ ├── config.py │ └── train.log ├── SCAFFOLD_e20_lr5e4 │ ├── config.py │ └── train.log ├── SGD_cnn_lr5e4 │ ├── config.py │ └── train.log └── SGD_lr1e4 │ ├── config.py │ └── train.log ├── cifar10 ├── FedAvg_e30_lr05 │ ├── config.py │ └── train.log ├── FedCurv_e30_lr05_g5 │ ├── config.py │ └── train.log ├── FedProx_e30_lr05_g001 │ ├── config.py │ └── train.log ├── FedReg_e30_lr10 │ ├── config.py │ └── train.log ├── SCAFFOLD_e30_lr01 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── cifar100 ├── FedAvg_e10_lr05 │ ├── config.py │ └── train.log ├── FedCurv_e10_lr5_g3 │ ├── config.py │ └── train.log ├── FedProx_e10_lr5_g01 │ ├── config.py │ └── train.log ├── FedReg │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── cifar100_transformer ├── FedAvg_e5_lr10 │ ├── config.py │ └── train.log ├── FedCurv_e5_lr10_g5 │ ├── config.py │ └── train.log ├── FedProx_e5_lr10_g001 │ ├── config.py │ └── train.log ├── FedReg_e5_lr10 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── cifar10_sc ├── FedAvg_e20_lr05 │ ├── config.py │ └── train.log ├── FedCurv_e20_lr05_g3 │ ├── config.py │ └── train.log ├── FedProx_e20_lr05_g01 │ ├── config.py │ └── train.log ├── FedReg_e20_lr05 │ ├── config.py │ └── train.log ├── SCAFFOLD_e20_lr05 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── emnist ├── FedAvg_e20_lr2 │ ├── config.py │ └── train.log ├── FedCurv_e20_lr2_g4 │ ├── config.py │ └── train.log ├── FedProx_e20_lr2_g001 │ ├── config.py │ └── train.log ├── FedReg_e20_lr2_g4 │ ├── config.py │ └── train.log ├── SCAFFOLD_e20_lr1 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── landmark ├── FedAvg │ ├── config.py │ └── train.log ├── FedReg │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── mnist ├── FedAvg_e40_lr1 │ ├── config.py │ └── train.log ├── FedCurv_e40_lr1_g4 │ ├── config.py │ └── train.log ├── FedProx_e40_lr1_g001 │ ├── config.py │ └── train.log ├── FedReg_e40_lr1_g4 │ ├── config.py │ └── train.log ├── SCAFFOLD_e40_lr1 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── mnist_fedprox ├── FedAvg │ ├── config.py │ └── train.log ├── FedProx │ ├── config.py │ └── train.log └── FedReg │ ├── config.py │ └── train.log ├── mnist_sc ├── FedAvg_e20_lr1 │ ├── config.py │ └── train.log ├── 
FedCurv_e20_lr1_g4 │ ├── config.py │ └── train.log ├── FedProx_e20_lr1_g01 │ ├── config.py │ └── train.log ├── FedReg_e20_lr1_g3 │ ├── config.py │ └── train.log ├── SCAFFOLD_e20_lr1 │ ├── config.py │ └── train.log └── SGD │ ├── config.py │ └── train.log ├── nist ├── FedAvg │ ├── config.py │ └── train.log ├── FedProx │ ├── config.py │ └── train.log └── FedReg │ ├── config.py │ └── train.log └── shakespeare ├── FedAvg ├── config.py └── train.log ├── FedProx ├── config.py └── train.log └── FedReg ├── config.py └── train.log /.gitattributes: -------------------------------------------------------------------------------- 1 | data.tar.gz filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | #*.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # custom 132 | data/ 133 | #task*/ 134 | script/ 135 | log/ 136 | -------------------------------------------------------------------------------- /FedUtils/fed/client.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.utils import CusDataset 2 | from torch.utils.data import DataLoader 3 | import torch.multiprocessing 4 | torch.multiprocessing.set_sharing_strategy('file_system') 5 | 6 | 7 | class Client(object): 8 | def __init__(self, id, group, train_data, eval_data, model, batchsize, train_transform=None, test_transform=None, traincusdataset=None, evalcusdataset=None): 9 | super(Client, self).__init__() 10 | self.model = model 11 | self.id = id 12 | self.group = group 13 | self.train_samplenum = len(train_data["x"]) 14 | self.num_train_samples = len(train_data["x"]) 15 | self.num_test_samples = [len(ed["x"]) for ed in eval_data] 16 | drop_last = False 17 | if traincusdataset: # load data using the user-provided custom dataset class 18 | self.train_data = DataLoader(traincusdataset(train_data, transform=train_transform), batch_size=batchsize, shuffle=True, drop_last=drop_last) 19 | self.train_data_fortest = DataLoader(evalcusdataset(train_data, transform=test_transform), batch_size=batchsize, shuffle=False,) 20 | num_workers = 0 21 | self.eval_data = [DataLoader(evalcusdataset(ed, transform=test_transform), batch_size=100, shuffle=False, num_workers=num_workers) for ed in eval_data] 22 | else: 23 | self.train_data = DataLoader(CusDataset(train_data, transform=train_transform), batch_size=batchsize, shuffle=True, drop_last=drop_last) 24 | self.train_data_fortest = DataLoader(CusDataset(train_data, transform=test_transform), batch_size=batchsize, shuffle=False) 25 | self.eval_data = [DataLoader(CusDataset(ed, transform=test_transform), batch_size=100, shuffle=False) for ed in eval_data] 26 | self.train_iter = iter(self.train_data) 27 | 28 | def set_param(self, state_dict): 29 | self.model.set_param(state_dict) 30 | return True 31 | 32 | def get_param(self): 33 | return self.model.get_param() 34 | 35 | def solve_grad(self): 36 | bytes_w = self.model.size 37 | grads, comp = self.model.get_gradients(self.train_data) 38 | bytes_r = self.model.size 39 | return ((self.num_train_samples, grads), (bytes_w, comp, bytes_r)) 40 | 41 | def solve_inner(self, num_epochs=1, step_func=None): 42 | bytes_w = self.model.size 43 | soln, comp, weight = self.model.solve_inner(self.train_data, num_epochs=num_epochs, step_func=step_func) 44 | bytes_r = self.model.size 45 | return (self.num_train_samples*weight, soln), (bytes_w, comp, bytes_r) 46 | 47 | def test(self): 48 | TC = [] 49 | LS = [] 50 | for ed in self.eval_data: 51 | total_correct, loss = self.model.test(ed) 52 | TC.append(total_correct) 53 | LS.append(loss) 54 | return TC, self.num_test_samples 55 | 56 | def train_error_and_loss(self): 57 | tot_correct, loss = self.model.test(self.train_data_fortest) 58 | return tot_correct,
loss, self.train_samplenum 59 | -------------------------------------------------------------------------------- /FedUtils/fed/fedavg.py: -------------------------------------------------------------------------------- 1 | from .server import Server 2 | from loguru import logger 3 | import numpy as np 4 | from FedUtils.models.utils import decode_stat 5 | import torch 6 | 7 | 8 | def step_func(model, data): 9 | lr = model.learning_rate 10 | parameters = list(model.parameters()) 11 | flop = model.flop 12 | 13 | def func(d): 14 | nonlocal flop, lr 15 | model.train() 16 | model.zero_grad() 17 | x, y = d 18 | pred = model.forward(x) 19 | loss = model.loss(pred, y).mean() 20 | grad = torch.autograd.grad(loss, parameters) 21 | for p, g in zip(parameters, grad): 22 | p.data.add_(-lr*g) 23 | return flop*len(x) 24 | return func 25 | 26 | 27 | class FedAvg(Server): 28 | step = 0 29 | 30 | def train(self): 31 | logger.info("Train with {} workers...".format(self.clients_per_round)) 32 | for r in range(self.num_rounds): 33 | if r % self.eval_every == 0: 34 | logger.info("-- Log At Round {} --".format(r)) 35 | stats = self.test() 36 | if self.eval_train: 37 | stats_train = self.train_error_and_loss() 38 | else: 39 | stats_train = stats 40 | logger.info("-- TEST RESULTS --") 41 | decode_stat(stats) 42 | logger.info("-- TRAIN RESULTS --") 43 | decode_stat(stats_train) 44 | 45 | indices, selected_clients = self.select_clients(r, num_clients=self.clients_per_round) 46 | np.random.seed(r) 47 | active_clients = np.random.choice(selected_clients, round(self.clients_per_round*(1.0-self.drop_percent)), replace=False) 48 | csolns = {} 49 | w = 0 50 | 51 | for idx, c in enumerate(active_clients): 52 | c.set_param(self.model.get_param()) 53 | soln, stats = c.solve_inner(num_epochs=self.num_epochs, step_func=step_func) # stats has (byte w, comp, byte r) 54 | soln = [1.0, soln[1]] 55 | w += soln[0] 56 | if len(csolns) == 0: 57 | csolns = {x: soln[1][x].detach()*soln[0] for x in soln[1]} 58 | else: 59 | for x in csolns: 60 | csolns[x].data.add_(soln[1][x]*soln[0]) 61 | del c 62 | csolns = [[w, {x: csolns[x]/w for x in csolns}]] 63 | 64 | self.latest_model = self.aggregate(csolns) 65 | 66 | logger.info("-- Log At Round {} --".format(r)) 67 | stats = self.test() 68 | if self.eval_train: 69 | stats_train = self.train_error_and_loss() 70 | else: 71 | stats_train = stats 72 | logger.info("-- TEST RESULTS --") 73 | decode_stat(stats) 74 | logger.info("-- TRAIN RESULTS --") 75 | decode_stat(stats_train) 76 | -------------------------------------------------------------------------------- /FedUtils/fed/fedavg_sgd.py: -------------------------------------------------------------------------------- 1 | from .server import Server 2 | from loguru import logger 3 | import numpy as np 4 | from FedUtils.models.utils import decode_stat 5 | import torch 6 | 7 | 8 | def step_func(model, data): 9 | lr = model.learning_rate 10 | parameters = list(model.parameters()) 11 | flop = model.flop 12 | 13 | def func(d): 14 | nonlocal flop, lr 15 | model.train() 16 | model.zero_grad() 17 | x, y = d 18 | pred = model.forward(x) 19 | loss = model.loss(pred, y).mean() 20 | grad = torch.autograd.grad(loss, parameters) 21 | for p, g in zip(parameters, grad): 22 | p.data.add_(-lr*g) 23 | return flop*len(x) 24 | return func 25 | 26 | 27 | class FedAvg(Server): 28 | step = 0 29 | 30 | def train(self): 31 | logger.info("Train with {} workers...".format(self.clients_per_round)) 32 | for r in range(self.num_rounds): 33 | if r % self.eval_every == 0: 34 | 
logger.info("-- Log At Round {} --".format(r)) 35 | stats = self.test() 36 | if self.eval_train: 37 | stats_train = self.train_error_and_loss() 38 | else: 39 | stats_train = stats 40 | logger.info("-- TEST RESULTS --") 41 | decode_stat(stats) 42 | logger.info("-- TRAIN RESULTS --") 43 | decode_stat(stats_train) 44 | 45 | indices, selected_clients = self.select_clients(r, num_clients=self.clients_per_round) 46 | np.random.seed(r) 47 | active_clients = np.random.choice(selected_clients, round(self.clients_per_round*(1.0-self.drop_percent)), replace=False) 48 | csolns = {} 49 | w = 0 50 | 51 | for idx, c in enumerate(active_clients): 52 | c.set_param(self.model.get_param()) 53 | for d in c.train_data: # only one step 54 | train_data = [d] 55 | break 56 | c.train_data = train_data 57 | soln, stats = c.solve_inner(num_epochs=self.num_epochs, step_func=step_func) # stats has (byte w, comp, byte r) 58 | soln = [1.0, soln[1]] 59 | w += soln[0] 60 | if len(csolns) == 0: 61 | csolns = {x: soln[1][x].detach()*soln[0] for x in soln[1]} 62 | else: 63 | for x in csolns: 64 | csolns[x].data.add_(soln[1][x]*soln[0]) 65 | del c 66 | csolns = [[w, {x: csolns[x]/w for x in csolns}]] 67 | 68 | self.latest_model = self.aggregate(csolns) 69 | 70 | logger.info("-- Log At Round {} --".format(r)) 71 | stats = self.test() 72 | if self.eval_train: 73 | stats_train = self.train_error_and_loss() 74 | else: 75 | stats_train = stats 76 | logger.info("-- TEST RESULTS --") 77 | decode_stat(stats) 78 | logger.info("-- TRAIN RESULTS --") 79 | decode_stat(stats_train) 80 | -------------------------------------------------------------------------------- /FedUtils/fed/fedcurv.py: -------------------------------------------------------------------------------- 1 | from .server import Server 2 | from loguru import logger 3 | import numpy as np 4 | from FedUtils.models.utils import decode_stat 5 | import torch 6 | from functools import partial 7 | 8 | 9 | def step_func(model, data, fed): 10 | lr = model.learning_rate 11 | parameters = list(model.parameters()) 12 | flop = model.flop 13 | fisher, theta_fisher, gamma = fed.fisher, fed.theta_fisher, fed.gamma 14 | 15 | def func(d): 16 | nonlocal lr, flop, gamma 17 | model.train() 18 | model.zero_grad() 19 | x, y = d 20 | pred = model.forward(x) 21 | loss = model.loss(pred, y).mean() 22 | if fisher is not None: 23 | for p, f, tf in zip(parameters, fisher, theta_fisher): 24 | loss += ((p**2*f)*gamma-2*gamma*tf*p).sum() 25 | grad = torch.autograd.grad(loss, parameters) 26 | for p, g in zip(parameters, grad): 27 | p.data.add_(-lr*g) 28 | return flop*len(x) # only consider the flop in NN 29 | return func 30 | 31 | 32 | class FedCurv(Server): 33 | def train(self): 34 | logger.info("Train with {} workers...".format(self.clients_per_round)) 35 | self.fisher = None 36 | self.theta_fisher = None 37 | for r in range(self.num_rounds): 38 | if r % self.eval_every == 0: 39 | logger.info("-- Log At Round {} --".format(r)) 40 | stats = self.test() 41 | if self.eval_train: 42 | stats_train = self.train_error_and_loss() 43 | else: 44 | stats_train = stats 45 | logger.info("-- TEST RESULTS --") 46 | decode_stat(stats) 47 | logger.info("-- TRAIN RESULTS --") 48 | decode_stat(stats_train) 49 | 50 | indices, selected_clients = self.select_clients(r, num_clients=self.clients_per_round) 51 | np.random.seed(r) 52 | active_clients = np.random.choice(selected_clients, round(self.clients_per_round*(1.0-self.drop_percent)), replace=False) 53 | 54 | csolns = {} 55 | w = 0 56 | temp_fisher = None 57 | 
temp_theta_fisher = None 58 | for idx, c in enumerate(active_clients): 59 | c.set_param(self.model.get_param()) 60 | soln, stats = c.solve_inner(num_epochs=self.num_epochs, step_func=partial(step_func, fed=self)) # stats has (byte w, comp, byte r) 61 | soln = [1.0, soln[1]] 62 | w += soln[0] 63 | if len(csolns) == 0: 64 | csolns = {x: soln[1][x].detach()*soln[0] for x in soln[1]} 65 | else: 66 | for x in csolns: 67 | csolns[x].data.add_(soln[1][x]*soln[0]) 68 | size = 0.0 69 | cfisher = None 70 | ctfisher = None 71 | for d in c.train_data: 72 | x, y = d 73 | size += len(x) 74 | c.model.eval() 75 | gradients = [] 76 | for i in range(len(x)): 77 | loss = c.model.loss(c.model(x[i].unsqueeze(0)), y[i].unsqueeze(0)).squeeze() 78 | gradient = torch.autograd.grad(loss, c.model.parameters()) 79 | with torch.no_grad(): 80 | gradients.append([_.detach() for _ in gradient]) 81 | fs = [[a*a for a in x] for x in gradients] 82 | fs = [sum([x[i] for x in fs]).detach()*1.0 for i in range(len(fs[0]))] 83 | with torch.no_grad(): 84 | if cfisher is None: 85 | cfisher = fs 86 | ctfisher = [a*b for a, b in zip(fs, c.model.parameters())] 87 | else: 88 | cfisher = [a+b for a, b in zip(cfisher, fs)] 89 | ctfisher = [a+b*c for a, b, c in zip(ctfisher, fs, c.model.parameters())] 90 | cfisher = [a.detach()/size for a in cfisher] 91 | ctfisher = [a.detach()/size for a in ctfisher] 92 | if temp_fisher is None: 93 | temp_fisher = cfisher 94 | temp_theta_fisher = ctfisher 95 | else: 96 | temp_fisher = [a+b for a, b in zip(temp_fisher, cfisher)] 97 | temp_theta_fisher = [a+b for a, b in zip(temp_theta_fisher, ctfisher)] 98 | del c 99 | # csolns.append(soln) 100 | csolns = [[w, {x: csolns[x]/w for x in csolns}]] 101 | 102 | self.latest_model = self.aggregate(csolns) 103 | self.fisher = temp_fisher 104 | self.theta_fisher = temp_theta_fisher 105 | logger.info("-- Log At Round {} --".format(r)) 106 | stats = self.test() 107 | if self.eval_train: 108 | stats_train = self.train_error_and_loss() 109 | else: 110 | stats_train = stats 111 | logger.info("-- TEST RESULTS --") 112 | decode_stat(stats) 113 | logger.info("-- TRAIN RESULTS --") 114 | decode_stat(stats_train) 115 | -------------------------------------------------------------------------------- /FedUtils/fed/fedprox.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | from .server import Server 3 | from loguru import logger 4 | import numpy as np 5 | from FedUtils.models.utils import decode_stat 6 | import torch 7 | 8 | 9 | def step_func(model, data, fed): 10 | lr = model.learning_rate 11 | parameters = list(model.parameters()) 12 | flop = model.flop 13 | gamma, old_parameters = fed.gamma, list(fed.model.parameters()) 14 | 15 | def func(d): 16 | nonlocal lr, flop, gamma 17 | model.train() 18 | model.zero_grad() 19 | x, y = d 20 | pred = model.forward(x) 21 | loss = model.loss(pred, y).mean() 22 | for p, op in zip(parameters, old_parameters): 23 | loss += ((p-op.detach())**2).sum()*gamma 24 | grad = torch.autograd.grad(loss, parameters) 25 | for p, g in zip(parameters, grad): 26 | p.data.add_(-lr*g) 27 | return flop*len(x) # only consider the flop in NN 28 | return func 29 | 30 | 31 | class FedProx(Server): 32 | def train(self): 33 | logger.info("Train with {} workers...".format(self.clients_per_round)) 34 | for r in range(self.num_rounds): 35 | if r % self.eval_every == 0: 36 | logger.info("-- Log At Round {} --".format(r)) 37 | stats = self.test() 38 | if self.eval_train: 39 | stats_train = 
self.train_error_and_loss() 40 | else: 41 | stats_train = stats 42 | logger.info("-- TEST RESULTS --") 43 | decode_stat(stats) 44 | logger.info("-- TRAIN RESULTS --") 45 | decode_stat(stats_train) 46 | 47 | indices, selected_clients = self.select_clients(r, num_clients=self.clients_per_round) 48 | np.random.seed(r) 49 | active_clients = np.random.choice(selected_clients, round(self.clients_per_round*(1.0-self.drop_percent)), replace=False) 50 | 51 | csolns = {} 52 | w = 0 53 | 54 | for idx, c in enumerate(active_clients): 55 | c.set_param(self.model.get_param()) 56 | soln, stats = c.solve_inner(num_epochs=self.num_epochs, step_func=partial(step_func, fed=self)) # stats has (byte w, comp, byte r) 57 | soln = [1.0, soln[1]] 58 | w += soln[0] 59 | if len(csolns) == 0: 60 | csolns = {x: soln[1][x].detach()*soln[0] for x in soln[1]} 61 | else: 62 | for x in csolns: 63 | csolns[x].data.add_(soln[1][x]*soln[0]) 64 | del c 65 | csolns = [[w, {x: csolns[x]/w for x in csolns}]] 66 | 67 | self.latest_model = self.aggregate(csolns) 68 | logger.info("-- Log At Round {} --".format(r)) 69 | stats = self.test() 70 | if self.eval_train: 71 | stats_train = self.train_error_and_loss() 72 | else: 73 | stats_train = stats 74 | logger.info("-- TEST RESULTS --") 75 | decode_stat(stats) 76 | logger.info("-- TRAIN RESULTS --") 77 | decode_stat(stats_train) 78 | -------------------------------------------------------------------------------- /FedUtils/fed/fedreg.py: -------------------------------------------------------------------------------- 1 | from .server import Server 2 | from loguru import logger 3 | import numpy as np 4 | from FedUtils.models.utils import decode_stat 5 | import torch 6 | from functools import partial 7 | import copy 8 | 9 | 10 | def step_func(model, data, fed): 11 | lr = model.learning_rate 12 | parameters = list(model.parameters()) 13 | flop = model.flop 14 | gamma = fed.gamma 15 | add_mask = fed.add_mask 16 | beta = 0.5 17 | 18 | psuedo_data, perturb_data = [], [] 19 | for d in data: 20 | x, y = d 21 | psuedo, perturb = fed.model.generate_fake(x, y) 22 | psuedo_data.append(psuedo) 23 | perturb_data.append(perturb) 24 | idx = 0 25 | median_model, old_model, penal_model = copy.deepcopy(fed.model), copy.deepcopy(fed.model), copy.deepcopy(fed.model) 26 | median_parameters = list(median_model.parameters()) 27 | old_parameters = list(old_model.parameters()) 28 | penal_parameters = list(penal_model.parameters()) 29 | 30 | def func(d): 31 | nonlocal idx, add_mask, beta, flop, gamma, lr 32 | model.train() 33 | median_model.train() 34 | penal_model.train() 35 | model.zero_grad() 36 | median_model.zero_grad() 37 | penal_model.zero_grad() 38 | 39 | x, y = d 40 | psd, ptd = psuedo_data[idx % len(psuedo_data)], perturb_data[idx % len(perturb_data)] 41 | idx += 1 42 | 43 | for p, m, o in zip(parameters, median_parameters, old_parameters): 44 | m.data.copy_(gamma*p+(1-gamma)*o) 45 | 46 | mloss = median_model.loss(median_model(x), y).mean() 47 | grad1 = torch.autograd.grad(mloss, median_parameters) 48 | 49 | if add_mask > 0: 50 | fnx, fny, pred_fny = old_model.generate_fake(x, y)[0] 51 | avg_fny = (1.0-0*pred_fny)/pred_fny.shape[-1] 52 | mask_grad = torch.autograd.grad(median_model.loss(median_model(fnx), avg_fny).mean(), median_parameters) 53 | 54 | sm = sum([(gm * gm).sum() for gm in mask_grad]) 55 | sw = (sum([(g1 * gm).sum() for g1, gm in zip(grad1, mask_grad)])) / sm.add(1e-30) 56 | grad1 = [a-sw*b for a, b in zip(grad1, mask_grad)] 57 | 58 | for g1, p in zip(grad1, parameters): 59 | 
p.data.add_(-lr*g1) 60 | 61 | for p, o, pp in zip(parameters, old_parameters, penal_parameters): 62 | pp.data.copy_(p*beta+o*(1-beta)) 63 | 64 | ploss = penal_model.loss(penal_model(psd[0]), psd[2]).mean() 65 | grad2 = torch.autograd.grad(ploss, penal_parameters) 66 | with torch.no_grad(): 67 | dtheta = [(p-o) for p, o in zip(parameters, old_parameters)] 68 | s2 = sum([(g2*g2).sum() for g2 in grad2]) 69 | w = (sum([(g0*g2).sum() for g0, g2 in zip(dtheta, grad2)]))/s2.add(1e-30) 70 | w = w.clamp(0.0, ) 71 | 72 | pertub_ploss = penal_model.loss(penal_model(ptd[0]), ptd[1]).mean() 73 | grad3 = torch.autograd.grad(pertub_ploss, penal_parameters) 74 | s3 = sum([(g3*g3).sum() for g3 in grad3]) 75 | w1 = (sum([((g0-w*g2)*g3).sum() for g0, g2, g3 in zip(dtheta, grad2, grad3)]))/s3.add(1e-30) 76 | w1 = w1.clamp(0.0,) 77 | 78 | for g2, g3, p in zip(grad2, grad3, parameters): 79 | p.data.add_(-w*g2-w1*g3) 80 | if add_mask: 81 | return flop*len(x)*4 # only consider the flop in NN 82 | else: 83 | return flop*len(x)*3 84 | return func 85 | 86 | 87 | class FedReg(Server): 88 | def train(self): 89 | logger.info("Train with {} workers...".format(self.clients_per_round)) 90 | epochs = self.num_epochs 91 | for r in range(self.num_rounds): 92 | self.round = r 93 | 94 | if r % self.eval_every == 0: 95 | logger.info("-- Log At Round {} --".format(r)) 96 | stats = self.test() 97 | if self.eval_train: 98 | stats_train = self.train_error_and_loss() 99 | else: 100 | stats_train = stats 101 | logger.info("-- TEST RESULTS --") 102 | decode_stat(stats) 103 | logger.info("-- TRAIN RESULTS --") 104 | decode_stat(stats_train) 105 | 106 | indices, selected_clients = self.select_clients(r, num_clients=self.clients_per_round) 107 | np.random.seed(r) 108 | active_clients = np.random.choice(selected_clients, round(self.clients_per_round*(1.0-self.drop_percent)), replace=False) 109 | 110 | csolns = [] 111 | 112 | w = 0 113 | for idx, c in enumerate(active_clients): 114 | c.set_param(self.model.get_param()) 115 | soln, stats = c.solve_inner(num_epochs=self.num_epochs, step_func=partial(step_func, fed=self)) 116 | soln = [1.0, soln[1]] 117 | w += soln[0] 118 | if len(csolns) == 0: 119 | csolns = {x: soln[1][x].detach()*soln[0] for x in soln[1]} 120 | else: 121 | for x in csolns: 122 | csolns[x].data.add_(soln[1][x]*soln[0]) 123 | del c 124 | 125 | csolns = [[w, {x: csolns[x]/w for x in csolns}]] 126 | 127 | self.latest_model = self.aggregate(csolns) 128 | 129 | logger.info("-- Log At Round {} --".format(r)) 130 | stats = self.test() 131 | if self.eval_train: 132 | stats_train = self.train_error_and_loss() 133 | else: 134 | stats_train = stats 135 | logger.info("-- TEST RESULTS --") 136 | decode_stat(stats) 137 | logger.info("-- TRAIN RESULTS --") 138 | decode_stat(stats_train) 139 | -------------------------------------------------------------------------------- /FedUtils/fed/server.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | import numpy as np 3 | from .client import Client 4 | 5 | 6 | class Server(object): 7 | def __init__(self, config, Model, datasets, train_transform=None, test_transform=None, traincusdataset=None, evalcusdataset=None): 8 | super(Server, self).__init__() 9 | self.config = config 10 | self.model_param = config["model_param"] 11 | self.inner_opt = config["inner_opt"] 12 | self.clients_per_round = config["clients_per_round"] 13 | self.num_rounds = config["num_rounds"] 14 | self.eval_every = config["eval_every"] 15 | self.batch_size = 
config["batch_size"] 16 | self.drop_percent = config["drop_percent"] 17 | self.num_epochs = config["num_epochs"] 18 | self.eval_train = config["eval_train"] 19 | if "gamma" in config: 20 | self.gamma = config["gamma"] 21 | else: 22 | self.gamma = 1.0 23 | 24 | if "add_mask" in config: 25 | self.add_mask = config["add_mask"] 26 | else: 27 | self.add_mask = -1 28 | self.train_transform = train_transform 29 | self.test_transform = test_transform 30 | 31 | self.model = Model(*self.model_param, self.inner_opt) 32 | self.cmodel = Model(*self.model_param, self.inner_opt) 33 | self.traincusdataset = traincusdataset 34 | self.evalcusdataset = evalcusdataset 35 | self.clients = self.__set_clients(datasets, Model) 36 | 37 | def __set_clients(self, dataset, Model): 38 | users, groups, train_data, test_data = dataset 39 | if len(groups) == 0: 40 | groups = [None for _ in users] 41 | all_clients = [(u, g, train_data[u], [td[u] for td in test_data], Model, self.batch_size, self.train_transform, self.test_transform) for u, g in zip(users, groups)] 42 | return all_clients 43 | 44 | def set_param(self, state_dict): 45 | self.model.set_param(state_dict) 46 | return True 47 | 48 | def get_param(self): 49 | return self.model.get_param() 50 | 51 | def _aggregate(self, wstate_dicts): 52 | old_params = self.get_param() 53 | state_dict = {x: 0.0 for x in self.get_param()} 54 | wtotal = 0.0 55 | for w, st in wstate_dicts: 56 | wtotal += w 57 | for name in state_dict.keys(): 58 | assert name in state_dict 59 | state_dict[name] += st[name]*w 60 | state_dict = {x: state_dict[x]/wtotal for x in state_dict} 61 | return state_dict 62 | 63 | def aggregate(self, wstate_dicts): 64 | state_dict = self._aggregate(wstate_dicts) 65 | return self.set_param(state_dict) 66 | 67 | def select_clients(self, seed, num_clients=20): 68 | num_clients = min(num_clients, len(self.clients)) 69 | np.random.seed(seed) 70 | indices = np.random.choice(range(len(self.clients)), num_clients, replace=False) 71 | clients = [self.clients[c] for c in indices] 72 | clients = [Client(c[0], c[1], c[2], c[3], self.cmodel, c[5], c[6], c[7], self.traincusdataset, self.evalcusdataset) for c in clients] 73 | return indices, clients 74 | 75 | def save(self): 76 | raise NotImplementedError 77 | 78 | def train(self): 79 | raise NotImplementedError 80 | 81 | def test(self): 82 | num_samples = [] 83 | tot_correct = [] 84 | clients = [x for x in self.clients if len(x[3][0]['x']) > 0] 85 | clients = [Client(c[0], c[1], c[2], c[3], self.cmodel, c[5], c[6], c[7], self.traincusdataset, self.evalcusdataset) for c in clients] 86 | [m.set_param(self.get_param()) for m in clients] 87 | 88 | for c in clients: 89 | ct, ns = c.test() 90 | tot_correct.append(ct) 91 | num_samples.append(ns) 92 | ids = [c.id for c in clients] 93 | groups = [c.group for c in clients] 94 | num_test = len(tot_correct[0]) 95 | tot_correct = [[a[i] for a in tot_correct] for i in range(num_test)] 96 | num_samples = [[a[i] for a in num_samples] for i in range(num_test)] 97 | return ids, groups, num_samples, tot_correct 98 | 99 | def train_error_and_loss(self): 100 | num_samples = [] 101 | tot_correct = [] 102 | losses = [] 103 | clients = self.clients 104 | clients = [Client(c[0], c[1], c[2], c[3], self.cmodel, c[5], c[6], c[7], self.traincusdataset, self.evalcusdataset) for c in clients] 105 | [m.set_param(self.get_param()) for m in clients] 106 | for c in clients: 107 | ct, cl, ns = c.train_error_and_loss() 108 | tot_correct.append(ct*1.0) 109 | num_samples.append(ns) 110 | losses.append(cl*1.0) 111 
| ids = [c.id for c in clients] 112 | groups = [c.group for c in clients] 113 | return ids, groups, num_samples, tot_correct, losses 114 | -------------------------------------------------------------------------------- /FedUtils/models/CT/densenet.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from FedUtils.models.utils import Flops, FSGM 3 | import torch 4 | import sys 5 | from .densenet_norm_ import densenet121 6 | 7 | 8 | class DenseNetModel(nn.Module): 9 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, p_iters=10, ps_eta=0.1, pt_eta=0.001): 10 | super(DenseNetModel, self).__init__() 11 | self.num_classes = num_classes 12 | self.num_inp = 224*224*3 13 | torch.manual_seed(123+seed) 14 | 15 | self.net = densenet121(num_classes=self.num_classes) 16 | self.size = sys.getsizeof(self.state_dict()) 17 | self.softmax = nn.Softmax(-1) 18 | 19 | if optimizer is not None: 20 | self.optimizer = optimizer(self.parameters()) 21 | else: 22 | assert learning_rate, "should provide at least one of optimizer and learning rate" 23 | self.learning_rate = learning_rate 24 | 25 | self.p_iters = p_iters 26 | self.ps_eta = ps_eta 27 | self.pt_eta = pt_eta 28 | 29 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 30 | if torch.cuda.device_count() > 0: 31 | self.net = self.net.cuda() 32 | 33 | def set_param(self, state_dict): 34 | self.load_state_dict(state_dict) 35 | return True 36 | 37 | def get_param(self): 38 | return self.state_dict() 39 | 40 | def predict(self, x): 41 | self.eval() 42 | with torch.no_grad(): 43 | return self.softmax(self.forward(x)) 44 | 45 | def generate_fake(self, x, y): 46 | self.eval() 47 | psuedo, perturb = x.detach(), x.detach() 48 | if psuedo.device != next(self.parameters()).device: 49 | psuedo = psuedo.to(next(self.parameters()).device) 50 | perturb = perturb.to(next(self.parameters()).device) 51 | psuedo = FSGM(self, psuedo, y, self.p_iters, self.ps_eta) 52 | perturb = FSGM(self, perturb, y, self.p_iters, self.pt_eta) 53 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 54 | return [psuedo, y, psuedo_y], [perturb, y, perturb_y] 55 | 56 | def loss(self, pred, gt): 57 | pred = self.softmax(pred) 58 | if gt.device != pred.device: 59 | gt = gt.to(pred.device) 60 | if len(gt.shape) != len(pred.shape): 61 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 62 | assert len(gt.shape) == len(pred.shape) 63 | loss = -gt*torch.log(pred+1e-12) 64 | loss = loss.sum(1) 65 | return loss 66 | 67 | def forward(self, data): 68 | if data.device != next(self.parameters()).device: 69 | data = data.to(next(self.parameters()).device) 70 | data = data.reshape(-1, 3, 224, 224) 71 | out = self.net(data) 72 | return out 73 | 74 | def train_onestep(self, data): 75 | self.train() 76 | self.zero_grad() 77 | self.optimizer.zero_grad() 78 | x, y = data 79 | pred = self.forward(x) 80 | loss = self.loss(pred, y).mean() 81 | loss.backward() 82 | self.optimizer.step() 83 | 84 | return self.flop*len(x) 85 | 86 | def solve_inner(self, data, num_epochs=1, step_func=None): 87 | comp = 0.0 88 | weight = 1.0 89 | steps = 0 90 | if step_func is None: 91 | func = self.train_onestep 92 | else: 93 | func = step_func(self, data) 94 | 95 | for _ in range(num_epochs): 96 | for x, y in data: 97 | c = func([x, y]) 98 | comp += c 99 | steps += 1.0 100 | soln = self.get_param() 101 | return soln, comp, weight 102 | 103 | def test(self, data): 104 | tot_correct = 0.0 105 | loss = 
0.0 106 | self.eval() 107 | for d in data: 108 | x, y = d 109 | with torch.no_grad(): 110 | pred = self.forward(x) 111 | loss += self.loss(pred, y).sum() 112 | pred_max = pred.argmax(-1).float() 113 | assert len(pred_max.shape) == len(y.shape) 114 | if pred_max.device != y.device: 115 | pred_max = pred_max.detach().to(y.device) 116 | tot_correct += (pred_max == y).float().sum() 117 | return tot_correct, loss 118 | -------------------------------------------------------------------------------- /FedUtils/models/emnist/cnn.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from FedUtils.models.utils import Flops, FSGM 3 | import torch 4 | import sys 5 | 6 | 7 | class Reshape(nn.Module): 8 | def forward(self, x): 9 | return x.reshape(-1, 576) 10 | 11 | 12 | class Model(nn.Module): 13 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, p_iters=10, ps_eta=0.1, pt_eta=0.001): 14 | super(Model, self).__init__() 15 | self.num_classes = num_classes 16 | self.num_inp = 784 17 | torch.manual_seed(123+seed) 18 | 19 | self.net = nn.Sequential(*[nn.Conv2d(1, 32, 5), nn.ReLU(), nn.Conv2d(32, 32, 5), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(32, 64, 5), 20 | nn.MaxPool2d(2), nn.ReLU(), Reshape(), nn.Linear(576, 256), nn.ReLU(), nn.Linear(256, self.num_classes)]) 21 | self.size = sys.getsizeof(self.state_dict()) 22 | self.softmax = nn.Softmax(-1) 23 | 24 | if optimizer is not None: 25 | self.optimizer = optimizer(self.parameters()) 26 | else: 27 | assert learning_rate, "should provide at least one of optimizer and learning rate" 28 | self.learning_rate = learning_rate 29 | 30 | self.p_iters = p_iters 31 | self.ps_eta = ps_eta 32 | self.pt_eta = pt_eta 33 | 34 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 35 | if torch.cuda.device_count() > 0: 36 | self.net = self.net.cuda() 37 | 38 | def set_param(self, state_dict): 39 | self.load_state_dict(state_dict) 40 | return True 41 | 42 | def get_param(self): 43 | return self.state_dict() 44 | 45 | def predict(self, x): 46 | self.eval() 47 | with torch.no_grad(): 48 | return self.softmax(self.forward(x)) 49 | 50 | def generate_fake(self, x, y): 51 | self.eval() 52 | psuedo, perturb = x.detach(), x.detach() 53 | if psuedo.device != next(self.parameters()).device: 54 | psuedo = psuedo.to(next(self.parameters()).device) 55 | perturb = perturb.to(next(self.parameters()).device) 56 | psuedo = FSGM(self, psuedo, y, self.p_iters, self.ps_eta) 57 | perturb = FSGM(self, perturb, y, self.p_iters, self.pt_eta) 58 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 59 | return [psuedo, y, psuedo_y], [perturb, y, perturb_y] 60 | 61 | def loss(self, pred, gt): 62 | pred = self.softmax(pred) 63 | if gt.device != pred.device: 64 | gt = gt.to(pred.device) 65 | if len(gt.shape) != len(pred.shape): 66 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 67 | assert len(gt.shape) == len(pred.shape) 68 | loss = -gt*torch.log(pred+1e-12) 69 | loss = loss.sum(1) 70 | return loss 71 | 72 | def forward(self, data): 73 | if data.device != next(self.parameters()).device: 74 | data = data.to(next(self.parameters()).device) 75 | data = data.reshape(-1, 1, 28, 28) 76 | out = self.net(data) 77 | return out 78 | 79 | def train_onestep(self, data): 80 | self.train() 81 | self.zero_grad() 82 | self.optimizer.zero_grad() 83 | x, y = data 84 | pred = self.forward(x) 85 | loss = self.loss(pred, y).mean() 86 | loss.backward() 87 | self.optimizer.step() 88 | 
89 | return self.flop*len(x) 90 | 91 | def solve_inner(self, data, num_epochs=1, step_func=None): 92 | comp = 0.0 93 | weight = 1.0 94 | steps = 0 95 | if step_func is None: 96 | func = self.train_onestep 97 | else: 98 | func = step_func(self, data) 99 | 100 | for _ in range(num_epochs): 101 | for x, y in data: 102 | c = func([x, y]) 103 | comp += c 104 | steps += 1.0 105 | soln = self.get_param() 106 | return soln, comp, weight 107 | 108 | def test(self, data): 109 | tot_correct = 0.0 110 | loss = 0.0 111 | self.eval() 112 | for d in data: 113 | x, y = d 114 | with torch.no_grad(): 115 | pred = self.forward(x) 116 | loss += self.loss(pred, y).sum() 117 | pred_max = pred.argmax(-1).float() 118 | assert len(pred_max.shape) == len(y.shape) 119 | if pred_max.device != y.device: 120 | pred_max = pred_max.detach().to(y.device) 121 | tot_correct += (pred_max == y).float().sum() 122 | return tot_correct, loss 123 | -------------------------------------------------------------------------------- /FedUtils/models/landmark/densenet.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from FedUtils.models.utils import Flops, FSGM 3 | import torch 4 | import sys 5 | from .densenet_ import densenet121 6 | 7 | 8 | class DenseNetModel(nn.Module): 9 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, p_iters=10, ps_eta=0.1, pt_eta=0.001, withclassifier=True): 10 | super(DenseNetModel, self).__init__() 11 | self.num_classes = num_classes 12 | self.num_inp = 64*64*3 13 | torch.manual_seed(123+seed) 14 | 15 | self.net = densenet121(num_classes=self.num_classes, withclassifier=withclassifier) 16 | self.size = sys.getsizeof(self.state_dict()) 17 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 18 | if torch.cuda.device_count() > 0: 19 | self.net = self.net.cuda() 20 | 21 | self.softmax = nn.Softmax(-1) 22 | 23 | if optimizer is not None: 24 | self.optimizer = optimizer(self.parameters()) 25 | else: 26 | assert learning_rate, "should provide at least one of optimizer and learning rate" 27 | self.learning_rate = learning_rate 28 | 29 | self.p_iters = p_iters 30 | self.ps_eta = ps_eta 31 | self.pt_eta = pt_eta 32 | 33 | def set_param(self, state_dict): 34 | self.load_state_dict(state_dict) 35 | return True 36 | 37 | def get_param(self): 38 | return self.state_dict() 39 | 40 | def predict(self, x): 41 | self.eval() 42 | with torch.no_grad(): 43 | return self.softmax(self.forward(x)) 44 | 45 | def generate_fake(self, x, y): 46 | self.eval() 47 | psuedo, perturb = x.detach(), x.detach() 48 | if psuedo.device != next(self.parameters()).device: 49 | psuedo = psuedo.to(next(self.parameters()).device) 50 | perturb = perturb.to(next(self.parameters()).device) 51 | psuedo = FSGM(self, psuedo, y, self.p_iters, self.ps_eta) 52 | perturb = FSGM(self, perturb, y, self.p_iters, self.pt_eta) 53 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 54 | return [psuedo, y, psuedo_y], [perturb, y, perturb_y] 55 | 56 | def loss(self, pred, gt): 57 | pred = self.softmax(pred) 58 | if gt.device != pred.device: 59 | gt = gt.to(pred.device) 60 | if len(gt.shape) != len(pred.shape): 61 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 62 | assert len(gt.shape) == len(pred.shape) 63 | loss = -gt*torch.log(pred+1e-12) 64 | loss = loss.sum(1) 65 | return loss 66 | 67 | def forward(self, data): 68 | if data.device != next(self.parameters()).device: 69 | data = 
data.to(next(self.parameters()).device) 70 | data = data.reshape(-1, 3, 64, 64) 71 | out = self.net(data) 72 | return out 73 | 74 | def train_onestep(self, data): 75 | self.train() 76 | self.zero_grad() 77 | self.optimizer.zero_grad() 78 | x, y = data 79 | pred = self.forward(x) 80 | loss = self.loss(pred, y).mean() 81 | loss.backward() 82 | self.optimizer.step() 83 | 84 | return self.flop*len(x) 85 | 86 | def solve_inner(self, data, num_epochs=1, step_func=None): 87 | comp = 0.0 88 | weight = 1.0 89 | steps = 0 90 | if step_func is None: 91 | func = self.train_onestep 92 | else: 93 | func = step_func(self, data) 94 | 95 | for _ in range(num_epochs): 96 | for x, y in data: 97 | c = func([x, y]) 98 | comp += c 99 | steps += 1.0 100 | soln = self.get_param() 101 | return soln, comp, weight 102 | 103 | def test(self, data): 104 | tot_correct = 0.0 105 | loss = 0.0 106 | self.eval() 107 | for d in data: 108 | x, y = d 109 | with torch.no_grad(): 110 | pred = self.forward(x) 111 | loss += self.loss(pred, y).sum() 112 | pred_max = pred.argmax(-1).float() 113 | assert len(pred_max.shape) == len(y.shape) 114 | if pred_max.device != y.device: 115 | pred_max = pred_max.detach().to(y.device) 116 | tot_correct += (pred_max == y).float().sum() 117 | return tot_correct, loss 118 | -------------------------------------------------------------------------------- /FedUtils/models/mnist/cnn.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from FedUtils.models.utils import Flops, FSGM 3 | import torch 4 | import sys 5 | 6 | 7 | class Reshape(nn.Module): 8 | def forward(self, x): 9 | return x.reshape(-1, 576) 10 | 11 | 12 | class Model(nn.Module): 13 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, p_iters=10, ps_eta=0.1, pt_eta=0.001): 14 | super(Model, self).__init__() 15 | self.num_classes = num_classes 16 | self.num_inp = 784 17 | torch.manual_seed(123+seed) 18 | 19 | self.net = nn.Sequential(*[nn.Conv2d(1, 32, 5), nn.ReLU(), nn.Conv2d(32, 32, 5), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(32, 64, 5), 20 | nn.MaxPool2d(2), nn.ReLU(), Reshape(), nn.Linear(576, 256), nn.ReLU(), nn.Linear(256, self.num_classes)]) 21 | self.size = sys.getsizeof(self.state_dict()) 22 | self.softmax = nn.Softmax(-1) 23 | 24 | if optimizer is not None: 25 | self.optimizer = optimizer(self.parameters()) 26 | else: 27 | assert learning_rate, "should provide at least one of optimizer and learning rate" 28 | self.learning_rate = learning_rate 29 | 30 | self.p_iters = p_iters 31 | self.ps_eta = ps_eta 32 | self.pt_eta = pt_eta 33 | 34 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 35 | if torch.cuda.device_count() > 0: 36 | self.net = self.net.cuda() 37 | 38 | def set_param(self, state_dict): 39 | self.load_state_dict(state_dict) 40 | return True 41 | 42 | def get_param(self): 43 | return self.state_dict() 44 | 45 | def predict(self, x): 46 | self.eval() 47 | with torch.no_grad(): 48 | return self.softmax(self.forward(x)) 49 | 50 | def generate_fake(self, x, y): 51 | self.eval() 52 | psuedo, perturb = x.detach(), x.detach() 53 | if psuedo.device != next(self.parameters()).device: 54 | psuedo = psuedo.to(next(self.parameters()).device) 55 | perturb = perturb.to(next(self.parameters()).device) 56 | psuedo = FSGM(self, psuedo, y, self.p_iters, self.ps_eta) 57 | perturb = FSGM(self, perturb, y, self.p_iters, self.pt_eta) 58 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 59 | return [psuedo, y, 
psuedo_y], [perturb, y, perturb_y] 60 | 61 | def loss(self, pred, gt): 62 | pred = self.softmax(pred) 63 | if gt.device != pred.device: 64 | gt = gt.to(pred.device) 65 | if len(gt.shape) != len(pred.shape): 66 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 67 | assert len(gt.shape) == len(pred.shape) 68 | loss = -gt*torch.log(pred+1e-12) 69 | loss = loss.sum(1) 70 | return loss 71 | 72 | def forward(self, data): 73 | if data.device != next(self.parameters()).device: 74 | data = data.to(next(self.parameters()).device) 75 | data = data.reshape(-1, 1, 28, 28) 76 | out = self.net(data) 77 | return out 78 | 79 | def train_onestep(self, data): 80 | self.train() 81 | self.zero_grad() 82 | self.optimizer.zero_grad() 83 | x, y = data 84 | pred = self.forward(x) 85 | loss = self.loss(pred, y).mean() 86 | loss.backward() 87 | self.optimizer.step() 88 | 89 | return self.flop*len(x) 90 | 91 | def solve_inner(self, data, num_epochs=1, step_func=None): 92 | comp = 0.0 93 | weight = 1.0 94 | steps = 0 95 | if step_func is None: 96 | func = self.train_onestep 97 | else: 98 | func = step_func(self, data) 99 | 100 | for _ in range(num_epochs): 101 | for x, y in data: 102 | c = func([x, y]) 103 | comp += c 104 | steps += 1.0 105 | soln = self.get_param() 106 | return soln, comp, weight 107 | 108 | def test(self, data): 109 | tot_correct = 0.0 110 | loss = 0.0 111 | self.eval() 112 | for d in data: 113 | x, y = d 114 | with torch.no_grad(): 115 | pred = self.forward(x) 116 | loss += self.loss(pred, y).sum() 117 | pred_max = pred.argmax(-1).float() 118 | assert len(pred_max.shape) == len(y.shape) 119 | if pred_max.device != y.device: 120 | pred_max = pred_max.detach().to(y.device) 121 | tot_correct += (pred_max == y).float().sum() 122 | return tot_correct, loss 123 | -------------------------------------------------------------------------------- /FedUtils/models/shakespeare/LSTM.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from FedUtils.models.utils import Flops 3 | import torch 4 | import sys 5 | import numpy as np 6 | import random 7 | 8 | 9 | class Attack(object): 10 | def __init__(self, min_idx, max_idx, rep_idx, ps_iters, pt_iters): 11 | self.min_idx = min_idx 12 | self.max_idx = max_idx 13 | self.rep_idx = rep_idx # default word for replacement 14 | self.ps_iters = ps_iters # the number of iters to generate psuedo data 15 | self.pt_iters = pt_iters # the number of iters to generate perturbed data 16 | 17 | def perturb(self, x): 18 | x = np.copy(x) 19 | batchsize, length = x.shape 20 | 21 | for idx in range(batchsize): 22 | replace_idx = random.randint(0, length-1) 23 | x[idx, replace_idx] = random.randint(self.min_idx, self.max_idx-1) 24 | return x 25 | 26 | def generate_adversary(self, x, y): 27 | psuedo, perturb = x, x 28 | 29 | for _ in range(self.ps_iters): 30 | psuedo = self.perturb(psuedo) 31 | for _ in range(self.pt_iters): 32 | perturb = self.perturb(perturb) 33 | return psuedo, perturb 34 | 35 | 36 | class Model(nn.Module): 37 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, ps_iters=40, pt_iters=10): 38 | super(Model, self).__init__() 39 | self.num_classes = num_classes 40 | self.num_inp = 80 41 | torch.manual_seed(123+seed) 42 | self.embed = nn.Embedding(num_classes+1, 8) # embed with linear 43 | self.net = nn.LSTM(input_size=8, hidden_size=256, num_layers=2, batch_first=True) 44 | self.outnet = nn.Linear(256, num_classes) 45 | self.softmax = nn.Softmax(-1) 46 | 47 | self.size = 
sys.getsizeof(self.state_dict()) 48 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 49 | if optimizer is not None: 50 | self.optimizer = optimizer(self.parameters()) 51 | else: 52 | assert learning_rate, "should provide at least one of optimizer and learning rate" 53 | self.learning_rate = learning_rate 54 | self.attack = Attack(0, self.num_classes-1, self.num_classes, ps_iters=ps_iters, pt_iters=pt_iters) 55 | 56 | if torch.cuda.device_count() > 0: 57 | self.embed = self.embed.cuda() 58 | self.net = self.net.cuda() 59 | self.outnet = self.outnet.cuda() 60 | 61 | def set_param(self, state_dict): 62 | self.load_state_dict(state_dict) 63 | return True 64 | 65 | def get_param(self): 66 | return self.state_dict() 67 | 68 | def generate_fake(self, x, y): # generate adversarial examples by randomly replacing words 69 | psuedo, perturb = self.attack.generate_adversary(x.long().detach().cpu().numpy(), y.cpu().numpy()) 70 | psuedo, perturb = torch.tensor(psuedo).to(self.embed.weight.device), torch.tensor(perturb).to(self.embed.weight.device) 71 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 72 | return [psuedo, y, psuedo_y], [perturb, y, perturb_y] 73 | 74 | def loss(self, pred, gt): 75 | pred = self.softmax(pred) 76 | if gt.device != pred.device: 77 | gt = gt.to(pred.device) 78 | if len(gt.shape) < len(pred.shape): 79 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 80 | assert len(gt.shape) == len(pred.shape) 81 | loss = -gt*torch.log(pred+1e-12) 82 | loss = loss.sum(-1) 83 | return loss 84 | 85 | def forward(self, data): 86 | if data.device != self.embed.weight.device: 87 | data = data.to(self.embed.weight.device) 88 | if len(data.shape) == 2: 89 | data = self.embed(data.long()) 90 | out = self.outnet(self.net(data)[0][:, -1:, :]) 91 | return out 92 | 93 | def predict(self, x): 94 | self.eval() 95 | with torch.no_grad(): 96 | return self.softmax(self.forward(x)) 97 | 98 | def train_onestep(self, data): 99 | assert self.optimizer, "the optimizer of model should be provided" 100 | self.train() 101 | self.zero_grad() 102 | self.optimizer.zero_grad() 103 | x, y = data 104 | pred = self.forward(x) 105 | loss = self.loss(pred, y).mean() 106 | loss.backward() 107 | self.optimizer.step() 108 | return self.flop*len(x) 109 | 110 | def solve_inner(self, data, step_func=None, num_epochs=1): # step_func should be a closure whose input is (model, data) and whose output is a callable that carries out one training step 111 | comp = 0.0 112 | weight = 1.0 113 | steps = 0 114 | if step_func is None: 115 | func = self.train_onestep 116 | else: 117 | func = step_func(self, data) 118 | 119 | for _ in range(num_epochs): 120 | for x, y in data: 121 | c = func([x, y]) 122 | comp += c 123 | steps += 1.0 124 | soln = self.get_param() 125 | return soln, comp, weight 126 | 127 | def test(self, data): 128 | tot_correct = 0.0 129 | loss = 0.0 130 | self.eval() 131 | for d in data: 132 | x, y = d 133 | with torch.no_grad(): 134 | pred = self.forward(x) 135 | loss += self.loss(pred, y).sum() 136 | if len(y.shape) < len(pred.shape): 137 | y = y.float().squeeze(1) 138 | else: 139 | y = y.argmax(-1).float().squeeze(1) 140 | pred_max = pred.argmax(-1).float().squeeze(1) 141 | assert len(pred_max.shape) == len(y.shape) 142 | assert len(y.shape) == 1 143 | if pred_max.device != y.device: 144 | pred_max = pred_max.detach().to(y.device) 145 | tot_correct += (pred_max == y).float().sum() 146 | return tot_correct, loss 147 |
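
As the comment on `solve_inner` above notes, every federated optimizer in `FedUtils/fed` drives local training through a `step_func` closure factory: it receives `(model, data)` and returns a per-batch callable that performs one update and reports the compute spent. A minimal sketch of a custom step function in this shape, modeled on the plain-SGD one in `FedUtils/fed/fedavg.py` (the gradient-clipping threshold is an illustrative addition, not part of the repository):

~~~python
import torch


def clipped_sgd_step_func(model, data, grad_clip=10.0):
    # Closure factory matching the solve_inner contract: takes (model, data)
    # and returns func(d), which trains on one batch and returns its FLOP count.
    lr = model.learning_rate
    parameters = list(model.parameters())
    flop = model.flop

    def func(d):
        model.train()
        model.zero_grad()
        x, y = d
        loss = model.loss(model.forward(x), y).mean()
        grad = torch.autograd.grad(loss, parameters)
        # Illustrative global-norm clipping (an assumption, not in the repo).
        total_norm = torch.sqrt(sum((g**2).sum() for g in grad))
        scale = (grad_clip/(total_norm+1e-12)).clamp(max=1.0)
        for p, g in zip(parameters, grad):
            p.data.add_(-lr*scale*g)  # manual SGD step, as in fedavg.py
        return flop*len(x)
    return func
~~~

A client's model would consume it exactly as the servers do, e.g. `soln, comp, weight = model.solve_inner(loader, num_epochs=1, step_func=clipped_sgd_step_func)`.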
-------------------------------------------------------------------------------- /FedUtils/models/transformer/model.py: -------------------------------------------------------------------------------- 1 | from vision_transformer_pytorch import VisionTransformer 2 | import torch.nn as nn 3 | import torch 4 | import sys 5 | from FedUtils.models.utils import Flops, FSGM 6 | 7 | 8 | class Model(nn.Module): 9 | def __init__(self, num_classes, optimizer=None, learning_rate=None, seed=1, p_iters=10, ps_eta=0.1, pt_eta=0.001): 10 | super(Model, self).__init__() 11 | self.num_classes = num_classes 12 | self.num_inp = 384*384*3 13 | torch.manual_seed(123+seed) 14 | 15 | self.net = VisionTransformer.from_pretrained("ViT-B_32") 16 | classifier = self.net.classifier 17 | s1, s2 = classifier.weight.shape 18 | self.net.classifier = nn.Linear(s2, self.num_classes) 19 | self.net.classifier.weight.data.mul_(0).add_(1.0) 20 | self.net.classifier.bias.data.mul_(0) 21 | self.size = sys.getsizeof(self.state_dict()) 22 | self.flop = Flops(self, torch.tensor([[0.0 for _ in range(self.num_inp)]])) 23 | if torch.cuda.device_count() > 0: 24 | self.net = self.net.cuda() 25 | 26 | self.softmax = nn.Softmax(-1) 27 | 28 | if optimizer is not None: 29 | self.optimizer = optimizer(self.parameters()) 30 | else: 31 | assert learning_rate, "should provide at least one of optimizer and learning rate" 32 | self.learning_rate = learning_rate 33 | 34 | self.p_iters = p_iters 35 | self.ps_eta = ps_eta 36 | self.pt_eta = pt_eta 37 | 38 | def set_param(self, state_dict): 39 | self.load_state_dict(state_dict) 40 | return True 41 | 42 | def get_param(self): 43 | return self.state_dict() 44 | 45 | def predict(self, x): 46 | self.eval() 47 | with torch.no_grad(): 48 | return self.softmax(self.forward(x)) 49 | 50 | def generate_fake(self, x, y): 51 | self.eval() 52 | psuedo, perturb = x.detach(), x.detach() 53 | if psuedo.device != next(self.parameters()).device: 54 | psuedo = psuedo.to(next(self.parameters()).device) 55 | perturb = perturb.to(next(self.parameters()).device) 56 | psuedo = FSGM(self, psuedo, y, self.p_iters, self.ps_eta) 57 | perturb = FSGM(self, perturb, y, self.p_iters, self.pt_eta) 58 | psuedo_y, perturb_y = self.predict(psuedo), self.predict(perturb) 59 | return [psuedo, y, psuedo_y], [perturb, y, perturb_y] 60 | 61 | def loss(self, pred, gt): 62 | pred = self.softmax(pred) 63 | if gt.device != pred.device: 64 | gt = gt.to(pred.device) 65 | if len(gt.shape) != len(pred.shape): 66 | gt = nn.functional.one_hot(gt.long(), self.num_classes).float() 67 | assert len(gt.shape) == len(pred.shape) 68 | loss = -gt*torch.log(pred+1e-12) 69 | loss = loss.sum(1) 70 | return loss 71 | 72 | def forward(self, data): 73 | if data.device != next(self.parameters()).device: 74 | data = data.to(next(self.parameters()).device) 75 | data = data.reshape(-1, 3, 384, 384) 76 | out = self.net(data) 77 | return out 78 | 79 | def train_onestep(self, data): 80 | self.train() 81 | self.zero_grad() 82 | self.optimizer.zero_grad() 83 | x, y = data 84 | pred = self.forward(x) 85 | loss = self.loss(pred, y).mean() 86 | loss.backward() 87 | self.optimizer.step() 88 | 89 | return self.flop*len(x) 90 | 91 | def solve_inner(self, data, num_epochs=1, step_func=None): 92 | comp = 0.0 93 | weight = 1.0 94 | steps = 0 95 | if step_func is None: 96 | func = self.train_onestep 97 | else: 98 | func = step_func(self, data) 99 | 100 | for _ in range(num_epochs): 101 | for x, y in data: 102 | c = func([x, y]) 103 | comp += c 104 | steps += 1.0 105 | soln = 
self.get_param() 106 | return soln, comp, weight 107 | 108 | def test(self, data): 109 | tot_correct = 0.0 110 | loss = 0.0 111 | self.eval() 112 | for d in data: 113 | x, y = d 114 | with torch.no_grad(): 115 | pred = self.forward(x) 116 | loss += self.loss(pred, y).sum() 117 | pred_max = pred.argmax(-1).float() 118 | assert len(pred_max.shape) == len(y.shape) 119 | if pred_max.device != y.device: 120 | pred_max = pred_max.detach().to(y.device) 121 | tot_correct += (pred_max == y).float().sum() 122 | return tot_correct, loss 123 | -------------------------------------------------------------------------------- /FedUtils/models/utils.py: -------------------------------------------------------------------------------- 1 | from thop import profile 2 | import os 3 | import json 4 | from torch.utils.data import TensorDataset 5 | import numpy as np 6 | import torch 7 | from loguru import logger 8 | from PIL import Image 9 | import h5py 10 | 11 | 12 | def FSGM(model, inp, label, iters, eta): # iterative FGSM-style perturbation of the input 13 | inp.requires_grad = True 14 | minv, maxv = float(inp.min().detach().cpu().numpy()), float(inp.max().detach().cpu().numpy()) 15 | for _ in range(iters): 16 | loss = model.loss(model.forward(inp), label).mean() 17 | dp = torch.sign(torch.autograd.grad(loss, inp)[0]) 18 | inp.data.add_(eta*dp.detach()).clamp_(minv, maxv) 19 | return inp 20 | 21 | 22 | class CusDataset(TensorDataset): 23 | def __init__(self, data, transform=None): 24 | assert "x" in data 25 | assert "y" in data 26 | self.data = {} 27 | self.data["x"] = (data["x"]) 28 | self.data["y"] = (data["y"]) 29 | self.transform = transform 30 | 31 | def __getitem__(self, item): 32 | if self.transform is None: 33 | ret = torch.tensor(self.data['x'][item]) 34 | else: 35 | ret = np.array(self.data["x"][item]).astype("uint8") 36 | if ret.shape[-1] == 3: 37 | ret = ret 38 | elif ret.shape[0] == 3: 39 | ret = ret.transpose(1, 2, 0) 40 | else: 41 | ret = ret 42 | ret = self.transform(Image.fromarray(ret)) 43 | 44 | return [ret, torch.tensor(self.data["y"][item])] 45 | 46 | def __len__(self): 47 | return len(self.data["x"]) 48 | 49 | 50 | class ImageDataset(TensorDataset): 51 | def __init__(self, data, transform=None, image_path=None): 52 | self.transform = transform 53 | 54 | assert "x" in data 55 | assert "y" in data 56 | self.data = {} 57 | self.data["x"] = (data["x"]) 58 | self.data["y"] = (data["y"]) 59 | if len(self.data["x"]) < 20000: 60 | File = h5py.File(image_path, "r") 61 | self.image_path = {} 62 | for name in self.data["x"]: 63 | name = name.replace(".png", "") 64 | self.image_path[name+"_X"] = np.array(File[name+"_X"]) 65 | self.image_path[name+"_Y"] = np.array(File[name+"_Y"]) 66 | File.close() 67 | else: 68 | self.image_path = h5py.File(image_path, "r") 69 | 70 | def __getitem__(self, item): 71 | path = self.data["x"][item] 72 | path = path.replace(".png", "") 73 | image, y = Image.fromarray((np.array(self.image_path[path+"_X"])*255).transpose(1, 2, 0).astype(np.uint8)), self.image_path[path+"_Y"] 74 | if self.transform is None: 75 | ret = torch.tensor(np.array(image)) 76 | else: 77 | try: 78 | assert image.mode == "RGB" 79 | except: 80 | image = image.convert("RGB") 81 | ret = self.transform(image) 82 | 83 | return [ret, torch.tensor(self.data["y"][item])] 84 | 85 | def __len__(self): 86 | return len(self.data["x"]) 87 | 88 | 89 | def Flops(model, inp): 90 | return profile(model, inputs=(inp,), verbose=False)[0] 91 | 92 | 93 | def read_data(train_data_path, test_data_path): 94 | if not isinstance(test_data_path, list): 95 | test_data_path =
[test_data_path, ] 96 | groups = [] 97 | train_data = {} 98 | test_data = [{} for _ in test_data_path] 99 | train_files = os.listdir(train_data_path) 100 | train_files = [f for f in train_files if f.endswith(".json")] 101 | for f in train_files: 102 | file_path = os.path.join(train_data_path, f) 103 | with open(file_path, "r") as inf: 104 | cdata = json.load(inf) 105 | if "hierarchies" in cdata: 106 | groups.extend(cdata["hierarchies"]) 107 | train_data.update(cdata["user_data"]) 108 | for F, td in zip(test_data_path, test_data): 109 | test_files = os.listdir(F) 110 | test_files = [f for f in test_files if f.endswith(".json")] 111 | for f in test_files: 112 | file_path = os.path.join(F, f) 113 | with open(file_path, "r") as inf: 114 | cdata = json.load(inf) 115 | td.update(cdata["user_data"]) 116 | clients = list(sorted(train_data.keys())) 117 | return clients, groups, train_data, test_data 118 | 119 | 120 | def decode_stat(stat): 121 | if len(stat) == 4: 122 | ids, groups, num_samples, tot_correct = stat 123 | if isinstance(num_samples[0], list): 124 | assert len(num_samples) == len(tot_correct) 125 | idx = 0 126 | for a, b in zip(tot_correct, num_samples): 127 | logger.info("Test_{} Accuracy: {}".format(idx, sum(a) * 1.0 / sum(b))) 128 | idx += 1 129 | else: 130 | logger.info("Accuracy: {}".format(sum(tot_correct) / sum(num_samples))) 131 | elif len(stat) == 5: 132 | ids, groups, num_samples, tot_correct, losses = stat 133 | logger.info("Accuracy: {} Loss: {}".format(sum(tot_correct) / sum(num_samples), sum(losses) / sum(num_samples))) 134 | else: 135 | raise ValueError 136 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Acceleration of Federated Learning with Alleviated Forgetting in Local Training 2 | 3 | ## Abstract 4 | 5 | Federated learning (FL) enables distributed optimization of machine learning models while protecting privacy by independently training local models on each client and then aggregating parameters on a central server, thereby producing an effective global model. Although a variety of FL algorithms have been proposed, their training efficiency remains low when the data are not independently and identically distributed (non-i.i.d.) across different clients. We observe that the slow convergence rates of the existing methods are (at least partially) caused by the catastrophic forgetting issue during the local training stage on each individual client, which leads to a large increase in the loss on the previous training data provided by other clients. Here, we propose FedReg, an algorithm to accelerate FL with alleviated knowledge forgetting in the local training stage by regularizing locally trained parameters with the loss on generated pseudo data, which encode the knowledge of previous training data learned by the global model. Our comprehensive experiments demonstrate that FedReg not only significantly improves the convergence rate of FL, especially when the neural network architecture is deep and the clients' data are extremely non-i.i.d., but also protects privacy better in classification problems and is more robust against gradient inversion attacks. 6 | 7 | ![Illustration of FedReg.](./framework.png) 8 | 9 | ## Requirements 10 | 11 | Anaconda is recommended to run the project. 
12 | ~~~ 13 | conda create -n FedReg python=3.8 14 | source activate FedReg 15 | ~~~ 16 | 17 | Install torch: 18 | ~~~ 19 | pip install torch torchvision torchaudio 20 | ~~~ 21 | 22 | Install related packages: 23 | ~~~ 24 | pip install -r requirements.txt 25 | ~~~ 26 | 27 | Get data from [here](https://drive.google.com/file/d/1w6L9enAEB8e0rsJKDoRfoVDmOm-vfaXg/view?usp=sharing) and unzip it: 28 | ~~~ 29 | tar -xzvf data.tar.gz 30 | ~~~ 31 | 32 | ## Run experiments 33 | 34 | The experiments can be run by: 35 | ~~~ 36 | python main.py -c PATH_TO_CONFIG 37 | ~~~ 38 | For example: 39 | ~~~ 40 | python main.py -c tasks/mnist/FedReg_e40_lr1_g4/config 41 | ~~~ 42 | Please refer to *config_example.py* for the format of the config file. In the "tasks" directory, the config files for all the tasks presented in our paper are provided. 43 | -------------------------------------------------------------------------------- /config_example.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | 6 | config = { 7 | 8 | "seed": 1, # random seed 9 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=2e-1, pt_eta=2e-3), # the model to be trained 10 | "inner_opt": None, # the inner optimizer; in FedReg, only the learning rate is used 11 | "optimizer": FedReg, # FL optimizer, can be FedAvg, FedProx, FedCurv or SCAFFOLD 12 | "model_param": (10,), # the arguments used to initialize the model 13 | "inp_size": (784,), # the input shape 14 | "train_path": "data/mnist_10000/data/train/", # the path to the train data 15 | "test_path": "data/mnist_10000/data/valid/", # the path to the test data 16 | "clients_per_round": 10, # number of clients sampled in each round 17 | "num_rounds": 500, # number of total rounds 18 | "eval_every": 1, # the number of rounds to evaluate the model performance. 1 is recommended here. 19 | "drop_percent": 0.0, # the rate to drop a client. 0 is used in our experiments 20 | "num_epochs": 40, # the number of epochs in local training stage 21 | "batch_size": 10, # the batch size in local training stage 22 | "use_fed": 1, # whether to use federated learning algorithms 23 | "log_path": "tasks/mnist/FedReg/train.log", # the path to save the log file 24 | "train_transform": None, # the preprocessing of train data, please refer to torchvision.transforms 25 | "test_transform": None, # the preprocessing of test data 26 | "eval_train": True, # whether to evaluate the model performance on the training data. 
Recommended to be False when the training dataset is too large 27 | "gamma": 0.4, # the value of gamma when FedReg is used, the weight for the proximal term when FedProx is used, or the value of lambda when FedCurv is used 28 | 29 | 30 | } 31 | -------------------------------------------------------------------------------- /fixup/cifar/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .fixup_resnet_cifar import * 2 | from .resnet_cifar import * -------------------------------------------------------------------------------- /fixup/cifar/models/fixup_resnet_cifar.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | __all__ = ['FixupResNet', 'fixup_resnet20', 'fixup_resnet32', 'fixup_resnet44', 'fixup_resnet56', 'fixup_resnet110', 'fixup_resnet1202'] 7 | 8 | 9 | def conv3x3(in_planes, out_planes, stride=1): 10 | """3x3 convolution with padding""" 11 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 12 | padding=1, bias=False) 13 | def conv7x7(in_planes, out_planes, stride=1): 14 | """7x7 convolution with padding""" 15 | return nn.Conv2d(in_planes, out_planes, kernel_size=7, stride=stride, 16 | padding=3, bias=False) 17 | 18 | 19 | class FixupBasicBlock(nn.Module): 20 | expansion = 1 21 | 22 | def __init__(self, inplanes, planes, stride=1, downsample=None): 23 | super(FixupBasicBlock, self).__init__() 24 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 25 | self.bias1a = nn.Parameter(torch.zeros(1)) 26 | self.conv1 = conv3x3(inplanes, planes, stride) 27 | self.bias1b = nn.Parameter(torch.zeros(1)) 28 | self.relu = nn.ReLU(inplace=True) 29 | self.bias2a = nn.Parameter(torch.zeros(1)) 30 | self.conv2 = conv3x3(planes, planes) 31 | self.scale = nn.Parameter(torch.ones(1)) 32 | self.bias2b = nn.Parameter(torch.zeros(1)) 33 | self.downsample = downsample 34 | 35 | def forward(self, x): 36 | identity = x 37 | 38 | out = self.conv1(x + self.bias1a) 39 | out = self.relu(out + self.bias1b) 40 | 41 | out = self.conv2(out + self.bias2a) 42 | out = out * self.scale + self.bias2b 43 | 44 | if self.downsample is not None: 45 | identity = self.downsample(x + self.bias1a) 46 | identity = torch.cat((identity, torch.zeros_like(identity)), 1) 47 | 48 | out += identity 49 | out = self.relu(out) 50 | 51 | return out 52 | 53 | 54 | class FixupResNet(nn.Module): 55 | 56 | def __init__(self, block, layers, num_classes=10): 57 | super(FixupResNet, self).__init__() 58 | self.num_layers = sum(layers) 59 | self.inplanes = 16 60 | self.conv1 = conv3x3(3, 16) 61 | self.bias1 = nn.Parameter(torch.zeros(1)) 62 | self.relu = nn.ReLU(inplace=True) 63 | self.layer1 = self._make_layer(block, 16, layers[0]) 64 | self.layer2 = self._make_layer(block, 32, layers[1], stride=2) 65 | self.layer3 = self._make_layer(block, 64, layers[2], stride=2) 66 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 67 | self.bias2 = nn.Parameter(torch.zeros(1)) 68 | self.fc = nn.Linear(64, num_classes) 69 | 70 | for m in self.modules(): 71 | if isinstance(m, FixupBasicBlock): 72 | nn.init.normal_(m.conv1.weight, mean=0, std=np.sqrt(2 / (m.conv1.weight.shape[0] * np.prod(m.conv1.weight.shape[2:]))) * self.num_layers ** (-0.5)) 73 | nn.init.constant_(m.conv2.weight, 0) 74 | elif isinstance(m, nn.Linear): 75 | nn.init.constant_(m.weight, 0) 76 | nn.init.constant_(m.bias, 0) 77 | 78 | def _make_layer(self, block, planes, blocks, stride=1): 
79 | downsample = None 80 | if stride != 1: 81 | downsample = nn.AvgPool2d(1, stride=stride) 82 | 83 | layers = [] 84 | layers.append(block(self.inplanes, planes, stride, downsample)) 85 | self.inplanes = planes 86 | for _ in range(1, blocks): 87 | layers.append(block(planes, planes)) 88 | 89 | return nn.Sequential(*layers) 90 | 91 | def forward(self, x): 92 | x = self.conv1(x) 93 | x = self.relu(x + self.bias1) 94 | 95 | x = self.layer1(x) 96 | x = self.layer2(x) 97 | x = self.layer3(x) 98 | 99 | x = self.avgpool(x) 100 | x = x.view(x.size(0), -1) 101 | x = self.fc(x + self.bias2) 102 | 103 | return x 104 | 105 | 106 | def fixup_resnet20(**kwargs): 107 | """Constructs a Fixup-ResNet-20 model. 108 | 109 | """ 110 | model = FixupResNet(FixupBasicBlock, [3, 3, 3], **kwargs) 111 | return model 112 | 113 | 114 | def fixup_resnet32(**kwargs): 115 | """Constructs a Fixup-ResNet-32 model. 116 | 117 | """ 118 | model = FixupResNet(FixupBasicBlock, [5, 5, 5], **kwargs) 119 | return model 120 | 121 | 122 | def fixup_resnet44(**kwargs): 123 | """Constructs a Fixup-ResNet-44 model. 124 | 125 | """ 126 | model = FixupResNet(FixupBasicBlock, [7, 7, 7], **kwargs) 127 | return model 128 | 129 | 130 | def fixup_resnet56(**kwargs): 131 | """Constructs a Fixup-ResNet-56 model. 132 | 133 | """ 134 | model = FixupResNet(FixupBasicBlock, [9, 9, 9], **kwargs) 135 | return model 136 | 137 | 138 | def fixup_resnet110(**kwargs): 139 | """Constructs a Fixup-ResNet-110 model. 140 | 141 | """ 142 | model = FixupResNet(FixupBasicBlock, [18, 18, 18], **kwargs) 143 | return model 144 | 145 | 146 | def fixup_resnet1202(**kwargs): 147 | """Constructs a Fixup-ResNet-1202 model. 148 | 149 | """ 150 | model = FixupResNet(FixupBasicBlock, [200, 200, 200], **kwargs) 151 | return model 152 | -------------------------------------------------------------------------------- /fixup/cifar/models/resnet_cifar.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | __all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202'] 7 | 8 | 9 | def conv3x3(in_planes, out_planes, stride=1): 10 | """3x3 convolution with padding""" 11 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 12 | padding=1, bias=False) 13 | 14 | 15 | class BasicBlock(nn.Module): 16 | expansion = 1 17 | 18 | def __init__(self, inplanes, planes, stride=1, downsample=None): 19 | super(BasicBlock, self).__init__() 20 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 21 | self.conv1 = conv3x3(inplanes, planes, stride) 22 | self.bn1 = nn.BatchNorm2d(planes) 23 | self.relu = nn.ReLU(inplace=True) 24 | self.conv2 = conv3x3(planes, planes) 25 | self.bn2 = nn.BatchNorm2d(planes) 26 | self.downsample = downsample 27 | 28 | def forward(self, x): 29 | identity = x 30 | 31 | out = self.conv1(x) 32 | out = self.bn1(out) 33 | out = self.relu(out) 34 | 35 | out = self.conv2(out) 36 | out = self.bn2(out) 37 | 38 | if self.downsample is not None: 39 | identity = self.downsample(x) 40 | identity = torch.cat((identity, torch.zeros_like(identity)), 1) 41 | 42 | out += identity 43 | out = self.relu(out) 44 | 45 | return out 46 | 47 | 48 | class ResNet(nn.Module): 49 | 50 | def __init__(self, block, layers, num_classes=10): 51 | super(ResNet, self).__init__() 52 | self.num_layers = sum(layers) 53 | self.inplanes = 16 54 | self.conv1 = conv3x3(3, 16) 55 | self.bn1 = nn.BatchNorm2d(16) 56 | self.relu 
= nn.ReLU(inplace=True) 57 | self.layer1 = self._make_layer(block, 16, layers[0]) 58 | self.layer2 = self._make_layer(block, 32, layers[1], stride=2) 59 | self.layer3 = self._make_layer(block, 64, layers[2], stride=2) 60 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 61 | self.fc = nn.Linear(64, num_classes) 62 | 63 | for m in self.modules(): 64 | if isinstance(m, nn.Conv2d): 65 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 66 | elif isinstance(m, nn.BatchNorm2d): 67 | nn.init.constant_(m.weight, 1) 68 | nn.init.constant_(m.bias, 0) 69 | 70 | # Zero-initialize the last BN in each residual branch, 71 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 72 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 73 | for m in self.modules(): 74 | if isinstance(m, BasicBlock): 75 | nn.init.constant_(m.bn2.weight, 0) 76 | 77 | def _make_layer(self, block, planes, blocks, stride=1): 78 | downsample = None 79 | if stride != 1: 80 | downsample = nn.Sequential( 81 | nn.AvgPool2d(1, stride=stride), 82 | nn.BatchNorm2d(self.inplanes), 83 | ) 84 | 85 | layers = [] 86 | layers.append(block(self.inplanes, planes, stride, downsample)) 87 | self.inplanes = planes 88 | for _ in range(1, blocks): 89 | layers.append(block(planes, planes)) 90 | 91 | return nn.Sequential(*layers) 92 | 93 | def forward(self, x): 94 | x = self.conv1(x) 95 | x = self.bn1(x) 96 | x = self.relu(x) 97 | 98 | x = self.layer1(x) 99 | x = self.layer2(x) 100 | x = self.layer3(x) 101 | 102 | x = self.avgpool(x) 103 | x = x.view(x.size(0), -1) 104 | x = self.fc(x) 105 | 106 | return x 107 | 108 | 109 | def resnet20(**kwargs): 110 | """Constructs a ResNet-20 model. 111 | 112 | """ 113 | model = ResNet(BasicBlock, [3, 3, 3], **kwargs) 114 | return model 115 | 116 | 117 | def resnet32(**kwargs): 118 | """Constructs a ResNet-32 model. 119 | 120 | """ 121 | model = ResNet(BasicBlock, [5, 5, 5], **kwargs) 122 | return model 123 | 124 | 125 | def resnet44(**kwargs): 126 | """Constructs a ResNet-44 model. 127 | 128 | """ 129 | model = ResNet(BasicBlock, [7, 7, 7], **kwargs) 130 | return model 131 | 132 | 133 | def resnet56(**kwargs): 134 | """Constructs a ResNet-56 model. 135 | 136 | """ 137 | model = ResNet(BasicBlock, [9, 9, 9], **kwargs) 138 | return model 139 | 140 | 141 | def resnet110(**kwargs): 142 | """Constructs a ResNet-110 model. 143 | 144 | """ 145 | model = ResNet(BasicBlock, [18, 18, 18], **kwargs) 146 | return model 147 | 148 | 149 | def resnet1202(**kwargs): 150 | """Constructs a ResNet-1202 model. 151 | 152 | """ 153 | model = ResNet(BasicBlock, [200, 200, 200], **kwargs) 154 | return model -------------------------------------------------------------------------------- /fixup/cifar/utils.py: -------------------------------------------------------------------------------- 1 | '''Some helper functions for PyTorch, including: 2 | - get_mean_and_std: calculate the mean and std value of dataset. 3 | - msr_init: net parameter initialization. 4 | - progress_bar: progress bar mimic xlua.progress. 5 | ''' 6 | import os 7 | import sys 8 | import time 9 | import math 10 | 11 | import torch.nn as nn 12 | import torch.nn.init as init 13 | import torch.nn.functional as F 14 | 15 | import numpy as np 16 | import torch 17 | 18 | def mixup_data(x, y, alpha=1.0, use_cuda=True, per_sample=False): 19 | 20 | '''Compute the mixup data. 
Return mixed inputs, pairs of targets, and lambda''' 21 | batch_size = x.size()[0] 22 | if use_cuda: 23 | index = torch.randperm(batch_size).cuda() 24 | else: 25 | index = torch.randperm(batch_size) 26 | 27 | if alpha > 0. and not per_sample: 28 | lam = torch.zeros(y.size()).fill_(np.random.beta(alpha, alpha)).cuda() 29 | mixed_x = lam.view(-1, 1, 1, 1) * x + (1 - lam.view(-1, 1, 1, 1)) * x[index,:] 30 | elif alpha > 0.: 31 | lam = torch.Tensor(np.random.beta(alpha, alpha, size=y.size())).cuda() 32 | mixed_x = lam.view(-1, 1, 1, 1) * x + (1 - lam.view(-1, 1, 1, 1)) * x[index,:] 33 | else: 34 | lam = torch.ones(y.size()).cuda() 35 | mixed_x = x 36 | 37 | y_a, y_b = y, y[index] 38 | return mixed_x, y_a, y_b, lam 39 | 40 | def mixup_lam_idx(batch_size, alpha, use_cuda=True): 41 | '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda''' 42 | if alpha > 0.: 43 | lam = np.random.beta(alpha, alpha) 44 | else: 45 | lam = 1. 46 | if use_cuda: 47 | index = torch.randperm(batch_size).cuda() 48 | else: 49 | index = torch.randperm(batch_size) 50 | 51 | return lam, index 52 | 53 | def mixup_criterion(y_a, y_b, lam): 54 | return lambda criterion, pred: criterion(pred, y_a, lam) + criterion(pred, y_b, 1 - lam) 55 | 56 | def get_mean_and_std(dataset): 57 | '''Compute the mean and std value of dataset.''' 58 | dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2) 59 | mean = torch.zeros(3) 60 | std = torch.zeros(3) 61 | print('==> Computing mean and std..') 62 | for inputs, targets in dataloader: 63 | for i in range(3): 64 | mean[i] += inputs[:,i,:,:].mean() 65 | std[i] += inputs[:,i,:,:].std() 66 | mean.div_(len(dataset)) 67 | std.div_(len(dataset)) 68 | return mean, std 69 | 70 | def init_params(net): 71 | '''Init layer parameters.''' 72 | for m in net.modules(): 73 | if isinstance(m, nn.Conv2d): 74 | init.kaiming_normal_(m.weight, mode='fan_out') 75 | if m.bias is not None: 76 | init.constant_(m.bias, 0) 77 | elif isinstance(m, nn.BatchNorm2d): 78 | init.constant_(m.weight, 1) 79 | init.constant_(m.bias, 0) 80 | elif isinstance(m, nn.Linear): 81 | init.normal_(m.weight, std=1e-3) 82 | if m.bias is not None: 83 | init.constant_(m.bias, 0) 84 | 85 | 86 | _, term_width = os.popen('stty size', 'r').read().split() 87 | term_width = int(term_width) 88 | 89 | TOTAL_BAR_LENGTH = 65. 90 | last_time = time.time() 91 | begin_time = last_time 92 | 93 | def progress_bar(current, total, msg=None): 94 | global last_time, begin_time 95 | if current == 0: 96 | begin_time = time.time() # Reset for new bar. 97 | 98 | cur_len = int(TOTAL_BAR_LENGTH*current/total) 99 | rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1 100 | 101 | sys.stdout.write(' [') 102 | for i in range(cur_len): 103 | sys.stdout.write('=') 104 | sys.stdout.write('>') 105 | for i in range(rest_len): 106 | sys.stdout.write('.') 107 | sys.stdout.write(']') 108 | 109 | cur_time = time.time() 110 | step_time = cur_time - last_time 111 | last_time = cur_time 112 | tot_time = cur_time - begin_time 113 | 114 | L = [] 115 | L.append(' Step: %s' % format_time(step_time)) 116 | L.append(' | Tot: %s' % format_time(tot_time)) 117 | if msg: 118 | L.append(' | ' + msg) 119 | 120 | msg = ''.join(L) 121 | sys.stdout.write(msg) 122 | for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3): 123 | sys.stdout.write(' ') 124 | 125 | # Go back to the center of the bar. 
126 | for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2): 127 | sys.stdout.write('\b') 128 | sys.stdout.write(' %d/%d ' % (current+1, total)) 129 | 130 | if current < total-1: 131 | sys.stdout.write('\r') 132 | else: 133 | sys.stdout.write('\n') 134 | sys.stdout.flush() 135 | 136 | def format_time(seconds): 137 | days = int(seconds / 3600/24) 138 | seconds = seconds - days*3600*24 139 | hours = int(seconds / 3600) 140 | seconds = seconds - hours*3600 141 | minutes = int(seconds / 60) 142 | seconds = seconds - minutes*60 143 | secondsf = int(seconds) 144 | seconds = seconds - secondsf 145 | millis = int(seconds*1000) 146 | 147 | f = '' 148 | i = 1 149 | if days > 0: 150 | f += str(days) + 'D' 151 | i += 1 152 | if hours > 0 and i <= 2: 153 | f += str(hours) + 'h' 154 | i += 1 155 | if minutes > 0 and i <= 2: 156 | f += str(minutes) + 'm' 157 | i += 1 158 | if secondsf > 0 and i <= 2: 159 | f += str(secondsf) + 's' 160 | i += 1 161 | if millis > 0 and i <= 2: 162 | f += str(millis) + 'ms' 163 | i += 1 164 | if f == '': 165 | f = '0ms' 166 | return f 167 | -------------------------------------------------------------------------------- /framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Zoesgithub/FedReg/34b081f0925cca15a32372f640fca4d125e14bf4/framework.png -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import argparse 3 | import importlib 4 | import random 5 | import torch 6 | from FedUtils.models.utils import read_data, CusDataset, ImageDataset 7 | from torch.utils.data import DataLoader 8 | from loguru import logger 9 | from functools import partial 10 | import os 11 | torch.backends.cudnn.deterministic = True 12 | 13 | 14 | def allocate_memory(): 15 | total, used = os.popen( 16 | '"nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader' 17 | ).read().split('\n')[0].split(',') 18 | total = int(total) 19 | total = int(total * 0.7) 20 | n = torch.cuda.device_count() 21 | for _ in range(n): 22 | x = torch.rand((256, 1024, total)).cuda(_) 23 | del x 24 | 25 | 26 | def main(): 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument("-c", "--config", help="The config file") 29 | args = parser.parse_args() 30 | config = importlib.import_module(args.config.replace("/", ".")) 31 | config = config.config 32 | logger.add(config["log_path"]) 33 | 34 | random.seed(1+config["seed"]) 35 | np.random.seed(12+config["seed"]) 36 | torch.manual_seed(123+config["seed"]) 37 | torch.cuda.manual_seed(123+config["seed"]) 38 | 39 | Model = config["model"] 40 | inner_opt = config["inner_opt"] 41 | if "landmarks" in config["train_path"]: # load landmark data 42 | assert "image_path" in config 43 | Dataset = partial(ImageDataset, image_path=config["image_path"]) 44 | clients, groups, train_data, eval_data = read_data(config["train_path"], config["test_path"]) 45 | else: # load other data 46 | clients, groups, train_data, eval_data = read_data(config["train_path"], config["test_path"]) 47 | Dataset = CusDataset 48 | 49 | if config["use_fed"]: 50 | Optimizer = config["optimizer"] 51 | t = Optimizer(config, Model, [clients, groups, train_data, eval_data], train_transform=config["train_transform"], 52 | test_transform=config['test_transform'], traincusdataset=Dataset, evalcusdataset=Dataset) 53 | t.train() 54 | else: 55 | train_data_total = {"x": [], "y": []} 
56 | eval_data_total = {"x": [], "y": []} 57 | for t in train_data: 58 | train_data_total["x"].extend(train_data[t]["x"]) 59 | train_data_total["y"].extend(train_data[t]["y"]) 60 | for t in eval_data: 61 | eval_data_total["x"].extend(eval_data[t]["x"]) 62 | eval_data_total["y"].extend(eval_data[t]["y"]) 63 | train_data_size = len(train_data_total["x"]) 64 | eval_data_size = len(eval_data_total["x"]) 65 | train_data_total_fortest = DataLoader(Dataset(train_data_total, config["test_transform"]), batch_size=config["batch_size"], shuffle=False,) 66 | train_data_total = DataLoader(Dataset(train_data_total, config["train_transform"]), batch_size=config["batch_size"], shuffle=True, ) 67 | eval_data_total = DataLoader(Dataset(eval_data_total, config["test_transform"]), batch_size=config["batch_size"], shuffle=False,) 68 | model = Model(*config["model_param"], optimizer=inner_opt) 69 | for r in range(config["num_rounds"]): 70 | model.solve_inner(train_data_total) 71 | stats = model.test(eval_data_total) 72 | train_stats = model.test(train_data_total_fortest) 73 | logger.info("-- Log At Round {} --".format(r)) 74 | logger.info("-- TEST RESULTS --") 75 | logger.info("Accuracy: {}".format(stats[0]*1.0/eval_data_size)) 76 | logger.info("-- TRAIN RESULTS --") 77 | logger.info( 78 | "Accuracy: {} Loss: {}".format(train_stats[0]/train_data_size, train_stats[1]/train_data_size)) 79 | 80 | 81 | if __name__ == "__main__": 82 | main() 83 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | vision-transformer-pytorch 2 | torchsummary 3 | loguru 4 | thop 5 | Pillow 6 | h5py -------------------------------------------------------------------------------- /tasks/CT/FedAvg_cnn_e20_lr5e4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | 8 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 9 | transform_train = transforms.Compose([ 10 | # transforms.Grayscale(), 11 | transforms.Resize(200), 12 | transforms.RandomCrop((128,)), 13 | transforms.RandomHorizontalFlip(), 14 | transforms.ToTensor(), 15 | normalize 16 | ]) 17 | 18 | # Normalize test set same as training set without augmentation 19 | transform_test = transforms.Compose([ 20 | # transforms.Grayscale(), 21 | transforms.Resize(128), 22 | transforms.CenterCrop(128), 23 | transforms.ToTensor(), 24 | normalize 25 | ]) 26 | config = { 27 | 28 | "seed": 1, 29 | "model": partial(Model, learning_rate=5e-4), 30 | "inner_opt": None, 31 | "optimizer": FedAvg, 32 | "model_param": (2,), 33 | "inp_size": (3*244*244,), 34 | "train_path": "data/COVID-CT/train/", 35 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 36 | "clients_per_round": 10, 37 | "num_rounds": 10, 38 | "eval_every": 1, 39 | "drop_percent": 0.0, 40 | "num_epochs": 20, 41 | "batch_size": 10, 42 | "use_fed": 1, 43 | "log_path": "tasks/CT/FedAvg_cnn_e20_lr5e4/train.log", 44 | "train_transform": transform_train, 45 | "test_transform": transform_test, 46 | "eval_train": True, 47, 48 | } 49 | -------------------------------------------------------------------------------- /tasks/CT/FedAvg_cnn_e20_lr5e4/train.log: 
-------------------------------------------------------------------------------- 1 | 2021-12-30 03:46:44.022 | INFO | FedUtils.fed.fedavg:train:10 - Train with 10 workers... 2 | 2021-12-30 03:46:44.024 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 0 -- 3 | 2021-12-30 03:56:07.653 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 4 | 2021-12-30 03:56:07.667 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.43478262424468994 5 | 2021-12-30 03:56:07.667 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4984423518180847 6 | 2021-12-30 03:56:07.667 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 7 | 2021-12-30 03:56:07.669 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.44063326716423035 Loss: 0.7099535465240479 8 | 2021-12-30 04:23:03.122 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 1 -- 9 | 2021-12-30 04:32:27.327 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 10 | 2021-12-30 04:32:27.343 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 11 | 2021-12-30 04:32:27.345 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 12 | 2021-12-30 04:32:27.346 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 13 | 2021-12-30 04:32:27.348 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 0.6761136054992676 14 | 2021-12-30 05:04:17.801 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 2 -- 15 | 2021-12-30 05:13:45.787 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 16 | 2021-12-30 05:13:45.803 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 17 | 2021-12-30 05:13:45.803 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5327102541923523 18 | 2021-12-30 05:13:45.803 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 19 | 2021-12-30 05:13:45.805 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6517150402069092 Loss: 0.6485335230827332 20 | 2021-12-30 05:46:48.458 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 3 -- 21 | 2021-12-30 05:56:14.980 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 22 | 2021-12-30 05:56:14.982 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 23 | 2021-12-30 05:56:14.984 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5358255505561829 24 | 2021-12-30 05:56:14.985 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 25 | 2021-12-30 05:56:14.987 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6781002879142761 Loss: 0.6442999243736267 26 | 2021-12-30 06:21:46.445 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 4 -- 27 | 2021-12-30 06:31:14.692 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 28 | 2021-12-30 06:31:14.694 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 29 | 2021-12-30 06:31:14.696 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5545170903205872 30 | 2021-12-30 06:31:14.697 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 31 | 2021-12-30 06:31:14.699 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6754617691040039 Loss: 0.6380926370620728 32 | 2021-12-30 07:00:17.058 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 5 -- 33 | 2021-12-30 07:09:46.956 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 34 | 2021-12-30 07:09:46.958 | INFO | 
FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 35 | 2021-12-30 07:09:46.960 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5202491879463196 36 | 2021-12-30 07:09:46.961 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 37 | 2021-12-30 07:09:46.963 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6543536186218262 Loss: 0.6447949409484863 38 | 2021-12-30 07:30:25.482 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 6 -- 39 | 2021-12-30 07:39:58.105 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 40 | 2021-12-30 07:39:58.119 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.695652186870575 41 | 2021-12-30 07:39:58.121 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5576323866844177 42 | 2021-12-30 07:39:58.130 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 43 | 2021-12-30 07:39:58.132 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6358839273452759 Loss: 0.6256346702575684 44 | 2021-12-30 08:04:10.818 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 7 -- 45 | 2021-12-30 08:13:46.268 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 46 | 2021-12-30 08:13:46.270 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6739130616188049 47 | 2021-12-30 08:13:46.273 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5732086896896362 48 | 2021-12-30 08:13:46.274 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 49 | 2021-12-30 08:13:46.276 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6912928819656372 Loss: 0.6483279466629028 50 | 2021-12-30 08:42:31.713 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 8 -- 51 | 2021-12-30 08:51:57.277 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 52 | 2021-12-30 08:51:57.289 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 53 | 2021-12-30 08:51:57.291 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5514018535614014 54 | 2021-12-30 08:51:57.292 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 55 | 2021-12-30 08:51:57.294 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.625329852104187 Loss: 0.6655440330505371 56 | 2021-12-30 09:21:14.352 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 9 -- 57 | 2021-12-30 09:30:46.661 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 58 | 2021-12-30 09:30:46.662 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6739130616188049 59 | 2021-12-30 09:30:46.663 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5389407873153687 60 | 2021-12-30 09:30:46.663 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 61 | 2021-12-30 09:30:46.664 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6701847314834595 Loss: 0.6838037967681885 62 | 2021-12-30 10:10:32.317 | INFO | FedUtils.fed.fedavg:train:46 - -- Log At Round 9 -- 63 | 2021-12-30 10:20:03.219 | INFO | FedUtils.fed.fedavg:train:53 - -- TEST RESULTS -- 64 | 2021-12-30 10:20:03.231 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.695652186870575 65 | 2021-12-30 10:20:03.232 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5576323866844177 66 | 2021-12-30 10:20:03.234 | INFO | FedUtils.fed.fedavg:train:55 - -- TRAIN RESULTS -- 67 | 2021-12-30 10:20:03.236 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6992084980010986 Loss: 0.6568948030471802 68 | 
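The per-round accuracy values that decode_stat writes into these train.log files can be collected for plotting with a few lines of Python; a minimal sketch (the helper and its regex are illustrative and not part of the repository):
~~~
import re

# Matches loguru lines such as
# "... FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.4347..."
PATTERN = re.compile(r"decode_stat:\d+ - (?:Test_\d+ )?Accuracy: ([0-9.]+)")

def read_accuracies(log_path):
    """Return all accuracy values logged in one train.log, in order."""
    accs = []
    with open(log_path) as f:
        for line in f:
            m = PATTERN.search(line)
            if m:
                accs.append(float(m.group(1)))
    return accs

print(read_accuracies("tasks/CT/FedAvg_cnn_e20_lr5e4/train.log"))
~~~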
-------------------------------------------------------------------------------- /tasks/CT/FedAvg_e20_lr1e3/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(DenseNetModel, learning_rate=1e-3), 28 | "inner_opt": None, 29 | "optimizer": FedAvg, 30 | "model_param": (2,), 31 | "inp_size": (3*244*244,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 20, 39 | "batch_size": 10, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/FedAvg_e20_lr1e3/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | 46 | 47 | } 48 | -------------------------------------------------------------------------------- /tasks/CT/FedAvg_e20_lr1e3/train.log: -------------------------------------------------------------------------------- 1 | 2021-12-27 18:38:59.516 | INFO | FedUtils.fed.fedavg:train:10 - Train with 10 workers... 
2 | 2021-12-27 18:38:59.517 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 0 -- 3 | 2021-12-27 18:41:35.831 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 4 | 2021-12-27 18:41:35.836 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 5 | 2021-12-27 18:41:35.838 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 6 | 2021-12-27 18:41:35.839 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 7 | 2021-12-27 18:41:35.843 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5039578080177307 Loss: 0.697583794593811 8 | 2021-12-27 18:50:58.526 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 1 -- 9 | 2021-12-27 18:54:13.857 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 10 | 2021-12-27 18:54:13.858 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 11 | 2021-12-27 18:54:13.860 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 12 | 2021-12-27 18:54:13.861 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 13 | 2021-12-27 18:54:13.863 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 2.3471977710723877 14 | 2021-12-27 19:05:58.872 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 2 -- 15 | 2021-12-27 19:09:00.094 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 16 | 2021-12-27 19:09:00.098 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 17 | 2021-12-27 19:09:00.100 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 18 | 2021-12-27 19:09:00.101 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 19 | 2021-12-27 19:09:00.105 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 4.014869689941406 20 | 2021-12-27 19:22:50.547 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 3 -- 21 | 2021-12-27 19:26:05.356 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 22 | 2021-12-27 19:26:05.364 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 23 | 2021-12-27 19:26:05.366 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5981308221817017 24 | 2021-12-27 19:26:05.368 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 25 | 2021-12-27 19:26:05.371 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7229551672935486 Loss: 0.6717199683189392 26 | 2021-12-27 19:34:06.481 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 4 -- 27 | 2021-12-27 19:37:07.844 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 28 | 2021-12-27 19:37:07.847 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.695652186870575 29 | 2021-12-27 19:37:07.850 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5295950174331665 30 | 2021-12-27 19:37:07.851 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 31 | 2021-12-27 19:37:07.852 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6992084980010986 Loss: 0.9565095901489258 32 | 2021-12-27 19:47:09.226 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 5 -- 33 | 2021-12-27 19:50:04.792 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 34 | 2021-12-27 19:50:04.795 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 35 | 2021-12-27 19:50:04.797 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 36 | 2021-12-27 
19:50:04.797 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 37 | 2021-12-27 19:50:04.800 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5646438002586365 Loss: 1.9078631401062012 38 | 2021-12-27 19:57:37.578 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 6 -- 39 | 2021-12-27 20:00:10.472 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 40 | 2021-12-27 20:00:10.484 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.739130437374115 41 | 2021-12-27 20:00:10.487 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5638629198074341 42 | 2021-12-27 20:00:10.487 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 43 | 2021-12-27 20:00:10.489 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6833773255348206 Loss: 0.8366662263870239 44 | 2021-12-27 20:08:18.943 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 7 -- 45 | 2021-12-27 20:11:16.724 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 46 | 2021-12-27 20:11:16.732 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 47 | 2021-12-27 20:11:16.734 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5327102541923523 48 | 2021-12-27 20:11:16.734 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 49 | 2021-12-27 20:11:16.736 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.688654363155365 Loss: 1.0429166555404663 50 | 2021-12-27 20:22:47.039 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 8 -- 51 | 2021-12-27 20:25:52.897 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 52 | 2021-12-27 20:25:52.899 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.804347813129425 53 | 2021-12-27 20:25:52.902 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5887850522994995 54 | 2021-12-27 20:25:52.902 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 55 | 2021-12-27 20:25:52.905 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.688654363155365 Loss: 0.8406270146369934 56 | 2021-12-27 20:36:52.558 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 9 -- 57 | 2021-12-27 20:40:07.743 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 58 | 2021-12-27 20:40:07.746 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.739130437374115 59 | 2021-12-27 20:40:07.749 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5264797210693359 60 | 2021-12-27 20:40:07.749 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 61 | 2021-12-27 20:40:07.752 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6701847314834595 Loss: 1.0794126987457275 62 | 2021-12-27 20:57:27.138 | INFO | FedUtils.fed.fedavg:train:46 - -- Log At Round 9 -- 63 | 2021-12-27 21:00:38.768 | INFO | FedUtils.fed.fedavg:train:53 - -- TEST RESULTS -- 64 | 2021-12-27 21:00:38.772 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.8695652484893799 65 | 2021-12-27 21:00:38.774 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6137071251869202 66 | 2021-12-27 21:00:38.775 | INFO | FedUtils.fed.fedavg:train:55 - -- TRAIN RESULTS -- 67 | 2021-12-27 21:00:38.779 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7467018961906433 Loss: 0.7009042501449585 68 | -------------------------------------------------------------------------------- /tasks/CT/FedCurv_cnn_e20_lr1e3_g3/config.py: -------------------------------------------------------------------------------- 1 | 
from FedUtils.models.CT.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms, utils 6 | 7 | 8 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 9 | transform_train = transforms.Compose([ 10 | # transforms.Grayscale(), 11 | transforms.Resize(200), 12 | transforms.RandomCrop((128,)), 13 | transforms.RandomHorizontalFlip(), 14 | transforms.ToTensor(), 15 | normalize 16 | ]) 17 | 18 | # Normalize test set same as training set without augmentation 19 | transform_test = transforms.Compose([ 20 | # transforms.Grayscale(), 21 | transforms.Resize(128), 22 | transforms.CenterCrop(128), 23 | transforms.ToTensor(), 24 | normalize 25 | ]) 26 | config = { 27 | 28 | "seed": 1, 29 | "model": partial(Model, learning_rate=1e-3), 30 | "inner_opt": None, 31 | "optimizer": FedCurv, 32 | "model_param": (2,), 33 | "inp_size": (3*244*244,), 34 | "train_path": "data/COVID-CT/train/", 35 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 36 | "clients_per_round": 10, 37 | "num_rounds": 10, 38 | "eval_every": 1, 39 | "drop_percent": 0.0, 40 | "num_epochs": 20, 41 | "batch_size": 10, 42 | "use_fed": 1, 43 | "log_path": "tasks/CT/FedCurv_cnn_e20_lr1e3_g3/train.log", 44 | "train_transform": transform_train, 45 | "test_transform": transform_test, 46 | "eval_train": True, 47 | "gamma": 0.001 48 | 49 | 50 | } 51 | -------------------------------------------------------------------------------- /tasks/CT/FedCurv_cnn_e20_lr1e3_g3/train.log: -------------------------------------------------------------------------------- 1 | 2022-01-02 10:11:19.024 | INFO | FedUtils.fed.fedcurv:train:36 - Train with 10 workers... 2 | 2022-01-02 10:11:19.029 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 0 -- 3 | 2022-01-02 10:14:28.033 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 4 | 2022-01-02 10:14:28.034 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.43478262424468994 5 | 2022-01-02 10:14:28.034 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.49532708525657654 6 | 2022-01-02 10:14:28.035 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 7 | 2022-01-02 10:14:28.035 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.4511873424053192 Loss: 0.7145864963531494 8 | 2022-01-02 10:25:18.051 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 1 -- 9 | 2022-01-02 10:28:34.456 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 10 | 2022-01-02 10:28:34.458 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.3913043439388275 11 | 2022-01-02 10:28:34.458 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5732086896896362 12 | 2022-01-02 10:28:34.459 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 13 | 2022-01-02 10:28:34.460 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5092348456382751 Loss: 0.6931116580963135 14 | 2022-01-02 10:41:21.192 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 2 -- 15 | 2022-01-02 10:44:37.842 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 16 | 2022-01-02 10:44:37.845 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 17 | 2022-01-02 10:44:37.846 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 18 | 2022-01-02 10:44:37.847 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 19 | 2022-01-02 10:44:37.848 | INFO | 
FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 0.681006133556366 20 | 2022-01-02 10:58:04.743 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 3 -- 21 | 2022-01-02 11:01:13.945 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 22 | 2022-01-02 11:01:13.946 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.54347825050354 23 | 2022-01-02 11:01:13.946 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5825545191764832 24 | 2022-01-02 11:01:13.946 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 25 | 2022-01-02 11:01:13.947 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6807388067245483 Loss: 0.6759541034698486 26 | 2022-01-02 11:10:52.691 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 4 -- 27 | 2022-01-02 11:13:50.826 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 28 | 2022-01-02 11:13:50.828 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6739130616188049 29 | 2022-01-02 11:13:50.828 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5825545191764832 30 | 2022-01-02 11:13:50.829 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 31 | 2022-01-02 11:13:50.830 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6200528144836426 Loss: 0.6677777171134949 32 | 2022-01-02 11:25:19.909 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 5 -- 33 | 2022-01-02 11:28:22.113 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 34 | 2022-01-02 11:28:22.114 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 35 | 2022-01-02 11:28:22.115 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5171339511871338 36 | 2022-01-02 11:28:22.115 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 37 | 2022-01-02 11:28:22.116 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6675462126731873 Loss: 0.6447362303733826 38 | 2022-01-02 11:36:01.594 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 6 -- 39 | 2022-01-02 11:38:57.216 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 40 | 2022-01-02 11:38:57.218 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5869565606117249 41 | 2022-01-02 11:38:57.218 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5264797210693359 42 | 2022-01-02 11:38:57.219 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 43 | 2022-01-02 11:38:57.220 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.664907693862915 Loss: 0.6402248740196228 44 | 2022-01-02 11:49:04.724 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 7 -- 45 | 2022-01-02 11:52:10.969 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 46 | 2022-01-02 11:52:10.970 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5869565606117249 47 | 2022-01-02 11:52:10.971 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5794392228126526 48 | 2022-01-02 11:52:10.971 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 49 | 2022-01-02 11:52:10.972 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6543536186218262 Loss: 0.6290531158447266 50 | 2022-01-02 12:03:18.152 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 8 -- 51 | 2022-01-02 12:06:27.297 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 52 | 2022-01-02 12:06:27.298 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 53 | 2022-01-02 
12:06:27.299 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6168224215507507 54 | 2022-01-02 12:06:27.299 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 55 | 2022-01-02 12:06:27.300 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.664907693862915 Loss: 0.6257737278938293 56 | 2022-01-02 12:17:57.078 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 9 -- 57 | 2022-01-02 12:22:01.966 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 58 | 2022-01-02 12:22:01.967 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 59 | 2022-01-02 12:22:01.967 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5794392228126526 60 | 2022-01-02 12:22:01.967 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 61 | 2022-01-02 12:22:01.968 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6754617691040039 Loss: 0.6313847899436951 62 | 2022-01-02 12:36:55.959 | INFO | FedUtils.fed.fedcurv:train:108 - -- Log At Round 9 -- 63 | 2022-01-02 12:40:08.688 | INFO | FedUtils.fed.fedcurv:train:114 - -- TEST RESULTS -- 64 | 2022-01-02 12:40:08.689 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 65 | 2022-01-02 12:40:08.689 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6199376583099365 66 | 2022-01-02 12:40:08.690 | INFO | FedUtils.fed.fedcurv:train:116 - -- TRAIN RESULTS -- 67 | 2022-01-02 12:40:08.691 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6860158443450928 Loss: 0.6177168488502502 68 | -------------------------------------------------------------------------------- /tasks/CT/FedCurv_e20_lr1e3_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(DenseNetModel, learning_rate=1e-3), 28 | "inner_opt": None, 29 | "optimizer": FedCurv, 30 | "model_param": (2,), 31 | "inp_size": (3*244*244,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 20, 39 | "batch_size": 10, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/FedCurv_e20_lr1e3_g4/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | "gamma": 1e-4 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/CT/FedCurv_e20_lr1e3_g4/train.log: -------------------------------------------------------------------------------- 1 | 2021-12-29 07:52:26.687 | INFO | FedUtils.fed.fedcurv:train:36 - Train with 10 workers... 
2 | 2021-12-29 07:52:26.703 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 0 -- 3 | 2021-12-29 07:56:53.614 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 4 | 2021-12-29 07:56:53.616 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 5 | 2021-12-29 07:56:53.616 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 6 | 2021-12-29 07:56:53.617 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 7 | 2021-12-29 07:56:53.618 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5039578080177307 Loss: 0.697583794593811 8 | 2021-12-29 08:08:23.091 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 1 -- 9 | 2021-12-29 08:13:14.922 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 10 | 2021-12-29 08:13:14.929 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.52173912525177 11 | 2021-12-29 08:13:14.930 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5763239860534668 12 | 2021-12-29 08:13:14.931 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 13 | 2021-12-29 08:13:14.932 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6596306562423706 Loss: 0.625461995601654 14 | 2021-12-29 08:26:42.424 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 2 -- 15 | 2021-12-29 08:29:50.077 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 16 | 2021-12-29 08:29:50.089 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 17 | 2021-12-29 08:29:50.089 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5389407873153687 18 | 2021-12-29 08:29:50.089 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 19 | 2021-12-29 08:29:50.091 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6569921374320984 Loss: 0.8453195691108704 20 | 2021-12-29 08:43:21.818 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 3 -- 21 | 2021-12-29 08:46:28.616 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 22 | 2021-12-29 08:46:28.618 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 23 | 2021-12-29 08:46:28.619 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 24 | 2021-12-29 08:46:28.619 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 25 | 2021-12-29 08:46:28.621 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 1.2129160165786743 26 | 2021-12-29 08:57:19.900 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 4 -- 27 | 2021-12-29 09:00:35.349 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 28 | 2021-12-29 09:00:35.353 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 29 | 2021-12-29 09:00:35.354 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5109034180641174 30 | 2021-12-29 09:00:35.355 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 31 | 2021-12-29 09:00:35.357 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6068601608276367 Loss: 1.5219067335128784 32 | 2021-12-29 09:12:44.714 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 5 -- 33 | 2021-12-29 09:16:07.735 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 34 | 2021-12-29 09:16:07.738 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6739130616188049 35 | 2021-12-29 09:16:07.740 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.545171320438385 36 | 
2021-12-29 09:16:07.741 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 37 | 2021-12-29 09:16:07.743 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6464380025863647 Loss: 1.01143479347229 38 | 2021-12-29 09:24:57.265 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 6 -- 39 | 2021-12-29 09:28:27.848 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 40 | 2021-12-29 09:28:27.849 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.717391312122345 41 | 2021-12-29 09:28:27.850 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5545170903205872 42 | 2021-12-29 09:28:27.850 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 43 | 2021-12-29 09:28:27.851 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.649076521396637 Loss: 1.039368987083435 44 | 2021-12-29 09:38:42.707 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 7 -- 45 | 2021-12-29 09:42:01.800 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 46 | 2021-12-29 09:42:01.801 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.695652186870575 47 | 2021-12-29 09:42:01.802 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5202491879463196 48 | 2021-12-29 09:42:01.802 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 49 | 2021-12-29 09:42:01.804 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6992084980010986 Loss: 0.9628093242645264 50 | 2021-12-29 09:54:02.306 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 8 -- 51 | 2021-12-29 09:57:24.929 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 52 | 2021-12-29 09:57:24.932 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.739130437374115 53 | 2021-12-29 09:57:24.933 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5950155854225159 54 | 2021-12-29 09:57:24.934 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 55 | 2021-12-29 09:57:24.936 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6464380025863647 Loss: 1.0776187181472778 56 | 2021-12-29 10:10:10.850 | INFO | FedUtils.fed.fedcurv:train:41 - -- Log At Round 9 -- 57 | 2021-12-29 10:13:36.037 | INFO | FedUtils.fed.fedcurv:train:47 - -- TEST RESULTS -- 58 | 2021-12-29 10:13:36.039 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 59 | 2021-12-29 10:13:36.040 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5109034180641174 60 | 2021-12-29 10:13:36.041 | INFO | FedUtils.fed.fedcurv:train:49 - -- TRAIN RESULTS -- 61 | 2021-12-29 10:13:36.043 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6279683709144592 Loss: 1.3198076486587524 62 | 2021-12-29 10:31:42.366 | INFO | FedUtils.fed.fedcurv:train:108 - -- Log At Round 9 -- 63 | 2021-12-29 10:36:36.332 | INFO | FedUtils.fed.fedcurv:train:114 - -- TEST RESULTS -- 64 | 2021-12-29 10:36:36.359 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.8260869979858398 65 | 2021-12-29 10:36:36.359 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6137071251869202 66 | 2021-12-29 10:36:36.360 | INFO | FedUtils.fed.fedcurv:train:116 - -- TRAIN RESULTS -- 67 | 2021-12-29 10:36:36.361 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7255936861038208 Loss: 0.7254661321640015 68 | -------------------------------------------------------------------------------- /tasks/CT/FedProx_cnn_e20_lr5e4_g1/config.py: 
-------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms, utils 6 | 7 | 8 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 9 | transform_train = transforms.Compose([ 10 | # transforms.Grayscale(), 11 | transforms.Resize(200), 12 | transforms.RandomCrop((128,)), 13 | transforms.RandomHorizontalFlip(), 14 | transforms.ToTensor(), 15 | normalize 16 | ]) 17 | 18 | # Normalize test set same as training set without augmentation 19 | transform_test = transforms.Compose([ 20 | # transforms.Grayscale(), 21 | transforms.Resize(128), 22 | transforms.CenterCrop(128), 23 | transforms.ToTensor(), 24 | normalize 25 | ]) 26 | config = { 27 | 28 | "seed": 1, 29 | "model": partial(Model, learning_rate=5e-4), 30 | "inner_opt": None, 31 | "optimizer": FedProx, 32 | "model_param": (2,), 33 | "inp_size": (3*128*128,), 34 | "train_path": "data/COVID-CT/train/", 35 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 36 | "clients_per_round": 10, 37 | "num_rounds": 10, 38 | "eval_every": 1, 39 | "drop_percent": 0.0, 40 | "num_epochs": 20, 41 | "batch_size": 10, 42 | "use_fed": 1, 43 | "log_path": "tasks/CT/FedProx_cnn_e20_lr5e4_g1/train.log", 44 | "train_transform": transform_train, 45 | "test_transform": transform_test, 46 | "eval_train": True, 47 | "gamma": 0.1 48 | # "add_mask":True 49 | 50 | 51 | 52 | } 53 | -------------------------------------------------------------------------------- /tasks/CT/FedProx_cnn_e20_lr5e4_g1/train.log: -------------------------------------------------------------------------------- 1 | 2022-01-03 15:46:14.575 | INFO | FedUtils.fed.fedprox:train:17 - Train with 10 workers...
2 | 2022-01-03 15:46:14.577 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 0 -- 3 | 2022-01-03 15:49:26.800 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 4 | 2022-01-03 15:49:26.802 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.43478262424468994 5 | 2022-01-03 15:49:26.802 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.49532708525657654 6 | 2022-01-03 15:49:26.802 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 7 | 2022-01-03 15:49:26.803 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.4511873424053192 Loss: 0.7145864963531494 8 | 2022-01-03 15:59:29.049 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 1 -- 9 | 2022-01-03 16:02:50.843 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 10 | 2022-01-03 16:02:50.845 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 11 | 2022-01-03 16:02:50.845 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 12 | 2022-01-03 16:02:50.845 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 13 | 2022-01-03 16:02:50.846 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 0.6783795952796936 14 | 2022-01-03 16:15:42.255 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 2 -- 15 | 2022-01-03 16:19:17.923 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 16 | 2022-01-03 16:19:17.924 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 17 | 2022-01-03 16:19:17.924 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5171339511871338 18 | 2022-01-03 16:19:17.925 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 19 | 2022-01-03 16:19:17.925 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5910290479660034 Loss: 0.6514546275138855 20 | 2022-01-03 16:32:46.804 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 3 -- 21 | 2022-01-03 16:36:22.781 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 22 | 2022-01-03 16:36:22.782 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5869565606117249 23 | 2022-01-03 16:36:22.782 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5638629198074341 24 | 2022-01-03 16:36:22.782 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 25 | 2022-01-03 16:36:22.783 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6622691750526428 Loss: 0.6398707628250122 26 | 2022-01-03 16:47:41.291 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 4 -- 27 | 2022-01-03 16:52:17.977 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 28 | 2022-01-03 16:52:17.978 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 29 | 2022-01-03 16:52:17.979 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5327102541923523 30 | 2022-01-03 16:52:17.979 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 31 | 2022-01-03 16:52:17.980 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6279683709144592 Loss: 0.6366153955459595 32 | 2022-01-03 17:03:41.443 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 5 -- 33 | 2022-01-03 17:07:10.933 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 34 | 2022-01-03 17:07:10.934 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 35 | 2022-01-03 17:07:10.934 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5233644843101501 36 
| 2022-01-03 17:07:10.934 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 37 | 2022-01-03 17:07:10.935 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5963060855865479 Loss: 0.649633526802063 38 | 2022-01-03 17:14:34.358 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 6 -- 39 | 2022-01-03 17:17:53.525 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 40 | 2022-01-03 17:17:53.526 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.739130437374115 41 | 2022-01-03 17:17:53.527 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6417445540428162 42 | 2022-01-03 17:17:53.527 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 43 | 2022-01-03 17:17:53.528 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7018470168113708 Loss: 0.5901943445205688 44 | 2022-01-03 17:26:30.068 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 7 -- 45 | 2022-01-03 17:29:42.332 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 46 | 2022-01-03 17:29:42.333 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5869565606117249 47 | 2022-01-03 17:29:42.334 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5887850522994995 48 | 2022-01-03 17:29:42.334 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 49 | 2022-01-03 17:29:42.335 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6754617691040039 Loss: 0.6139389276504517 50 | 2022-01-03 17:40:04.611 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 8 -- 51 | 2022-01-03 17:43:17.935 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 52 | 2022-01-03 17:43:17.936 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.695652186870575 53 | 2022-01-03 17:43:17.936 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.585669755935669 54 | 2022-01-03 17:43:17.936 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 55 | 2022-01-03 17:43:17.937 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6992084980010986 Loss: 0.6135647296905518 56 | 2022-01-03 17:53:55.509 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 9 -- 57 | 2022-01-03 17:56:49.632 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 58 | 2022-01-03 17:56:49.633 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6304348111152649 59 | 2022-01-03 17:56:49.634 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5358255505561829 60 | 2022-01-03 17:56:49.634 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 61 | 2022-01-03 17:56:49.634 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6860158443450928 Loss: 0.6364127397537231 62 | 2022-01-03 18:10:27.798 | INFO | FedUtils.fed.fedprox:train:53 - -- Log At Round 9 -- 63 | 2022-01-03 18:13:19.934 | INFO | FedUtils.fed.fedprox:train:59 - -- TEST RESULTS -- 64 | 2022-01-03 18:13:19.936 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 65 | 2022-01-03 18:13:19.936 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5514018535614014 66 | 2022-01-03 18:13:19.936 | INFO | FedUtils.fed.fedprox:train:61 - -- TRAIN RESULTS -- 67 | 2022-01-03 18:13:19.937 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6411609649658203 Loss: 0.6558610796928406 68 | -------------------------------------------------------------------------------- /tasks/CT/FedProx_e20_lr1e3_g10/config.py: 
-------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(DenseNetModel, learning_rate=1e-3), 28 | "inner_opt": None, 29 | "optimizer": FedProx, 30 | "model_param": (2,), 31 | "inp_size": (3*224*224,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 20, 39 | "batch_size": 10, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/FedProx_e20_lr1e3_g10/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | "gamma": 1.0 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/CT/FedProx_e20_lr1e3_g10/train.log: -------------------------------------------------------------------------------- 1 | 2021-12-29 11:09:16.214 | INFO | FedUtils.fed.fedprox:train:17 - Train with 10 workers...
2 | 2021-12-29 11:09:16.217 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 0 -- 3 | 2021-12-29 11:13:28.432 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 4 | 2021-12-29 11:13:28.434 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 5 | 2021-12-29 11:13:28.434 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 6 | 2021-12-29 11:13:28.434 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 7 | 2021-12-29 11:13:28.435 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5039578080177307 Loss: 0.697583794593811 8 | 2021-12-29 11:24:40.697 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 1 -- 9 | 2021-12-29 11:28:23.178 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 10 | 2021-12-29 11:28:23.179 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 11 | 2021-12-29 11:28:23.179 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 12 | 2021-12-29 11:28:23.179 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 13 | 2021-12-29 11:28:23.180 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 2.0535974502563477 14 | 2021-12-29 11:40:29.345 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 2 -- 15 | 2021-12-29 11:44:06.835 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 16 | 2021-12-29 11:44:06.845 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 17 | 2021-12-29 11:44:06.846 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 18 | 2021-12-29 11:44:06.848 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 19 | 2021-12-29 11:44:06.850 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 4.154041767120361 20 | 2021-12-29 11:56:56.980 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 3 -- 21 | 2021-12-29 12:00:40.183 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 22 | 2021-12-29 12:00:40.188 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 23 | 2021-12-29 12:00:40.189 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 24 | 2021-12-29 12:00:40.189 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 25 | 2021-12-29 12:00:40.190 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5488126873970032 Loss: 1.206191062927246 26 | 2021-12-29 12:10:45.231 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 4 -- 27 | 2021-12-29 12:14:17.099 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 28 | 2021-12-29 12:14:17.100 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.717391312122345 29 | 2021-12-29 12:14:17.101 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5545170903205872 30 | 2021-12-29 12:14:17.101 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 31 | 2021-12-29 12:14:17.102 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7071240544319153 Loss: 0.8829018473625183 32 | 2021-12-29 12:26:22.641 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 5 -- 33 | 2021-12-29 12:29:58.924 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 34 | 2021-12-29 12:29:58.925 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 35 | 2021-12-29 12:29:58.926 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 36 | 
2021-12-29 12:29:58.926 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 37 | 2021-12-29 12:29:58.927 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.559366762638092 Loss: 1.903255820274353 38 | 2021-12-29 12:38:20.734 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 6 -- 39 | 2021-12-29 12:41:48.457 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 40 | 2021-12-29 12:41:48.468 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.739130437374115 41 | 2021-12-29 12:41:48.468 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5700934529304504 42 | 2021-12-29 12:41:48.468 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 43 | 2021-12-29 12:41:48.469 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6939314007759094 Loss: 0.8175726532936096 44 | 2021-12-29 12:52:54.853 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 7 -- 45 | 2021-12-29 12:55:51.768 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 46 | 2021-12-29 12:55:51.776 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6521739363670349 47 | 2021-12-29 12:55:51.776 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5233644843101501 48 | 2021-12-29 12:55:51.776 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 49 | 2021-12-29 12:55:51.778 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6569921374320984 Loss: 1.1785521507263184 50 | 2021-12-29 13:09:12.934 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 8 -- 51 | 2021-12-29 13:12:24.290 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 52 | 2021-12-29 13:12:24.294 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.782608687877655 53 | 2021-12-29 13:12:24.295 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.585669755935669 54 | 2021-12-29 13:12:24.295 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 55 | 2021-12-29 13:12:24.297 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6833773255348206 Loss: 0.8747690320014954 56 | 2021-12-29 13:23:37.820 | INFO | FedUtils.fed.fedprox:train:20 - -- Log At Round 9 -- 57 | 2021-12-29 13:26:53.610 | INFO | FedUtils.fed.fedprox:train:26 - -- TEST RESULTS -- 58 | 2021-12-29 13:26:53.613 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.760869562625885 59 | 2021-12-29 13:26:53.613 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5295950174331665 60 | 2021-12-29 13:26:53.614 | INFO | FedUtils.fed.fedprox:train:28 - -- TRAIN RESULTS -- 61 | 2021-12-29 13:26:53.616 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.6807388067245483 Loss: 0.9919778108596802 62 | 2021-12-29 13:43:07.625 | INFO | FedUtils.fed.fedprox:train:53 - -- Log At Round 9 -- 63 | 2021-12-29 13:46:29.768 | INFO | FedUtils.fed.fedprox:train:59 - -- TEST RESULTS -- 64 | 2021-12-29 13:46:29.770 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.8695652484893799 65 | 2021-12-29 13:46:29.772 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.6199376583099365 66 | 2021-12-29 13:46:29.772 | INFO | FedUtils.fed.fedprox:train:61 - -- TRAIN RESULTS -- 67 | 2021-12-29 13:46:29.774 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.7493404150009155 Loss: 0.6864253282546997 68 | -------------------------------------------------------------------------------- /tasks/CT/FedReg/config.py: 
-------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedreg import FedReg 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(DenseNetModel, learning_rate=5e-3, p_iters=10, ps_eta=1e-6, pt_eta=1e-8), 28 | "inner_opt": None, 29 | "optimizer": FedReg, 30 | "model_param": (2,), 31 | "inp_size": (3*224*224,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 20, 39 | "batch_size": 10, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/FedReg/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | "gamma": 0.5 46 | } 47 | -------------------------------------------------------------------------------- /tasks/CT/FedReg_cnn/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedreg import FedReg 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | # transforms.Grayscale(), 10 | transforms.Resize(200), 11 | transforms.RandomCrop((128,)), 12 | transforms.RandomHorizontalFlip(), 13 | transforms.ToTensor(), 14 | normalize 15 | ]) 16 | 17 | # Normalize test set same as training set without augmentation 18 | transform_test = transforms.Compose([ 19 | # transforms.Grayscale(), 20 | transforms.Resize(128), 21 | transforms.CenterCrop(128), 22 | transforms.ToTensor(), 23 | normalize 24 | ]) 25 | 26 | config = { 27 | 28 | "seed": 1, 29 | "model": partial(Model, learning_rate=5e-4, p_iters=10, ps_eta=1e-6, pt_eta=1e-8), 30 | "inner_opt": None, 31 | "optimizer": FedReg, 32 | "model_param": (2,), 33 | "inp_size": (3*128*128,), 34 | "train_path": "data/COVID-CT/train/", 35 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 36 | "clients_per_round": 10, 37 | "num_rounds": 10, 38 | "eval_every": 1, 39 | "drop_percent": 0.0, 40 | "num_epochs": 20, 41 | "batch_size": 10, 42 | "use_fed": 1, 43 | "log_path": "tasks/CT/FedReg_cnn/train.log", 44 | "train_transform": transform_train, 45 | "test_transform": transform_test, 46 | "eval_train": True, 47 | "gamma": 0.5 48 | } 49 | -------------------------------------------------------------------------------- /tasks/CT/SCAFFOLD_e20_lr5e4/config.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from FedUtils.models.CT.densenet import DenseNetModel 3 | from functools import partial 4 | from FedUtils.fed.scaffold import SCAFFOLD,
Optim 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": DenseNetModel, 28 | "inner_opt": partial(Optim, lr=5e-4, weight_decay=0), 29 | "optimizer": SCAFFOLD, 30 | "model_param": (2,), 31 | "inp_size": (3*224*224,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 20, 39 | "batch_size": 10, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/SCAFFOLD_e20_lr5e4/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | 46 | 47 | } 48 | -------------------------------------------------------------------------------- /tasks/CT/SGD_cnn_lr5e4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.cnn_parallel import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(200), 10 | transforms.RandomResizedCrop((128,),), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(128), 19 | transforms.CenterCrop(128), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(Model, learning_rate=5e-4), 28 | "inner_opt": None, 29 | "optimizer": FedAvg, 30 | "model_param": (2,), 31 | "inp_size": (3*128*128,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 1, 39 | "batch_size": 100000, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/SGD_cnn_lr5e4/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/CT/SGD_lr1e4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.CT.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 8 | transform_train = transforms.Compose([ 9 | transforms.Resize(256), 10 | transforms.RandomResizedCrop((224,), scale=(0.5, 1.0)), 11 | transforms.RandomHorizontalFlip(), 12 | 
transforms.ToTensor(), 13 | normalize 14 | ]) 15 | 16 | # Normalize test set same as training set without augmentation 17 | transform_test = transforms.Compose([ 18 | transforms.Resize(224), 19 | transforms.CenterCrop(224), 20 | transforms.ToTensor(), 21 | normalize 22 | ]) 23 | 24 | config = { 25 | 26 | "seed": 1, 27 | "model": partial(DenseNetModel, learning_rate=1e-4), 28 | "inner_opt": None, 29 | "optimizer": FedAvg, 30 | "model_param": (2,), 31 | "inp_size": (3*224*224,), 32 | "train_path": "data/COVID-CT/train/", 33 | "test_path": ["data/COVID-CT/valid/", "data/COVID-CT/test/"], 34 | "clients_per_round": 10, 35 | "num_rounds": 10, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 1, 39 | "batch_size": 1000000, 40 | "use_fed": 1, 41 | "log_path": "tasks/CT/SGD_lr1e4/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": True, 45 | 46 | 47 | } 48 | -------------------------------------------------------------------------------- /tasks/CT/SGD_lr1e4/train.log: -------------------------------------------------------------------------------- 1 | 2021-12-30 14:14:42.284 | INFO | FedUtils.fed.fedavg:train:10 - Train with 10 workers... 2 | 2021-12-30 14:14:42.287 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 0 -- 3 | 2021-12-30 14:17:53.380 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 4 | 2021-12-30 14:17:53.381 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 5 | 2021-12-30 14:17:53.382 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5077881217002869 6 | 2021-12-30 14:17:53.382 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 7 | 2021-12-30 14:17:53.383 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5039578080177307 Loss: 0.697583794593811 8 | 2021-12-30 14:18:25.786 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 1 -- 9 | 2021-12-30 14:21:43.264 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 10 | 2021-12-30 14:21:43.272 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.3913043439388275 11 | 2021-12-30 14:21:43.272 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4112149477005005 12 | 2021-12-30 14:21:43.273 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 13 | 2021-12-30 14:21:43.274 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.47229552268981934 Loss: 0.7022945880889893 14 | 2021-12-30 14:22:19.569 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 2 -- 15 | 2021-12-30 14:25:33.374 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 16 | 2021-12-30 14:25:33.375 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5652173757553101 17 | 2021-12-30 14:25:33.375 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4672897160053253 18 | 2021-12-30 14:25:33.376 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 19 | 2021-12-30 14:25:33.377 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.49340370297431946 Loss: 0.6963467597961426 20 | 2021-12-30 14:26:08.883 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 3 -- 21 | 2021-12-30 14:29:52.097 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 22 | 2021-12-30 14:29:52.099 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.3913043439388275 23 | 2021-12-30 14:29:52.099 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.426791250705719 24 | 2021-12-30 14:29:52.099 | INFO | 
FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 25 | 2021-12-30 14:29:52.100 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.4802111089229584 Loss: 0.7012420296669006 26 | 2021-12-30 14:30:20.595 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 4 -- 27 | 2021-12-30 14:33:31.239 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 28 | 2021-12-30 14:33:31.240 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5869565606117249 29 | 2021-12-30 14:33:31.240 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.5109034180641174 30 | 2021-12-30 14:33:31.241 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 31 | 2021-12-30 14:33:31.242 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5408971309661865 Loss: 0.6942801475524902 32 | 2021-12-30 14:34:04.359 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 5 -- 33 | 2021-12-30 14:37:17.945 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 34 | 2021-12-30 14:37:17.958 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 35 | 2021-12-30 14:37:17.959 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4984423518180847 36 | 2021-12-30 14:37:17.960 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 37 | 2021-12-30 14:37:17.962 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5303430557250977 Loss: 0.6933442950248718 38 | 2021-12-30 14:37:41.051 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 6 -- 39 | 2021-12-30 14:40:52.491 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 40 | 2021-12-30 14:40:52.504 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.6086956858634949 41 | 2021-12-30 14:40:52.505 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4984423518180847 42 | 2021-12-30 14:40:52.505 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 43 | 2021-12-30 14:40:52.506 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.519788920879364 Loss: 0.6925984621047974 44 | 2021-12-30 14:41:17.475 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 7 -- 45 | 2021-12-30 14:44:32.118 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 46 | 2021-12-30 14:44:32.121 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.54347825050354 47 | 2021-12-30 14:44:32.121 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4548286497592926 48 | 2021-12-30 14:44:32.121 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 49 | 2021-12-30 14:44:32.122 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.4802111089229584 Loss: 0.7060717940330505 50 | 2021-12-30 14:45:02.267 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 8 -- 51 | 2021-12-30 14:48:25.710 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 52 | 2021-12-30 14:48:25.711 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.5 53 | 2021-12-30 14:48:25.712 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.44859811663627625 54 | 2021-12-30 14:48:25.712 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 55 | 2021-12-30 14:48:25.713 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5092348456382751 Loss: 0.6934479475021362 56 | 2021-12-30 14:48:53.531 | INFO | FedUtils.fed.fedavg:train:13 - -- Log At Round 9 -- 57 | 2021-12-30 14:52:26.973 | INFO | FedUtils.fed.fedavg:train:19 - -- TEST RESULTS -- 58 | 2021-12-30 14:52:26.997 | INFO | FedUtils.models.utils:decode_stat:157 
- Test_0 Accuracy: 0.5869565606117249 59 | 2021-12-30 14:52:26.998 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.485981285572052 60 | 2021-12-30 14:52:26.998 | INFO | FedUtils.fed.fedavg:train:21 - -- TRAIN RESULTS -- 61 | 2021-12-30 14:52:26.999 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.5303430557250977 Loss: 0.6904211044311523 62 | 2021-12-30 14:53:10.498 | INFO | FedUtils.fed.fedavg:train:46 - -- Log At Round 9 -- 63 | 2021-12-30 14:56:34.674 | INFO | FedUtils.fed.fedavg:train:53 - -- TEST RESULTS -- 64 | 2021-12-30 14:56:34.676 | INFO | FedUtils.models.utils:decode_stat:157 - Test_0 Accuracy: 0.45652174949645996 65 | 2021-12-30 14:56:34.676 | INFO | FedUtils.models.utils:decode_stat:157 - Test_1 Accuracy: 0.4672897160053253 66 | 2021-12-30 14:56:34.676 | INFO | FedUtils.fed.fedavg:train:55 - -- TRAIN RESULTS -- 67 | 2021-12-30 14:56:34.677 | INFO | FedUtils.models.utils:decode_stat:163 - Accuracy: 0.4696570038795471 Loss: 0.7154242396354675 68 | -------------------------------------------------------------------------------- /tasks/cifar10/FedAvg_e30_lr05/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 29 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", "data/cifar-10-batches-py/data_uni/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 30, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10/FedAvg_e30_lr05/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar10/FedCurv_e30_lr05_g5/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, 
learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedCurv, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 29 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", "data/cifar-10-batches-py/data_uni/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 30, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10/FedCurv_e30_lr05_g5/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 1e-5 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tasks/cifar10/FedProx_e30_lr05_g001/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedProx, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 29 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", "data/cifar-10-batches-py/data_uni/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 30, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10/FedProx_e30_lr05_g001/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.001 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tasks/cifar10/FedReg_e30_lr10/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | from torchvision import transforms 6 | 7 | 8 | transform_train = transforms.Compose([ 9 | transforms.RandomCrop(32, padding=4), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 19 | ]) 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=1e-1, pt_eta=1e-3), 24 | "inner_opt": None, 25 | "optimizer": FedReg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 29 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", 
"data/cifar-10-batches-py/data_uni/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 30, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10/FedReg_e30_lr10/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.5, 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar10/SCAFFOLD_e30_lr01/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | from functools import partial 3 | from FedUtils.fed.scaffold import SCAFFOLD, Optim 4 | from torchvision import transforms, utils 5 | 6 | transform_train = transforms.Compose([ 7 | transforms.RandomCrop(32, padding=4), 8 | transforms.RandomHorizontalFlip(), 9 | transforms.ToTensor(), 10 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 11 | ]) 12 | 13 | # Normalize test set same as training set without augmentation 14 | transform_test = transforms.Compose([ 15 | transforms.ToTensor(), 16 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 17 | ]) 18 | 19 | config = { 20 | 21 | "seed": 1, 22 | "model": Model, 23 | "inner_opt": partial(Optim, lr=1e-2, weight_decay=0.0), 24 | "optimizer": SCAFFOLD, 25 | "model_param": (10,), 26 | "inp_size": (3*32*32,), 27 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 28 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", "data/cifar-10-batches-py/data_uni/test/"], 29 | "clients_per_round": 100, 30 | "num_rounds": 240, 31 | "eval_every": 1, 32 | "drop_percent": 0.0, 33 | "num_epochs": 30, 34 | "batch_size": 5, 35 | "use_fed": 1, 36 | "log_path": "tasks/cifar10/SCAFFOLD_e30_lr01/train.log", 37 | "train_transform": transform_train, 38 | "test_transform": transform_test, 39 | "eval_train": False 40 | 41 | 42 | } 43 | -------------------------------------------------------------------------------- /tasks/cifar10/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=1e-1), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data_uni/train/", 29 | "test_path": ["data/cifar-10-batches-py/data_uni/valid/", "data/cifar-10-batches-py/data_uni/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 1, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10/SGD/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False 
41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar100/FedAvg_e10_lr05/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar100.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (100,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-100-python/data/train/", 29 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 1200, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 10, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar100/FedAvg_e10_lr05/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar100/FedCurv_e10_lr5_g3/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar100.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedCurv, 26 | "model_param": (100,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-100-python/data/train/", 29 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 1200, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 10, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar100/FedCurv_e10_lr5_g3/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 1e-3 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tasks/cifar100/FedProx_e10_lr5_g01/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar100.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from 
FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedProx, 26 | "model_param": (100,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-100-python/data/train/", 29 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 1200, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 10, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar100/FedProx_e10_lr5_g01/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.01 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tasks/cifar100/FedReg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar100.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedreg import FedReg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=1e-1, pt_eta=1e-3), 24 | "inner_opt": None, 25 | "optimizer": FedReg, 26 | "model_param": (100,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-100-python/data/train/", 29 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 1200, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 10, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar100/FedReg/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.25, 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar100/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar100.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 12 | ]) 13 | 14 | 
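# For reference, transforms.Normalize maps each channel c of a [0, 1]-scaled
# tensor x to (x - mean[c]) / std[c]; e.g. a red-channel value of 0.774 becomes
# (0.774 - 0.507) / 0.267 = 1.0 under the CIFAR-100 statistics used here.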
# Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276]) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=1e-1), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (100,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-100-python/data/train/", 29 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 1200, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 1, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar100/SGD/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar100_transformer/FedAvg_e5_lr10/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.transformer.model import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(512), 9 | transforms.RandomCrop(384), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize(mean=.5, std=.5) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(384), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=.5, std=.5) 20 | ]) 21 | 22 | config = { 23 | 24 | "seed": 1, 25 | "model": partial(Model, learning_rate=1e-1), 26 | "inner_opt": None, 27 | "optimizer": FedAvg, 28 | "model_param": (100,), 29 | "inp_size": (3*384*384,), 30 | "train_path": "data/cifar-100-python/data/train/", 31 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 32 | "clients_per_round": 500, 33 | "num_rounds": 100, 34 | "eval_every": 1, 35 | "drop_percent": 0.0, 36 | "num_epochs": 5, 37 | "batch_size": 1, 38 | "use_fed": 1, 39 | "log_path": "tasks/cifar100_transformer/FedAvg_e5_lr10/train.log", 40 | "train_transform": transform_train, 41 | "test_transform": transform_test, 42 | "eval_train": False, 43 | 44 | 45 | } 46 | -------------------------------------------------------------------------------- /tasks/cifar100_transformer/FedCurv_e5_lr10_g5/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.transformer.model import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(512), 9 | transforms.RandomCrop(384), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize(mean=.5, std=.5) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(384), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=.5, std=.5) 20 | ]) 21 | 22 | config = { 23 | 24 | "seed": 1, 25 | "model": partial(Model, learning_rate=1e-1), 26 | "inner_opt": None, 27 | "optimizer": FedCurv, 28 | "model_param": (100,), 29 | "inp_size": 
(3*384*384,), 30 | "train_path": "data/cifar-100-python/data/train/", 31 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 32 | "clients_per_round": 500, 33 | "num_rounds": 100, 34 | "eval_every": 1, 35 | "drop_percent": 0.0, 36 | "num_epochs": 5, 37 | "batch_size": 1, 38 | "use_fed": 1, 39 | "log_path": "tasks/cifar100_transformer/FedCurv_e5_lr10_g5/train.log", 40 | "train_transform": transform_train, 41 | "test_transform": transform_test, 42 | "eval_train": False, 43 | "gamma": 1e-5 44 | 45 | 46 | } 47 | -------------------------------------------------------------------------------- /tasks/cifar100_transformer/FedProx_e5_lr10_g001/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.transformer.model import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(512), 9 | transforms.RandomCrop(384), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize(mean=.5, std=.5) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(384), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=.5, std=.5) 20 | ]) 21 | 22 | config = { 23 | 24 | "seed": 1, 25 | "model": partial(Model, learning_rate=1e-1), 26 | "inner_opt": None, 27 | "optimizer": FedProx, 28 | "model_param": (100,), 29 | "inp_size": (3*384*384,), 30 | "train_path": "data/cifar-100-python/data/train/", 31 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 32 | "clients_per_round": 500, 33 | "num_rounds": 100, 34 | "eval_every": 1, 35 | "drop_percent": 0.0, 36 | "num_epochs": 5, 37 | "batch_size": 1, 38 | "use_fed": 1, 39 | "log_path": "tasks/cifar100_transformer/FedProx_e5_lr10_g001/train.log", 40 | "train_transform": transform_train, 41 | "test_transform": transform_test, 42 | "eval_train": False, 43 | "gamma": 0.001 44 | 45 | 46 | } 47 | -------------------------------------------------------------------------------- /tasks/cifar100_transformer/FedReg_e5_lr10/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.transformer.model import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedreg import FedReg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(512), 9 | transforms.RandomCrop(384), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize(mean=0.5, std=0.5) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(384), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=0.5, std=0.5) 20 | ]) 21 | 22 | 23 | config = { 24 | 25 | "seed": 1, 26 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=1e-2, pt_eta=1e-4), 27 | "inner_opt": None, 28 | "optimizer": FedReg, 29 | "model_param": (100,), 30 | "inp_size": (3*384*384,), 31 | "train_path": "data/cifar-100-python/data/train/", 32 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 33 | "clients_per_round": 500, 34 | "num_rounds": 100, 35 | "eval_every": 1, 36 | "drop_percent": 0.0, 37 | "num_epochs": 5, 38 |
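# p_iters, ps_eta and pt_eta above are FedReg-specific model arguments that
# recur in every FedReg config under tasks/; judging from their names they set
# the number of iterations and the step sizes used when generating the pseudo
# data that FedReg regularizes against, but that reading is an assumption from
# naming, not a documented API.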
"batch_size": 5, 39 | "use_fed": 1, 40 | "log_path": "tasks/cifar100_transformer/FedReg_e5_lr10/train.log", 41 | "train_transform": transform_train, 42 | "test_transform": transform_test, 43 | "eval_train": False, 44 | "gamma": 0.02, # gamma_func, 45 | 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/cifar100_transformer/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.transformer.model import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(512), 9 | transforms.RandomCrop(384), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize(mean=.5, std=.5) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(384), 18 | transforms.ToTensor(), 19 | transforms.Normalize(mean=.5, std=.5) 20 | ]) 21 | 22 | config = { 23 | 24 | "seed": 1, 25 | "model": partial(Model, learning_rate=1e-1), 26 | "inner_opt": None, 27 | "optimizer": FedAvg, 28 | "model_param": (100,), 29 | "inp_size": (3*384*384,), 30 | "train_path": "data/cifar-100-python/data/train/", 31 | "test_path": ["data/cifar-100-python/data/valid/", "data/cifar-100-python/data/test/"], 32 | "clients_per_round": 500, 33 | "num_rounds": 100, 34 | "eval_every": 1, 35 | "drop_percent": 0.0, 36 | "num_epochs": 1, 37 | "batch_size": 10, 38 | "use_fed": 1, 39 | "log_path": "tasks/cifar100_transformer/SGD/train.log", 40 | "train_transform": transform_train, 41 | "test_transform": transform_test, 42 | "eval_train": False, 43 | 44 | 45 | } 46 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/FedAvg_e20_lr05/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data/train/", 29 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 20, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10_sc/FedAvg_e20_lr05/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/FedCurv_e20_lr05_g3/config.py: 
-------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | from torchvision import transforms 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedCurv, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data/train/", 29 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 20, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10_sc/FedCurv_e20_lr05_g3/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 1e-3 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/FedProx_e20_lr05_g01/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | from torchvision import transforms 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2), 24 | "inner_opt": None, 25 | "optimizer": FedProx, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data/train/", 29 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 20, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10_sc/FedProx_e20_lr05_g01/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.01 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/FedReg_e20_lr05/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | from torchvision import transforms 6 | 7 | 8 | transform_train = transforms.Compose([ 9 | 
transforms.RandomCrop(32, padding=4), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.ToTensor(), 18 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 19 | ]) 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=5e-2, p_iters=10, ps_eta=1e-1, pt_eta=1e-3), 24 | "inner_opt": None, 25 | "optimizer": FedReg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data/train/", 29 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 20, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10_sc/FedReg_e20_lr05/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False, 41 | "gamma": 0.5, 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/SCAFFOLD_e20_lr05/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | from functools import partial 3 | from FedUtils.fed.scaffold import SCAFFOLD, Optim 4 | from torchvision import transforms, utils 5 | 6 | transform_train = transforms.Compose([ 7 | transforms.RandomCrop(32, padding=4), 8 | transforms.RandomHorizontalFlip(), 9 | transforms.ToTensor(), 10 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 11 | ]) 12 | 13 | # Normalize test set same as training set without augmentation 14 | transform_test = transforms.Compose([ 15 | transforms.ToTensor(), 16 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 17 | ]) 18 | config = { 19 | 20 | "seed": 1, 21 | "model": Model, 22 | "inner_opt": partial(Optim, lr=5e-2, weight_decay=0.0), 23 | "optimizer": SCAFFOLD, 24 | "model_param": (10,), 25 | "inp_size": (3*32*32,), 26 | "train_path": "data/cifar-10-batches-py/data/train/", 27 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 28 | "clients_per_round": 100, 29 | "num_rounds": 240, 30 | "eval_every": 1, 31 | "drop_percent": 0.0, 32 | "num_epochs": 20, 33 | "batch_size": 5, 34 | "use_fed": 1, 35 | "log_path": "tasks/cifar10_sc/SCAFFOLD_e20_lr05/train.log", 36 | "train_transform": transform_train, 37 | "test_transform": transform_test, 38 | "eval_train": False 39 | 40 | 41 | } 42 | -------------------------------------------------------------------------------- /tasks/cifar10_sc/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.cifar10.resnet9 import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import transforms 6 | 7 | transform_train = transforms.Compose([ 8 | transforms.RandomCrop(32, padding=4), 9 | transforms.RandomHorizontalFlip(), 10 | transforms.ToTensor(), 11 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 12 | ]) 13 | 14 | # Normalize test set same as training set without augmentation 15 | transform_test = transforms.Compose([ 16 | transforms.ToTensor(), 17 | 
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 18 | ]) 19 | 20 | config = { 21 | 22 | "seed": 1, 23 | "model": partial(Model, learning_rate=1e-1), 24 | "inner_opt": None, 25 | "optimizer": FedAvg, 26 | "model_param": (10,), 27 | "inp_size": (3*32*32,), 28 | "train_path": "data/cifar-10-batches-py/data/train/", 29 | "test_path": ["data/cifar-10-batches-py/data/test/", "data/cifar-10-batches-py/data/valid/"], 30 | "clients_per_round": 100, 31 | "num_rounds": 240, 32 | "eval_every": 1, 33 | "drop_percent": 0.0, 34 | "num_epochs": 1, 35 | "batch_size": 5, 36 | "use_fed": 1, 37 | "log_path": "tasks/cifar10_sc/SGD/train.log", 38 | "train_transform": transform_train, 39 | "test_transform": transform_test, 40 | "eval_train": False 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /tasks/emnist/FedAvg_e20_lr2/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=2e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/FedAvg_e20_lr2/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False 27 | 28 | 29 | } 30 | -------------------------------------------------------------------------------- /tasks/emnist/FedCurv_e20_lr2_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=2e-1), 10 | "inner_opt": None, 11 | "optimizer": FedCurv, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/FedCurv_e20_lr2_g4/train.log", 24 | "eval_train": False, 25 | "train_transform": None, 26 | "test_transform": None, 27 | "gamma": 1e-4 28 | 29 | 30 | } 31 | -------------------------------------------------------------------------------- /tasks/emnist/FedProx_e20_lr2_g001/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=2e-1), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | 
"eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/FedProx_e20_lr2_g001/train.log", 24 | "eval_train": False, 25 | "train_transform": None, 26 | "test_transform": None, 27 | "gamma": 0.001 28 | 29 | 30 | } 31 | -------------------------------------------------------------------------------- /tasks/emnist/FedReg_e20_lr2_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=2e-1, p_iters=10, ps_eta=5e-2, pt_eta=5e-4), 10 | "inner_opt": None, 11 | "optimizer": FedReg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/FedReg_e20_lr2_g4/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False, 27 | "gamma": 0.4, 28 | } 29 | -------------------------------------------------------------------------------- /tasks/emnist/SCAFFOLD_e20_lr1/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.scaffold import SCAFFOLD, Optim 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": Model, 10 | "inner_opt": partial(Optim, lr=1e-1), 11 | "optimizer": SCAFFOLD, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/SCAFFOLD_e20_lr1/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False 27 | } 28 | -------------------------------------------------------------------------------- /tasks/emnist/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.emnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=2e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/emnist/data_10000/train/", 15 | "test_path": ["data/emnist/data_10000/valid/", "data/emnist/data_10000/test/"], 16 | "clients_per_round": 20, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 1, 21 | "batch_size": 24, 22 | "use_fed": 1, 23 | "log_path": "tasks/emnist/SGD/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False 27 | 28 | 29 | } 30 | -------------------------------------------------------------------------------- /tasks/landmark/FedAvg/config.py: -------------------------------------------------------------------------------- 1 | 
from FedUtils.models.landmark.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | from torchvision import transforms, utils 6 | normalize = transforms.Normalize(mean=[0.4852, 0.4936, 0.4863], std=[0.2540, 0.2565, 0.2917]) 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(72), 9 | transforms.RandomResizedCrop((64,)), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | normalize 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(64), 18 | transforms.CenterCrop(64), 19 | transforms.ToTensor(), 20 | normalize 21 | ]) 22 | 23 | config = { 24 | 25 | "seed": 1, 26 | "model": partial(DenseNetModel, learning_rate=1e-1), 27 | "inner_opt": None, 28 | "optimizer": FedAvg, 29 | "model_param": (2028,), 30 | "inp_size": (3*64*64,), 31 | "train_path": "data/landmarks/train/", 32 | "test_path": ["data/landmarks/valid/", "data/landmarks/test/"], 33 | "image_path": "./data/landmarks/summary.hdf5", 34 | "clients_per_round": 100, 35 | "num_rounds": 1000000, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 40, 39 | "batch_size": 30, 40 | "use_fed": 1, 41 | "log_path": "tasks/landmark/FedAvg/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": False, 45 | 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/landmark/FedReg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.landmark.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedreg import FedReg 5 | from torchvision import transforms, utils 6 | normalize = transforms.Normalize(mean=[0.4852, 0.4936, 0.4863], std=[0.2540, 0.2565, 0.2917]) 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(72), 9 | transforms.RandomResizedCrop((64,)), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | normalize 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(64), 18 | transforms.CenterCrop(64), 19 | transforms.ToTensor(), 20 | normalize 21 | ]) 22 | 23 | config = { 24 | 25 | "seed": 1, 26 | "model": partial(DenseNetModel, learning_rate=1e-1, p_iters=10, ps_eta=1e-1, pt_eta=1e-3), 27 | "inner_opt": None, 28 | "optimizer": FedReg, 29 | "model_param": (2028,), 30 | "inp_size": (3*64*64,), 31 | "train_path": "data/landmarks/train/", 32 | "test_path": ["data/landmarks/valid/", "data/landmarks/test/"], 33 | "image_path": "./data/landmarks/summary.hdf5", 34 | "clients_per_round": 100, 35 | "num_rounds": 1000000, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 40, 39 | "batch_size": 30, 40 | "use_fed": 1, 41 | "log_path": "tasks/landmark/FedReg/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": False, 45 | "gamma": 0.5, 46 | 47 | 48 | 49 | } 50 | -------------------------------------------------------------------------------- /tasks/landmark/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.landmark.densenet import DenseNetModel 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | from torchvision import 
transforms, utils 6 | normalize = transforms.Normalize(mean=[0.4852, 0.4936, 0.4863], std=[0.2540, 0.2565, 0.2917]) 7 | transform_train = transforms.Compose([ 8 | transforms.Resize(72), 9 | transforms.RandomResizedCrop((64,)), 10 | transforms.RandomHorizontalFlip(), 11 | transforms.ToTensor(), 12 | normalize 13 | ]) 14 | 15 | # Normalize test set same as training set without augmentation 16 | transform_test = transforms.Compose([ 17 | transforms.Resize(64), 18 | transforms.CenterCrop(64), 19 | transforms.ToTensor(), 20 | normalize 21 | ]) 22 | 23 | config = { 24 | 25 | "seed": 1, 26 | "model": partial(DenseNetModel, learning_rate=1e-1), 27 | "inner_opt": None, 28 | "optimizer": FedAvg, 29 | "model_param": (2028,), 30 | "inp_size": (3*64*64,), 31 | "train_path": "data/landmarks/train/", 32 | "test_path": ["data/landmarks/valid/", "data/landmarks/test/"], 33 | "image_path": "./data/landmarks/summary.hdf5", 34 | "clients_per_round": 100, 35 | "num_rounds": 1000000, 36 | "eval_every": 1, 37 | "drop_percent": 0.0, 38 | "num_epochs": 1, 39 | "batch_size": 128, 40 | "use_fed": 1, 41 | "log_path": "tasks/landmark/SGD/train.log", 42 | "train_transform": transform_train, 43 | "test_transform": transform_test, 44 | "eval_train": False, 45 | 46 | 47 | 48 | } 49 | -------------------------------------------------------------------------------- /tasks/mnist/FedAvg_e40_lr1/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000/data/train/", 15 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 40, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist/FedAvg_e40_lr1/train.log", 24 | 25 | "train_transform": None, 26 | "test_transform": None, 27 | "eval_train": True, 28 | 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist/FedCurv_e40_lr1_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedCurv, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000/data/train/", 15 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 40, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist/FedCurv_e40_lr1_g4/train.log", 24 | "gamma": 1e-4, 25 | 26 | "train_transform": None, 27 | "test_transform": None, 28 | "eval_train": True 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist/FedProx_e40_lr1_g001/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn 
import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000/data/train/", 15 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 40, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist/FedProx_e40_lr1_g001/train.log", 24 | "gamma": 0.001, 25 | 26 | "train_transform": None, 27 | "test_transform": None, 28 | "eval_train": True 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist/FedReg_e40_lr1_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | config = { 6 | 7 | "seed": 1, 8 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=2e-1, pt_eta=2e-3), 9 | "inner_opt": None, 10 | "optimizer": FedReg, 11 | "model_param": (10,), 12 | "inp_size": (784,), 13 | "train_path": "data/mnist_10000/data/train/", 14 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 15 | "clients_per_round": 10, 16 | "num_rounds": 500, 17 | "eval_every": 1, 18 | "drop_percent": 0.0, 19 | "num_epochs": 40, 20 | "batch_size": 10, 21 | "use_fed": 1, 22 | "log_path": "tasks/mnist/FedReg_e40_lr1_g4/train.log", 23 | "train_transform": None, 24 | "test_transform": None, 25 | "eval_train": True, 26 | "gamma": 0.4, 27 | 28 | } 29 | -------------------------------------------------------------------------------- /tasks/mnist/SCAFFOLD_e40_lr1/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.scaffold import SCAFFOLD, Optim 4 | 5 | config = { 6 | 7 | "seed": 1, 8 | "model": Model, 9 | "inner_opt": partial(Optim, lr=1e-1), 10 | "optimizer": SCAFFOLD, 11 | "model_param": (10,), 12 | "inp_size": (784,), 13 | "train_path": "data/mnist_10000/data/train/", 14 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 15 | "clients_per_round": 10, 16 | "num_rounds": 500, 17 | "eval_every": 1, 18 | "drop_percent": 0.0, 19 | "num_epochs": 40, 20 | "batch_size": 10, 21 | "use_fed": 1, 22 | "log_path": "tasks/mnist/SCAFFOLD_e40_lr1/train.log", 23 | "train_transform": None, 24 | "test_transform": None, 25 | "eval_train": True 26 | } 27 | -------------------------------------------------------------------------------- /tasks/mnist/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000/data/train/", 15 | "test_path": ["data/mnist_10000/data/valid/", "data/mnist_10000/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | 
"eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 1, 21 | "batch_size": 100000, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist/SGD/train.log", 24 | 25 | "train_transform": None, 26 | "test_transform": None, 27 | "eval_train": True, 28 | 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_fedprox/FedAvg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist/data/train/", 15 | "test_path": ["data/mnist/data/valid/", "data/mnist/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 200, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_fedprox/FedAvg/train.log", 24 | 25 | "train_transform": None, 26 | "test_transform": None, 27 | "eval_train": True, 28 | 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_fedprox/FedProx/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist/data/train/", 15 | "test_path": ["data/mnist/data/valid/", "data/mnist/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 200, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_fedprox/FedProx/train.log", 24 | "gamma": 0.001, 25 | 26 | "train_transform": None, 27 | "test_transform": None, 28 | "eval_train": True 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_fedprox/FedReg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | config = { 6 | 7 | "seed": 1, 8 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=2e-1, pt_eta=2e-3), 9 | "inner_opt": None, 10 | "optimizer": FedReg, 11 | "model_param": (10,), 12 | "inp_size": (784,), 13 | "train_path": "data/mnist/data/train/", 14 | "test_path": ["data/mnist/data/valid/", "data/mnist/data/test/"], 15 | "clients_per_round": 10, 16 | "num_rounds": 200, 17 | "eval_every": 1, 18 | "drop_percent": 0.0, 19 | "num_epochs": 20, 20 | "batch_size": 10, 21 | "use_fed": 1, 22 | "log_path": "tasks/mnist_fedprox/FedReg/train.log", 23 | "train_transform": None, 24 | "test_transform": None, 25 | "eval_train": True, 26 | "gamma": 0.5, 27 | 28 | } 29 | -------------------------------------------------------------------------------- /tasks/mnist_sc/FedAvg_e20_lr1/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import 
Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000_sc/data/train/", 15 | "test_path": ["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_sc/FedAvg_e20_lr1/train.log", 24 | 25 | "train_transform": None, 26 | "test_transform": None, 27 | "eval_train": True, 28 | 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_sc/FedCurv_e20_lr1_g4/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedcurv import FedCurv 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedCurv, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000_sc/data/train/", 15 | "test_path": ["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_sc/FedCurv_e20_lr1_g4/train.log", 24 | "gamma": 1e-4, 25 | 26 | "train_transform": None, 27 | "test_transform": None, 28 | "eval_train": True 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_sc/FedProx_e20_lr1_g01/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000_sc/data/train/", 15 | "test_path": ["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 20, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_sc/FedProx_e20_lr1_g01/train.log", 24 | "gamma": 0.01, 25 | 26 | "train_transform": None, 27 | "test_transform": None, 28 | "eval_train": True 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/mnist_sc/FedReg_e20_lr1_g3/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | config = { 6 | 7 | "seed": 1, 8 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=2e-1, pt_eta=2e-3), 9 | "inner_opt": None, 10 | "optimizer": FedReg, 11 | "model_param": (10,), 12 | "inp_size": (784,), 13 | "train_path": "data/mnist_10000_sc/data/train/", 14 | "test_path": 
["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 15 | "clients_per_round": 10, 16 | "num_rounds": 500, 17 | "eval_every": 1, 18 | "drop_percent": 0.0, 19 | "num_epochs": 20, 20 | "batch_size": 10, 21 | "use_fed": 1, 22 | "log_path": "tasks/mnist_sc/FedReg_e20_lr1_g3/train.log", 23 | "train_transform": None, 24 | "test_transform": None, 25 | "eval_train": True, 26 | "gamma": 0.3 27 | 28 | } 29 | -------------------------------------------------------------------------------- /tasks/mnist_sc/SCAFFOLD_e20_lr1/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.scaffold import SCAFFOLD, Optim 4 | 5 | config = { 6 | 7 | "seed": 1, 8 | "model": Model, 9 | "inner_opt": partial(Optim, lr=1e-1), 10 | "optimizer": SCAFFOLD, 11 | "model_param": (10,), 12 | "inp_size": (784,), 13 | "train_path": "data/mnist_10000_sc/data/train/", 14 | "test_path": ["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 15 | "clients_per_round": 10, 16 | "num_rounds": 500, 17 | "eval_every": 1, 18 | "drop_percent": 0.0, 19 | "num_epochs": 20, 20 | "batch_size": 10, 21 | "use_fed": 1, 22 | "log_path": "tasks/mnist_sc/SCAFFOLD_e20_lr1/train.log", 23 | "train_transform": None, 24 | "test_transform": None, 25 | "eval_train": True 26 | } 27 | -------------------------------------------------------------------------------- /tasks/mnist_sc/SGD/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg_sgd import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (10,), 13 | "inp_size": (784,), 14 | "train_path": "data/mnist_10000_sc/data/train/", 15 | "test_path": ["data/mnist_10000_sc/data/valid/", "data/mnist_10000_sc/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 500, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 1, 21 | "batch_size": 10000, 22 | "use_fed": 1, 23 | "log_path": "tasks/mnist_sc/SGD/train.log", 24 | 25 | "train_transform": None, 26 | "test_transform": None, 27 | "eval_train": True, 28 | 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /tasks/nist/FedAvg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (62,), 13 | "inp_size": (784,), 14 | "train_path": "data/nist/train/", 15 | "test_path": ["data/nist/valid/", "data/nist/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 200, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 10, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/nist/FedAvg/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False 27 | 28 | 29 | } 30 | -------------------------------------------------------------------------------- /tasks/nist/FedProx/config.py: -------------------------------------------------------------------------------- 1 | 
from FedUtils.models.mnist.cnn import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (62,), 13 | "inp_size": (784,), 14 | "train_path": "data/nist/train/", 15 | "test_path": ["data/nist/valid/", "data/nist/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 200, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 10, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/nist/FedProx/train.log", 24 | "eval_train": False, 25 | "train_transform": None, 26 | "test_transform": None, 27 | "gamma": 0.001 28 | 29 | 30 | } 31 | -------------------------------------------------------------------------------- /tasks/nist/FedReg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.mnist.cnn import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | from torch.optim import SGD 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1e-1, p_iters=10, ps_eta=1e-1, pt_eta=1e-3), 10 | "inner_opt": None, 11 | "optimizer": FedReg, 12 | "model_param": (62,), 13 | "inp_size": (784,), 14 | "train_path": "data/nist/train/", 15 | "test_path": ["data/nist/valid/", "data/nist/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 200, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 10, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/nist/FedReg/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False, 27 | "gamma": 0.5, 28 | } 29 | -------------------------------------------------------------------------------- /tasks/shakespeare/FedAvg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.shakespeare.LSTM import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedavg import FedAvg 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1, ps_iters=None, pt_iters=None), 10 | "inner_opt": None, 11 | "optimizer": FedAvg, 12 | "model_param": (77,), 13 | "inp_size": (77,), 14 | "train_path": "data/shakespeare/data/train/", 15 | "test_path": ["data/shakespeare/data/valid/", "data/shakespeare/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 100, 18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 5, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/shakespeare/FedAvg/train.log", 24 | "train_transform": None, 25 | "test_transform": None, 26 | "eval_train": False 27 | 28 | 29 | } 30 | -------------------------------------------------------------------------------- /tasks/shakespeare/FedProx/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.shakespeare.LSTM import Model 2 | import torch 3 | from functools import partial 4 | from FedUtils.fed.fedprox import FedProx 5 | 6 | config = { 7 | 8 | "seed": 1, 9 | "model": partial(Model, learning_rate=1, ps_iters=None, pt_iters=None), 10 | "inner_opt": None, 11 | "optimizer": FedProx, 12 | "model_param": (77,), 13 | "inp_size": (77,), 14 | "train_path": "data/shakespeare/data/train/", 15 | "test_path": ["data/shakespeare/data/valid/", "data/shakespeare/data/test/"], 16 | "clients_per_round": 10, 17 | "num_rounds": 100, 
18 | "eval_every": 1, 19 | "drop_percent": 0.0, 20 | "num_epochs": 5, 21 | "batch_size": 10, 22 | "use_fed": 1, 23 | "log_path": "tasks/shakespeare/FedProx/train.log", 24 | "eval_train": False, 25 | "train_transform": None, 26 | "test_transform": None, 27 | "gamma": 1e-3 28 | 29 | 30 | } 31 | -------------------------------------------------------------------------------- /tasks/shakespeare/FedReg/config.py: -------------------------------------------------------------------------------- 1 | from FedUtils.models.shakespeare.LSTM import Model 2 | from functools import partial 3 | from FedUtils.fed.fedreg import FedReg 4 | config = { 5 | "seed": 1, 6 | "model": partial(Model, learning_rate=1, ps_iters=40, pt_iters=0), 7 | "inner_opt": None, 8 | "optimizer": FedReg, 9 | "model_param": (77,), 10 | "inp_size": (77,), 11 | "train_path": "data/shakespeare/data/train/", 12 | "test_path": ["data/shakespeare/data/valid/", "data/shakespeare/data/test/"], 13 | "clients_per_round": 10, 14 | "num_rounds": 100, 15 | "eval_every": 1, 16 | "drop_percent": 0.0, 17 | "num_epochs": 5, 18 | "batch_size": 10, 19 | "use_fed": 1, 20 | "log_path": "tasks/shakespeare/FedReg/train.log", 21 | "train_transform": None, 22 | "test_transform": None, 23 | "eval_train": False, 24 | "gamma": 1., 25 | 26 | } 27 | --------------------------------------------------------------------------------