├── publications
├── PrADA
│   ├── models
│   │   ├── __init__.py
│   │   ├── model_utils.py
│   │   ├── classifier.py
│   │   └── test_interaction_feature.py
│   ├── data_process
│   │   ├── __init__.py
│   │   ├── census_process
│   │   │   ├── __init__.py
│   │   │   ├── utils.py
│   │   │   └── census_data_creation_config.py
│   │   ├── ppd_process
│   │   │   ├── __init__.py
│   │   │   ├── ppd_data_creation_config.py
│   │   │   └── ppd_prepare_data_train_test.py
│   │   └── data_process_utils.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── census_dataset.py
│   │   ├── census_dataloader.py
│   │   └── ppd_dataloader.py
│   ├── experiments
│   │   ├── __init__.py
│   │   ├── ppd_loan
│   │   │   ├── __init__.py
│   │   │   ├── train_ppd_no_adaptation.py
│   │   │   ├── train_ppd_no_fg_target_finetune.py
│   │   │   ├── test_ppd_target.py
│   │   │   ├── train_ppd_fg_target_finetune.py
│   │   │   ├── train_config.py
│   │   │   ├── train_ppd_no_fg_adapt_pretrain.py
│   │   │   └── train_ppd_fg_adapt_pretrain.py
│   │   ├── income_census
│   │   │   ├── __init__.py
│   │   │   ├── tsne_config.py
│   │   │   ├── train_census_no_adaptation.py
│   │   │   ├── train_census_no_fg_target_finetune.py
│   │   │   ├── train_census_fg_target_finetune.py
│   │   │   ├── test_census_target.py
│   │   │   ├── train_config.py
│   │   │   ├── produce_census_tsne_data.py
│   │   │   └── draw_census_tsne_graph.py
│   │   └── test_utils.py
│   ├── figs
│   │   └── prada.png
│   └── statistics_utils.py
├── FedCG
│   ├── figs
│   │   ├── fedcg.png
│   │   ├── clip_image002.png
│   │   ├── clip_image006.jpg
│   │   ├── clip_image008.jpg
│   │   ├── clip_image010.jpg
│   │   ├── clip_image012.png
│   │   └── clip_image014.jpg
│   ├── record.py
│   ├── utils.py
│   ├── run.sh
│   ├── servers
│   │   ├── fedsplit.py
│   │   ├── fedavg.py
│   │   ├── fedprox.py
│   │   └── fedgen.py
│   ├── main.py
│   ├── config.py
│   └── README.md
├── FedIPR
│   └── figs
│   │   ├── fig1.png
│   │   ├── fig2.png
│   │   ├── fig3.png
│   │   ├── fig4.png
│   │   ├── fig5.png
│   │   └── fig6.png
├── ss_vfnas
│   ├── figs
│   │   ├── v2x.png
│   │   └── example.png
│   ├── visualize.py
│   ├── .gitignore
│   ├── models
│   │   ├── manual_k_party_chexpert.py
│   │   └── manual_k_party.py
│   ├── architects
│   │   ├── architect_two_party.py
│   │   ├── architect_k_party.py
│   │   ├── architect.py
│   │   ├── architect_k_party_milenas.py
│   │   └── architect_two_party_preg.py
│   ├── README.md
│   ├── dp_utils.py
│   ├── test.py
│   ├── operations.py
│   ├── utils.py
│   └── genotypes.py
└── No-Free-Lunch-Theorem-FL
│   └── figs
│   ├── tfl.png
│   ├── hist.png
│   ├── title.png
│   ├── diagram.jpg
│   └── framework.png
├── datasets
├── federated_object_detection_benchmark
│   ├── data
│   │   ├── __init__.py
│   │   ├── custom
│   │   │   ├── classes.names
│   │   │   └── train.txt
│   │   ├── data_utils.py
│   │   └── dataset.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── augmentations.py
│   │   ├── array_tool.py
│   │   ├── model_dump.py
│   │   ├── parse_config.py
│   │   ├── config.py
│   │   └── datasets.py
│   ├── model
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   └── nms
│   │   │   │   ├── __init__.py
│   │   │   │   ├── build.py
│   │   │   │   ├── _nms_gpu_post_py.py
│   │   │   │   └── _nms_gpu_post.pyx
│   │   ├── __init__.py
│   │   └── roi_module.py
│   ├── weights
│   │   └── download_weights.sh
│   ├── config
│   │   ├── custom.data
│   │   ├── coco.data
│   │   └── yolov3-tiny.cfg
│   ├── requirements.txt
│   ├── stop.sh
│   ├── run.sh
│   ├── run_server.sh
│   ├── experiments
│   │   └── log_formatter.py
│   ├── README.md
│   └── Dataset_description.md
└── Fed_Multiview_Gen
│   ├── example.png
│   ├── phong.blend
│   └── README.md
├── fl_trend
├── figs
│   ├── gradsec
│   ├── Gradsec.png
│   ├── ResilientEI.png
│   ├── paper_count_table.png
│   ├── Security_conf_fl_topics.png
│   └── system_conf_fl_topics.png
├── top_conf_figs
│   ├── top_ai_conf_fl_paper_trend.png
│   ├── system_comm_conf_fl_paper_count.png
│   └── top_security_conf_fl_paper_trend.png
├── fl_paper_accepted_ins_system_and_security_conferences.pdf
└── README.md
├── DISCLAIMER
├── .gitignore
└── CONTRIBUTING.md
/publications/PrADA/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/data_process/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/datasets/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/ppd_loan/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/data_process/census_process/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/data_process/ppd_process/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/income_census/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/datasets/federated_object_detection_benchmark/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/datasets/federated_object_detection_benchmark/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/datasets/federated_object_detection_benchmark/model/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/fl_trend/figs/gradsec:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/gradsec
--------------------------------------------------------------------------------
/fl_trend/figs/Gradsec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/Gradsec.png
--------------------------------------------------------------------------------
/fl_trend/figs/ResilientEI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/ResilientEI.png
--------------------------------------------------------------------------------
/datasets/federated_object_detection_benchmark/model/__init__.py:
-------------------------------------------------------------------------------- 1 | from model.faster_rcnn_vgg16 import FasterRCNNVGG16 2 | -------------------------------------------------------------------------------- /publications/FedCG/figs/fedcg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/fedcg.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig1.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig2.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig3.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig4.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig5.png -------------------------------------------------------------------------------- /publications/FedIPR/figs/fig6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedIPR/figs/fig6.png -------------------------------------------------------------------------------- /publications/PrADA/figs/prada.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/PrADA/figs/prada.png -------------------------------------------------------------------------------- /fl_trend/figs/paper_count_table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/paper_count_table.png -------------------------------------------------------------------------------- /publications/ss_vfnas/figs/v2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/ss_vfnas/figs/v2x.png -------------------------------------------------------------------------------- /datasets/Fed_Multiview_Gen/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/datasets/Fed_Multiview_Gen/example.png -------------------------------------------------------------------------------- /datasets/Fed_Multiview_Gen/phong.blend: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/datasets/Fed_Multiview_Gen/phong.blend -------------------------------------------------------------------------------- /publications/ss_vfnas/figs/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/ss_vfnas/figs/example.png -------------------------------------------------------------------------------- /fl_trend/figs/Security_conf_fl_topics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/Security_conf_fl_topics.png -------------------------------------------------------------------------------- /fl_trend/figs/system_conf_fl_topics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/figs/system_conf_fl_topics.png -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image002.png -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image006.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image006.jpg -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image008.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image008.jpg -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image010.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image010.jpg -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image012.png -------------------------------------------------------------------------------- /publications/FedCG/figs/clip_image014.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/FedCG/figs/clip_image014.jpg -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/model/utils/nms/__init__.py: -------------------------------------------------------------------------------- 1 | from model.utils.nms.non_maximum_suppression import non_maximum_suppression -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/weights/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | wget -c 
https://pjreddie.com/media/files/darknet53.conv.74 3 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/data/custom/classes.names: -------------------------------------------------------------------------------- 1 | basket 2 | carton 3 | chair 4 | electrombile 5 | gastank 6 | sunshade 7 | table 8 | -------------------------------------------------------------------------------- /publications/No-Free-Lunch-Theorem-FL/figs/tfl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/No-Free-Lunch-Theorem-FL/figs/tfl.png -------------------------------------------------------------------------------- /fl_trend/top_conf_figs/top_ai_conf_fl_paper_trend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/top_conf_figs/top_ai_conf_fl_paper_trend.png -------------------------------------------------------------------------------- /publications/No-Free-Lunch-Theorem-FL/figs/hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/No-Free-Lunch-Theorem-FL/figs/hist.png -------------------------------------------------------------------------------- /publications/No-Free-Lunch-Theorem-FL/figs/title.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/No-Free-Lunch-Theorem-FL/figs/title.png -------------------------------------------------------------------------------- /publications/No-Free-Lunch-Theorem-FL/figs/diagram.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/No-Free-Lunch-Theorem-FL/figs/diagram.jpg -------------------------------------------------------------------------------- /fl_trend/top_conf_figs/system_comm_conf_fl_paper_count.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/top_conf_figs/system_comm_conf_fl_paper_count.png -------------------------------------------------------------------------------- /publications/No-Free-Lunch-Theorem-FL/figs/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/publications/No-Free-Lunch-Theorem-FL/figs/framework.png -------------------------------------------------------------------------------- /fl_trend/top_conf_figs/top_security_conf_fl_paper_trend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/top_conf_figs/top_security_conf_fl_paper_trend.png -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/config/custom.data: -------------------------------------------------------------------------------- 1 | classes= 7 2 | train=data/custom/train.txt 3 | valid=data/custom/valid.txt 4 | names=data/custom/classes.names 5 | -------------------------------------------------------------------------------- /fl_trend/fl_paper_accepted_ins_system_and_security_conferences.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/FederatedAI/research/HEAD/fl_trend/fl_paper_accepted_ins_system_and_security_conferences.pdf -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/config/coco.data: -------------------------------------------------------------------------------- 1 | classes= 80 2 | train=data/coco/trainvalno5k.txt 3 | valid=data/coco/5k.txt 4 | names=data/coco.names 5 | backup=backup/ 6 | eval=coco 7 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | torchvision 3 | matplotlib 4 | terminaltables 5 | pillow 6 | tqdm 7 | sklearn 8 | socketIO_client 9 | flask 10 | flask_socketio 11 | scikit_image 12 | torchnet 13 | scipy 14 | cupy -------------------------------------------------------------------------------- /publications/PrADA/models/model_utils.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def init_weights(m): 5 | if type(m) == nn.Linear: 6 | nn.init.kaiming_normal_(m.weight) 7 | # nn.init.xavier_uniform(m.weight) 8 | # m.bias.data.fill_(0.01) 9 | -------------------------------------------------------------------------------- /publications/PrADA/data_process/census_process/utils.py: -------------------------------------------------------------------------------- 1 | 2 | def bucketized_age(age): 3 | age_threshold = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65] 4 | index = 0 5 | for t in age_threshold: 6 | if age < t: 7 | return index 8 | index += 1 9 | return index -------------------------------------------------------------------------------- /publications/PrADA/experiments/income_census/tsne_config.py: -------------------------------------------------------------------------------- 1 | tsne_embedding_creation = { 2 | "tsne_embedding_data_dir": "YOUR_ORIGINAL_DATA_DIR/tsne_emb/", 3 | "tsne_graph_output_dir": "YOUR_GRAPH_OUTPUT_DATA_DIR/output/", 4 | "apply_adaptation": True, 5 | "using_interaction": False 6 | } 7 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/utils/augmentations.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import numpy as np 4 | 5 | 6 | def horisontal_flip(images, targets): 7 | images = torch.flip(images, [-1]) 8 | targets[:, 2] = 1 - targets[:, 2] 9 | return images, targets 10 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DATASET=$1 3 | MODEL=$2 4 | if [ ! -n "$DATASET" ];then 5 | echo "Please input dataset" 6 | exit 7 | fi 8 | if [ ! 
-n "$MODEL" ];then 9 | echo "Please input model" 10 | exit 11 | fi 12 | ps -ef | grep ${DATASET}/${MODEL} | grep -v grep | awk '{print $2}' | xargs kill -9 13 | -------------------------------------------------------------------------------- /publications/PrADA/data_process/census_process/census_data_creation_config.py: -------------------------------------------------------------------------------- 1 | census_data_creation = { 2 | "original_data_dir": "YOUR_ORIGINAL_DATA_DIR/census/", 3 | "processed_data_dir": "YOUR_ORIGINAL_DATA_DIR/processed/", 4 | "train_data_file_name": "census-income.data", 5 | "test_data_file_name": "census-income.test", 6 | "positive_sample_ratio": 0.04, 7 | "number_target_samples": 4000, 8 | "data_tag": "all4000pos004" 9 | } 10 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/model/utils/nms/build.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import numpy 5 | 6 | ext_modules = [Extension("_nms_gpu_post", ["_nms_gpu_post.pyx"], 7 | include_dirs=[numpy.get_include()])] 8 | setup( 9 | name="Hello pyx", 10 | cmdclass={'build_ext': build_ext}, 11 | ext_modules=ext_modules 12 | ) 13 | -------------------------------------------------------------------------------- /publications/PrADA/data_process/data_process_utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sklearn.utils import shuffle 3 | 4 | 5 | def save_df_data(df_data, file_full_name): 6 | df_data.to_csv(file_full_name, index=False) 7 | print(f"[INFO] save data with shape {df_data.shape} to {file_full_name}") 8 | 9 | 10 | def combine_src_tgt_data(df_src_data, df_tgt_data): 11 | df_all_data = pd.concat((df_src_data, df_tgt_data), axis=0) 12 | df_all_data = shuffle(df_all_data) 13 | return df_all_data 14 | -------------------------------------------------------------------------------- /publications/PrADA/data_process/ppd_process/ppd_data_creation_config.py: -------------------------------------------------------------------------------- 1 | ppd_data_creation = { 2 | "original_data_dir": "YOUR ORIGINAL DATA DIR", 3 | "processed_data_dir": "YOUR PROCESSED DATA DIR", 4 | # "original_ppd_data_file_name": "PPD_data_all.csv", 5 | # "original_ppd_datetime_file_name": "PPD_data_datetime.csv", 6 | "meta_data_full_name": "./PPD_meta_data.json", 7 | "number_train_samples": 55000, 8 | "number_target_samples": 4000, 9 | "positive_samples_ratio": 0.04, 10 | "data_tag": "all4000pos004" 11 | } 12 | -------------------------------------------------------------------------------- /fl_trend/README.md: -------------------------------------------------------------------------------- 1 | ## FL Trend in Top AI Conferences 2 |  3 | 4 | 5 | 6 | 7 | 8 | ## FL Trend in Top Security Conferences 9 | 10 |  11 | (Most of accepted papers in 2019 and 2020 are about MPC) 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | ## FL Accepted Paper Counts in Top System & Communication Conferences 20 | 21 |  22 | 23 | -------------------------------------------------------------------------------- /publications/PrADA/datasets/census_dataset.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data import Dataset 3 | 4 | 5 | class SimpleDataset(Dataset): 6 | """An abstract Dataset 
class wrapped around Pytorch Dataset class. 7 | """ 8 | 9 | def __init__(self, data, labels): 10 | self.data = data 11 | self.labels = labels 12 | 13 | def __len__(self): 14 | return len(self.data) 15 | 16 | def __getitem__(self, item_idx): 17 | data_i, target_i = self.data[item_idx], self.labels[item_idx] 18 | return torch.tensor(data_i).float(), torch.tensor(target_i, dtype=torch.long) 19 | 20 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/test_utils.py: -------------------------------------------------------------------------------- 1 | from utils import test_classifier 2 | 3 | 4 | def test_model(task_id, init_model, trained_model_root_folder, target_test_loader): 5 | print("[INFO] load trained model") 6 | init_model.load_model(root=trained_model_root_folder, 7 | task_id=task_id, 8 | load_global_classifier=True, 9 | timestamp=None) 10 | 11 | init_model.print_parameters() 12 | 13 | print("[INFO] Run test") 14 | _, auc, ks = test_classifier(init_model, target_test_loader, "test") 15 | print(f"[INFO] test auc:{auc}, ks:{ks}") 16 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | set -e 4 | 5 | DATASET=$1 6 | NUM_CLIENT=$2 7 | MODEL=$3 8 | PORT=$4 9 | 10 | if [ ! -n "$DATASET" ];then 11 | echo "Please input dataset" 12 | exit 13 | fi 14 | 15 | if [ ! -n "$NUM_CLIENT" ];then 16 | echo "Please input num of client" 17 | exit 18 | fi 19 | 20 | if [ ! -n "$MODEL" ];then 21 | echo "please input model name" 22 | exit 23 | fi 24 | 25 | if [ ! -n "$PORT" ];then 26 | echo "please input server port" 27 | exit 28 | fi 29 | 30 | for i in $(seq 1 ${NUM_CLIENT}); do 31 | nohup python3 fl_client.py \ 32 | --gpu $((($i % 8)))\ 33 | --config_file data/task_configs/${MODEL}/${DATASET}/${MODEL}_task$i.json \ 34 | --ignore_load True \ 35 | --port ${PORT} & 36 | done 37 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/utils/array_tool.py: -------------------------------------------------------------------------------- 1 | """ 2 | tools to convert specified type 3 | """ 4 | import torch as t 5 | import numpy as np 6 | 7 | 8 | def tonumpy(data): 9 | if isinstance(data, np.ndarray): 10 | return data 11 | if isinstance(data, t.Tensor): 12 | return data.detach().cpu().numpy() 13 | 14 | 15 | def totensor(data, cuda=True): 16 | if isinstance(data, np.ndarray): 17 | tensor = t.from_numpy(data) 18 | if isinstance(data, t.Tensor): 19 | tensor = data.detach() 20 | if cuda: 21 | tensor = tensor.cuda() 22 | return tensor 23 | 24 | 25 | def scalar(data): 26 | if isinstance(data, np.ndarray): 27 | return data.reshape(1)[0] 28 | if isinstance(data, t.Tensor): 29 | return data.item() -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/run_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | DATASET=$1 7 | MODEL=$2 8 | PORT=$3 9 | 10 | if [ ! -n "$DATASET" ];then 11 | echo "Please input dataset" 12 | exit 13 | fi 14 | 15 | if [ ! -n "$MODEL" ];then 16 | echo "Please input model name" 17 | exit 18 | fi 19 | 20 | if [ ! -n "$PORT" ];then 21 | echo "please input server port" 22 | exit 23 | fi 24 | 25 | if [ ! 
-d "experiments/logs/`date +'%m%d'`/${MODEL}/${DATASET}" ];then 26 | mkdir -p "experiments/logs/`date +'%m%d'`/${MODEL}/${DATASET}" 27 | fi 28 | 29 | LOG="experiments/logs/`date +'%m%d'`/${MODEL}/${DATASET}/fl_server.log" 30 | echo Loggin output to "$LOG" 31 | 32 | nohup python3 fl_server.py --config_file data/task_configs/${MODEL}/${DATASET}/${MODEL}_task.json --port ${PORT} > ${LOG} & 33 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/model/utils/nms/_nms_gpu_post_py.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | def _nms_gpu_post( mask, 5 | n_bbox, 6 | threads_per_block, 7 | col_blocks 8 | ): 9 | n_selection = 0 10 | one_ull = np.array([1],dtype=np.uint64) 11 | selection = np.zeros((n_bbox,), dtype=np.int32) 12 | remv = np.zeros((col_blocks,), dtype=np.uint64) 13 | 14 | for i in range(n_bbox): 15 | nblock = i // threads_per_block 16 | inblock = i % threads_per_block 17 | 18 | if not (remv[nblock] & one_ull << inblock): 19 | selection[n_selection] = i 20 | n_selection += 1 21 | 22 | index = i * col_blocks 23 | for j in range(nblock, col_blocks): 24 | remv[j] |= mask[index + j] 25 | return selection, n_selection 26 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/utils/model_dump.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import codecs 3 | 4 | 5 | def obj_to_pickle_string(x, file_path=None): 6 | if file_path is not None: 7 | print("save model to file") 8 | output = open(file_path, 'wb') 9 | pickle.dump(x, output) 10 | return file_path 11 | else: 12 | print("turn model to byte") 13 | x = codecs.encode(pickle.dumps(x), "base64").decode() 14 | print(len(x)) 15 | return x 16 | # return msgpack.packb(x, default=msgpack_numpy.encode) 17 | # TODO: compare pickle vs msgpack vs json for serialization; tradeoff: computation vs network IO 18 | 19 | 20 | def pickle_string_to_obj(s): 21 | if ".pkl" in s: 22 | df = open(s, "rb") 23 | print("load model from file") 24 | return pickle.load(df) 25 | else: 26 | print("load model from byte") 27 | return pickle.loads(codecs.decode(s.encode(), "base64")) 28 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/model/utils/nms/_nms_gpu_post.pyx: -------------------------------------------------------------------------------- 1 | cimport numpy as np 2 | from libc.stdint cimport uint64_t 3 | 4 | import numpy as np 5 | 6 | def _nms_gpu_post(np.ndarray[np.uint64_t, ndim=1] mask, 7 | int n_bbox, 8 | int threads_per_block, 9 | int col_blocks 10 | ): 11 | cdef: 12 | int i, j, nblock, index 13 | uint64_t inblock 14 | int n_selection = 0 15 | uint64_t one_ull = 1 16 | np.ndarray[np.int32_t, ndim=1] selection 17 | np.ndarray[np.uint64_t, ndim=1] remv 18 | 19 | selection = np.zeros((n_bbox,), dtype=np.int32) 20 | remv = np.zeros((col_blocks,), dtype=np.uint64) 21 | 22 | for i in range(n_bbox): 23 | nblock = i // threads_per_block 24 | inblock = i % threads_per_block 25 | 26 | if not (remv[nblock] & one_ull << inblock): 27 | selection[n_selection] = i 28 | n_selection += 1 29 | 30 | index = i * col_blocks 31 | for j in range(nblock, col_blocks): 32 | remv[j] |= mask[index + j] 33 | return selection, n_selection 34 | -------------------------------------------------------------------------------- 
/datasets/Fed_Multiview_Gen/README.md:
--------------------------------------------------------------------------------
1 | # Fed_Multiview_Gen
2 | This repo contains code for generating multiview images from 3D CAD models for federated learning research.
3 | 
4 | Main contributions of this repo:
5 | 1. Modified phong.blend for better image quality
6 | 2. Scripts for automating the dataset-generation process
7 | 3. Scripts for post-processing images
8 | 
9 | 
10 | 
11 | ## Requirements
12 | - CAD models can be found here: https://github.com/lmb-freiburg/orion
13 | - Convert CAD model to images, Windows version: https://github.com/zeaggler/ModelNet_Blender_OFF2Multiview
14 | - Convert CAD model to images, Linux version: https://github.com/WeiTang114/BlenderPhong
15 | 
16 | Please refer to the above GitHub repos for installation instructions.
17 | 
18 | ## Usage
19 | There are two steps for generating the dataset:
20 | 1. Generate PNG images from the 3D CAD models using Blender;
21 | 2. Post-process the images from step 1 to adjust the object-to-image ratio.
22 | 
23 | First, specify BLENDER_PATH in main.py:
24 | 
25 | ```python
26 | BLENDER_PATH = "D:/Program Files/blender-2.79b-windows64/blender.exe"
27 | ```
28 | 
29 | Then generate the dataset with the command:
30 | ```bash
31 | python main.py --model_dir ./dataset_samples --target_dir ./dataset_images --action all
32 | ```
33 | 
34 |
--------------------------------------------------------------------------------
/publications/FedCG/record.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | client = 5
4 | experiments = 5
5 | bs = 16
6 | lr = 0.0003
7 | wd = 0.0001
8 | dataset = "domainnet"
9 | algs = ["local", "fedavg", "fedsplit", "fedprox", "fedgen", "fedcg_w", "feddf"]
10 | 
11 | for alg in algs:
12 | 
13 |     print(alg)
14 | 
15 |     fdir = 'experiments/bs' + str(bs) + 'lr' + str(lr) + 'wd' + str(wd) + '/' + alg + '_' + dataset + str(
16 |         client) + '_lenet5_'
17 |     if alg == 'fedcg_w':
18 |         fdir += "mse_"
19 |     nums = [[] for _ in range(client)]
20 |     avg_nums = []
21 | 
22 |     for i in range(1, experiments + 1):
23 |         fname = fdir + str(i) + '/log.txt'
24 |         with open(fname, 'r') as f:
25 |             lines = f.readlines()[-client:]
26 |             sum_num = 0
27 |             for j in range(client):
28 |                 num = float(lines[j].split(" test acc:")[1][:8])
29 |                 nums[j].append(num)
30 |                 sum_num += num
31 |             avg_nums.append(sum_num / client)
32 | 
33 |     for j in range(client):
34 |         print("client:%2d, acc:%.4f(%.4f)" % (j + 1, np.mean(np.array(nums[j])), np.std(np.array(nums[j]))))
35 |     print("total average")
36 |     print("%.4f(%.4f)" % (np.mean(np.array(avg_nums)), np.std(np.array(avg_nums))))
37 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/ppd_loan/train_ppd_no_adaptation.py:
--------------------------------------------------------------------------------
1 | from experiments.ppd_loan.train_config import data_tag, data_hyperparameters, no_adaptation_hyperparameters
2 | from experiments.ppd_loan.train_ppd_fg_adapt_pretrain import create_fg_pdd_global_model
3 | from experiments.ppd_loan.train_ppd_no_fg_adapt_pretrain import create_no_fg_pdd_global_model
4 | from experiments.ppd_loan.train_ppd_utils import train_no_adaptation
5 | 
6 | 
7 | def get_model_meta():
8 |     no_da_root_dir = data_hyperparameters["ppd_no-ad_model_dir"]
9 |     apply_feature_group = no_adaptation_hyperparameters['apply_feature_group']
10 |     if apply_feature_group:
11 |         print("[INFO] feature grouping applied")
12 |         model =
create_fg_pdd_global_model(num_wide_feature=6) 13 | else: 14 | print("[INFO] no feature grouping applied") 15 | model = create_no_fg_pdd_global_model(aggregation_dim=5, num_wide_feature=6) 16 | return model, no_da_root_dir 17 | 18 | 19 | if __name__ == "__main__": 20 | init_model, ppd_no_ad_model_root_dir = get_model_meta() 21 | task_id_list = train_no_adaptation(data_tag, 22 | ppd_no_ad_model_root_dir, 23 | no_adaptation_hyperparameters, 24 | data_hyperparameters, 25 | model=init_model) 26 | print(f"[INFO] task id list:{task_id_list}") 27 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/utils/parse_config.py: -------------------------------------------------------------------------------- 1 | def parse_model_config(path): 2 | """Parses the yolo-v3 layer configuration file and returns module definitions""" 3 | file = open(path, 'r') 4 | lines = file.read().split('\n') 5 | lines = [x for x in lines if x and not x.startswith('#')] 6 | lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces 7 | module_defs = [] 8 | for line in lines: 9 | if line.startswith('['): # This marks the start of a new block 10 | module_defs.append({}) 11 | module_defs[-1]['type'] = line[1:-1].rstrip() 12 | if module_defs[-1]['type'] == 'convolutional': 13 | module_defs[-1]['batch_normalize'] = 0 14 | else: 15 | key, value = line.split("=") 16 | value = value.strip() 17 | module_defs[-1][key.rstrip()] = value.strip() 18 | 19 | return module_defs 20 | 21 | 22 | def parse_data_config(path): 23 | """Parses the data configuration file""" 24 | options = dict() 25 | options['gpus'] = '0,1,2,3' 26 | options['num_workers'] = '10' 27 | with open(path, 'r') as fp: 28 | lines = fp.readlines() 29 | for line in lines: 30 | line = line.strip() 31 | if line == '' or line.startswith('#'): 32 | continue 33 | key, value = line.split('=') 34 | options[key.strip()] = value.strip() 35 | return options 36 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/income_census/train_census_no_adaptation.py: -------------------------------------------------------------------------------- 1 | from experiments.income_census.train_config import data_tag, no_adaptation_hyperparameters, data_hyperparameters 2 | from experiments.income_census.train_census_fg_adapt_pretrain import create_fg_census_global_model 3 | from experiments.income_census.train_census_no_fg_adapt_pretrain import create_no_fg_census_global_model 4 | from experiments.income_census.train_census_utils import train_no_adaptation 5 | 6 | 7 | def get_model_meta(): 8 | no_da_root_dir = data_hyperparameters["census_no-ad_model_dir"] 9 | apply_feature_group = no_adaptation_hyperparameters['apply_feature_group'] 10 | if apply_feature_group: 11 | print("[INFO] feature grouping applied") 12 | model = create_fg_census_global_model(num_wide_feature=5) 13 | else: 14 | print("[INFO] no feature grouping applied") 15 | model = create_no_fg_census_global_model(aggregation_dim=4, num_wide_feature=5) 16 | return model, no_da_root_dir 17 | 18 | 19 | if __name__ == "__main__": 20 | init_model, census_no_ad_root_dir = get_model_meta() 21 | task_id_list = train_no_adaptation(data_tag, 22 | census_no_ad_root_dir, 23 | no_adaptation_hyperparameters, 24 | data_hyperparameters, 25 | init_model) 26 | print(f"[INFO] task id list:{task_id_list}") 27 | -------------------------------------------------------------------------------- 
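The PrADA experiment scripts above and below are chained together through task ids: each pretraining run prints a task id, the fine-tuning scripts consume it via `--pretrain_task_id`, and the test scripts take the resulting fine-tune id via `--task_id` together with a `--model_tag` of `fg`, `no_fg`, or `no_ad`. A sketch of one full census pass — the module-style invocation from the PrADA root and the pretraining script's default flags are assumptions, and the angle-bracket ids are placeholders for the values printed by the previous step:

```bash
# 1) Pretrain with feature grouping and domain adaptation; note the printed task id.
python -m experiments.income_census.train_census_fg_adapt_pretrain

# 2) Fine-tune that pretrained model on the target domain.
python -m experiments.income_census.train_census_fg_target_finetune \
    --pretrain_task_id <PRETRAIN_TASK_ID>

# 3) Evaluate the fine-tuned model on the target test set.
python -m experiments.income_census.test_census_target \
    --task_id <FINETUNE_TASK_ID> --model_tag fg
```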
/publications/PrADA/experiments/income_census/train_census_no_fg_target_finetune.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from experiments.income_census.train_config import fine_tune_hyperparameters, data_hyperparameters 4 | from experiments.income_census.train_census_no_fg_adapt_pretrain import create_no_fg_census_global_model 5 | from experiments.income_census.train_census_utils import finetune_census 6 | 7 | 8 | def get_finetune_model_meta(): 9 | finetune_target_root_dir = data_hyperparameters['census_no-fg_ft_target_model_dir'] 10 | model = create_no_fg_census_global_model() 11 | return model, finetune_target_root_dir 12 | 13 | 14 | if __name__ == "__main__": 15 | 16 | parser = argparse.ArgumentParser("census_no-fg_target_fine_tune") 17 | parser.add_argument('--pretrain_task_id', type=str) 18 | args = parser.parse_args() 19 | pretrain_task_id = args.pretrain_task_id 20 | print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}") 21 | 22 | census_pretain_model_root_dir = data_hyperparameters['census_no-fg_pretrained_model_dir'] 23 | init_model, census_finetune_target_model_root_dir = get_finetune_model_meta() 24 | task_id = finetune_census(pretrain_task_id, 25 | census_pretain_model_root_dir, 26 | census_finetune_target_model_root_dir, 27 | fine_tune_hyperparameters, 28 | data_hyperparameters, 29 | init_model) 30 | print(f"[INFO] finetune task id:{task_id}") 31 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/ppd_loan/train_ppd_no_fg_target_finetune.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from experiments.ppd_loan.train_config import data_hyperparameters, fine_tune_hyperparameters 4 | from experiments.ppd_loan.train_ppd_no_fg_adapt_pretrain import create_no_fg_pdd_global_model 5 | from experiments.ppd_loan.train_ppd_utils import finetune_ppd 6 | 7 | 8 | def get_finetune_model_meta(): 9 | finetune_target_root_dir = data_hyperparameters['ppd_no-fg_ft_target_model_dir'] 10 | pos_class_weight = fine_tune_hyperparameters['pos_class_weight'] 11 | model = create_no_fg_pdd_global_model(pos_class_weight=pos_class_weight) 12 | return model, finetune_target_root_dir 13 | 14 | 15 | if __name__ == "__main__": 16 | parser = argparse.ArgumentParser("ppd_no-fg_target_fine_tune") 17 | parser.add_argument('--pretrain_task_id', type=str) 18 | args = parser.parse_args() 19 | pretrain_task_id = args.pretrain_task_id 20 | print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}") 21 | 22 | ppd_pretain_model_root_dir = data_hyperparameters['ppd_no-fg_pretrained_model_dir'] 23 | init_model, ppd_finetune_target_model_root_dir = get_finetune_model_meta() 24 | 25 | task_id = finetune_ppd(pretrain_task_id, 26 | ppd_pretain_model_root_dir, 27 | ppd_finetune_target_model_root_dir, 28 | fine_tune_hyperparameters, 29 | data_hyperparameters, 30 | model=init_model) 31 | print(f"[INFO] task id:{task_id}") 32 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/income_census/train_census_fg_target_finetune.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from experiments.income_census.train_config import fine_tune_hyperparameters, data_hyperparameters 4 | from experiments.income_census.train_census_fg_adapt_pretrain import 
create_fg_census_global_model 5 | from experiments.income_census.train_census_utils import finetune_census 6 | 7 | 8 | def get_finetune_model_meta(): 9 | finetune_target_root_dir = data_hyperparameters['census_fg_ft_target_model_dir'] 10 | using_interaction = fine_tune_hyperparameters['using_interaction'] 11 | model = create_fg_census_global_model(using_interaction=using_interaction) 12 | return model, finetune_target_root_dir 13 | 14 | 15 | if __name__ == "__main__": 16 | 17 | parser = argparse.ArgumentParser("census_fg_target_fine_tune") 18 | parser.add_argument('--pretrain_task_id', type=str) 19 | args = parser.parse_args() 20 | pretrain_task_id = args.pretrain_task_id 21 | print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}") 22 | 23 | census_pretain_model_root_dir = data_hyperparameters['census_fg_pretrained_model_dir'] 24 | init_model, census_finetune_target_model_root_dir = get_finetune_model_meta() 25 | task_id = finetune_census(pretrain_task_id, 26 | census_pretain_model_root_dir, 27 | census_finetune_target_model_root_dir, 28 | fine_tune_hyperparameters, 29 | data_hyperparameters, 30 | init_model) 31 | print(f"[INFO] finetune task id:{task_id}") 32 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/ppd_loan/test_ppd_target.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from datasets.ppd_dataloader import get_pdd_dataloaders 4 | from experiments.ppd_loan import train_ppd_fg_target_finetune as fg_finetune 5 | from experiments.ppd_loan import train_ppd_no_adaptation as no_ad_finetune 6 | from experiments.ppd_loan import train_ppd_no_fg_target_finetune as no_fg_finetune 7 | from experiments.ppd_loan.train_config import data_hyperparameters 8 | from experiments.test_utils import test_model 9 | 10 | if __name__ == "__main__": 11 | 12 | parser = argparse.ArgumentParser("ppd_test_target") 13 | parser.add_argument('--task_id', type=str) 14 | parser.add_argument('--model_tag', type=str) 15 | args = parser.parse_args() 16 | task_id = args.task_id 17 | model_tag = args.model_tag 18 | print(f"[INFO] perform test task on : [{model_tag}] with id: {task_id}") 19 | test_models_dir = {"fg": fg_finetune.get_finetune_model_meta, 20 | "no_fg": no_fg_finetune.get_finetune_model_meta, 21 | "no_ad": no_ad_finetune.get_model_meta} 22 | init_model, model_root_dir = test_models_dir[model_tag]() 23 | target_test_file_name = data_hyperparameters['target_ft_test_file_name'] 24 | print(f"[INFO] target_test_file_name: {target_test_file_name}.") 25 | 26 | print("[INFO] load test data") 27 | target_test_loader, _ = get_pdd_dataloaders(ds_file_name=target_test_file_name, 28 | batch_size=1024, 29 | split_ratio=1.0) 30 | 31 | test_model(task_id, init_model, model_root_dir, target_test_loader) 32 | -------------------------------------------------------------------------------- /publications/ss_vfnas/visualize.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import genotypes 3 | from graphviz import Digraph 4 | 5 | 6 | def plot(genotype, filename): 7 | g = Digraph( 8 | format='png', 9 | edge_attr=dict(fontsize='20', fontname="times"), 10 | node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"), 11 | engine='dot') 12 | g.body.extend(['rankdir=LR']) 13 | 14 | g.node("c_{k-2}", fillcolor='darkseagreen2') 15 | g.node("c_{k-1}", 
fillcolor='darkseagreen2') 16 | assert len(genotype) % 2 == 0 17 | steps = len(genotype) // 2 18 | 19 | for i in range(steps): 20 | g.node(str(i), fillcolor='lightblue') 21 | 22 | for i in range(steps): 23 | for k in [2*i, 2*i + 1]: 24 | op, j = genotype[k] 25 | if j == 0: 26 | u = "c_{k-2}" 27 | elif j == 1: 28 | u = "c_{k-1}" 29 | else: 30 | u = str(j-2) 31 | v = str(i) 32 | g.edge(u, v, label=op, fillcolor="gray") 33 | 34 | g.node("c_{k}", fillcolor='palegoldenrod') 35 | for i in range(steps): 36 | g.edge(str(i), "c_{k}", fillcolor="gray") 37 | 38 | g.render(filename, view=False) 39 | 40 | 41 | if __name__ == '__main__': 42 | if len(sys.argv) != 2: 43 | print("usage:\n python {} ARCH_NAME".format(sys.argv[0])) 44 | sys.exit(1) 45 | 46 | genotype_name = sys.argv[1] 47 | try: 48 | genotype = eval('genotypes.{}'.format(genotype_name)) 49 | except AttributeError: 50 | print("{} is not specified in genotypes.py".format(genotype_name)) 51 | sys.exit(1) 52 | 53 | plot(genotype.normal, "B_normal") 54 | plot(genotype.reduce, "B_reduction") 55 | 56 | -------------------------------------------------------------------------------- /publications/FedCG/utils.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | class AvgMeter(): 9 | 10 | def __init__(self): 11 | self.reset() 12 | 13 | def reset(self): 14 | self.val = 0. 15 | self.n = 0 16 | self.avg = 0. 17 | 18 | def update(self, val, n=1): 19 | assert n > 0 20 | self.val += val 21 | self.n += n 22 | self.avg = self.val / self.n 23 | 24 | def get(self): 25 | return self.avg 26 | 27 | 28 | class BestMeter(): 29 | def __init__(self): 30 | self.reset() 31 | 32 | def reset(self): 33 | self.val = 0. 
34 |         self.n = -1
35 | 
36 |     def update(self, val, n):
37 |         assert n > self.n
38 |         if val > self.val:
39 |             self.val = val
40 |             self.n = n
41 | 
42 |     def get(self):
43 |         return self.val, self.n
44 | 
45 | 
46 | def weights_init(m):
47 |     classname = m.__class__.__name__
48 |     if classname.find('Conv') != -1:
49 |         nn.init.normal_(m.weight.data, 0.0, 0.02)
50 |     elif classname.find('BatchNorm') != -1:
51 |         nn.init.normal_(m.weight.data, 1.0, 0.02)
52 |         nn.init.constant_(m.bias.data, 0)
53 | 
54 | 
55 | def add_gaussian_noise(tensor, mean, std):
56 |     return tensor + torch.randn_like(tensor) * std + mean  # add noise matching the input's shape, dtype and device
57 | 
58 | 
59 | def set_seed(manual_seed):
60 |     random.seed(manual_seed)
61 |     np.random.seed(manual_seed)
62 |     torch.manual_seed(manual_seed)
63 |     if torch.cuda.is_available():
64 |         torch.cuda.manual_seed_all(manual_seed)
65 |     torch.backends.cudnn.benchmark = False
66 |     torch.backends.cudnn.deterministic = True
67 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/income_census/test_census_target.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | from datasets.census_dataloader import get_income_census_dataloaders
4 | from experiments.income_census import train_census_fg_target_finetune as fg_finetune
5 | from experiments.income_census import train_census_no_adaptation as no_ad_finetune
6 | from experiments.income_census import train_census_no_fg_target_finetune as no_fg_finetune
7 | from experiments.income_census.train_config import data_hyperparameters
8 | from experiments.test_utils import test_model
9 | 
10 | if __name__ == "__main__":
11 | 
12 |     parser = argparse.ArgumentParser("census_test_target")
13 |     parser.add_argument('--task_id', type=str)
14 |     parser.add_argument('--model_tag', type=str)
15 |     args = parser.parse_args()
16 |     task_id = args.task_id
17 |     model_tag = args.model_tag
18 |     print(f"[INFO] perform test task on : [{model_tag}] with id: {task_id}")
19 |     test_models_dir = {"fg": fg_finetune.get_finetune_model_meta,
20 |                        "no_fg": no_fg_finetune.get_finetune_model_meta,
21 |                        "no_ad": no_ad_finetune.get_model_meta}
22 |     init_model, model_root_dir = test_models_dir[model_tag]()
23 |     target_test_file_name = data_hyperparameters['target_ft_test_file_name']
24 |     print(f"[INFO] target_test_file_name: {target_test_file_name}.")
25 | 
26 |     print("[INFO] load test data")
27 |     target_test_loader, _ = get_income_census_dataloaders(ds_file_name=target_test_file_name,
28 |                                                           batch_size=1024,
29 |                                                           split_ratio=1.0)
30 | 
31 |     test_model(task_id, init_model, model_root_dir, target_test_loader)
32 |
--------------------------------------------------------------------------------
/publications/PrADA/experiments/ppd_loan/train_ppd_fg_target_finetune.py:
--------------------------------------------------------------------------------
1 | from experiments.ppd_loan.train_config import data_hyperparameters, fine_tune_hyperparameters
2 | from experiments.ppd_loan.train_ppd_fg_adapt_pretrain import create_fg_pdd_global_model
3 | from experiments.ppd_loan.train_ppd_utils import finetune_ppd
4 | import argparse
5 | 
6 | 
7 | def get_finetune_model_meta():
8 |     finetune_target_root_dir = data_hyperparameters['ppd_fg_ft_target_model_dir']
9 |     using_interaction = fine_tune_hyperparameters['using_interaction']
10 |     pos_class_weight = fine_tune_hyperparameters['pos_class_weight']
11 |     model = create_fg_pdd_global_model(pos_class_weight=pos_class_weight,
12 |                                        using_interaction=using_interaction)
13 |     return model,
finetune_target_root_dir 14 | 15 | 16 | if __name__ == "__main__": 17 | 18 | # parser = argparse.ArgumentParser("ppd_fg_target_fine_tune") 19 | # parser.add_argument('--pretrain_task_id', type=str) 20 | # args = parser.parse_args() 21 | # pretrain_task_id = args.pretrain_task_id 22 | pretrain_task_id = "20210731_ppd_fg_adapt_all4000pos004_intrFalse_pw1.0_lr0.0005_bs64_me600_ts1627666700" 23 | print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}") 24 | 25 | ppd_pretain_model_root_dir = data_hyperparameters['ppd_fg_pretrained_model_dir'] 26 | init_model, ppd_finetune_target_model_root_dir = get_finetune_model_meta() 27 | 28 | task_id = finetune_ppd(pretrain_task_id, 29 | ppd_pretain_model_root_dir, 30 | ppd_finetune_target_model_root_dir, 31 | fine_tune_hyperparameters, 32 | data_hyperparameters, 33 | model=init_model) 34 | print(f"[INFO] task id:{task_id}") 35 | -------------------------------------------------------------------------------- /DISCLAIMER: -------------------------------------------------------------------------------- 1 | DISCLAIMER 2 | 3 | The copyright owner and all the contributors (collectively, the "Licensors") of FATE/FederatedAI (the "Work") non-exclusively grant any individual or legal entity (the "Licensee") the license to use the Work on condition that the Licensee consents to and agrees to be bound by this Disclaimer. 4 | 5 | The Licensee acknowledges that the Licensors provide the Work on an "AS IS" basis and makes no warranties or conditions, express or implied, on the Work to the Licensee. The Licensors assume no obligations and responsibilities other than providing the Work. 6 | 7 | The Licensee agrees that the Licensee shall use the Work at its own risk. Any problems or consequences arising from the use of the Work by the Licensee shall not be attributed to the Licensors and shall be borne by the Licensee, including but not limited to system failure, security vulnerabilities, title defects, third-party claims, commercial loss, etc. 8 | 9 | The Licensee shall also acknowledge to abide by the rules on the use of the Work, not to infringe the rights and interests of the Licensors and the Work, and not to cause the Licensors to suffer any loss or assume any responsibility. Whether the Licensee is held accountable or not, the Licensors shall be exempted from any liability. 10 | 11 | An act of use of the Work by the Licensee shall be deemed as the consent to be bound by this Disclaimer. 
12 | 13 | 14 | 15 | 免责条款 16 | 17 | FATE/FederatedAI(简称为“作品”)的著作权人及每一位贡献者(统称为“授权人”)开放地授予任何个人或法人实体(简称为“被授权人”)使用许可,但被授权人接受许可还应当同意本免责条款,并受到本免责条款的约束。 18 | 19 | 被授权人确认,授权人是以“原样”(AS IS)基础向被授权人提供作品,且不对作品向被授权人作出任何明示或暗示的保证或条件。除提供作品之外,授权人不承担任何义务与责任。 20 | 21 | 被授权人同意,被授权人应当自担使用作品的风险。被授权人使用作品过程中发生任何的问题或产生的任何后果与授权人无关,均由被授权人自行承担,包括但不限于系统故障、安全漏洞、权利瑕疵、第三人主张、商业损失等。 22 | 23 | 被授权人还应当承诺遵守作品的使用规则,不得作出任何侵害授权人以及作品权益的行为,不得使授权人遭受任何损失或承担任何责任。不论被授权人是否被追责均应使得授权人免于任何责任。 24 | 25 | 被授权人使用作品的行为即表示其受到本免责条款的约束,对被授权人具有法律约束力。 26 | -------------------------------------------------------------------------------- /publications/ss_vfnas/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | # Log 9 | search/ 10 | search*/ 11 | eval*/ 12 | self_train_*/ 13 | # Data 14 | data/ 15 | # Distribution / packaging 16 | .Python 17 | env/ 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | .idea 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # pyenv 80 | .python-version 81 | 82 | # celery beat schedule file 83 | celerybeat-schedule 84 | 85 | # SageMath parsed files 86 | *.sage.py 87 | 88 | # dotenv 89 | .env 90 | 91 | # virtualenv 92 | .venv 93 | venv/ 94 | ENV/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | search-*/ 109 | eval-*/ 110 | runs/ 111 | 112 | # Swap 113 | [._]*.s[a-v][a-z] 114 | [._]*.sw[a-p] 115 | [._]s[a-v][a-z] 116 | [._]sw[a-p] 117 | 118 | # Session 119 | Session.vim 120 | 121 | # Temporary 122 | .netrwhist 123 | *~ 124 | # Auto-generated tag files 125 | tags 126 | -------------------------------------------------------------------------------- /publications/ss_vfnas/models/manual_k_party_chexpert.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torchvision import models 3 | import torch 4 | 5 | 6 | class Manual_A(nn.Module): 7 | 8 | def __init__(self, num_classes, layers, u_dim=64, k=2): 9 | self.num_classes = num_classes 10 | super(Manual_A, self).__init__() 11 | if layers == 18: 12 | self.net = models.resnet18(pretrained=False, num_classes=u_dim) 13 | elif layers == 50: 14 | self.net = models.resnet50(pretrained=False, num_classes=u_dim) 15 | elif layers == 101: 16 | self.net = models.resnet101(pretrained=False, num_classes=u_dim) 17 | elif layers == 19: 18 | self.net = models.mobilenet_v2(pretrained=False, 
num_classes=u_dim) 19 | else: 20 | raise ValueError("Wrong number of layers for model") 21 | for i in range(1, num_classes + 1): 22 | setattr(self, "fc_" + str(i), nn.Linear(u_dim * k, 1)) 23 | 24 | def forward(self, input, U_B): 25 | out = self.net(input) 26 | if U_B is not None: 27 | out = torch.cat([out] + [U for U in U_B], dim=1) 28 | logits = list() 29 | for i in range(1, self.num_classes + 1): 30 | classifier = getattr(self, "fc_" + str(i)) 31 | logit = classifier(out) 32 | logits.append(logit) 33 | return logits 34 | 35 | class Manual_B(nn.Module): 36 | 37 | def __init__(self, layers, u_dim=64): 38 | super(Manual_B, self).__init__() 39 | if layers == 18: 40 | self.net = models.resnet18(pretrained=False, num_classes=u_dim) 41 | elif layers == 50: 42 | self.net = models.resnet50(pretrained=False, num_classes=u_dim) 43 | elif layers == 101: 44 | self.net = models.resnet101(pretrained=False, num_classes=u_dim) 45 | elif layers == 19: 46 | self.net = models.mobilenet_v2(pretrained=False, num_classes=u_dim) 47 | else: 48 | raise ValueError("Wrong number of layers for model") 49 | 50 | def forward(self, input): 51 | out = self.net(input) 52 | return out 53 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/experiments/log_formatter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | 4 | if __name__ == '__main__': 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--log", type=str, required=True, help="path to log file") 7 | parser.add_argument("--output_dir", type=str, default="formatted_logs", help="path to output file") 8 | opt = parser.parse_args() 9 | log_file_name = os.path.basename(opt.log) 10 | if not os.path.exists(opt.log): 11 | raise FileNotFoundError("wrong log file path") 12 | if not os.path.exists(opt.output_dir): 13 | os.mkdir(opt.output_dir) 14 | output = open(os.path.join(opt.output_dir, log_file_name.replace(".log", ".csv")), 'w') 15 | # Metrics parsed from the raw log below; only round, train_loss, server_test_map 16 | # and server_test_recall are written to the output CSV. 17 | round_, train_loss, aggr_test_loss, aggr_test_map, aggr_test_recall, server_test_loss, server_test_map, server_test_recall = [ 18 | list() for _ in range(8)] 19 | log_file = open(opt.log).readlines() 20 | for line in log_file: 21 | line = line.strip() 22 | if "Round" in line: 23 | round_.append(int(line.split(" ")[-2])) 24 | elif "aggr_train_loss" in line: 25 | train_loss.append(round(float(line.split(" ")[-1]), 4)) 26 | elif "aggr_test_loss" in line: 27 | aggr_test_loss.append(round(float(line.split(" ")[-1]), 4)) 28 | elif "aggr_test_map" in line: 29 | aggr_test_map.append(round(float(line.split(" ")[-1]), 4)) 30 | elif "aggr_test_recall" in line: 31 | aggr_test_recall.append(round(float(line.split(" ")[-1]), 4)) 32 | elif "server_test_loss" in line: 33 | server_test_loss.append(round(float(line.split(" ")[-1]), 4)) 34 | elif "server_test_map" in line: 35 | server_test_map.append(round(float(line.split(" ")[-1]), 4)) 36 | elif "server_test_recall" in line: 37 | server_test_recall.append(round(float(line.split(" ")[-1]), 4)) 38 | output.write("round,train_loss,test_map,test_recall\n") 39 | for r, loss, mAP, recall in zip(round_, train_loss, server_test_map, server_test_recall): 40 | output.write("{},{},{},{}\n".format(r, loss, mAP, recall)) 41 | --------------------------------------------------------------------------------
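A minimal sketch (not part of the repository) of consuming the CSV that `log_formatter.py` above emits; the input path `formatted_logs/street_5_yolo.csv` is a hypothetical example name, not a file shipped with the repo.

```python
import pandas as pd

# log_formatter.py writes one row per round: round, train_loss, test_map, test_recall.
df = pd.read_csv("formatted_logs/street_5_yolo.csv")

# Inspect the five rounds with the best server-side mAP.
best = df.sort_values("test_map", ascending=False).head(5)
print(best[["round", "train_loss", "test_map", "test_recall"]])
```
--------------------------------------------------------------------------------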
/publications/PrADA/experiments/ppd_loan/train_config.py: -------------------------------------------------------------------------------- 1 | from data_process.ppd_process.ppd_data_creation_config import ppd_data_creation 2 | 3 | feature_extractor_architecture_list = [ 4 | [15, 20, 15, 6], 5 | [85, 100, 60, 8], 6 | [30, 50, 30, 6], 7 | [18, 30, 18, 6], 8 | [55, 70, 30, 8]] 9 | 10 | no_fg_feature_extractor_architecture = [203, 210, 70, 20] 11 | 12 | pre_train_hyperparameters = { 13 | "using_interaction": False, 14 | "momentum": 0.99, 15 | "weight_decay": 0.00001, 16 | "lr": 5e-4, 17 | "batch_size": 64, 18 | "max_epochs": 600, 19 | "epoch_patience": 3, 20 | "pos_class_weight": 3.0, 21 | "valid_metric": ('ks', 'auc') 22 | } 23 | 24 | fine_tune_hyperparameters = { 25 | "using_interaction": False, 26 | "load_global_classifier": False, 27 | "momentum": 0.99, 28 | "weight_decay": 0.0, 29 | "lr": 6e-4, 30 | "batch_size": 64, 31 | "pos_class_weight": 1.0, 32 | "valid_metric": ('ks', 'auc') 33 | } 34 | 35 | no_adaptation_hyperparameters = { 36 | "apply_feature_group": False, 37 | "train_data_tag": 'all', # can be either 'all' or 'tgt' 38 | "momentum": 0.99, 39 | "weight_decay": 0.00001, 40 | "lr": 5e-4, 41 | "batch_size": 64, 42 | "max_epochs": 600, 43 | "epoch_patience": 3, 44 | "valid_metric": ('ks', 'auc'), 45 | "pos_class_weight": 3.0 46 | } 47 | 48 | data_dir = ppd_data_creation['processed_data_dir'] 49 | data_tag = 'all4000pos004' 50 | 51 | data_hyperparameters = { 52 | "source_ad_train_file_name": data_dir + f"PPD_2014_src_1to9_ad_{data_tag}_train.csv", 53 | "source_ad_valid_file_name": data_dir + f'PPD_2014_src_1to9_ad_{data_tag}_valid.csv', 54 | "src_tgt_train_file_name": data_dir + f"PPD_2014_src_tgt_{data_tag}_train.csv", 55 | 56 | "target_ad_train_file_name": data_dir + f'PPD_2014_tgt_10to12_ad_{data_tag}_train.csv', 57 | "target_ft_train_file_name": data_dir + f'PPD_2014_tgt_10to12_ft_{data_tag}_train.csv', 58 | "target_ft_valid_file_name": data_dir + f'PPD_2014_tgt_10to12_ft_{data_tag}_valid.csv', 59 | "target_ft_test_file_name": data_dir + f'PPD_2014_tgt_10to12_ft_{data_tag}_test.csv', 60 | 61 | "ppd_fg_pretrained_model_dir": "ppd_fg_pretrained_model", 62 | "ppd_fg_ft_target_model_dir": "ppd_fg_ft_target_model", 63 | "ppd_no-fg_pretrained_model_dir": "ppd_no-fg_pretrained_model", 64 | "ppd_no-fg_ft_target_model_dir": "ppd_no-fg_ft_target_model", 65 | "ppd_no-ad_model_dir": "ppd_no-ad_model" 66 | } 67 | -------------------------------------------------------------------------------- /publications/PrADA/datasets/census_dataloader.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from torch.utils.data import DataLoader 4 | 5 | from data_process.census_process.mapping_resource import continuous_cols, categorical_cols, target_col_name 6 | from datasets.census_dataset import SimpleDataset 7 | 8 | 9 | def shuffle_data(data): 10 | len = data.shape[0] 11 | perm_idxs = np.random.permutation(len) 12 | return data[perm_idxs] 13 | 14 | 15 | def get_datasets(ds_file_name, shuffle=False, split_ratio=0.9): 16 | dataframe = pd.read_csv(ds_file_name, skipinitialspace=True) 17 | 18 | COLUMNS_TO_LOAD = continuous_cols + categorical_cols + [target_col_name] 19 | print("[INFO] COLUMNS_TO_LOAD:", COLUMNS_TO_LOAD) 20 | samples = dataframe[COLUMNS_TO_LOAD].values 21 | # print(samples) 22 | if shuffle: 23 | samples = shuffle_data(samples) 24 | 25 | if split_ratio == 1.0: 26 | print(f"samples shape: {samples.shape}, 
{samples.dtype}") 27 | train_dataset = SimpleDataset(samples[:, :-1], samples[:, -1]) 28 | return train_dataset, None 29 | else: 30 | num_train = int(split_ratio * samples.shape[0]) 31 | train_samples = samples[:num_train].astype(np.float) 32 | val_samples = samples[num_train:].astype(np.float) 33 | print(f"train samples shape: {train_samples.shape}, {train_samples.dtype}") 34 | print(f"valid samples shape: {val_samples.shape}, {train_samples.dtype}") 35 | train_dataset = SimpleDataset(train_samples[:, :-1], train_samples[:, -1]) 36 | val_dataset = SimpleDataset(val_samples[:, :-1], val_samples[:, -1]) 37 | return train_dataset, val_dataset 38 | 39 | 40 | def get_dataloaders(train_dataset: SimpleDataset, valid_dataset: SimpleDataset, batch_size=32, num_workers=1): 41 | mnist_train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) 42 | mnist_valid_loader = None 43 | if valid_dataset is not None: 44 | mnist_valid_loader = DataLoader(valid_dataset, batch_size=batch_size * 2, shuffle=True, num_workers=num_workers) 45 | return mnist_train_loader, mnist_valid_loader 46 | 47 | 48 | def get_income_census_dataloaders(ds_file_name, split_ratio=0.9, batch_size=64, num_workers=2): 49 | train_dataset, valid_dataset = get_datasets(ds_file_name=ds_file_name, shuffle=True, split_ratio=split_ratio) 50 | return get_dataloaders(train_dataset, valid_dataset, batch_size=batch_size, num_workers=num_workers) 51 | -------------------------------------------------------------------------------- /publications/PrADA/models/classifier.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class TransformMatrix(nn.Module): 6 | def __init__(self, input_dim, output_dim): 7 | super(TransformMatrix, self).__init__() 8 | self.fc = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False) 9 | # self.fc.apply(init_weights) 10 | 11 | def transform(self, x): 12 | return self.fc(x) 13 | 14 | def transpose_transform(self, x): 15 | return F.linear(x, self.fc.weight.t()) 16 | 17 | 18 | class Classifier(nn.Module): 19 | def __init__(self, input_dim): 20 | super(Classifier, self).__init__() 21 | self.classifier = nn.Sequential( 22 | nn.Linear(in_features=input_dim, out_features=100), 23 | nn.BatchNorm1d(100), 24 | nn.ReLU(), 25 | nn.Linear(in_features=100, out_features=100), 26 | nn.BatchNorm1d(100), 27 | nn.ReLU(), 28 | nn.Linear(in_features=100, out_features=10) 29 | ) 30 | 31 | def forward(self, x): 32 | x = self.classifier(x) 33 | return F.softmax(x, dim=1) 34 | 35 | 36 | class RegionClassifier(nn.Module): 37 | def __init__(self, input_dim): 38 | super(RegionClassifier, self).__init__() 39 | self.classifier = nn.Sequential( 40 | nn.Linear(in_features=input_dim, out_features=3), 41 | ) 42 | 43 | def forward(self, x): 44 | return self.classifier(x) 45 | 46 | 47 | activation_fn = nn.LeakyReLU() 48 | 49 | 50 | class CensusFeatureAggregator(nn.Module): 51 | def __init__(self, input_dim, output_dim=1): 52 | super(CensusFeatureAggregator, self).__init__() 53 | self.aggregator = nn.Sequential( 54 | nn.Linear(in_features=input_dim, out_features=output_dim), 55 | ) 56 | # self.aggregator.apply(init_weights) 57 | 58 | def forward(self, x): 59 | return self.aggregator(x) 60 | 61 | 62 | class IdentityRegionAggregator(nn.Module): 63 | def __init__(self): 64 | super(IdentityRegionAggregator, self).__init__() 65 | 66 | def forward(self, x): 67 | return x 68 | 69 | 70 | class 
GlobalClassifier(nn.Module): 71 | def __init__(self, input_dim): 72 | super(GlobalClassifier, self).__init__() 73 | self.classifier = nn.Sequential( 74 | nn.Linear(in_features=input_dim, out_features=1), 75 | ) 76 | 77 | def forward(self, x): 78 | return self.classifier(x) 79 | -------------------------------------------------------------------------------- /publications/ss_vfnas/models/manual_k_party.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torchvision import models 3 | import torch 4 | 5 | 6 | class Manual_A(nn.Module): 7 | 8 | def __init__(self, num_classes, layers, u_dim=64, k=2): 9 | super(Manual_A, self).__init__() 10 | if layers == 18: 11 | self.net = models.resnet18(pretrained=False, num_classes=u_dim) 12 | elif layers == 34: 13 | self.net = models.resnet34(pretrained=False, num_classes=u_dim) 14 | elif layers == 50: 15 | self.net = models.resnet50(pretrained=False, num_classes=u_dim) 16 | elif layers == 101: 17 | self.net = models.resnet101(pretrained=False, num_classes=u_dim) 18 | elif layers == 19: 19 | self.net = models.mobilenet_v2(pretrained=False, num_classes=u_dim) 20 | elif layers == 51: 21 | self.net = models.shufflenet_v2_x1_0(pretrained=False, num_classes=u_dim) 22 | elif layers == 52: 23 | self.net = models.squeezenet1_0(pretrained=False, num_classes=u_dim) 24 | else: 25 | raise ValueError("Wrong number of layers for model") 26 | self.classifier = nn.Linear(u_dim * k, num_classes) 27 | 28 | def forward(self, input, U_B): 29 | out = self.net(input) 30 | if U_B is not None: 31 | out = torch.cat([out] + [U for U in U_B], dim=1) 32 | logits = self.classifier(out) 33 | return logits 34 | 35 | 36 | class Manual_B(nn.Module): 37 | 38 | def __init__(self, layers, u_dim=64): 39 | super(Manual_B, self).__init__() 40 | if layers == 18: 41 | self.net = models.resnet18(pretrained=False, num_classes=u_dim) 42 | elif layers == 34: 43 | self.net = models.resnet34(pretrained=False, num_classes=u_dim) 44 | elif layers == 50: 45 | self.net = models.resnet50(pretrained=False, num_classes=u_dim) 46 | elif layers == 101: 47 | self.net = models.resnet101(pretrained=False, num_classes=u_dim) 48 | elif layers == 19: 49 | self.net = models.mobilenet_v2(pretrained=False, num_classes=u_dim) 50 | elif layers == 51: 51 | self.net = models.shufflenet_v2_x1_0(pretrained=False, num_classes=u_dim) 52 | elif layers == 52: 53 | self.net = models.squeezenet1_0(pretrained=False, num_classes=u_dim) 54 | else: 55 | raise ValueError("Wrong number of layers for model") 56 | 57 | def forward(self, input): 58 | out = self.net(input) 59 | return out 60 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/data/data_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import xml.etree.ElementTree as ET 4 | 5 | 6 | def show_dir(): 7 | dir_list = [a for a in os.listdir("./") if os.path.isdir(a) and a != 'test'] 8 | total = 0 9 | categories = {} 10 | for dire in dir_list: 11 | jpeg_path = osp.join(dire, 'Annotations') 12 | xml_list = os.listdir(jpeg_path) 13 | total += len(xml_list) 14 | categories[dire] = len(xml_list) 15 | list1 = sorted(categories.items(), key=lambda x: x[1], reverse=True) 16 | for i, (directory, num) in enumerate(list1): 17 | print(directory, num - 2680) 18 | print(total) 19 | 20 | 21 | def merge_test(): 22 | dir_list = [a for a in os.listdir("./") if 
os.path.isdir(a) and a != 'test'] 23 | for dir_name in dir_list: 24 | Anno_path = osp.join(dir_name, "Annotations") 25 | Jpeg_path = osp.join(dir_name, "JPEGImages") 26 | Imag_path = osp.join(dir_name, "ImageSets", "Main") 27 | if not osp.exists(Imag_path): 28 | os.makedirs(Imag_path) 29 | test_anno_path = osp.join("test", "Annotations") 30 | test_jpeg_path = osp.join("test", "JPEGImages") 31 | test_txt_path = osp.join("test", "ImageSets", "Main", "test.txt") 32 | train_txt = open(osp.join(Imag_path, "train.txt"), 'w') 33 | valid_txt = open(osp.join(Imag_path, "valid.txt"), 'w') 34 | test_txt = open(osp.join(Imag_path, "test.txt"), 'w') 35 | anno_list = os.listdir(Anno_path) 36 | for anno_name in anno_list: 37 | anno_name = anno_name.replace(".xml", "\n") 38 | train_txt.write(anno_name) 39 | os.system("cp {}/* {}".format(test_anno_path, Anno_path)) 40 | os.system("cp {}/* {}".format(test_jpeg_path, Jpeg_path)) 41 | os.system("cp {} {}".format(test_txt_path, Imag_path)) 42 | os.system("cp {} {}".format(test_txt_path, osp.join(Imag_path, 'valid.txt'))) 43 | 44 | 45 | def make_txt(): 46 | dir_list = [a for a in os.listdir("./") if os.path.isdir(a) and a != 'test'] 47 | for dir_name in dir_list: 48 | Anno_path = osp.join(dir_name, "Annotations") 49 | Imag_path = osp.join(dir_name, "ImageSets", "Main") 50 | ftest = open(osp.join(Imag_path, "test.txt"), 'r').readlines() 51 | ftrain = open(osp.join(Imag_path, "train.txt"), 'w') 52 | annos = os.listdir(Anno_path) 53 | for anno in annos: 54 | anno = anno.replace(".xml", "\n") 55 | if anno not in ftest: 56 | ftrain.write(anno) 57 | -------------------------------------------------------------------------------- /publications/PrADA/models/test_interaction_feature.py: -------------------------------------------------------------------------------- 1 | from models.classifier import TransformMatrix 2 | from models.interaction_models import AttentiveFeatureInteractionComputer, compute_interactive_key 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def initialize_transform_matrix(hidden_dim_a, hidden_dim_b): 8 | return TransformMatrix(hidden_dim_a, hidden_dim_b) 9 | 10 | 11 | def initialize_transform_matrix_dict(hidden_dim_list): 12 | trans_matrix_dict = dict() 13 | for idx_a, hidden_dim_a in enumerate(hidden_dim_list): 14 | for idx_b, hidden_dim_b in enumerate(hidden_dim_list): 15 | transform_matrix = initialize_transform_matrix(hidden_dim_a, hidden_dim_b) 16 | key_1 = compute_interactive_key(idx_a, idx_b) 17 | key_2 = compute_interactive_key(idx_b, idx_a) 18 | if trans_matrix_dict.get(key_1) is None and trans_matrix_dict.get(key_2) is None: 19 | trans_matrix_dict[key_1] = transform_matrix 20 | return trans_matrix_dict 21 | 22 | 23 | class TestInteractiveFeatureComputer(AttentiveFeatureInteractionComputer): 24 | 25 | def __init__(self, transform_matrix_dict): 26 | super(TestInteractiveFeatureComputer, self).__init__(transform_matrix_dict) 27 | 28 | def compute_score(self, feat_a, feat_b, idx_a, idx_b): 29 | return 1.0 30 | 31 | 32 | if __name__ == "__main__": 33 | # list_1 = [] 34 | # list_2 = [2, 3, 4] 35 | # list_3 = [5, 6, 7] 36 | # list = list_1 + list_2 + list_3 37 | # print(list) 38 | 39 | feat_1 = np.array([[0.1, 0.2, 0.3], 40 | [0.4, 0.5, 0.6]]) 41 | feat_2 = np.array([[0.4, 0.5, 0.6, 0.7, 0.8], 42 | [0.14, 0.15, 0.16, 0.17, 0.18]]) 43 | feat_3 = np.array([[0.71, 0.8, 0.9, 0.10], 44 | [0.24, 0.25, 0.26, 0.27]]) 45 | feat_list = [feat_1, feat_2, feat_3] 46 | 47 | print("feat_list:", feat_list) 48 | 49 | hidden_dim_list = [f.shape[-1] for f in 
feat_list] 50 | print("hidden_dim_list:", hidden_dim_list) 51 | 52 | transform_matrix_dict = initialize_transform_matrix_dict(hidden_dim_list) 53 | print("transform_matrix_dict: \n", transform_matrix_dict) 54 | 55 | computer = AttentiveFeatureInteractionComputer(transform_matrix_dict) 56 | 57 | feat_tensor_list = [torch.tensor(f, dtype=torch.float) for f in feat_list] 58 | 59 | score_map = computer.build(feat_tensor_list) 60 | print("score_map: \n", score_map) 61 | 62 | int_feat_list = computer.fit(feat_tensor_list) 63 | print("int_feat_list: \n") 64 | for int_feat in int_feat_list: 65 | print(int_feat) 66 | 67 | print() 68 | print("parameter: \n", computer.parameters()) 69 | -------------------------------------------------------------------------------- /publications/FedCG/run.sh: -------------------------------------------------------------------------------- 1 | OMP_NUM_THREADS=1 python main.py --algorithm="fedgen" --dataset="office" --model="lenet5" --seed=1 --gpu=1 & 2 | OMP_NUM_THREADS=1 python main.py --algorithm="fedgen" --dataset="office" --model="lenet5" --seed=2 --gpu=2 & 3 | OMP_NUM_THREADS=1 python main.py --algorithm="fedgen" --dataset="office" --model="lenet5" --seed=3 --gpu=3 & 4 | OMP_NUM_THREADS=1 python main.py --algorithm="fedgen" --dataset="office" --model="lenet5" --seed=4 --gpu=4 & 5 | OMP_NUM_THREADS=1 python main.py --algorithm="fedgen" --dataset="office" --model="lenet5" --seed=5 --gpu=5 6 | 7 | OMP_NUM_THREADS=1 python main.py --algorithm="fedavg" --dataset="office" --model="lenet5" --seed=1 --gpu=1 & 8 | OMP_NUM_THREADS=1 python main.py --algorithm="fedavg" --dataset="office" --model="lenet5" --seed=2 --gpu=2 & 9 | OMP_NUM_THREADS=1 python main.py --algorithm="fedavg" --dataset="office" --model="lenet5" --seed=3 --gpu=3 & 10 | OMP_NUM_THREADS=1 python main.py --algorithm="fedavg" --dataset="office" --model="lenet5" --seed=4 --gpu=4 & 11 | OMP_NUM_THREADS=1 python main.py --algorithm="fedavg" --dataset="office" --model="lenet5" --seed=5 --gpu=5 12 | 13 | OMP_NUM_THREADS=1 python main.py --algorithm="fedsplit" --dataset="office" --model="lenet5" --seed=1 --gpu=1 & 14 | OMP_NUM_THREADS=1 python main.py --algorithm="fedsplit" --dataset="office" --model="lenet5" --seed=2 --gpu=2 & 15 | OMP_NUM_THREADS=1 python main.py --algorithm="fedsplit" --dataset="office" --model="lenet5" --seed=3 --gpu=3 & 16 | OMP_NUM_THREADS=1 python main.py --algorithm="fedsplit" --dataset="office" --model="lenet5" --seed=4 --gpu=4 & 17 | OMP_NUM_THREADS=1 python main.py --algorithm="fedsplit" --dataset="office" --model="lenet5" --seed=5 --gpu=5 18 | 19 | OMP_NUM_THREADS=1 python main.py --algorithm="fedprox" --dataset="office" --model="lenet5" --seed=1 --gpu=1 & 20 | OMP_NUM_THREADS=1 python main.py --algorithm="fedprox" --dataset="office" --model="lenet5" --seed=2 --gpu=2 & 21 | OMP_NUM_THREADS=1 python main.py --algorithm="fedprox" --dataset="office" --model="lenet5" --seed=3 --gpu=3 & 22 | OMP_NUM_THREADS=1 python main.py --algorithm="fedprox" --dataset="office" --model="lenet5" --seed=4 --gpu=4 & 23 | OMP_NUM_THREADS=1 python main.py --algorithm="fedprox" --dataset="office" --model="lenet5" --seed=5 --gpu=5 24 | 25 | OMP_NUM_THREADS=1 python main.py --algorithm="local" --dataset="office" --model="lenet5" --seed=1 --gpu=1 & 26 | OMP_NUM_THREADS=1 python main.py --algorithm="local" --dataset="office" --model="lenet5" --seed=2 --gpu=2 & 27 | OMP_NUM_THREADS=1 python main.py --algorithm="local" --dataset="office" --model="lenet5" --seed=3 --gpu=3 & 28 | OMP_NUM_THREADS=1 python main.py 
--algorithm="local" --dataset="office" --model="lenet5" --seed=4 --gpu=4 & 29 | OMP_NUM_THREADS=1 python main.py --algorithm="local" --dataset="office" --model="lenet5" --seed=5 --gpu=5 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/README.md: -------------------------------------------------------------------------------- 1 | # Federated-Benchmark: A Benchmark of Real-world Images Dataset for Federated Learning 2 | 3 | ## Overview 4 | We present a real-world image dataset, reflecting the characteristic real-world federated learning scenarios, and provide an extensive benchmark on model performance, efficiency, and communication in a federated learning setting. 5 | 6 | ## Resources 7 | * Dataset: [dataset.fedai.org](https://dataset.fedai.org) 8 | * Paper: ["Real-World Image Datasets for Federated Learning"](https://arxiv.org/abs/1910.11089) 9 | 10 | ### Street_Dataset 11 | * Overview: Image Dataset 12 | * Details: 7 different classes, 956 images with pixels of 704 by 576, 5 or 20 devices 13 | * Task: Object detection for federated learning 14 | * [Dataset_description.md](https://github.com/FederatedAI/FATE/blob/master/research/federated_object_detection_benchmark/README.md) 15 | 16 | ## Getting Started 17 | We implemented two mainstream object detection algorithms (YOLOv3 and Faster R-CNN). Code for YOLOv3 is borrowed from [PyTorch-YOLOv3](https://github.com/eriklindernoren/PyTorch-YOLOv3.git) and Faster R-CNN from [simple-faster-rcnn-pytorch](https://github.com/chenyuntc/simple-faster-rcnn-pytorch.git). 18 | ### Install dependencies 19 | * requires PyTorch with GPU (code are GPU only) 20 | * install cupy, you can install via `pip install cupy-cuda80` or (cupy-cuda90, cupy-cuda91, etc) 21 | * install other dependencies, `pip install -r requirements.txt` 22 | * Optional but strongly recommended: build cython code `nms_gpu_post`: 23 | ```bash 24 | cd model/utils/nms/ 25 | python build.py build_ext --inplace 26 | cd - 27 | ``` 28 | ### Prepare data 29 | 1. Download the dataset, refer to [dataset.fedai](https://dataset.fedai.org/) 30 | 2. It should have the basic structure for faster r-cnn 31 | ```bash 32 | Federated-Benchmark/data/street_5/$DEVICE_ID$/ImageSets 33 | Federated-Benchmark/data/street_5/$DEVICE_ID$/JPEGImages 34 | Federated-Benchmark/data/street_5/$DEVICE_ID$/Annotations 35 | ``` 36 | 4. Generate config file for federated learning 37 | ```bash 38 | cd data 39 | python3 generate_task_json.py 40 | ``` 41 | ### Train 42 | 1. Start server 43 | ```bash 44 | sh ./run_server.sh street_5 yolo 1234 45 | ``` 46 | 2. Start clients 47 | ```bash 48 | sh ./run.sh street_5 5 yolo 1234 49 | ``` 50 | 3. 
Stop training 51 | ```bash 52 | sh ./stop.sh street_5 yolo 53 | ``` 54 | ### Citation 55 | * If you use this code or dataset for your research, please kindly cite our paper: 56 | ```bash 57 | @article{luo2019real, 58 | title={Real-World Image Datasets for Federated Learning}, 59 | author={Luo, Jiahuan and Wu, Xueyang and Luo, Yun and Huang, Anbu and Huang, Yunfeng and Liu, Yang and Yang, Qiang}, 60 | journal={arXiv preprint arXiv:1910.11089}, 61 | year={2019} 62 | } 63 | ``` -------------------------------------------------------------------------------- /publications/PrADA/data_process/ppd_process/ppd_prepare_data_train_test.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sklearn.utils import shuffle 3 | 4 | from data_process.ppd_process.ppd_data_creation_config import ppd_data_creation 5 | 6 | 7 | def create_train_and_test(df_data, df_datetime, num_train, to_dir): 8 | 9 | year = 2014 10 | df_data_2014 = df_data[df_datetime['ListingInfo_Year'] == year] 11 | df_datetime_2014 = df_datetime[df_datetime['ListingInfo_Year'] == year] 12 | 13 | df_data_2014, df_datetime_2014 = shuffle(df_data_2014, df_datetime_2014) 14 | 15 | df_data_train = df_data_2014[:num_train] 16 | df_datetime_train = df_datetime_2014[:num_train] 17 | 18 | df_data_test = df_data_2014[num_train:] 19 | df_datetime_test = df_datetime_2014[num_train:] 20 | 21 | print(f"[INFO] df_data_train with shape: {df_data_train.shape}") 22 | print(f"[INFO] df_data_test with shape: {df_data_test.shape}") 23 | print(f"[INFO] df_datetime_train with shape: {df_datetime_train.shape}") 24 | print(f"[INFO] df_datetime_test with shape: {df_datetime_test.shape}") 25 | 26 | tag = str(year) 27 | df_data_train.to_csv("{}/PPD_data_{}_{}_train.csv".format(to_dir, tag, str(num_train)), index=False) 28 | df_data_test.to_csv("{}/PPD_data_{}_{}_test.csv".format(to_dir, tag, str(num_train)), index=False) 29 | df_datetime_train.to_csv("{}/PPD_datetime_{}_{}_train.csv".format(to_dir, tag, str(num_train)), index=False) 30 | df_datetime_test.to_csv("{}/PPD_datetime_{}_{}_test.csv".format(to_dir, tag, str(num_train)), index=False) 31 | 32 | 33 | def prepare_ppd_data(): 34 | print(f"========================= prepare ppd data ============================ ") 35 | 36 | original_data_dir = ppd_data_creation['original_data_dir'] 37 | to_dir = ppd_data_creation['processed_data_dir'] 38 | 39 | data_all = original_data_dir + ppd_data_creation['original_ppd_data_file_name'] 40 | data_datetime = original_data_dir + ppd_data_creation['original_ppd_datetime_file_name'] 41 | 42 | df_data_all = pd.read_csv(data_all, skipinitialspace=True) 43 | df_data_datetime = pd.read_csv(data_datetime, skipinitialspace=True) 44 | 45 | print(f"[INFO] df_data_all: {df_data_all.shape}") 46 | print(f"[INFO] df_data_datetime: {df_data_datetime.shape}") 47 | # print(f"[INFO] 2015:{df_data_all[df_data_datetime['ListingInfo_Year'] == 2015].shape}") 48 | print(f"[INFO] 2014:{df_data_all[df_data_datetime['ListingInfo_Year'] == 2014].shape}") 49 | # print(f"[INFO] 2013:{df_data_all[df_data_datetime['ListingInfo_Year'] == 2013].shape}") 50 | # print(f"[INFO] 2012:{df_data_all[df_data_datetime['ListingInfo_Year'] == 2012].shape}") 51 | 52 | num_train = ppd_data_creation['number_train_samples'] 53 | create_train_and_test(df_data_all, df_data_datetime, num_train, to_dir) 54 | 55 | 56 | if __name__ == "__main__": 57 | prepare_ppd_data() 58 | -------------------------------------------------------------------------------- 
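A minimal sketch (not part of the repository) of driving `create_train_and_test` from `ppd_prepare_data_train_test.py` above on synthetic frames; the column `feat_a` and the 80/20 split are illustrative assumptions, while `ListingInfo_Year` is the column the function actually filters on.

```python
import numpy as np
import pandas as pd

from data_process.ppd_process.ppd_prepare_data_train_test import create_train_and_test

n = 100
df_data = pd.DataFrame({"feat_a": np.random.rand(n),
                        "target": np.random.randint(0, 2, n)})
df_datetime = pd.DataFrame({"ListingInfo_Year": np.full(n, 2014)})

# Keeps only year-2014 rows, shuffles the data/datetime frames in lockstep,
# then writes four CSVs (data/datetime x train/test) into to_dir.
create_train_and_test(df_data, df_datetime, num_train=80, to_dir=".")
```
--------------------------------------------------------------------------------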
/publications/PrADA/experiments/income_census/train_config.py: -------------------------------------------------------------------------------- 1 | from data_process.census_process.census_data_creation_config import census_data_creation 2 | 3 | fg_feature_extractor_architecture_list = [[28, 56, 28, 14], 4 | [25, 50, 25, 12], 5 | [56, 86, 56, 18], 6 | [27, 54, 27, 13]] 7 | 8 | intr_fg_feature_extractor_for_architecture_list = [[53, 78, 53, 15], 9 | [84, 120, 84, 20], 10 | [55, 81, 55, 15], 11 | [81, 120, 81, 20], 12 | [52, 78, 52, 15], 13 | [83, 120, 83, 20]] 14 | 15 | no_fg_feature_extractor_architecture = [136, 150, 60, 20] 16 | 17 | pre_train_hyperparameters = { 18 | "using_interaction": False, 19 | "momentum": 0.99, 20 | "weight_decay": 0.00001, 21 | "lr": 5e-4, 22 | "batch_size": 128, 23 | "max_epochs": 600, 24 | "epoch_patience": 2, 25 | "valid_metric": ('ks', 'auc') 26 | } 27 | 28 | fine_tune_hyperparameters = { 29 | "using_interaction": False, 30 | "load_global_classifier": False, 31 | "momentum": 0.99, 32 | "weight_decay": 0.0, 33 | "lr": 8e-4, 34 | "batch_size": 128, 35 | "valid_metric": ('ks', 'auc') 36 | } 37 | 38 | no_adaptation_hyperparameters = { 39 | "apply_feature_group": False, 40 | "train_data_tag": 'all', # can be either 'all' or 'tgt' 41 | "momentum": 0.99, 42 | "weight_decay": 0.00001, 43 | "lr": 5e-4, 44 | "batch_size": 128, 45 | "max_epochs": 600, 46 | "epoch_patience": 2, 47 | "valid_metric": ('ks', 'auc') 48 | } 49 | 50 | data_dir = census_data_creation['processed_data_dir'] 51 | data_tag = 'all4000pos004' 52 | 53 | data_hyperparameters = { 54 | "source_ad_train_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_train.csv', 55 | "source_ad_valid_file_name": data_dir + f'undergrad_census9495_ad_{data_tag}_valid.csv', 56 | "src_tgt_train_file_name": data_dir + f'degree_src_tgt_census9495_{data_tag}_train.csv', 57 | 58 | "target_ad_train_file_name": data_dir + f'grad_census9495_ad_{data_tag}_train.csv', 59 | "target_ft_train_file_name": data_dir + f'grad_census9495_ft_{data_tag}_train.csv', 60 | "target_ft_valid_file_name": data_dir + f'grad_census9495_ft_{data_tag}_valid.csv', 61 | "target_ft_test_file_name": data_dir + f'grad_census9495_ft_{data_tag}_test.csv', 62 | 63 | "census_fg_pretrained_model_dir": "census_fg_pretrained_model", 64 | "census_fg_ft_target_model_dir": "census_fg_ft_target_model", 65 | "census_no-fg_pretrained_model_dir": "census_no-fg_pretrained_model", 66 | "census_no-fg_ft_target_model_dir": "census_no-fg_ft_target_model", 67 | "census_no-ad_model_dir": "census_no-ad_model" 68 | } 69 | -------------------------------------------------------------------------------- /publications/ss_vfnas/architects/architect_two_party.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn as nn 4 | from torch.autograd import Variable 5 | 6 | 7 | def _concat(xs): 8 | return torch.cat([x.view(-1) for x in xs]) 9 | 10 | 11 | class Architect_A(object): 12 | def __init__(self, model, args): 13 | self.network_momentum = args.momentum 14 | self.network_weight_decay = args.weight_decay 15 | self.model = model 16 | self.optimizer = torch.optim.Adam(self.model.arch_parameters(), 17 | lr=args.arch_learning_rate, betas=(0.5, 0.999), 18 | weight_decay=args.arch_weight_decay) 19 | 20 | def update_alpha(self, input_valid, U_B_valid, target_valid): 21 | U_B_valid = torch.autograd.Variable(U_B_valid, requires_grad=True).cuda() 22 | self.optimizer.zero_grad() 23 | loss, _ = 
self.model._loss(input_valid, U_B_valid, target_valid) 24 | U_B_gradients = torch.autograd.grad(loss, U_B_valid, retain_graph=True) 25 | loss.backward() 26 | self.optimizer.step() 27 | return U_B_gradients 28 | 29 | def update_weights(self, input_train, U_B_train, target_train, weights_optim, grad_clip): 30 | U_B_train = torch.autograd.Variable(U_B_train, requires_grad=True).cuda() 31 | weights_optim.zero_grad() 32 | loss, logits = self.model._loss(input_train, U_B_train, target_train) 33 | U_B_gradients = torch.autograd.grad(loss, U_B_train, retain_graph=True) 34 | loss.backward() 35 | nn.utils.clip_grad_norm_(self.model.parameters(), grad_clip) 36 | weights_optim.step() 37 | return U_B_gradients, logits, loss 38 | 39 | 40 | class Architect_B(object): 41 | def __init__(self, model, args): 42 | self.network_momentum = args.momentum 43 | self.network_weight_decay = args.weight_decay 44 | self.model = model 45 | self.optimizer = torch.optim.Adam(self.model.arch_parameters(), 46 | lr=args.arch_learning_rate, betas=(0.5, 0.999), 47 | weight_decay=args.arch_weight_decay) 48 | 49 | def update_alpha(self, U_B_val, U_B_gradients): 50 | model_B_alpha_gradients = torch.autograd.grad(U_B_val, self.model.arch_parameters(), grad_outputs=U_B_gradients) 51 | self.optimizer.zero_grad() 52 | for w, g in zip(self.model.arch_parameters(), model_B_alpha_gradients): 53 | w.grad = g.detach() 54 | self.optimizer.step() 55 | 56 | def update_weights(self, U_B_train, U_B_gradients, weights_optim, grad_clip): 57 | model_B_weight_gradients = torch.autograd.grad(U_B_train, self.model.parameters(), grad_outputs=U_B_gradients) 58 | weights_optim.zero_grad() 59 | for w, g in zip(self.model.parameters(), model_B_weight_gradients): 60 | w.grad = g.detach() 61 | nn.utils.clip_grad_norm_(self.model.parameters(), grad_clip) 62 | weights_optim.step() -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/utils/config.py: -------------------------------------------------------------------------------- 1 | from pprint import pprint 2 | 3 | 4 | # Default Configs for training 5 | # NOTE: config items can be overwritten by passing arguments through the command line. 6 | # e.g. --voc-data-dir='./data/' 7 | 8 | class Config: 9 | model_name = "" 10 | # data 11 | voc_data_dir = 'data/VOC2007' 12 | min_size = 600 # image resize 13 | max_size = 1000 # image resize 14 | num_workers = 8 15 | test_num_workers = 8 16 | 17 | # sigma for l1_smooth_loss 18 | rpn_sigma = 3. 19 | roi_sigma = 1.
20 | 21 | # param for optimizer 22 | # 0.0005 in the original paper but 0.0001 in tf-faster-rcnn 23 | weight_decay = 0.0005 24 | lr_decay = 0.1 # 1e-4 -> 1e-5 25 | lr = 1e-4 26 | 27 | 28 | # visualization 29 | env = 'faster-rcnn' # visdom env 30 | port = 8097 31 | plot_every = 40 # vis every N iter 32 | log_filename = '/tmp/logfile' 33 | 34 | # preset 35 | data = 'voc' 36 | pretrained_model = 'vgg16' 37 | batch_size = 1 38 | 39 | # training 40 | epoch = 14 41 | 42 | 43 | use_adam = False # Use Adam optimizer 44 | use_chainer = False # try to match everything as Chainer 45 | use_drop = False # use dropout in RoIHead 46 | # debug 47 | debug_file = '/tmp/debugf' 48 | 49 | test_num = 10000 50 | # model 51 | load_path = None 52 | 53 | caffe_pretrain = True # use caffe pretrained model instead of torchvision 54 | caffe_pretrain_path = 'checkpoints/vgg16_caffe.pth' 55 | 56 | # dataset 57 | label_names = ['aeroplane', 58 | 'bicycle', 59 | 'bird', 60 | 'boat', 61 | 'bottle', 62 | 'bus', 63 | 'car', 64 | 'cat', 65 | 'chair', 66 | 'cow', 67 | 'diningtable', 68 | 'dog', 69 | 'horse', 70 | 'motorbike', 71 | 'person', 72 | 'pottedplant', 73 | 'sheep', 74 | 'sofa', 75 | 'train', 76 | 'tvmonitor'] 77 | def _parse(self, kwargs): 78 | state_dict = self._state_dict() 79 | for k, v in kwargs.items(): 80 | if k not in state_dict: 81 | raise ValueError('Unknown option: "--%s"' % k) 82 | if k == 'label_names': 83 | if isinstance(v, str): 84 | v = eval(v) 85 | setattr(self, k, v) 86 | 87 | print('======user config========') 88 | pprint(self._state_dict()) 89 | print('==========end============') 90 | 91 | def _state_dict(self): 92 | return {k: getattr(self, k) for k, _ in Config.__dict__.items() \ 93 | if not k.startswith('_')} 94 | 95 | opt = Config() 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | pip-wheel-metadata/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # pipenv 89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 92 | # install all needed dependencies. 93 | #Pipfile.lock 94 | 95 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 96 | __pypackages__/ 97 | 98 | # Celery stuff 99 | celerybeat-schedule 100 | celerybeat.pid 101 | 102 | # SageMath parsed files 103 | *.sage.py 104 | 105 | # Environments 106 | .env 107 | .venv 108 | env/ 109 | venv/ 110 | ENV/ 111 | env.bak/ 112 | venv.bak/ 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | 132 | LibriSpeech/ 133 | trained_models 134 | .idea 135 | .ipynb_checkpoints/ 136 | .DS_Store 137 | *.tar.gz 138 | *.pyc 139 | *.json 140 | *.csv 141 | *.pth 142 | chinese_data_features 143 | train_100_corpus 144 | data/ 145 | result/ 146 | federated_learning/__pycache__ 147 | *.zip 148 | credit_g/ 149 | experimental_results/ 150 | spambase/ 151 | multiple_region_dann/ 152 | census_dann/ 153 | census_target/ 154 | cell_manage_dann/ 155 | cell_manage_target/ 156 | lending_dann/ 157 | lending_target/ 158 | resilient/ 159 | data_process/cell_process/.ipynb_checkpoints/ 160 | singleton_dann/ 161 | datasets/census_original 162 | datasets/census_processed 163 | communication_efficient_experiment/ 164 | 165 | -------------------------------------------------------------------------------- /datasets/federated_object_detection_benchmark/config/yolov3-tiny.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | # batch=64 7 | # subdivisions=2 8 | width=416 9 | height=416 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | # 0 26 | [convolutional] 27 | batch_normalize=1 28 | filters=16 29 | size=3 30 | stride=1 31 | pad=1 32 | activation=leaky 33 | 34 | # 1 35 | [maxpool] 36 | size=2 37 | stride=2 38 | 39 | # 2 40 | [convolutional] 41 | batch_normalize=1 42 | filters=32 43 | size=3 44 | stride=1 45 | pad=1 46 | activation=leaky 47 | 48 | # 3 49 | [maxpool] 50 | size=2 51 | stride=2 52 | 53 | # 4 54 | [convolutional] 55 | batch_normalize=1 56 | filters=64 57 | size=3 58 | stride=1 59 | pad=1 60 | activation=leaky 61 | 62 | # 5 63 | [maxpool] 64 | size=2 65 | stride=2 66 | 67 | # 6 68 | [convolutional] 69 | batch_normalize=1 70 | filters=128 71 | size=3 72 | stride=1 73 | pad=1 74 | activation=leaky 75 | 76 | # 7 77 | [maxpool] 78 | size=2 79 | stride=2 80 | 81 | # 8 82 | [convolutional] 83 | batch_normalize=1 84 | filters=256 85 | size=3 86 | stride=1 87 | pad=1 88 | activation=leaky 89 | 90 | # 9 91 | [maxpool] 92 | size=2 93 | stride=2 94 | 95 | # 10 96 | [convolutional] 97 | batch_normalize=1 98 | filters=512 99 | size=3 100 | stride=1 101 | pad=1 102 | activation=leaky 103 | 104 | # 11 105 | [maxpool] 106 | size=2 107 | stride=1 108 | 109 | # 12 110 | [convolutional] 111 | batch_normalize=1 112 | filters=1024 113 | size=3 114 | stride=1 115 | pad=1 116 | activation=leaky 117 | 118 | ########### 119 | 120 | # 13 121 | 
[convolutional] 122 | batch_normalize=1 123 | filters=256 124 | size=1 125 | stride=1 126 | pad=1 127 | activation=leaky 128 | 129 | # 14 130 | [convolutional] 131 | batch_normalize=1 132 | filters=512 133 | size=3 134 | stride=1 135 | pad=1 136 | activation=leaky 137 | 138 | # 15 139 | [convolutional] 140 | size=1 141 | stride=1 142 | pad=1 143 | filters=255 144 | activation=linear 145 | 146 | 147 | 148 | # 16 149 | [yolo] 150 | mask = 3,4,5 151 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 152 | classes=80 153 | num=6 154 | jitter=.3 155 | ignore_thresh = .7 156 | truth_thresh = 1 157 | random=1 158 | 159 | # 17 160 | [route] 161 | layers = -4 162 | 163 | # 18 164 | [convolutional] 165 | batch_normalize=1 166 | filters=128 167 | size=1 168 | stride=1 169 | pad=1 170 | activation=leaky 171 | 172 | # 19 173 | [upsample] 174 | stride=2 175 | 176 | # 20 177 | [route] 178 | layers = -1, 8 179 | 180 | # 21 181 | [convolutional] 182 | batch_normalize=1 183 | filters=256 184 | size=3 185 | stride=1 186 | pad=1 187 | activation=leaky 188 | 189 | # 22 190 | [convolutional] 191 | size=1 192 | stride=1 193 | pad=1 194 | filters=255 195 | activation=linear 196 | 197 | # 23 198 | [yolo] 199 | mask = 1,2,3 200 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 201 | classes=80 202 | num=6 203 | jitter=.3 204 | ignore_thresh = .7 205 | truth_thresh = 1 206 | random=1 207 | -------------------------------------------------------------------------------- /publications/PrADA/experiments/income_census/produce_census_tsne_data.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from experiments.income_census.tsne_config import tsne_embedding_creation 4 | from datasets.census_dataloader import get_income_census_dataloaders 5 | from experiments.income_census import train_census_fg_adapt_pretrain as fg_dann 6 | from experiments.income_census import train_census_no_fg_adapt_pretrain as no_fg_dann 7 | from experiments.income_census.train_config import data_tag, data_dir, data_hyperparameters 8 | from utils import produce_data_for_distribution 9 | 10 | if __name__ == "__main__": 11 | 12 | parser = argparse.ArgumentParser("census_tsne_distribution") 13 | parser.add_argument('--task_id', type=str, required=True) 14 | args = parser.parse_args() 15 | task_id = args.task_id 16 | apply_adaptation = tsne_embedding_creation["apply_adaptation"] 17 | using_interaction = tsne_embedding_creation["using_interaction"] 18 | 19 | print(f"[INFO] task id : {task_id}") 20 | print(f"[INFO] apply adaptation : {apply_adaptation}") 21 | print(f"[INFO] using interaction : {using_interaction}") 22 | 23 | if apply_adaptation: 24 | tag = "ad" 25 | model_dir = data_hyperparameters["census_fg_pretrained_model_dir"] 26 | else: 27 | tag = "no-ad" 28 | model_dir = data_hyperparameters["census_no-ad_model_dir"] 29 | 30 | feature_group_name_list = ['employment', 'demographics', 'migration', 'household'] 31 | if using_interaction: 32 | feature_grp_intr_name_list = ['emp-demo', 'emp-mig', 'emp-house', 'demo-mig', 'demo-house', 'mig-house'] 33 | feature_group_name_list = feature_group_name_list + feature_grp_intr_name_list 34 | 35 | tsne_embedding_dir = tsne_embedding_creation["tsne_embedding_data_dir"] 36 | 37 | source_train_file_name = data_dir + f'undergrad_census9495_ad_{data_tag}_train.csv' 38 | target_train_file_name = data_dir + f'grad_census9495_ad_{data_tag}_train.csv' 39 | 40 | # load pre-trained model 41 | print("[INFO] load pre-trained model.") 42 | 
use_feature_group = True 43 | if use_feature_group: 44 | model = fg_dann.create_fg_census_global_model(using_interaction=using_interaction) 45 | else: 46 | model = no_fg_dann.create_no_fg_census_global_model() 47 | 48 | model.load_model(root=model_dir, 49 | task_id=task_id, 50 | load_global_classifier=True, 51 | timestamp=None) 52 | 53 | print("[INFO] load data.") 54 | batch_size = 4000 55 | target_train_loader, _ = get_income_census_dataloaders( 56 | ds_file_name=target_train_file_name, batch_size=batch_size, split_ratio=1.0) 57 | source_train_loader, _ = get_income_census_dataloaders( 58 | ds_file_name=source_train_file_name, batch_size=batch_size, split_ratio=1.0) 59 | 60 | print("[INFO] produce data for drawing TSNE feature distribution.") 61 | produce_data_for_distribution(model, 62 | source_train_loader, 63 | target_train_loader, 64 | feature_group_name_list, 65 | tsne_embedding_dir, 66 | tag + str(batch_size)) 67 | -------------------------------------------------------------------------------- /publications/FedCG/servers/fedsplit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | from config import args, logger, device 6 | from models import Classifier 7 | from utils import set_seed 8 | 9 | 10 | class Server(): 11 | 12 | def __init__(self, clients): 13 | self.clients = clients 14 | 15 | self.weights = [] 16 | for client in clients: 17 | self.weights.append(client.train_size) 18 | self.weights = np.array(self.weights) / np.sum(self.weights) 19 | logger.info("client weights: %s" % str(self.weights.tolist())) 20 | 21 | self.init_net() 22 | 23 | def get_params(self, models): 24 | params = [] 25 | for model in models: 26 | params.append({"params": self.global_net[model].parameters()}) 27 | return params 28 | 29 | def frozen_net(self, models, frozen): 30 | for model in models: 31 | for param in self.global_net[model].parameters(): 32 | param.requires_grad = not frozen 33 | if frozen: 34 | self.global_net[model].eval() 35 | else: 36 | self.global_net[model].train() 37 | 38 | def init_net(self): 39 | ############################################################## 40 | # frozen all models' parameters, unfrozen when need to train # 41 | ############################################################## 42 | set_seed(args.seed) 43 | self.global_net = nn.ModuleDict() 44 | 45 | self.global_net["classifier"] = Classifier() 46 | self.frozen_net(["classifier"], True) 47 | 48 | self.global_net.to(device) 49 | 50 | self.KL_criterion = nn.KLDivLoss(reduction="batchmean").to(device) 51 | self.CE_criterion = nn.CrossEntropyLoss().to(device) 52 | 53 | def load_client(self): 54 | for i in range(len(self.clients)): 55 | checkpoint = torch.load(args.checkpoint_dir + "/client" + str(i) + ".pkl") 56 | self.clients[i].net.load_state_dict(checkpoint["net"]) 57 | self.clients[i].EC_optimizer.load_state_dict(checkpoint["EC_optimizer"]) 58 | 59 | def save_client(self): 60 | for i in range(len(self.clients)): 61 | optim_dict = { 62 | "net": self.clients[i].net.state_dict(), 63 | "EC_optimizer": self.clients[i].EC_optimizer.state_dict(), 64 | } 65 | torch.save(optim_dict, args.checkpoint_dir + "/client" + str(i) + ".pkl") 66 | 67 | def receive(self, models): 68 | 69 | for model in models: 70 | avg_param = {} 71 | params = [] 72 | for client in self.clients: 73 | params.append(client.net[model].state_dict()) 74 | 75 | for key in params[0].keys(): 76 | avg_param[key] = params[0][key] * self.weights[0] 77 | for idx 
in range(1, len(self.clients)): 78 | avg_param[key] += params[idx][key] * self.weights[idx] 79 | self.global_net[model].load_state_dict(avg_param) 80 | 81 | def send(self, models): 82 | for model in models: 83 | global_param = self.global_net[model].state_dict() 84 | for client in self.clients: 85 | client.net[model].load_state_dict(global_param) 86 | -------------------------------------------------------------------------------- /publications/PrADA/datasets/ppd_dataloader.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from torch.utils.data import DataLoader 4 | 5 | from datasets.census_dataset import SimpleDataset 6 | 7 | # wide_columns = ['UserInfo_10', 'UserInfo_14', 'UserInfo_15', 'UserInfo_18', 'UserInfo_22', 'UserInfo_3', 8 | # 'UserInfo_16', 'UserInfo_1', 'UserInfo_23', 'UserInfo_9'] 9 | 10 | 11 | wide_columns = ['UserInfo_10', 'UserInfo_14', 'UserInfo_15', 'UserInfo_18', 'UserInfo_22', 'UserInfo_23'] 12 | 13 | 14 | def get_selected_columns(df, use_all_features=True): 15 | """ 16 | select subset of all columns for training 17 | """ 18 | all_columns = list(df.columns) 19 | if use_all_features: 20 | return all_columns 21 | deep_columns = all_columns[17:] 22 | select_columns = wide_columns + deep_columns 23 | return select_columns 24 | 25 | 26 | def shuffle_data(data): 27 | len = data.shape[0] 28 | perm_idxs = np.random.permutation(len) 29 | return data[perm_idxs] 30 | 31 | 32 | def get_datasets(ds_file_name, shuffle=False, split_ratio=0.9): 33 | dataframe = pd.read_csv(ds_file_name, skipinitialspace=True) 34 | samples = dataframe[get_selected_columns(dataframe, use_all_features=True)].values 35 | num_pos = np.sum(samples[:, -1]) 36 | num_neg = len(samples) - num_pos 37 | print(f"[INFO] ---- number of positive sample:{num_pos}") 38 | print(f"[INFO] ---- number of negative sample:{num_neg}") 39 | 40 | # print(samples) 41 | if shuffle: 42 | samples = shuffle_data(samples) 43 | 44 | if split_ratio == 1.0: 45 | print(f"samples shape: {samples.shape}, {samples.dtype}") 46 | train_dataset = SimpleDataset(samples[:, :-1], samples[:, -1]) 47 | return train_dataset, None 48 | else: 49 | num_train = int(split_ratio * samples.shape[0]) 50 | train_samples = samples[:num_train].astype(float) 51 | val_samples = samples[num_train:].astype(float) 52 | print(f"train samples shape: {train_samples.shape}, {train_samples.dtype}") 53 | print(f"valid samples shape: {val_samples.shape}, {val_samples.dtype}") 54 | train_dataset = SimpleDataset(train_samples[:, :-1], train_samples[:, -1]) 55 | val_dataset = SimpleDataset(val_samples[:, :-1], val_samples[:, -1]) 56 | return train_dataset, val_dataset 57 | 58 | 59 | def get_dataloader(train_dataset, batch_size=32, num_workers=6): 60 | mnist_train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) 61 | return mnist_train_loader 62 | 63 | 64 | def get_dataloaders(train_dataset, valid_dataset, batch_size=32, num_workers=6): 65 | mnist_train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) 66 | mnist_valid_loader = None 67 | if valid_dataset is not None: 68 | mnist_valid_loader = DataLoader(valid_dataset, batch_size=batch_size * 2, shuffle=True, num_workers=num_workers) 69 | return mnist_train_loader, mnist_valid_loader 70 | 71 | 72 | def get_pdd_dataloaders(ds_file_name, split_ratio=0.9, batch_size=64, num_workers=6): 73 | train_dataset, valid_dataset =
get_datasets(ds_file_name=ds_file_name, shuffle=True, split_ratio=split_ratio) 74 | return get_dataloaders(train_dataset, valid_dataset, batch_size=batch_size, num_workers=num_workers) 75 | -------------------------------------------------------------------------------- /publications/FedCG/servers/fedavg.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | from config import args, logger, device 6 | from models import Classifier, Extractor 7 | from utils import set_seed 8 | 9 | 10 | class Server(): 11 | 12 | def __init__(self, clients): 13 | self.clients = clients 14 | 15 | self.weights = [] 16 | for client in clients: 17 | self.weights.append(client.train_size) 18 | self.weights = np.array(self.weights) / np.sum(self.weights) 19 | logger.info("client weights: %s" % str(self.weights.tolist())) 20 | 21 | self.init_net() 22 | 23 | def get_params(self, models): 24 | params = [] 25 | for model in models: 26 | params.append({"params": self.global_net[model].parameters()}) 27 | return params 28 | 29 | def frozen_net(self, models, frozen): 30 | for model in models: 31 | for param in self.global_net[model].parameters(): 32 | param.requires_grad = not frozen 33 | if frozen: 34 | self.global_net[model].eval() 35 | else: 36 | self.global_net[model].train() 37 | 38 | def init_net(self): 39 | ############################################################## 40 | # frozen all models' parameters, unfrozen when need to train # 41 | ############################################################## 42 | set_seed(args.seed) 43 | self.global_net = nn.ModuleDict() 44 | 45 | self.global_net["extractor"] = Extractor() 46 | self.global_net["classifier"] = Classifier() 47 | self.frozen_net(["extractor", "classifier"], True) 48 | 49 | self.global_net.to(device) 50 | 51 | self.KL_criterion = nn.KLDivLoss(reduction="batchmean").to(device) 52 | self.CE_criterion = nn.CrossEntropyLoss().to(device) 53 | 54 | def load_client(self): 55 | for i in range(len(self.clients)): 56 | checkpoint = torch.load(args.checkpoint_dir + "/client" + str(i) + ".pkl") 57 | self.clients[i].net.load_state_dict(checkpoint["net"]) 58 | self.clients[i].EC_optimizer.load_state_dict(checkpoint["EC_optimizer"]) 59 | 60 | def save_client(self): 61 | for i in range(len(self.clients)): 62 | optim_dict = { 63 | "net": self.clients[i].net.state_dict(), 64 | "EC_optimizer": self.clients[i].EC_optimizer.state_dict(), 65 | } 66 | torch.save(optim_dict, args.checkpoint_dir + "/client" + str(i) + ".pkl") 67 | 68 | def receive(self, models): 69 | for model in models: 70 | avg_param = {} 71 | params = [] 72 | for client in self.clients: 73 | params.append(client.net[model].state_dict()) 74 | 75 | for key in params[0].keys(): 76 | avg_param[key] = params[0][key] * self.weights[0] 77 | for idx in range(1, len(self.clients)): 78 | avg_param[key] += params[idx][key] * self.weights[idx] 79 | self.global_net[model].load_state_dict(avg_param) 80 | 81 | def send(self, models): 82 | for model in models: 83 | global_param = self.global_net[model].state_dict() 84 | for client in self.clients: 85 | client.net[model].load_state_dict(global_param) 86 | -------------------------------------------------------------------------------- /publications/FedCG/servers/fedprox.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | 5 | from config import args, logger, device 6 | from 
models import Classifier, Extractor 7 | from utils import set_seed 8 | 9 | 10 | class Server(): 11 | 12 | def __init__(self, clients): 13 | self.clients = clients 14 | 15 | self.weights = [] 16 | for client in clients: 17 | self.weights.append(client.train_size) 18 | self.weights = np.array(self.weights) / np.sum(self.weights) 19 | logger.info("client weights: %s" % str(self.weights.tolist())) 20 | 21 | self.init_net() 22 | 23 | def get_params(self, models): 24 | params = [] 25 | for model in models: 26 | params.append({"params": self.global_net[model].parameters()}) 27 | return params 28 | 29 | def frozen_net(self, models, frozen): 30 | for model in models: 31 | for param in self.global_net[model].parameters(): 32 | param.requires_grad = not frozen 33 | if frozen: 34 | self.global_net[model].eval() 35 | else: 36 | self.global_net[model].train() 37 | 38 | def init_net(self): 39 | ############################################################## 40 | # frozen all models' parameters, unfrozen when need to train # 41 | ############################################################## 42 | set_seed(args.seed) 43 | self.global_net = nn.ModuleDict() 44 | 45 | self.global_net["extractor"] = Extractor() 46 | self.global_net["classifier"] = Classifier() 47 | self.frozen_net(["extractor", "classifier"], True) 48 | 49 | self.global_net.to(device) 50 | 51 | self.KL_criterion = nn.KLDivLoss(reduction="batchmean").to(device) 52 | self.CE_criterion = nn.CrossEntropyLoss().to(device) 53 | 54 | def load_client(self): 55 | for i in range(len(self.clients)): 56 | checkpoint = torch.load(args.checkpoint_dir + "/client" + str(i) + ".pkl") 57 | self.clients[i].net.load_state_dict(checkpoint["net"]) 58 | self.clients[i].EC_optimizer.load_state_dict(checkpoint["EC_optimizer"]) 59 | 60 | def save_client(self): 61 | for i in range(len(self.clients)): 62 | optim_dict = { 63 | "net": self.clients[i].net.state_dict(), 64 | "EC_optimizer": self.clients[i].EC_optimizer.state_dict(), 65 | } 66 | torch.save(optim_dict, args.checkpoint_dir + "/client" + str(i) + ".pkl") 67 | 68 | def receive(self, models): 69 | 70 | for model in models: 71 | avg_param = {} 72 | params = [] 73 | for client in self.clients: 74 | params.append(client.net[model].state_dict()) 75 | 76 | for key in params[0].keys(): 77 | avg_param[key] = params[0][key] * self.weights[0] 78 | for idx in range(1, len(self.clients)): 79 | avg_param[key] += params[idx][key] * self.weights[idx] 80 | self.global_net[model].load_state_dict(avg_param) 81 | 82 | def send(self, models): 83 | for model in models: 84 | global_param = self.global_net[model].state_dict() 85 | for client in self.clients: 86 | client.net[model].load_state_dict(global_param) 87 | -------------------------------------------------------------------------------- /publications/ss_vfnas/README.md: -------------------------------------------------------------------------------- 1 | # Cross-silo Federated Neural Architecture Search for Heterogeneous and Cooperative Systems 2 | 3 |