├── federatedscope
├── attack
│ ├── __init__.py
│ ├── models
│ │ └── __init__.py
│ ├── privacy_attacks
│ │ └── __init__.py
│ ├── worker_as_attacker
│ │ └── __init__.py
│ └── trainer
│ │ └── PIA_trainer.py
├── gfl
│ ├── __init__.py
│ ├── baseline
│ │ ├── __init__.py
│ │ ├── fedavg_gcn_fullbatch_on_dblpnew.yaml
│ │ ├── local_gnn_node_fullbatch_citation.yaml
│ │ ├── fedavg_sage_minibatch_on_dblpnew.yaml
│ │ ├── fedavg_gcn_minibatch_on_hiv.yaml
│ │ ├── repro_exp
│ │ │ ├── graph_level
│ │ │ │ └── args_multi_graph_fedalgo.sh
│ │ │ └── hpo
│ │ │ │ └── run_hpo.sh
│ │ ├── fedavg_gnn_node_fullbatch_citation.yaml
│ │ ├── mini_graph_dc
│ │ │ └── fedavg.yaml
│ │ ├── fedavg_gcn_fullbatch_on_kg.yaml
│ │ ├── fedavg_on_cSBM.yaml
│ │ ├── fedavg_gin_minibatch_on_cikmcup.yaml
│ │ ├── isolated_gin_minibatch_on_cikmcup.yaml
│ │ └── fedavg_wpsn_on_cSBM.yaml
│ ├── fedsageplus
│ │ ├── __init__.py
│ │ └── fedsageplus_on_cora.yaml
│ ├── flitplus
│ │ ├── __init__.py
│ │ └── fedalgo_cls.yaml
│ ├── gcflplus
│ │ ├── __init__.py
│ │ └── gcflplus_on_multi_task.yaml
│ ├── dataset
│ │ ├── preprocess
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── loss
│ │ └── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ └── trainer
│ │ └── __init__.py
├── contrib
│ ├── __init__.py
│ ├── data
│ │ └── __init__.py
│ ├── loss
│ │ ├── __init__.py
│ │ └── example.py
│ ├── model
│ │ ├── __init__.py
│ │ └── example.py
│ ├── metrics
│ │ ├── __init__.py
│ │ └── example.py
│ ├── optimizer
│ │ ├── __init__.py
│ │ └── example.py
│ ├── scheduler
│ │ ├── __init__.py
│ │ └── example.py
│ ├── splitter
│ │ └── __init__.py
│ ├── trainer
│ │ ├── __init__.py
│ │ ├── sam_trainer.py
│ │ ├── example.py
│ │ └── local_entropy_trainer.py
│ ├── worker
│ │ └── __init__.py
│ └── configs
│ │ └── __init__.py
├── core
│ ├── secure
│ │ ├── __init__.py
│ │ └── encrypt
│ │ │ └── __init__.py
│ ├── auxiliaries
│ │ ├── __init__.py
│ │ └── ReIterator.py
│ ├── feature
│ │ ├── __init__.py
│ │ ├── hfl
│ │ │ ├── __init__.py
│ │ │ ├── selection
│ │ │ │ ├── __init__.py
│ │ │ │ ├── correlation_filter.py
│ │ │ │ └── variance_filter.py
│ │ │ └── preprocess
│ │ │ │ ├── __init__.py
│ │ │ │ ├── min_max_norm.py
│ │ │ │ ├── standard.py
│ │ │ │ ├── log_transform.py
│ │ │ │ ├── quantile_binning.py
│ │ │ │ └── uniform_binning.py
│ │ └── vfl
│ │ │ ├── selection
│ │ │ └── __init__.py
│ │ │ └── preprocess
│ │ │ └── __init__.py
│ ├── optimizers
│ │ └── __init__.py
│ ├── parallel
│ │ └── __init__.py
│ ├── regularizer
│ │ └── __init__.py
│ ├── secret_sharing
│ │ └── __init__.py
│ ├── __init__.py
│ ├── splitters
│ │ ├── __init__.py
│ │ ├── generic
│ │ │ └── __init__.py
│ │ └── graph
│ │ │ └── __init__.py
│ ├── workers
│ │ ├── wrapper
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── proto
│ │ └── __init__.py
│ ├── compression
│ │ └── __init__.py
│ ├── monitors
│ │ └── __init__.py
│ ├── lr.py
│ ├── data
│ │ ├── raw_translator.py
│ │ └── __init__.py
│ ├── strategy.py
│ └── gRPC_server.py
├── mf
│ ├── baseline
│ │ ├── __init__.py
│ │ ├── hfl_fedavg_standalone_on_movielens1m.yaml
│ │ ├── vfl_fedavg_standalone_on_movielens1m.yaml
│ │ ├── hfl_fedavg_standalone_on_netflix.yaml
│ │ ├── hfl-sgdmf_fedavg_standalone_on_movielens1m.yaml
│ │ └── vfl-sgdmf_fedavg_standalone_on_movielens1m.yaml
│ ├── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ ├── model
│ │ ├── __init__.py
│ │ └── model_builder.py
│ ├── dataset
│ │ └── __init__.py
│ └── trainer
│ │ └── __init__.py
├── nlp
│ ├── metric
│ │ ├── __init__.py
│ │ ├── rouge
│ │ │ └── __init__.py
│ │ ├── bleu
│ │ │ └── __init__.py
│ │ └── meteor
│ │ │ └── __init__.py
│ ├── hetero_tasks
│ │ ├── __init__.py
│ │ ├── model
│ │ │ └── __init__.py
│ │ ├── trainer
│ │ │ └── __init__.py
│ │ ├── aggregator
│ │ │ └── __init__.py
│ │ ├── dataloader
│ │ │ └── __init__.py
│ │ ├── worker
│ │ │ └── __init__.py
│ │ ├── metric
│ │ │ ├── __init__.py
│ │ │ └── cnndm.py
│ │ ├── dataset
│ │ │ └── __init__.py
│ │ ├── run_pretrain.sh
│ │ ├── run_fedavg.sh
│ │ ├── run_isolated.sh
│ │ └── run_atc.sh
│ ├── __init__.py
│ ├── loss
│ │ └── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ ├── model
│ │ └── __init__.py
│ ├── dataset
│ │ ├── __init__.py
│ │ └── preprocess
│ │ │ ├── get_embs.sh
│ │ │ └── get_embs.py
│ ├── trainer
│ │ └── __init__.py
│ └── baseline
│ │ ├── fedavg_lr_on_synthetic.yaml
│ │ ├── fedavg_lstm_on_subreddit.yaml
│ │ ├── fedavg_lstm_on_shakespeare.yaml
│ │ ├── fedavg_lr_on_twitter.yaml
│ │ └── fedavg_bert_on_sst2.yaml
├── organizer
│ ├── __init__.py
│ ├── cfg_server.py
│ └── cfg_client.py
├── tabular
│ ├── __init__.py
│ ├── model
│ │ ├── __init__.py
│ │ └── quadratic.py
│ └── dataloader
│ │ ├── __init__.py
│ │ └── quadratic.py
├── vertical_fl
│ ├── Paillier
│ │ └── __init__.py
│ ├── linear_model
│ │ ├── __init__.py
│ │ ├── worker
│ │ │ └── __init__.py
│ │ └── baseline
│ │ │ ├── vertical_fl.yaml
│ │ │ └── vertical_on_adult.yaml
│ ├── tree_based_models
│ │ ├── __init__.py
│ │ ├── model
│ │ │ └── __init__.py
│ │ ├── trainer
│ │ │ └── __init__.py
│ │ ├── baseline
│ │ │ ├── rf_feature_gathering_on_abalone.yaml
│ │ │ ├── rf_feature_gathering_on_adult.yaml
│ │ │ ├── xgb_feature_gathering_on_adult.yaml
│ │ │ ├── gbdt_feature_gathering_on_adult.yaml
│ │ │ ├── xgb_feature_gathering_on_abalone.yaml
│ │ │ ├── xgb_feature_gathering_on_adult_by_he_eval.yaml
│ │ │ ├── gbdt_feature_gathering_on_abalone.yaml
│ │ │ ├── xgb_label_scattering_on_adult.yaml
│ │ │ ├── xgb_feature_gathering_op_boost_on_adult.yaml
│ │ │ └── xgb_feature_gathering_dp_on_adult.yaml
│ │ └── worker
│ │ │ └── __init__.py
│ ├── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ ├── loss
│ │ ├── __init__.py
│ │ └── utils.py
│ └── dataset
│ │ └── __init__.py
├── cl
│ ├── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ ├── loss
│ │ └── __init__.py
│ ├── model
│ │ └── __init__.py
│ ├── trainer
│ │ └── __init__.py
│ └── baseline
│ │ ├── repro_exp
│ │ ├── args_cifar10_fedsimclr.sh
│ │ └── args_cifar10_fedgc.sh
│ │ ├── supervised_local_on_cifar10.yaml
│ │ ├── unpretrained_linearprob_on_cifar10.yaml
│ │ ├── fedsimclr_on_cifar10.yaml
│ │ ├── fedgc_on_cifar10.yaml
│ │ ├── supervised_fedavg_on_cifar10.yaml
│ │ └── fedcontrastlearning_linearprob_on_cifar10.yaml
├── cv
│ ├── __init__.py
│ ├── dataloader
│ │ └── __init__.py
│ ├── dataset
│ │ └── __init__.py
│ ├── model
│ │ └── __init__.py
│ ├── trainer
│ │ └── trainer.py
│ └── baseline
│ │ ├── fedavg_convnet2_on_celeba.yaml
│ │ └── fedavg_convnet2_on_femnist.yaml
├── autotune
│ ├── fts
│ │ └── __init__.py
│ ├── fedex
│ │ └── __init__.py
│ ├── pfedhpo
│ │ └── __init__.py
│ ├── __init__.py
│ └── baseline
│ │ └── vfl_ss.yaml
├── cross_backends
│ ├── __init__.py
│ ├── distributed_tf_server.yaml
│ └── distributed_tf_client_3.yaml
└── __init__.py
├── benchmark
├── FedHPOBench
│ ├── demo
│ │ └── __init__.py
│ ├── fedhpobench
│ │ ├── utils
│ │ │ └── __init__.py
│ │ ├── benchmarks
│ │ │ └── __init__.py
│ │ ├── optimizers
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── data
│ │ ├── tabular_data
│ │ │ └── README.md
│ │ └── surrogate_model
│ │ │ └── README.md
│ ├── scripts
│ │ ├── exp
│ │ │ ├── cora.yaml
│ │ │ ├── graph.yaml
│ │ │ ├── citeseer.yaml
│ │ │ ├── pubmed.yaml
│ │ │ ├── 10101@openml.yaml
│ │ │ ├── 146818@openml.yaml
│ │ │ ├── 146821@openml.yaml
│ │ │ ├── 146822@openml.yaml
│ │ │ ├── 31@openml.yaml
│ │ │ ├── 3917@openml.yaml
│ │ │ ├── 53@openml.yaml
│ │ │ ├── 9952@openml.yaml
│ │ │ ├── femnist.yaml
│ │ │ ├── cola@huggingface_datasets.yaml
│ │ │ ├── sst2@huggingface_datasets.yaml
│ │ │ ├── run_avg.sh
│ │ │ ├── run_opt.sh
│ │ │ └── run_avg2.sh
│ │ ├── lr
│ │ │ ├── openml_lr.yaml
│ │ │ ├── 31@openml.yaml
│ │ │ ├── 3@openml.yaml
│ │ │ ├── 53@openml.yaml
│ │ │ ├── 12@openml.yaml
│ │ │ ├── 3917@openml.yaml
│ │ │ ├── 7592@openml.yaml
│ │ │ ├── 9952@openml.yaml
│ │ │ ├── 9977@openml.yaml
│ │ │ ├── 9981@openml.yaml
│ │ │ ├── 10101@openml.yaml
│ │ │ ├── 146212@openml.yaml
│ │ │ ├── 146606@openml.yaml
│ │ │ ├── 146818@openml.yaml
│ │ │ ├── 146821@openml.yaml
│ │ │ ├── 146822@openml.yaml
│ │ │ ├── 14965@openml.yaml
│ │ │ ├── 167119@openml.yaml
│ │ │ ├── 167120@openml.yaml
│ │ │ ├── 168911@openml.yaml
│ │ │ └── 168912@openml.yaml
│ │ ├── mlp
│ │ │ ├── openml_mlp.yaml
│ │ │ ├── 3@openml.yaml
│ │ │ ├── 12@openml.yaml
│ │ │ ├── 31@openml.yaml
│ │ │ ├── 53@openml.yaml
│ │ │ ├── 10101@openml.yaml
│ │ │ ├── 146212@openml.yaml
│ │ │ ├── 146606@openml.yaml
│ │ │ ├── 146818@openml.yaml
│ │ │ ├── 146821@openml.yaml
│ │ │ ├── 146822@openml.yaml
│ │ │ ├── 14965@openml.yaml
│ │ │ ├── 167119@openml.yaml
│ │ │ ├── 167120@openml.yaml
│ │ │ ├── 168911@openml.yaml
│ │ │ ├── 168912@openml.yaml
│ │ │ ├── 3917@openml.yaml
│ │ │ ├── 7592@openml.yaml
│ │ │ ├── 9952@openml.yaml
│ │ │ ├── 9977@openml.yaml
│ │ │ └── 9981@openml.yaml
│ │ ├── gcn
│ │ │ ├── cora.yaml
│ │ │ ├── citeseer.yaml
│ │ │ ├── pubmed.yaml
│ │ │ ├── cora_prox.yaml
│ │ │ └── cora_dp.yaml
│ │ ├── cross_device
│ │ │ └── twitter.yaml
│ │ └── bert
│ │ │ ├── cola@huggingface_datasets.yaml
│ │ │ └── sst2@huggingface_datasets.yaml
│ └── example.py
├── B-FHTL
│ └── scripts
│ │ ├── Text-DT
│ │ ├── run_ditto.sh
│ │ ├── run_fedbn.sh
│ │ ├── run_fedavg.sh
│ │ ├── run_fedprox.sh
│ │ ├── run_local.sh
│ │ ├── run_fedbn_ft.sh
│ │ ├── run_fedavg_ft.sh
│ │ └── run_maml.sh
│ │ └── Graph-DC
│ │ ├── fedbn.yaml
│ │ ├── hpo
│ │ ├── fedbn_gnn_minibatch_on_multi_task.yaml
│ │ └── fedavg_gnn_minibatch_on_multi_task.yaml
│ │ ├── fedavg.yaml
│ │ ├── ditto.yaml
│ │ ├── fedprox.yaml
│ │ ├── fedbn_ft.yaml
│ │ └── fedavg_ft.yaml
└── pFL-Bench
│ └── FEMNIST-s02
│ ├── run_fedopt_bn_plus_sweep.sh
│ └── fedavg_convnet2_on_femnist.yaml
├── scripts
├── example_configs
│ ├── sha_wrap_fedex_ss.yaml
│ ├── fedex_grid_search_space.yaml
│ ├── pfedhpo
│ │ ├── mini_graph_dc
│ │ │ └── ss.yaml
│ │ ├── twitter
│ │ │ └── ss.yaml
│ │ └── cifar
│ │ │ ├── ss.yaml
│ │ │ └── run.sh
│ ├── toy_hpo_ss.yaml
│ ├── cora
│ │ ├── hpo_ss_fedex_grid.yaml
│ │ ├── hpo_ss_fedex.yaml
│ │ ├── run.sh
│ │ ├── hpo_ss_sha.yaml
│ │ ├── hpo_ss_fedex_arm_table.yaml
│ │ └── hpo_ss_fedex_arm.yaml
│ ├── femnist
│ │ ├── hpo_ss_fedex.yaml
│ │ ├── hpo_ss_fedex_grid.yaml
│ │ ├── run.sh
│ │ ├── hpo_ss_fedex_arm_table.yaml
│ │ ├── avg
│ │ │ └── ss.yaml
│ │ ├── hpo_ss_sha.yaml
│ │ └── hpo_ss_fedex_arm.yaml
│ ├── quadratic_clientwise.yaml
│ ├── single_process.yaml
│ ├── toy_rs.yaml
│ ├── quadratic.yaml
│ ├── toy_sha.yaml
│ ├── sha_wrap_fedex_ss_table.yaml
│ ├── openml_lr.yaml
│ ├── fedex_for_lr.yaml
│ ├── fedex_flat_search_space.yaml
│ ├── sha_wrap_fedex_arm.yaml
│ ├── sha_wrap_fedex.yaml
│ ├── fed_node_cls.yaml
│ ├── femnist.yaml
│ └── femnist_global_train.yaml
├── optimization_exp_scripts
│ ├── fedprox_exp_scripts
│ │ ├── run_fedprox_lr.sh
│ │ ├── run_fedprox_femnist.sh
│ │ └── run_fedprox_shakespeare.sh
│ └── fedopt_exp_scripts
│ │ ├── run_fedopt_lr.sh
│ │ ├── run_fedopt_shakespeare.sh
│ │ └── run_fedopt_femnist.sh
├── wide_valley_exp_scripts
│ ├── run_on_cifar10.sh
│ ├── hpo_for_fedentsgd.sh
│ ├── search_space_for_fedentsgd.yaml
│ └── run_fedentsgd_on_cifar10.sh
├── flit_exp_scripts
│ ├── fedalgo.sh
│ ├── run_fedavg-fedfocal-flit_cls.sh
│ ├── run_fedprox_cls.sh
│ ├── run_fedvat_cls.sh
│ └── run_flitplus_cls.sh
├── mf_exp_scripts
│ ├── run_movielens1m_vfl_standalone.sh
│ ├── run_movielens1m_hfl_standalone.sh
│ ├── run_movielens1m_hflsgdmf_standalone.sh
│ └── run_movielens1m_vflsgdmf_standalone.sh
├── dp_exp_scripts
│ └── run_femnist_dp_standalone.sh
├── distributed_scripts
│ ├── distributed_configs
│ │ ├── distributed_server_no_data.yaml
│ │ ├── distributed_server.yaml
│ │ ├── distributed_client_1.yaml
│ │ ├── distributed_client_2.yaml
│ │ └── distributed_client_3.yaml
│ ├── run_distributed_xgb.sh
│ ├── run_distributed_conv_femnist.sh
│ └── run_distributed_lr.sh
├── attack_exp_scripts
│ └── privacy_attack
│ │ ├── PIA_toy.yaml
│ │ └── CRA_fedavg_convnet2_on_femnist.yaml
└── personalization_exp_scripts
│ ├── ditto
│ ├── ditto_lr_on_synthetic.yaml
│ └── ditto_convnet2_on_femnist.yaml
│ ├── fedem
│ ├── fedem_lr_on_synthetic.yaml
│ ├── fedem_lstm_on_shakespeare.yaml
│ └── fedem_convnet2_on_femnist.yaml
│ ├── pfedme
│ └── pfedme_lr_on_synthetic.yaml
│ └── fedbn
│ └── fedbn_convnet2_on_femnist.yaml
├── .style.yapf
├── .github
├── ISSUE_TEMPLATE
│ ├── custom.md
│ └── feature_request.md
├── workflows
│ ├── pre-commit.yml
│ └── sphinx.yml
└── release-drafter.yml
├── environment
├── requirements-torch1.10.txt
├── extra_dependencies_torch1.10-application.sh
├── docker_files
│ └── federatedscope-jupyterhub
│ │ └── start-singleuser.sh
├── requirements-torch1.10-application.txt
└── requirements-torch1.8-application.txt
├── materials
└── tutorial
│ └── KDD_2022
│ └── README.md
├── doc
├── README.md
├── news
│ └── 06-13-2022_Declaration_of_Emergency.txt
└── source
│ ├── mf.rst
│ ├── gfl.rst
│ ├── nlp.rst
│ ├── cv.rst
│ └── attack.rst
├── .pre-commit-config.yaml
└── .flake8
/federatedscope/attack/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/gfl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/contrib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/secure/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/nlp/metric/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/organizer/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/tabular/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/attack/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/auxiliaries/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/optimizers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/parallel/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/gfl/fedsageplus/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/gfl/flitplus/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/gfl/gcflplus/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/nlp/metric/rouge/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/Paillier/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/fedhpobench/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/selection/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/vfl/selection/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/linear_model/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/min_max_norm.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/standard.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/vfl/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/log_transform.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/quantile_binning.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/preprocess/uniform_binning.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/selection/correlation_filter.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/federatedscope/core/feature/hfl/selection/variance_filter.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/data/tabular_data/README.md:
--------------------------------------------------------------------------------
1 | This is where the logs and dataframes are stored.
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/data/surrogate_model/README.md:
--------------------------------------------------------------------------------
1 | This is where the pickled surrogate model is stored.
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.Paillier.abstract_paillier import *
2 |
--------------------------------------------------------------------------------
/federatedscope/core/regularizer/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.regularizer.proximal_regularizer import *
2 |
--------------------------------------------------------------------------------
/scripts/example_configs/sha_wrap_fedex_ss.yaml:
--------------------------------------------------------------------------------
1 | hpo.fedex.eta0:
2 | type: cate
3 | choices: [-1.0, 0.005, 0.05]
4 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/cora.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/graph.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
--------------------------------------------------------------------------------
/federatedscope/nlp/metric/bleu/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.metric.bleu.bleu import Bleu
2 |
3 | __all__ = ['Bleu']
4 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/citeseer.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
--------------------------------------------------------------------------------
/federatedscope/nlp/metric/meteor/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.metric.meteor.meteor import Meteor
2 |
3 | __all__ = ['Meteor']
4 |
--------------------------------------------------------------------------------
/federatedscope/core/secret_sharing/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.secret_sharing.secret_sharing import \
2 | AdditiveSecretSharing
3 |
--------------------------------------------------------------------------------
/federatedscope/tabular/model/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.tabular.model.quadratic import QuadraticModel
2 |
3 | __all__ = ['QuadraticModel']
4 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/pubmed.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 172800
--------------------------------------------------------------------------------
/federatedscope/cl/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/core/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/core/splitters/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.splitters.base_splitter import BaseSplitter
2 |
3 | __all__ = ['BaseSplitter']
4 |
--------------------------------------------------------------------------------
/federatedscope/cv/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/cv/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.cv.dataloader.dataloader import load_cv_dataset
2 |
3 | __all__ = ['load_cv_dataset']
4 |
--------------------------------------------------------------------------------
/federatedscope/mf/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/nlp/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/model/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.hetero_tasks.model.model import ATCModel
2 |
3 | __all__ = ['ATCModel']
4 |
--------------------------------------------------------------------------------
/federatedscope/nlp/loss/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.loss.character_loss import *
2 | from federatedscope.nlp.loss.label_smooth_loss import *
3 |
--------------------------------------------------------------------------------
/scripts/example_configs/fedex_grid_search_space.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr: [0.01, 0.02, 0.04, 0.08]
2 | train.optimizer.momentum: [0.1, 0.2, 0.4, 0.8]
3 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/10101@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/146818@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/146821@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/146822@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/31@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/3917@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/53@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/9952@openml.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 43200
--------------------------------------------------------------------------------
/federatedscope/cl/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.cl.dataloader.Cifar10 import load_cifar_dataset
2 |
3 | __all__ = ['load_cifar_dataset']
4 |
--------------------------------------------------------------------------------
/federatedscope/nlp/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.dataloader.dataloader import load_nlp_dataset
2 |
3 | __all__ = ['load_nlp_dataset']
4 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style=pep8
3 | allow_split_before_dict_value=False
4 | join_multiple_lines=False
5 | blank_line_before_nested_class_or_def=False
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/femnist.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 81
6 | limit_time: 172800000
7 |
--------------------------------------------------------------------------------
/federatedscope/core/workers/wrapper/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.workers.wrapper.fedswa import wrap_swa_server
2 |
3 | __all__ = ['wrap_swa_server']
4 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.hetero_tasks.trainer.trainer import ATCTrainer
2 |
3 | __all__ = ['ATCTrainer']
4 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/cola@huggingface_datasets.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 27
6 | limit_time: 864000
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/sst2@huggingface_datasets.yaml:
--------------------------------------------------------------------------------
1 | benchmark:
2 | device: 0
3 | optimizer:
4 | min_budget: 3
5 | max_budget: 27
6 | limit_time: 864000
--------------------------------------------------------------------------------
/federatedscope/gfl/dataset/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
--------------------------------------------------------------------------------
/federatedscope/core/proto/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.proto.gRPC_comm_manager_pb2 import *
2 | from federatedscope.core.proto.gRPC_comm_manager_pb2_grpc import *
3 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/aggregator/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.hetero_tasks.aggregator.aggregator import ATCAggregator
2 |
3 | __all__ = ['ATCAggregator']
4 |
--------------------------------------------------------------------------------
/federatedscope/tabular/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.tabular.dataloader.quadratic import load_quadratic_dataset
2 |
3 | __all__ = ['load_quadratic_dataset']
4 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.dataloader.dataloader import load_vertical_data
2 |
3 | __all__ = ['load_vertical_data']
4 |
--------------------------------------------------------------------------------
/federatedscope/core/secure/encrypt/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.secure.encrypt.dummy_encrypt import \
2 | DummyEncryptKeypair
3 |
4 | __all__ = ['DummyEncryptKeypair']
5 |
--------------------------------------------------------------------------------
/federatedscope/mf/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.mf.dataloader.dataloader import load_mf_dataset, \
2 | MFDataLoader
3 |
4 | __all__ = ['load_mf_dataset', 'MFDataLoader']
5 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.hetero_tasks.dataloader.dataloader import \
2 | load_heteroNLP_data
3 |
4 | __all__ = ['load_heteroNLP_data']
5 |
--------------------------------------------------------------------------------
/federatedscope/autotune/fts/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.autotune.fts.server import FTSServer
2 | from federatedscope.autotune.fts.client import FTSClient
3 |
4 | __all__ = ['FTSServer', 'FTSClient']
5 |
--------------------------------------------------------------------------------
/scripts/example_configs/pfedhpo/mini_graph_dc/ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: cate
3 | choices: [ 1e-1, 1e-2, 1e-3, 1e-4 ]
4 | train.local_update_steps:
5 | type: cate
6 | choices: [1,2,3,4]
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/custom.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Custom issue template
3 | about: Describe this issue template's purpose here.
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/federatedscope/cl/loss/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
5 | from federatedscope.cl.loss.NT_xentloss import *
6 |
--------------------------------------------------------------------------------
/federatedscope/autotune/fedex/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.autotune.fedex.server import FedExServer
2 | from federatedscope.autotune.fedex.client import FedExClient
3 |
4 | __all__ = ['FedExServer', 'FedExClient']
5 |
--------------------------------------------------------------------------------
/scripts/example_configs/toy_hpo_ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: float
3 | lower: 0.001
4 | upper: 0.5
5 | log: True
6 | train.optimizer.weight_decay:
7 | type: cate
8 | choices: [0.0, 0.0005, 0.005]
9 |
--------------------------------------------------------------------------------
/federatedscope/nlp/model/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.model.rnn import LSTM
2 | from federatedscope.nlp.model.model_builder import get_rnn, get_transformer
3 |
4 | __all__ = ['LSTM', 'get_rnn', 'get_transformer']
5 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_ditto.sh:
--------------------------------------------------------------------------------
1 | # B-FHTL Text-DT baseline: Ditto; outputs under exp/sts_imdb_squad/ditto/.
2 | cd ../FederatedScope/federatedscope/
3 |
4 | python main.py --cfg contrib/configs/config_ditto.yaml --cfg_client contrib/configs/config_client_ditto.yaml outdir exp/sts_imdb_squad/ditto/
5 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_fedbn.sh:
--------------------------------------------------------------------------------
1 | # B-FHTL Text-DT baseline: FedBN; outputs under exp/sts_imdb_squad/fedbn/.
2 | cd ../FederatedScope/federatedscope/
3 |
4 | python main.py --cfg contrib/configs/config_fedbn.yaml --cfg_client contrib/configs/config_client_fedbn.yaml outdir exp/sts_imdb_squad/fedbn/
5 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_fedavg.sh:
--------------------------------------------------------------------------------
1 | # B-FHTL Text-DT baseline: FedAvg; outputs under exp/sts_imdb_squad/fedavg/.
2 | cd ../FederatedScope/federatedscope/
3 |
4 | python main.py --cfg contrib/configs/config_fedavg.yaml --cfg_client contrib/configs/config_client_fedavg.yaml outdir exp/sts_imdb_squad/fedavg/
5 |
--------------------------------------------------------------------------------
/federatedscope/mf/model/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.mf.model.model import BasicMFNet, VMFNet, HMFNet
2 | from federatedscope.mf.model.model_builder import get_mfnet
3 |
4 | __all__ = ["get_mfnet", "BasicMFNet", "VMFNet", "HMFNet"]
5 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/worker/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.nlp.hetero_tasks.worker.client import ATCClient
2 | from federatedscope.nlp.hetero_tasks.worker.server import ATCServer
3 |
4 | __all__ = ['ATCClient', 'ATCServer']
5 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_fedprox.sh:
--------------------------------------------------------------------------------
1 | # B-FHTL Text-DT baseline: FedProx; outputs under exp/sts_imdb_squad/fedprox/.
2 | cd ../FederatedScope/federatedscope/
3 |
4 | python main.py --cfg contrib/configs/config_fedprox.yaml --cfg_client contrib/configs/config_client_fedprox.yaml outdir exp/sts_imdb_squad/fedprox/
5 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_local.sh:
--------------------------------------------------------------------------------
1 | # B-FHTL Text-DT baseline: isolated (purely local) training; outputs
2 | # under exp/sts_imdb_squad/isolated/.
3 | cd ../FederatedScope/federatedscope/
4 |
5 | python main.py --cfg contrib/configs/config_local.yaml --cfg_client contrib/configs/config_client_local_local.yaml outdir exp/sts_imdb_squad/isolated/
6 |
--------------------------------------------------------------------------------
/federatedscope/cl/model/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
5 | from federatedscope.cl.model.SimCLR import get_simclr
6 |
7 | __all__ = ['get_simclr']
8 |
--------------------------------------------------------------------------------
/federatedscope/cross_backends/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.cross_backends.tf_lr import LogisticRegression
2 | from federatedscope.cross_backends.tf_aggregator import FedAvgAggregator
3 |
4 | __all__ = ['LogisticRegression', 'FedAvgAggregator']
5 |
--------------------------------------------------------------------------------
/federatedscope/core/splitters/generic/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.splitters.generic.lda_splitter import LDASplitter
2 | from federatedscope.core.splitters.generic.iid_splitter import IIDSplitter
3 |
4 | __all__ = ['LDASplitter', 'IIDSplitter']
5 |
--------------------------------------------------------------------------------
/federatedscope/gfl/loss/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
5 | from federatedscope.gfl.loss.greedy_loss import GreedyLoss
6 |
7 | __all__ = ['GreedyLoss']
8 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/run_avg.sh:
--------------------------------------------------------------------------------
1 | # FedHPOBench: collect 'avg'-mode results for MLP on OpenML tabular tasks.
2 | # run_mode.sh args: <dataset> <data-type> <model> <N> <mode>; what N means
3 | # is defined in run_mode.sh — TODO confirm.
4 | # NOTE(review): unlike run_opt.sh there is no 3917@openml line here — confirm.
5 | bash run_mode.sh 10101@openml tabular mlp 3 avg
6 |
7 | bash run_mode.sh 146818@openml tabular mlp 1 avg
8 |
9 | bash run_mode.sh 146821@openml tabular mlp 3 avg
10 |
11 | bash run_mode.sh 146822@openml tabular mlp 2 avg
/scripts/example_configs/pfedhpo/twitter/ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: cate
3 | choices: [1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2]
4 | train.optimizer.weight_decay:
5 | type: cate
6 | choices: [ 0, 1e-5, 1e-4, 1e-3, 1e-2 ]
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/model/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.tree_based_models.model.Tree \
2 | import MultipleXGBTrees, MultipleGBDTTrees, RandomForest
3 |
4 | __all__ = ['MultipleXGBTrees', 'MultipleGBDTTrees', 'RandomForest']
5 |
--------------------------------------------------------------------------------
/federatedscope/core/compression/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.compression.utils import \
2 | symmetric_uniform_quantization, symmetric_uniform_dequantization
3 |
4 | __all__ = [
5 | 'symmetric_uniform_quantization', 'symmetric_uniform_dequantization'
6 | ]
7 |
--------------------------------------------------------------------------------
/federatedscope/organizer/cfg_server.py:
--------------------------------------------------------------------------------
1 | # Task-queue configuration for the organizer server. The option names
2 | # match Celery's configuration keys — NOTE(review): confirm against the
3 | # worker that loads this module.
4 | result_backend = 'redis://'  # store task results in a local Redis instance
5 |
6 | # Exchange task payloads and results as JSON only.
7 | task_serializer = 'json'
8 | result_serializer = 'json'
9 | accept_content = ['json']
10 | timezone = 'Europe/Oslo'
11 | enable_utc = True
12 | # Throttle the 'tasks.add' task to at most 10 executions per minute.
13 | task_annotations = {'tasks.add': {'rate_limit': '10/m'}}
14 |
--------------------------------------------------------------------------------
/federatedscope/mf/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.mf.dataset.movielens import *
2 |
3 | __all__ = [
4 | 'VMFDataset', 'HMFDataset', 'MovieLensData', 'MovieLens1M', 'MovieLens10M',
5 | 'VFLMovieLens1M', 'HFLMovieLens1M', 'VFLMovieLens10M', 'HFLMovieLens10M'
6 | ]
7 |
--------------------------------------------------------------------------------
/federatedscope/cl/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/data/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/loss/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/model/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/cv/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/nlp/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/nlp/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/optimizer/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/scheduler/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/splitter/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/contrib/worker/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/scripts/example_configs/cora/hpo_ss_fedex_grid.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr: [0.01, 0.01668, 0.02783, 0.04642, 0.07743, 0.12915, 0.21544, 0.35938, 0.59948, 1.0]
2 | train.optimizer.weight_decay: [0.0, 0.001, 0.01, 0.1]
3 | model.dropout: [0.0, 0.5]
4 | train.local_update_steps: [1, 2, 3, 4, 5, 6, 7, 8]
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/metric/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/linear_model/worker/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.linear_model.worker.vertical_client import\
2 | vFLClient
3 | from federatedscope.vertical_fl.linear_model.worker.vertical_server import\
4 | vFLServer
5 |
6 | __all__ = ['vFLServer', 'vFLClient']
7 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/loss/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.loss.binary_cls import BinaryClsLoss
2 | from federatedscope.vertical_fl.loss.regression import RegressionMAELoss, \
3 | RegressionMSELoss
4 |
5 | __all__ = ['BinaryClsLoss', 'RegressionMSELoss', 'RegressionMAELoss']
6 |
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedprox_exp_scripts/run_fedprox_lr.sh:
--------------------------------------------------------------------------------
1 | # Run FedProx (logistic regression) on the synthetic dataset.
2 | set -e
3 |
4 | cd ../..
5 |
6 | echo "Run fedprox on synthetic."
7 |
8 | python federatedscope/main.py --cfg federatedscope/nlp/baseline/fedavg_lr_on_synthetic.yaml \
9 | fedprox.use True \
10 | fedprox.mu 0.1
11 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from os.path import dirname, basename, isfile, join
2 | import glob
3 |
4 | modules = glob.glob(join(dirname(__file__), "*.py"))
5 | __all__ = [
6 | basename(f)[:-3] for f in modules
7 | if isfile(f) and not f.endswith('__init__.py')
8 | ]
9 |
--------------------------------------------------------------------------------
/scripts/example_configs/cora/hpo_ss_fedex.yaml:
--------------------------------------------------------------------------------
1 | hpo.fedex.eta0:
2 | type: cate
3 | choices: [-1.0, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0]
4 | hpo.fedex.gamma:
5 | type: float
6 | lower: 0.0
7 | upper: 1.0
8 | log: False
9 | hpo.fedex.diff:
10 | type: cate
11 | choices: [True, False]
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedprox_exp_scripts/run_fedprox_femnist.sh:
--------------------------------------------------------------------------------
1 | # Run FedProx (ConvNet2 backbone) on FEMNIST.
2 | set -e
3 |
4 | cd ../..
5 |
6 | echo "Run fedprox on femnist."
7 |
8 | python federatedscope/main.py --cfg federatedscope/cv/baseline/fedavg_convnet2_on_femnist.yaml \
9 | fedprox.use True \
10 | fedprox.mu 0.1
11 |
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/repro_exp/args_cifar10_fedsimclr.sh:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------- #
2 | # Fedsimclr
3 | # ---------------------------------------------------------------------- #
4 |
5 | # run_contrastive_learning.sh args: presumably <device> <method> <dataset>
6 | # <extra> — TODO confirm against run_contrastive_learning.sh.
7 | bash run_contrastive_learning.sh 1 fedsimclr cifar10 0.1
8 |
9 |
10 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/hpo_ss_fedex.yaml:
--------------------------------------------------------------------------------
1 | hpo.fedex.eta0:
2 | type: cate
3 | choices: [-1.0, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0]
4 | hpo.fedex.gamma:
5 | type: float
6 | lower: 0.0
7 | upper: 1.0
8 | log: False
9 | hpo.fedex.diff:
10 | type: cate
11 | choices: [True, False]
--------------------------------------------------------------------------------
/scripts/example_configs/quadratic_clientwise.yaml:
--------------------------------------------------------------------------------
1 | client_1:
2 | optimizer:
3 | lr: 0.625
4 | client_2:
5 | optimizer:
6 | lr: 0.125
7 | client_3:
8 | optimizer:
9 | lr: 0.125
10 | client_4:
11 | optimizer:
12 | lr: 0.125
13 | client_5:
14 | optimizer:
15 | lr: 0.025
16 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_fedbn_ft.sh:
--------------------------------------------------------------------------------
1 | # Fine-tune from a previous FedBN run: loads the checkpoint saved under
2 | # exp/sts_imdb_squad/fedbn/ckpt/ (run run_fedbn.sh first).
3 | cd ../FederatedScope/federatedscope/
4 |
5 | python main.py --cfg contrib/configs/config_ft.yaml --cfg_client contrib/configs/config_client_fedbn_ft.yaml outdir exp/sts_imdb_squad/fedbn_ft/ federate.method fedbn federate.load_from exp/sts_imdb_squad/fedbn/ckpt/
6 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/run_opt.sh:
--------------------------------------------------------------------------------
1 | # FedHPOBench: collect 'opt'-mode results for LR on OpenML tabular tasks.
2 | # run_mode.sh args: <dataset> <data-type> <model> <N> <mode>; what N means
3 | # is defined in run_mode.sh — TODO confirm.
4 | bash run_mode.sh 3917@openml tabular lr 3 opt
5 |
6 | bash run_mode.sh 10101@openml tabular lr 3 opt
7 |
8 | bash run_mode.sh 146818@openml tabular lr 1 opt
9 |
10 | bash run_mode.sh 146821@openml tabular lr 3 opt
11 |
12 | bash run_mode.sh 146822@openml tabular lr 2 opt
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedprox_exp_scripts/run_fedprox_shakespeare.sh:
--------------------------------------------------------------------------------
1 | # Run FedProx (LSTM backbone) on Shakespeare.
2 | set -e
3 |
4 | cd ../..
5 |
6 | echo "Run fedprox on shakespeare."
7 |
8 | python federatedscope/main.py --cfg federatedscope/nlp/baseline/fedavg_lstm_on_shakespeare.yaml \
9 | fedprox.use True \
10 | fedprox.mu 0.1
11 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_fedavg_ft.sh:
--------------------------------------------------------------------------------
1 | # Fine-tune from a previous FedAvg run: loads the checkpoint saved under
2 | # exp/sts_imdb_squad/fedavg/ckpt/ (run run_fedavg.sh first).
3 | cd ../FederatedScope/federatedscope/
4 |
5 | python main.py --cfg contrib/configs/config_ft.yaml --cfg_client contrib/configs/config_client_fedavg_ft.yaml outdir exp/sts_imdb_squad/fedavg_ft/ federate.method fedavg federate.load_from exp/sts_imdb_squad/fedavg/ckpt/
6 |
--------------------------------------------------------------------------------
/federatedscope/core/monitors/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.monitors.early_stopper import EarlyStopper
2 | from federatedscope.core.monitors.metric_calculator import MetricCalculator
3 | from federatedscope.core.monitors.monitor import Monitor
4 |
5 | __all__ = ['EarlyStopper', 'MetricCalculator', 'Monitor']
6 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/hpo_ss_fedex_grid.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr: [0.01, 0.01668, 0.02783, 0.04642, 0.07743, 0.12915, 0.21544, 0.35938, 0.59948, 1.0]
2 | train.optimizer.weight_decay: [0.0, 0.001, 0.01, 0.1]
3 | model.dropout: [0.0, 0.5]
4 | train.local_update_steps: [1, 2, 3, 4]
5 | dataloader.batch_size: [16, 32, 64]
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/repro_exp/args_cifar10_fedgc.sh:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------- #
2 | # Fedgc
3 | # ---------------------------------------------------------------------- #
4 |
5 | # run_contrastive_learning.sh args: presumably <device> <method> <dataset>
6 | # <extra> — TODO confirm against run_contrastive_learning.sh.
7 | bash run_contrastive_learning.sh 3 fedgc cifar10 0.1
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/run_pretrain.sh:
--------------------------------------------------------------------------------
1 | # ATC pretraining for the hetero-task NLP benchmark.
2 | # Usage: bash run_pretrain.sh <device_id>
3 | set -e
4 |
5 | cd ../../..
6 |
7 | DEVICE=$1     # device index passed through to the `device` config key
8 | DEBUG=False   # set True for a quick debug run (data.is_debug)
9 |
10 | # The last argument must not end in "\": a dangling line continuation
11 | # silently consumes whatever line follows it.
12 | python federatedscope/main.py \
13 | --cfg federatedscope/nlp/hetero_tasks/baseline/config_pretrain.yaml \
14 | outdir exp/atc/pretrain/ \
15 | device $DEVICE \
16 | data.is_debug $DEBUG
17 |
--------------------------------------------------------------------------------
/scripts/example_configs/cora/run.sh:
--------------------------------------------------------------------------------
1 | # HPO demos on Cora: successive halving (SHA), alone and wrapping FedEX.
2 | # SHA
3 | python hpo.py --cfg scripts/example_configs/cora/sha.yaml
4 |
5 | # SHA wrap FedEX (FedEX related param)
6 | python hpo.py --cfg scripts/example_configs/cora/sha_wrap_fedex.yaml
7 |
8 | # SHA wrap FedEX (arm)
9 | python hpo.py --cfg scripts/example_configs/cora/sha_wrap_fedex_arm.yaml
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedopt_exp_scripts/run_fedopt_lr.sh:
--------------------------------------------------------------------------------
1 | # Run FedOpt (logistic regression) on the synthetic dataset.
2 | set -e
3 |
4 | cd ../..
5 |
6 | echo "Run fedopt on synthetic."
7 |
8 | # The last argument must not end in "\": a dangling line continuation
9 | # silently consumes whatever line follows it.
10 | python federatedscope/main.py --cfg federatedscope/nlp/baseline/fedavg_lr_on_synthetic.yaml \
11 | fedopt.use True \
12 | federate.method FedOpt \
13 | fedopt.optimizer.lr 1.
14 |
--------------------------------------------------------------------------------
/scripts/example_configs/pfedhpo/cifar/ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: cate
3 | choices: [0.001, 0.005, 0.01, 0.05, 0.1]
4 |
5 | train.optimizer.weight_decay:
6 | type: cate
7 | choices: [ 0, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1 ]
8 |
9 | model.dropout:
10 | type: cate
11 | choices: [0, 0.1, 0.2, 0.3, 0.4, 0.5]
12 |
13 |
--------------------------------------------------------------------------------
/federatedscope/attack/privacy_attacks/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.attack.privacy_attacks.GAN_based_attack import *
2 | from federatedscope.attack.privacy_attacks.passive_PIA import *
3 | from federatedscope.attack.privacy_attacks.reconstruction_opt import *
4 |
5 | __all__ = ['DLG', 'InvertGradient', 'GANCRA', 'PassivePropertyInference']
6 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/run.sh:
--------------------------------------------------------------------------------
1 | # HPO demos on FEMNIST: successive halving (SHA), alone and wrapping FedEX.
2 | # SHA
3 | python hpo.py --cfg scripts/example_configs/femnist/sha.yaml
4 |
5 | # SHA wrap FedEX (FedEX related param)
6 | python hpo.py --cfg scripts/example_configs/femnist/sha_wrap_fedex.yaml
7 |
8 | # SHA wrap FedEX (arm)
9 | python hpo.py --cfg scripts/example_configs/femnist/sha_wrap_fedex_arm.yaml
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/fedhpobench/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | from fedhpobench.benchmarks.raw_benchmark import RawBenchmark
2 | from fedhpobench.benchmarks.tabular_benchmark import TabularBenchmark
3 | from fedhpobench.benchmarks.surrogate_benchmark import SurrogateBenchmark
4 |
5 | __all__ = ['RawBenchmark', 'TabularBenchmark', 'SurrogateBenchmark']
6 |
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedopt_exp_scripts/run_fedopt_shakespeare.sh:
--------------------------------------------------------------------------------
1 | # Run FedOpt (LSTM backbone) on Shakespeare, with fedopt.optimizer.lr=1.
2 | set -e
3 |
4 | cd ../..
5 |
6 | echo "Run fedopt on shakespeare."
7 |
8 | python federatedscope/main.py --cfg federatedscope/nlp/baseline/fedavg_lstm_on_shakespeare.yaml \
9 | fedopt.use True \
10 | federate.method FedOpt \
11 | fedopt.optimizer.lr 1.
12 |
--------------------------------------------------------------------------------
/federatedscope/cv/model/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
5 | from federatedscope.cv.model.cnn import ConvNet2, ConvNet5, VGG11
6 | from federatedscope.cv.model.model_builder import get_cnn
7 |
8 | __all__ = ['ConvNet2', 'ConvNet5', 'VGG11', 'get_cnn']
9 |
--------------------------------------------------------------------------------
/scripts/wide_valley_exp_scripts/run_on_cifar10.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | algo=$1
4 | alpha=$2
5 |
6 | for (( i=0; i<5; i++ ))
7 | do
8 | CUDA_VISIBLE_DEVICES="${i}" python federatedscope/main.py --cfg scripts/wide_valley_exp_scripts/${algo}_on_cifar10.yaml seed $i data.splitter_args "[{'alpha': ${alpha}}]" expname ${algo}_${alpha}_${i} &
9 | done
10 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.dataset.adult import Adult
2 | from federatedscope.vertical_fl.dataset.abalone import Abalone
3 | from federatedscope.vertical_fl.dataset.blog import Blog
4 | from federatedscope.vertical_fl.dataset.credit import Credit
5 |
6 | __all__ = ['Adult', 'Abalone', 'Blog', 'Credit']
7 |
--------------------------------------------------------------------------------
/federatedscope/nlp/dataset/preprocess/get_embs.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [ ! -f 'glove.6B.300d.txt' ]; then
4 | wget http://nlp.stanford.edu/data/glove.6B.zip
5 | unzip glove.6B.zip
6 | rm glove.6B.50d.txt glove.6B.100d.txt glove.6B.200d.txt glove.6B.zip
7 | fi
8 |
9 | if [ ! -f embs.json ]; then
10 | python3 get_embs.py
11 | fi
--------------------------------------------------------------------------------
/federatedscope/autotune/pfedhpo/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.autotune.pfedhpo.server import pFedHPOServer
2 | from federatedscope.autotune.pfedhpo.client import pFedHPOClient
3 | from federatedscope.autotune.pfedhpo.fl_server import pFedHPOFLServer
4 |
5 | __all__ = [
6 | 'pFedHPOServer',
7 | 'pFedHPOClient',
8 | 'pFedHPOFLServer',
9 | ]
10 |
--------------------------------------------------------------------------------
/federatedscope/mf/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.mf.trainer.trainer import MFTrainer
2 | from federatedscope.mf.trainer.trainer_sgdmf import wrap_MFTrainer, \
3 | init_sgdmf_ctx, embedding_clip, hook_on_batch_backward
4 |
5 | __all__ = [
6 | 'MFTrainer', 'wrap_MFTrainer', 'init_sgdmf_ctx', 'embedding_clip',
7 | 'hook_on_batch_backward'
8 | ]
9 |
--------------------------------------------------------------------------------
/federatedscope/core/lr.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
class LogisticRegression(torch.nn.Module):
    """Logistic-regression model: a single linear layer producing logits.

    Args:
        in_channels: dimensionality of the input features.
        class_num: number of output classes (output dimensionality).
        use_bias: whether the linear layer includes a bias term.
    """
    def __init__(self, in_channels, class_num, use_bias=True):
        super().__init__()
        # One fully-connected layer; softmax/sigmoid is applied by the
        # loss outside this module, so forward returns raw logits.
        self.fc = torch.nn.Linear(in_channels, class_num, bias=use_bias)

    def forward(self, x):
        logits = self.fc(x)
        return logits
11 |
--------------------------------------------------------------------------------
/scripts/example_configs/single_process.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | federate:
4 | mode: 'standalone'
5 | total_round_num: 20
6 | make_global_eval: False
7 | client_num: 5
8 | share_local_model: True
9 | online_aggr: True
10 | trainer:
11 | type: 'general'
12 | eval:
13 | freq: 10
14 | data:
15 | type: 'toy'
16 | model:
17 | type: 'lr'
18 |
--------------------------------------------------------------------------------
/scripts/wide_valley_exp_scripts/hpo_for_fedentsgd.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | alpha=$1
4 | device=$2
5 |
6 | echo $alpha
7 | echo $device
8 |
9 | CUDA_VISIBLE_DEVICES="${device}" python federatedscope/hpo.py --cfg scripts/wide_valley_exp_scripts/fedentsgd_on_cifar10.yaml hpo.working_folder bo_gp_fedentsgd_${device} outdir bo_gp_fedentsgd_${device} >/dev/null 2>/dev/null
10 |
--------------------------------------------------------------------------------
/environment/requirements-torch1.10.txt:
--------------------------------------------------------------------------------
1 | numpy==1.21.2
2 | scikit-learn==1.0.2
3 | scipy==1.7.3
4 | pandas==1.4.1
# duplicate unpinned "scikit-learn" removed: pip rejects a requirement
# listed twice ("Double requirement given"); it is pinned above as 1.0.2
6 | pytorch==1.10.1
7 | torchvision==0.11.2
8 | torchaudio==0.10.1
9 | cudatoolkit==11.3.1
10 | wandb
11 | tensorboard
12 | tensorboardX
13 | grpcio
14 | grpcio-tools
15 | protobuf==3.19.4
16 | setuptools==61.2.0
17 | fvcore
18 | pympler
19 | iopath
--------------------------------------------------------------------------------
/scripts/example_configs/cora/hpo_ss_sha.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: float
3 | lower: 0.01
4 | upper: 1.0
5 | log: True
6 | train.optimizer.weight_decay:
7 | type: cate
8 | choices: [0.0, 0.001, 0.01, 0.1]
9 | model.dropout:
10 | type: cate
11 | choices: [0.0, 0.5]
12 | train.local_update_steps:
13 | type: cate
14 | choices: [1, 2, 3, 4, 5, 6, 7, 8]
--------------------------------------------------------------------------------
/federatedscope/core/data/raw_translator.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.data.base_translator import BaseDataTranslator
2 |
3 |
class RawDataTranslator(BaseDataTranslator):
    """Pass-through translator: hands the given dataset back unchanged.

    Note: the base-class ``__init__`` is deliberately not invoked here;
    only the two config references are stored.
    """
    def __init__(self, global_cfg, client_cfgs=None):
        # Keep the configs for interface compatibility with other
        # translators; they are not used by this class itself.
        self.global_cfg = global_cfg
        self.client_cfgs = client_cfgs

    def __call__(self, dataset):
        # Identity transform.
        return dataset
11 |
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/run_fedavg.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../../..
4 |
5 | DEVICE=$1
6 | DEBUG=False
7 |
8 | python federatedscope/main.py \
9 | --cfg federatedscope/nlp/hetero_tasks/baseline/config_fedavg.yaml \
10 | --client_cfg federatedscope/nlp/hetero_tasks/baseline/config_client_fedavg.yaml \
11 | outdir exp/fedavg/ \
12 | device $DEVICE \
13 | data.is_debug $DEBUG \
14 |
--------------------------------------------------------------------------------
/scripts/example_configs/toy_rs.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | mode: 'standalone'
4 | total_round_num: 10
5 | make_global_eval: False
6 | client_num: 5
7 | trainer:
8 | type: 'general'
9 | eval:
10 | freq: 5
11 | model:
12 | type: 'lr'
13 | data:
14 | type: 'toy'
15 | hpo:
16 | num_workers: 3
17 | init_cand_num: 3
18 | ss: scripts/example_configs/toy_hpo_ss.yaml
19 |
--------------------------------------------------------------------------------
/scripts/flit_exp_scripts/fedalgo.sh:
--------------------------------------------------------------------------------
1 | bash run_flitplus.sh 1 bbbp flitplustrainer 0.01 0.1 &
2 |
3 | bash run_flitplus.sh 2 bbbp flittrainer 0.1 &
4 |
5 | bash run_flitplus.sh 3 bbbp fedfocaltrainer 0.1 &
6 |
7 | bash run_flitplus.sh 4 bbbp fedvattrainer 0.1 0.1 &
8 |
9 | bash run_flitplus.sh 5 bbbp graphminibatch_trainer 0.1 &
10 |
11 | bash run_fedprox.sh 6 bbbp graphminibatch_trainer 0.1 0.1 &
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/run_isolated.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../../..
4 |
5 | DEVICE=$1
6 | DEBUG=False
7 |
8 | python federatedscope/main.py \
9 | --cfg federatedscope/nlp/hetero_tasks/baseline/config_isolated.yaml \
10 | --client_cfg federatedscope/nlp/hetero_tasks/baseline/config_client_isolated.yaml \
11 | outdir exp/isolated/ \
12 | device $DEVICE \
13 | data.is_debug $DEBUG \
14 |
--------------------------------------------------------------------------------
/scripts/example_configs/cora/hpo_ss_fedex_arm_table.yaml:
--------------------------------------------------------------------------------
1 | hpo.table.idx:
2 | type: cate
3 | choices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80]
4 |
--------------------------------------------------------------------------------
/federatedscope/contrib/trainer/sam_trainer.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_trainer
2 | from federatedscope.core.trainers import BaseTrainer
3 |
4 |
def call_sam_trainer(trainer_type):
    """Builder hook: return the SAM trainer class for 'sam_trainer'.

    Returns ``None`` for any other trainer type so other registered
    builders can be tried.
    """
    if trainer_type != 'sam_trainer':
        return None
    # Import lazily so the contrib module is only loaded when requested.
    from federatedscope.contrib.trainer.sam import SAMTrainer
    return SAMTrainer
9 |
10 |
11 | register_trainer('sam_trainer', call_sam_trainer)
12 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/hpo_ss_fedex_arm_table.yaml:
--------------------------------------------------------------------------------
1 | hpo.table.idx:
2 | type: cate
3 | choices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80]
4 |
--------------------------------------------------------------------------------
/scripts/mf_exp_scripts/run_movielens1m_vfl_standalone.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run MF task on movielens1m."
6 |
7 | python federatedscope/main.py --cfg federatedscope/mf/baseline/vfl_fedavg_standalone_on_movielens1m.yaml \
8 | sgdmf.use False \
9 | train.optimizer.lr 0.8 \
10 | train.local_update_steps 20 \
11 | federate.total_round_num 50 \
12 | dataloader.batch_size 32
--------------------------------------------------------------------------------
/federatedscope/core/workers/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.workers.base_worker import Worker
2 | from federatedscope.core.workers.base_server import BaseServer
3 | from federatedscope.core.workers.base_client import BaseClient
4 | from federatedscope.core.workers.server import Server
5 | from federatedscope.core.workers.client import Client
6 |
7 | __all__ = ['Worker', 'BaseServer', 'BaseClient', 'Server', 'Client']
8 |
--------------------------------------------------------------------------------
/scripts/mf_exp_scripts/run_movielens1m_hfl_standalone.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run MF task on movielens1m."
6 |
7 | python federatedscope/main.py --cfg federatedscope/mf/baseline/hfl_fedavg_standalone_on_movielens1m.yaml \
8 | sgdmf.use False \
9 | train.optimizer.lr 0.8 \
10 | train.local_update_steps 20 \
11 | federate.total_round_num 50 \
12 | dataloader.batch_size 32
13 |
--------------------------------------------------------------------------------
/scripts/optimization_exp_scripts/fedopt_exp_scripts/run_fedopt_femnist.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../..
4 |
5 | echo "Run fedopt on femnist."
6 |
7 | python federatedscope/main.py --cfg federatedscope/cv/baseline/fedavg_convnet2_on_femnist.yaml\
8 | fedopt.use True \
9 | federate.method FedOpt \
10 | fedopt.optimizer.lr 1. \
11 |
--------------------------------------------------------------------------------
/scripts/example_configs/cora/hpo_ss_fedex_arm.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: cate
3 | choices: [0.01, 0.01668, 0.02783, 0.04642, 0.07743, 0.12915, 0.21544, 0.35938, 0.59948, 1.0]
4 | train.optimizer.weight_decay:
5 | type: cate
6 | choices: [0.0, 0.001, 0.01, 0.1]
7 | model.dropout:
8 | type: cate
9 | choices: [0.0, 0.5]
10 | train.local_update_steps:
11 | type: cate
12 | choices: [1, 2, 3, 4, 5, 6, 7, 8]
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Text-DT/run_maml.sh:
--------------------------------------------------------------------------------
1 | cd ../FederatedScope/federatedscope/
2 |
3 | python main.py --cfg contrib/configs/config_maml.yaml --cfg_client contrib/configs/config_client_maml.yaml outdir exp/sts_imdb_squad/maml/
4 |
5 | python main.py --cfg contrib/configs/config_ft.yaml --cfg_client contrib/configs/config_client_maml_ft.yaml outdir exp/sts_imdb_squad/maml/ federate.method maml federate.load_from exp/sts_imdb_squad/maml/ckpt/
6 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/avg/ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: float
3 | lower: 0.01
4 | upper: 1.0
5 | log: True
6 | train.optimizer.weight_decay:
7 | type: float
8 | lower: 0.0
9 | upper: 1.0
10 | model.dropout:
11 | type: cate
12 | choices: [0.0, 0.5]
13 | train.local_update_steps:
14 | type: int
15 | lower: 1
16 | upper: 4
17 | dataloader.batch_size:
18 | type: cate
19 | choices: [16, 32, 64]
20 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/hpo_ss_sha.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: float
3 | lower: 0.01
4 | upper: 1.0
5 | log: True
6 | train.optimizer.weight_decay:
7 | type: cate
8 | choices: [0.0, 0.001, 0.01, 0.1]
9 | model.dropout:
10 | type: cate
11 | choices: [0.0, 0.5]
12 | train.local_update_steps:
13 | type: cate
14 | choices: [1, 2, 3, 4]
15 | dataloader.batch_size:
16 | type: cate
17 | choices: [16, 32, 64]
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/run_atc.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../../..
4 |
5 | DEVICE=$1
6 | DEBUG=False
7 |
8 | python federatedscope/main.py \
9 | --cfg federatedscope/nlp/hetero_tasks/baseline/config_atc.yaml \
10 | --client_cfg federatedscope/nlp/hetero_tasks/baseline/config_client_atc.yaml \
11 | federate.atc_load_from exp/atc/pretrain/ckpt/ \
12 | outdir exp/atc/train/ \
13 | device $DEVICE \
14 | data.is_debug $DEBUG \
15 |
--------------------------------------------------------------------------------
/federatedscope/tabular/model/quadratic.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
class QuadraticModel(torch.nn.Module):
    """Quadratic form ``sum(x * (A @ x))`` with a learnable vector ``x``.

    Args:
        in_channels: length of the learnable vector ``x``.
        class_num: accepted for interface compatibility; unused here.

    ``x`` is initialised uniformly at random in ``[-10, 10]``.
    """
    def __init__(self, in_channels, class_num):
        super(QuadraticModel, self).__init__()
        init = torch.ones((in_channels, 1)).uniform_(-10.0, 10.0).float()
        self.x = torch.nn.parameter.Parameter(init)

    def forward(self, A):
        # Quadratic form evaluated per input matrix A, reduced over the
        # last dimension.
        Ax = torch.matmul(A, self.x)
        return torch.sum(self.x * Ax, -1)
12 |
--------------------------------------------------------------------------------
/scripts/example_configs/quadratic.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | federate:
3 | mode: 'standalone'
4 | total_round_num: 5
5 | make_global_eval: False
6 | client_num: 5
7 | share_local_model: False
8 | online_aggr: False
9 | trainer:
10 | type: 'general'
11 | eval:
12 | freq: 1
13 | data:
14 | type: 'quadratic'
15 | model:
16 | type: 'quadratic'
17 | criterion:
18 | type: 'L1Loss'
19 | train:
20 | optimizer:
21 | lr: 0.01
22 |
--------------------------------------------------------------------------------
/federatedscope/gfl/dataloader/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.gfl.dataloader.dataloader_node import \
2 | load_nodelevel_dataset
3 | from federatedscope.gfl.dataloader.dataloader_graph import \
4 | load_graphlevel_dataset
5 | from federatedscope.gfl.dataloader.dataloader_link import \
6 | load_linklevel_dataset
7 |
8 | __all__ = [
9 | 'load_nodelevel_dataset', 'load_graphlevel_dataset',
10 | 'load_linklevel_dataset'
11 | ]
12 |
--------------------------------------------------------------------------------
/scripts/dp_exp_scripts/run_femnist_dp_standalone.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run NbAFL on femnist."
6 |
7 | python federatedscope/main.py --cfg federatedscope/cv/baseline/fedavg_convnet2_on_femnist.yaml\
8 | nbafl.use True \
9 | nbafl.mu 0.1 \
10 | nbafl.epsilon 20. \
11 | nbafl.constant 1. \
12 | nbafl.w_clip 0.1 \
  federate.join_in_info "['num_sample']"
--------------------------------------------------------------------------------
/environment/extra_dependencies_torch1.10-application.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | # Graph
4 | conda install -y pyg==2.0.4 -c pyg
5 | conda install -y rdkit=2021.09.4=py39hccf6a74_0 -c conda-forge
6 | conda install -y nltk
7 |
8 | # Speech and NLP
9 | conda install -y sentencepiece textgrid typeguard -c conda-forge
10 | conda install -y transformers==4.16.2 tokenizers==0.10.3 datasets -c huggingface -c conda-forge
11 |
12 | # Tabular
13 | conda install -y openml==0.12.2
14 |
--------------------------------------------------------------------------------
/scripts/example_configs/toy_sha.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | mode: 'standalone'
4 | total_round_num: 10
5 | make_global_eval: False
6 | client_num: 5
7 | seed: 12345
8 | trainer:
9 | type: 'general'
10 | eval:
11 | freq: 5
12 | model:
13 | type: 'lr'
14 | data:
15 | type: 'toy'
16 | hpo:
17 | scheduler: sha
18 | num_workers: 0
19 | init_cand_num: 5
20 | ss: scripts/example_configs/toy_hpo_ss.yaml
21 | sha:
22 | budgets: [1, 1]
23 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/exp/run_avg2.sh:
--------------------------------------------------------------------------------
1 | bash run_mode.sh 31@openml tabular mlp 3 avg
2 |
3 | bash run_mode.sh 53@openml tabular mlp 3 avg
4 |
5 | bash run_mode.sh 3917@openml tabular mlp 3 avg
6 |
7 |
8 | bash run_mode.sh cola@huggingface_datasets tabular bert 3 avg
9 | bash run_mode.sh cola@huggingface_datasets tabular bert 3 opt
10 |
11 | bash run_mode.sh sst2@huggingface_datasets tabular bert 3 avg
12 | bash run_mode.sh sst2@huggingface_datasets tabular bert 3 opt
--------------------------------------------------------------------------------
/materials/tutorial/KDD_2022/README.md:
--------------------------------------------------------------------------------
1 | # A Practical Introduction to Federated Learning
2 |
3 | You can find more information and download our slide for KDD'2022 tutorial through the following link: [https://joneswong.github.io/KDD22FLTutorial/](https://joneswong.github.io/KDD22FLTutorial/).
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/doc/README.md:
--------------------------------------------------------------------------------
1 | ## FederatedScope Documentation
2 | Please run the following commands from this directory to compile the documentation. Note that FederatedScope must be installed first.
3 | * The `requirements.txt` is only for documentation of API by Sphinx, which can be automatically generated by Github actions `FederatedScope/.github/workflows/sphinx.yml`.
4 | * Download via `Artifacts` in Github actions.
5 |
6 | ```
7 | pip install -r requirements.txt
8 | make html
9 | ```
10 |
--------------------------------------------------------------------------------
/environment/docker_files/federatedscope-jupyterhub/start-singleuser.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Jupyter Development Team.
3 | # Distributed under the terms of the Modified BSD License.
4 |
5 | set -e
6 |
7 | # set default ip to 0.0.0.0
8 | if [[ "${NOTEBOOK_ARGS} $*" != *"--ip="* ]]; then
9 | NOTEBOOK_ARGS="--ip=0.0.0.0 ${NOTEBOOK_ARGS}"
10 | fi
11 |
12 | # shellcheck disable=SC1091,SC2086
13 | . /usr/local/bin/start.sh jupyterhub-singleuser ${NOTEBOOK_ARGS} "$@"
14 |
--------------------------------------------------------------------------------
/federatedscope/autotune/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.autotune.choice_types import Continuous, Discrete
2 | from federatedscope.autotune.utils import parse_search_space, \
3 | config2cmdargs, config2str
4 | from federatedscope.autotune.algos import get_scheduler
5 | from federatedscope.autotune.run import run_scheduler
6 |
7 | __all__ = [
8 | 'Continuous', 'Discrete', 'parse_search_space', 'config2cmdargs',
9 | 'config2str', 'get_scheduler', 'run_scheduler'
10 | ]
11 |
--------------------------------------------------------------------------------
/federatedscope/core/data/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.data.base_data import StandaloneDataDict, ClientData
2 | from federatedscope.core.data.base_translator import BaseDataTranslator
3 | from federatedscope.core.data.dummy_translator import DummyDataTranslator
4 | from federatedscope.core.data.raw_translator import RawDataTranslator
5 |
6 | __all__ = [
7 | 'StandaloneDataDict', 'ClientData', 'BaseDataTranslator',
8 | 'DummyDataTranslator', 'RawDataTranslator'
9 | ]
10 |
--------------------------------------------------------------------------------
/scripts/distributed_scripts/distributed_configs/distributed_server_no_data.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | client_num: 3
4 | mode: 'distributed'
5 | total_round_num: 20
6 | make_global_eval: False
7 | online_aggr: False
8 | distribute:
9 | use: True
10 | server_host: '127.0.0.1'
11 | server_port: 50051
12 | role: 'server'
13 | trainer:
14 | type: 'general'
15 | eval:
16 | freq: 10
17 | data:
18 | type: ''
19 | model:
20 | type: 'lr'
21 | input_shape: (5,)
--------------------------------------------------------------------------------
/scripts/example_configs/femnist/hpo_ss_fedex_arm.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: cate
3 | choices: [0.01, 0.01668, 0.02783, 0.04642, 0.07743, 0.12915, 0.21544, 0.35938, 0.59948, 1.0]
4 | train.optimizer.weight_decay:
5 | type: cate
6 | choices: [0.0, 0.001, 0.01, 0.1]
7 | model.dropout:
8 | type: cate
9 | choices: [0.0, 0.5]
10 | train.local_update_steps:
11 | type: cate
12 | choices: [1, 2, 3, 4]
13 | dataloader.batch_size:
14 | type: cate
15 | choices: [16, 32, 64]
--------------------------------------------------------------------------------
/federatedscope/contrib/metrics/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_metric
2 |
3 | METRIC_NAME = 'example'
4 |
5 |
def MyMetric(ctx, **kwargs):
    """Example metric: report the number of local training samples.

    Extra keyword arguments from the metric caller are accepted and
    ignored.
    """
    return ctx.num_train_data
8 |
9 |
def call_my_metric(types):
    """Builder hook for the example metric.

    Returns ``(name, builder, the_larger_the_better)`` when the metric
    name appears in *types*, otherwise ``None`` (implicitly).
    """
    if METRIC_NAME not in types:
        return None
    # Larger values of this metric count as better.
    return METRIC_NAME, MyMetric, True
15 |
16 |
17 | register_metric(METRIC_NAME, call_my_metric)
18 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/fedhpobench/optimizers/__init__.py:
--------------------------------------------------------------------------------
1 | from fedhpobench.optimizers.dehb_optimizer import run_dehb
2 | from fedhpobench.optimizers.hpbandster_optimizer import run_hpbandster
3 | from fedhpobench.optimizers.optuna_optimizer import run_optuna
4 | from fedhpobench.optimizers.smac_optimizer import run_smac
5 | from fedhpobench.optimizers.grid_search import run_grid_search
6 |
7 | __all__ = [
8 | 'run_dehb', 'run_hpbandster', 'run_optuna', 'run_smac', 'run_grid_search'
9 | ]
10 |
--------------------------------------------------------------------------------
/federatedscope/contrib/trainer/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_trainer
2 | from federatedscope.core.trainers.torch_trainer import GeneralTorchTrainer
3 |
4 |
5 | # Build your trainer here.
class MyTrainer(GeneralTorchTrainer):
    """Example contributed trainer.

    A stub that inherits every behaviour from ``GeneralTorchTrainer``
    unchanged; override methods here to customise training.
    """
8 |
9 |
def call_my_trainer(trainer_type):
    """Return the example trainer class for type 'mytrainer', else None."""
    return MyTrainer if trainer_type == 'mytrainer' else None
14 |
15 |
16 | register_trainer('mytrainer', call_my_trainer)
17 |
--------------------------------------------------------------------------------
/federatedscope/contrib/loss/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_criterion
2 |
3 |
def call_my_criterion(type, device):
    """Example criterion builder.

    Returns a ``CrossEntropyLoss`` moved to *device* when *type* is
    'mycriterion'; returns ``None`` for other types or when torch is
    not installed.
    """
    if type != 'mycriterion':
        return None
    try:
        import torch.nn as nn
    except ImportError:
        # torch backend unavailable; nothing to build.
        return None
    return nn.CrossEntropyLoss().to(device)
15 |
16 |
17 | register_criterion('mycriterion', call_my_criterion)
18 |
--------------------------------------------------------------------------------
/federatedscope/contrib/trainer/local_entropy_trainer.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_trainer
2 | from federatedscope.core.trainers import BaseTrainer
3 |
4 |
def call_local_entropy_trainer(trainer_type):
    """Builder hook: return LocalEntropyTrainer for its registered name.

    Returns ``None`` for any other trainer type.
    """
    if trainer_type != 'local_entropy_trainer':
        return None
    # Import lazily so the contrib module is only loaded when requested.
    from federatedscope.contrib.trainer.local_entropy \
        import LocalEntropyTrainer
    return LocalEntropyTrainer
10 |
11 |
12 | register_trainer('local_entropy_trainer', call_local_entropy_trainer)
13 |
--------------------------------------------------------------------------------
/federatedscope/cross_backends/distributed_tf_server.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | backend: 'tensorflow'
3 | federate:
4 | client_num: 3
5 | mode: 'distributed'
6 | total_round_num: 20
7 | make_global_eval: True
8 | online_aggr: False
9 | distribute:
10 | use: True
11 | server_host: '127.0.0.1'
12 | server_port: 50051
13 | role: 'server'
14 | trainer:
15 | type: 'general'
16 | eval:
17 | freq: 10
18 | data:
19 | type: 'file'
20 | file_path: 'toy_data/server_data'
21 | model:
22 | type: 'lr'
--------------------------------------------------------------------------------
/scripts/distributed_scripts/distributed_configs/distributed_server.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | client_num: 3
4 | mode: 'distributed'
5 | total_round_num: 20
6 | make_global_eval: True
7 | online_aggr: False
8 | distribute:
9 | use: True
10 | server_host: '127.0.0.1'
11 | server_port: 50051
12 | role: 'server'
13 | data_idx: 0
14 | trainer:
15 | type: 'general'
16 | eval:
17 | freq: 10
18 | data:
19 | type: 'file'
20 | file_path: 'toy_data/all_data'
21 | model:
22 | type: 'lr'
--------------------------------------------------------------------------------
/scripts/example_configs/sha_wrap_fedex_ss_table.yaml:
--------------------------------------------------------------------------------
1 | #hpo.table.idx:
2 | # type: cate
3 | # choices: [0, 1, 2, 3, 4]
4 | hpo.fedex.gamma:
5 | type: float
6 | lower: 0
7 | upper: 1.0
8 | fedopt.optimizer.lr:
9 | type: cate
10 | choices: [0.001, 0.01, 0.1, 1.0]
11 | train.local_update_steps:
12 | type: int
13 | lower: 1
14 | upper: 8
15 | train.optimizer.lr:
16 | type: float
17 | lower: 0.002
18 | upper: 0.05
19 | log: True
20 | model.dropout:
21 | type: cate
22 | choices: [0.0, 0.5]
23 |
--------------------------------------------------------------------------------
/scripts/mf_exp_scripts/run_movielens1m_hflsgdmf_standalone.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run hfl-sgdmf task on movielens1m."
6 |
7 | python federatedscope/main.py --cfg federatedscope/mf/baseline/hfl-sgdmf_fedavg_standalone_on_movielens1m.yaml \
8 | sgdmf.use True \
9 | sgdmf.epsilon 0.5 \
10 | sgdmf.delta 0.5 \
11 | train.optimizer.lr 0.1 \
12 | train.local_update_steps 20 \
13 | federate.total_round_num 50 \
14 | dataloader.batch_size 64
15 |
--------------------------------------------------------------------------------
/scripts/mf_exp_scripts/run_movielens1m_vflsgdmf_standalone.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run vfl-sgdmf task on movielens1m."
6 |
7 | python federatedscope/main.py --cfg federatedscope/mf/baseline/vfl-sgdmf_fedavg_standalone_on_movielens1m.yaml \
8 | sgdmf.use True \
9 | sgdmf.epsilon 0.5 \
10 | sgdmf.delta 0.5 \
11 | train.optimizer.lr 0.1 \
12 | train.local_update_steps 20 \
13 | federate.total_round_num 50 \
14 | dataloader.batch_size 64
15 |
--------------------------------------------------------------------------------
/federatedscope/contrib/configs/__init__.py:
--------------------------------------------------------------------------------
import copy
from os.path import dirname, basename, isfile, join
import glob

# Discover every sibling Python file in this package directory.
modules = glob.glob(join(dirname(__file__), "*.py"))
# Export each module's bare name (strip the trailing ".py"), skipping
# this __init__ itself.
__all__ = [
    basename(f)[:-3] for f in modules
    if isfile(f) and not f.endswith('__init__.py')
]

# to ensure the sub-configs registered before set up the global config
all_sub_configs_contrib = copy.copy(__all__)
# 'config' is excluded from the sub-config registration list; presumably
# it is the global config module rather than a sub-config — verify
# against the core configs package.
if "config" in all_sub_configs_contrib:
    all_sub_configs_contrib.remove('config')
15 |
--------------------------------------------------------------------------------
/federatedscope/attack/worker_as_attacker/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
5 | from federatedscope.attack.worker_as_attacker.active_client import *
6 | from federatedscope.attack.worker_as_attacker.server_attacker import *
7 |
8 | __all__ = [
9 | 'plot_target_loss', 'sav_target_loss', 'callback_funcs_for_finish',
10 | 'add_atk_method_to_Client_GradAscent', 'PassiveServer', 'PassivePIAServer',
11 | 'BackdoorServer'
12 | ]
13 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/linear_model/baseline/vertical_fl.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | federate:
3 | mode: standalone
4 | client_num: 2
5 | total_round_num: 30
6 | model:
7 | type: lr
8 | use_bias: False
9 | train:
10 | optimizer:
11 | lr: 0.05
12 | data:
13 | type: synthetic_vfl_data
14 | dataloader:
15 | type: raw
16 | batch_size: 50
17 | vertical:
18 | use: True
19 | key_size: 256
20 | algo: 'lr'
21 | trainer:
22 | type: none
23 | eval:
24 | freq: 5
25 | best_res_update_round_wise_key: test_loss
26 |
--------------------------------------------------------------------------------
/federatedscope/contrib/optimizer/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_optimizer
2 |
3 |
def call_my_optimizer(model, type, lr, **kwargs):
    """Build an Adam optimizer over ``model`` when ``type`` is 'myoptimizer'.

    Returns ``None`` when the requested type does not match this builder or
    when torch is not installed, so the registry can keep probing others.
    """
    optimizer = None
    try:
        import torch.optim as optim
    except ImportError:
        optim = None

    if type == 'myoptimizer' and optim is not None:
        # Extra keyword arguments are forwarded straight to Adam.
        optimizer = optim.Adam(model.parameters(), lr=lr, **kwargs)
    return optimizer
15 |
16 |
17 | register_optimizer('myoptimizer', call_my_optimizer)
18 |
--------------------------------------------------------------------------------
/federatedscope/cv/trainer/trainer.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_trainer
2 | from federatedscope.core.trainers import GeneralTorchTrainer
3 |
4 |
class CVTrainer(GeneralTorchTrainer):
    """
    ``CVTrainer`` is the same as ``core.trainers.GeneralTorchTrainer``.

    It exists so computer-vision tasks can request a dedicated trainer type
    ('cvtrainer') without duplicating any training logic.
    """
    pass
10 |
11 |
def call_cv_trainer(trainer_type):
    """Return the CV trainer class when ``trainer_type`` is 'cvtrainer'.

    Fix: the previous version placed ``return trainer_builder`` outside the
    ``if``, so any non-matching ``trainer_type`` raised ``UnboundLocalError``.
    Returning ``None`` for unmatched types matches the other registered
    builders (e.g. the contrib optimizer/scheduler examples).
    """
    if trainer_type == 'cvtrainer':
        return CVTrainer
    return None
16 |
17 |
18 | register_trainer('cvtrainer', call_cv_trainer)
19 |
--------------------------------------------------------------------------------
/environment/requirements-torch1.10-application.txt:
--------------------------------------------------------------------------------
1 | numpy==1.21.2
2 | scikit-learn==1.0.2
3 | scipy==1.7.3
4 | pandas==1.4.1
5 | scikit-learn
6 | pytorch==1.10.1
7 | torchvision==0.11.2
8 | torchaudio==0.10.1
9 | cudatoolkit
10 | wandb
11 | tensorboard
12 | tensorboardX
13 | grpcio
14 | grpcio-tools
15 | protobuf==3.19.4
16 | setuptools==61.2.0
17 | pyg==2.0.4
rdkit==2021.09.4
19 | sentencepiece
20 | textgrid
21 | typeguard
22 | nltk
23 | transformers==4.16.2
24 | tokenizers==0.10.3
25 | torchtext
26 | datasets
27 | fvcore
28 | pympler
29 | iopath
30 |
31 |
--------------------------------------------------------------------------------
/federatedscope/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | __version__ = '0.3.0'
4 |
5 |
6 | def _setup_logger():
7 | import logging
8 |
9 | logging_fmt = "%(asctime)s (%(module)s:%(lineno)d)" \
10 | "%(levelname)s: %(message)s"
11 | logger = logging.getLogger("federatedscope")
12 | handler = logging.StreamHandler()
13 | handler.setFormatter(logging.Formatter(logging_fmt))
14 | logger.addHandler(handler)
15 | logger.propagate = False
16 |
17 |
18 | _setup_logger()
19 |
--------------------------------------------------------------------------------
/scripts/distributed_scripts/run_distributed_xgb.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Test distributed mode with XGB..."
6 |
7 | ### server
8 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_xgb_server.yaml &
9 | sleep 2
10 |
11 | # clients
12 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_xgb_client_1.yaml &
13 | sleep 2
14 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_xgb_client_2.yaml &
15 | sleep 2
16 |
17 |
--------------------------------------------------------------------------------
/environment/requirements-torch1.8-application.txt:
--------------------------------------------------------------------------------
1 | numpy==1.19.5
2 | scikit-learn==1.0
3 | scipy==1.6.0
4 | pandas==1.2.1
5 | scikit-learn
6 | pytorch==1.8.0
7 | torchvision==0.9.0
8 | torchaudio==0.8.0
9 | cudatoolkit==10.2.89
10 | wandb
11 | tensorboard
12 | tensorboardX
13 | grpcio
14 | grpcio-tools
15 | protobuf==3.19.1
16 | setuptools==58.0.4
17 | pyg==2.0.1
rdkit==2021.09.4
19 | sentencepiece
20 | textgrid
21 | typeguard
22 | nltk
23 | transformers==4.16.2
24 | tokenizers==0.10.3
25 | torchtext
26 | datasets
27 | fvcore
28 | pympler
29 | iopath
30 |
31 |
32 |
--------------------------------------------------------------------------------
/federatedscope/cross_backends/distributed_tf_client_3.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | backend: 'tensorflow'
3 | federate:
4 | client_num: 3
5 | mode: 'distributed'
6 | total_round_num: 20
7 | make_global_eval: False
8 | online_aggr: False
9 | distribute:
10 | use: True
11 | server_host: '127.0.0.1'
12 | server_port: 50051
13 | client_host: '127.0.0.1'
14 | client_port: 50054
15 | role: 'client'
16 | trainer:
17 | type: 'general'
18 | eval:
19 | freq: 10
20 | data:
21 | type: 'file'
22 | file_path: 'toy_data/client_3_data'
23 | model:
24 | type: 'lr'
--------------------------------------------------------------------------------
/scripts/distributed_scripts/distributed_configs/distributed_client_1.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | client_num: 3
4 | mode: 'distributed'
5 | total_round_num: 20
6 | make_global_eval: False
7 | online_aggr: False
8 | distribute:
9 | use: True
10 | server_host: '127.0.0.1'
11 | server_port: 50051
12 | client_host: '127.0.0.1'
13 | client_port: 50052
14 | role: 'client'
15 | data_idx: 1
16 | trainer:
17 | type: 'general'
18 | eval:
19 | freq: 10
20 | data:
21 | type: 'file'
22 | file_path: 'toy_data/all_data'
23 | model:
24 | type: 'lr'
--------------------------------------------------------------------------------
/scripts/distributed_scripts/distributed_configs/distributed_client_2.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | client_num: 3
4 | mode: 'distributed'
5 | total_round_num: 20
6 | make_global_eval: False
7 | online_aggr: False
8 | distribute:
9 | use: True
10 | server_host: '127.0.0.1'
11 | server_port: 50051
12 | client_host: '127.0.0.1'
13 | client_port: 50053
14 | role: 'client'
15 | data_idx: 2
16 | trainer:
17 | type: 'general'
18 | eval:
19 | freq: 10
20 | data:
21 | type: 'file'
22 | file_path: 'toy_data/all_data'
23 | model:
24 | type: 'lr'
--------------------------------------------------------------------------------
/scripts/distributed_scripts/distributed_configs/distributed_client_3.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | federate:
3 | client_num: 3
4 | mode: 'distributed'
5 | total_round_num: 20
6 | make_global_eval: False
7 | online_aggr: False
8 | distribute:
9 | use: True
10 | server_host: '127.0.0.1'
11 | server_port: 50051
12 | client_host: '127.0.0.1'
13 | client_port: 50054
14 | role: 'client'
15 | data_idx: 3
16 | trainer:
17 | type: 'general'
18 | eval:
19 | freq: 10
20 | data:
21 | type: 'file'
22 | file_path: 'toy_data/all_data'
23 | model:
24 | type: 'lr'
--------------------------------------------------------------------------------
/federatedscope/core/auxiliaries/ReIterator.py:
--------------------------------------------------------------------------------
class ReIterator:
    """Iterator wrapper that transparently restarts when the source runs dry.

    Exhausting the underlying loader does not raise ``StopIteration`` to the
    caller; instead a fresh iterator is created and iteration continues from
    the beginning.
    """
    def __init__(self, loader):
        self.loader = loader
        self.iterator = iter(loader)
        # Kept for external bookkeeping; this class never flips it itself.
        self.reset_flag = False

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.iterator)
        except StopIteration:
            # Restart from the loader; an empty loader will still raise.
            self.reset()
            return next(self.iterator)

    def reset(self):
        """Re-create the internal iterator from the wrapped loader."""
        self.iterator = iter(self.loader)
20 |
--------------------------------------------------------------------------------
/federatedscope/gfl/dataset/__init__.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

from federatedscope.gfl.dataset.recsys import RecSys
from federatedscope.gfl.dataset.dblp_new import DBLPNew
from federatedscope.gfl.dataset.kg import KG
from federatedscope.gfl.dataset.cSBM_dataset import dataset_ContextualSBM
from federatedscope.gfl.dataset.cikm_cup import CIKMCUPDataset

# Graph datasets exported by the gfl subpackage.
__all__ = [
    'RecSys', 'DBLPNew', 'KG', 'dataset_ContextualSBM', 'CIKMCUPDataset'
]
14 |
--------------------------------------------------------------------------------
/scripts/example_configs/openml_lr.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 400
8 | client_num: 5
9 | share_local_model: True
10 | online_aggr: True
11 | trainer:
12 | type: 'general'
13 | eval:
14 | freq: 10
15 | metrics: ['acc', 'correct']
16 | data:
17 | type: '10101@openml' # task_id@openml
18 | splits: [0.8, 0.1, 0.1]
19 | splitter: 'lda'
20 | splitter_args: [{'alpha': 0.5}]
21 | model:
22 | type: lr
23 | out_channels: 2
24 | criterion:
25 | type: CrossEntropyLoss
26 |
--------------------------------------------------------------------------------
/federatedscope/contrib/model/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_model
2 |
3 |
# Build your torch or tf model class here; MyNet is a placeholder with no
# parameters or forward logic.
class MyNet(object):
    pass
7 |
8 |
9 | # Instantiate your model class with config and data
def ModelBuilder(model_config, local_data):
    """Instantiate ``MyNet``.

    ``model_config`` and ``local_data`` are accepted to match the builder
    signature used by the model registry; the placeholder net ignores both.
    """
    return MyNet()
15 |
16 |
def call_my_net(model_config, local_data):
    """Build ``MyNet`` when ``model_config.type`` is 'mynet'; else return None.

    Fix: the previous version returned the local ``model`` outside the ``if``
    without initializing it, so any non-matching type raised
    ``UnboundLocalError``. Returning ``None`` matches the other contrib
    examples (optimizer/scheduler), which pre-initialize their result.
    """
    model = None
    if model_config.type == "mynet":
        model = ModelBuilder(model_config, local_data)
    return model
21 |
22 |
23 | register_model("mynet", call_my_net)
24 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.3.0
4 | hooks:
5 | - id: check-yaml
6 | exclude: |
7 | (?x)^(
8 | meta.yaml
9 | )$
10 | - repo: https://github.com/pre-commit/mirrors-yapf
11 | rev: v0.32.0
12 | hooks:
13 | - id: yapf
14 | - repo: https://github.com/PyCQA/flake8
15 | rev: 4.0.1
16 | hooks:
17 | - id: flake8
18 | - repo: https://github.com/regebro/pyroma
19 | rev: "4.0"
20 | hooks:
21 | - id: pyroma
22 | args: [--min=10, .]
--------------------------------------------------------------------------------
/benchmark/pFL-Bench/FEMNIST-s02/run_fedopt_bn_plus_sweep.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | wandb sweep sweep_fedOpt.yaml
4 | wandb sweep sweep_fedOpt_FT.yaml
5 |
6 | wandb sweep sweep_ditto_fedBN.yaml
7 | wandb sweep sweep_ditto_fedBN_fedOpt.yaml
8 | wandb sweep sweep_ditto_FT_fedBN.yaml
9 | wandb sweep sweep_ditto_FT_fedBN_fedOpt.yaml
10 |
11 | wandb sweep sweep_fedBN_fedOpt.yaml
12 | wandb sweep sweep_fedBN_FT_fedOpt.yaml
13 |
14 | wandb sweep sweep_fedEM_fedBN.yaml
15 | wandb sweep sweep_fedEM_fedBN_fedOpt.yaml
16 | wandb sweep sweep_fedEM_FT_fedBN.yaml
17 | wandb sweep sweep_fedEM_FT_fedBN_fedOpt.yaml
18 |
--------------------------------------------------------------------------------
/federatedscope/core/strategy.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 |
class Strategy(object):
    """Container pairing a strategy type with its triggering threshold.

    Both attributes are exposed through read/write properties so later
    validation can be added without changing callers.
    """
    def __init__(self, stg_type=None, threshold=0):
        self._stg_type = stg_type
        self._threshold = threshold

    @property
    def stg_type(self):
        """Identifier of the strategy."""
        return self._stg_type

    @stg_type.setter
    def stg_type(self, value):
        self._stg_type = value

    @property
    def threshold(self):
        """Numeric threshold associated with the strategy."""
        return self._threshold

    @threshold.setter
    def threshold(self, value):
        self._threshold = value

    def __repr__(self):
        # Added for debuggability; the class previously fell back to the
        # uninformative default object repr.
        return '%s(stg_type=%r, threshold=%r)' % (
            type(self).__name__, self._stg_type, self._threshold)
24 |
--------------------------------------------------------------------------------
/federatedscope/nlp/baseline/fedavg_lr_on_synthetic.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | federate:
6 | mode: standalone
7 | total_round_num: 200
8 | sample_client_rate: 0.2
9 | client_num: 300
10 | data:
11 | root: data/
12 | type: synthetic
13 | subsample: 1.0
14 | model:
15 | type: lr
16 | out_channels: 2
17 | train:
18 | local_update_steps: 10
19 | optimizer:
20 | lr: 0.1
21 | weight_decay: 0.0
22 | criterion:
23 | type: CrossEntropyLoss
24 | trainer:
25 | type: nlptrainer
26 | eval:
27 | freq: 10
28 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/scripts/wide_valley_exp_scripts/search_space_for_fedentsgd.yaml:
--------------------------------------------------------------------------------
1 | trainer.local_entropy.gamma:
2 | type: float
3 | lower: 0.0
4 | upper: 0.1
5 | trainer.local_entropy.inc_factor:
6 | type: cate
7 | choices: [0.0, 1.0001, 1.001]
8 | trainer.local_entropy.eps:
9 | type: float
10 | lower: 1e-5
11 | upper: 1e-2
12 | log: True
13 | trainer.local_entropy.alpha:
14 | type: float
15 | lower: 0.75
16 | upper: 1.0
17 | fedopt.optimizer.lr:
18 | type: float
19 | lower: 0.01
20 | upper: 10.0
21 | log: True
22 | fedopt.annealing:
23 | type: cate
24 | choices: [False, True]
25 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/example.py:
--------------------------------------------------------------------------------
# Minimal end-to-end usage example of the FedHPOBench tabular benchmark.
from fedhpobench.config import fhb_cfg
from fedhpobench.benchmarks import TabularBenchmark

# Look up pre-computed results for a CNN on FEMNIST trained with FedAvg.
benchmark = TabularBenchmark('cnn', 'femnist', 'avg')

# get hyperparameters space
config_space = benchmark.get_configuration_space(CS=True)

# get fidelity space
fidelity_space = benchmark.get_fidelity_space(CS=True)

# get results
# Query the benchmark with one sampled configuration/fidelity pair.
res = benchmark(config_space.sample_configuration(),
                fidelity_space.sample_configuration(),
                fhb_cfg=fhb_cfg,
                seed=12345)
print(res)
18 |
--------------------------------------------------------------------------------
/federatedscope/contrib/scheduler/example.py:
--------------------------------------------------------------------------------
1 | from federatedscope.register import register_scheduler
2 |
3 |
def call_my_scheduler(optimizer, reg_type):
    """Build a LambdaLR scheduler when ``reg_type`` is 'myscheduler'.

    Returns ``None`` when the requested type does not match this builder or
    when torch is not installed, so the registry can keep probing others.
    """
    scheduler = None
    try:
        import torch.optim as optim
    except ImportError:
        optim = None

    if reg_type == 'myscheduler' and optim is not None:
        # One lambda per parameter group: scale the lr by epoch // 30.
        scheduler = optim.lr_scheduler.LambdaLR(optimizer,
                                                [lambda epoch: epoch // 30])
    return scheduler
16 |
17 |
18 | register_scheduler('myscheduler', call_my_scheduler)
19 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/loss/utils.py:
--------------------------------------------------------------------------------
def get_vertical_loss(loss_type, model_type):
    """Instantiate the vertical-FL loss object matching ``loss_type``.

    Imports are kept inside the branches so only the selected loss module is
    loaded. Unknown ``loss_type`` values fall through and yield ``None``,
    matching the original implicit-return behavior.
    """
    if loss_type == 'CrossEntropyLoss':
        from federatedscope.vertical_fl.loss import BinaryClsLoss as loss_cls
    elif loss_type == 'RegressionMSELoss':
        from federatedscope.vertical_fl.loss import RegressionMSELoss \
            as loss_cls
    elif loss_type == 'RegressionMAELoss':
        from federatedscope.vertical_fl.loss import RegressionMAELoss \
            as loss_cls
    else:
        return None
    return loss_cls(model_type=model_type)
11 |
--------------------------------------------------------------------------------
/federatedscope/gfl/trainer/__init__.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

from federatedscope.gfl.trainer.graphtrainer import GraphMiniBatchTrainer
from federatedscope.gfl.trainer.linktrainer import LinkFullBatchTrainer, \
    LinkMiniBatchTrainer
from federatedscope.gfl.trainer.nodetrainer import NodeFullBatchTrainer, \
    NodeMiniBatchTrainer

# Graph/link/node-level trainers exported by the gfl subpackage.
__all__ = [
    'GraphMiniBatchTrainer', 'LinkFullBatchTrainer', 'LinkMiniBatchTrainer',
    'NodeFullBatchTrainer', 'NodeMiniBatchTrainer'
]
15 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/hfl_fedavg_standalone_on_movielens1m.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | client_num: 5
9 | data:
10 | root: data/
11 | type: HFLMovieLens1M
12 | dataloader:
13 | type: mf
14 | model:
15 | type: HMFNet
16 | hidden: 20
17 | train:
18 | local_update_steps: 50
19 | optimizer:
20 | lr: 1.
21 | criterion:
22 | type: MSELoss
23 | trainer:
24 | type: mftrainer
25 | eval:
26 | freq: 100
27 | metrics: []
28 | best_res_update_round_wise_key: test_avg_loss
29 | count_flops: False
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/fedhpobench/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import print_function
3 | from __future__ import division
4 |
__version__ = '0.0.1'


def _setup_logger():
    """Install a stream handler on the package-level 'fedhpobench' logger."""
    import logging

    fmt = ("%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: "
           "%(message)s")
    pkg_logger = logging.getLogger("fedhpobench")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt))
    pkg_logger.addHandler(stream_handler)
    # Keep records from also reaching the root logger (avoids duplicates).
    pkg_logger.propagate = False


_setup_logger()
21 |
--------------------------------------------------------------------------------
/doc/news/06-13-2022_Declaration_of_Emergency.txt:
--------------------------------------------------------------------------------
1 | [06-13-2022] Declaration of Emergency:
Based on our preliminary findings, our project seems to be under attack from an unknown source. The number of stars has increased abnormally, with around 200 stars coming from "virtual" users. As of June 10, 2022, the number of stars was around 374. We are paying close attention to resolving this issue.
3 |
4 | To attackers: No matter what the purpose is, please stop attacking our project in this cheap way!
5 |
6 | To developers/researchers: You are always welcome to contribute and join FederatedScope; we believe we together can advance the field of Federated Learning!
7 |
--------------------------------------------------------------------------------
/scripts/example_configs/fedex_for_lr.yaml:
--------------------------------------------------------------------------------
1 | outdir: 'fedex_test'
2 | use_gpu: True
3 | device: 1
4 | federate:
5 | mode: 'standalone'
6 | total_round_num: 20
7 | make_global_eval: False
8 | client_num: 5
9 | share_local_model: True
10 | online_aggr: True
11 | save_to: 'fedex_test/lr.pth'
12 | use_diff: True
13 | trainer:
14 | type: 'general'
15 | eval:
16 | freq: 10
17 | data:
18 | type: 'toy'
19 | model:
20 | type: 'lr'
21 | hpo:
22 | fedex:
23 | use: True
24 | # ss: 'scripts/example_configs/fedex_flat_search_space.yaml'
25 | ss: 'scripts/example_configs/fedex_grid_search_space.yaml'
26 | diff: True
27 |
--------------------------------------------------------------------------------
/scripts/flit_exp_scripts/run_fedavg-fedfocal-flit_cls.sh:
--------------------------------------------------------------------------------
# Abort on the first failing command.
set -e

# Run from the repository root.
cd ../..

# Positional arguments: CUDA device id, dataset name, trainer type, FLIT alpha.
cudaid=$1
dataset=$2
trainer=$3
alpha=$4

# Collect per-run logs under out/.
if [ ! -d "out" ];then
    mkdir out
fi

# Repeat each configuration with three seeds (k = 1..3).
for k in {1..3}
do
    echo "k=${k}, Trainer=${trainer}, data=${dataset}, alpha=${alpha} starts..."
    python federatedscope/main.py --cfg federatedscope/gfl/flitplus/fedalgo_cls.yaml device ${cudaid} data.type ${dataset} trainer.type ${trainer} flitplus.alpha ${alpha} seed ${k} >>out/${trainer}_on_${dataset}_k${k}_alpha${alpha}.log 2>&1
    echo "k=${k}, Trainer=${trainer}, data=${dataset}, alpha=${alpha} ends."
done
20 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/vfl_fedavg_standalone_on_movielens1m.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 10
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | client_num: 5
9 | data:
10 | root: data/
11 | type: VFLMovieLens1M
12 | dataloader:
13 | type: mf
14 | model:
15 | type: VMFNet
16 | hidden: 20
17 | train:
18 | local_update_steps: 20
19 | optimizer:
20 | lr: 1.
21 | criterion:
22 | type: MSELoss
23 | trainer:
24 | type: mftrainer
25 | eval:
26 | freq: 100
27 | metrics: []
28 | best_res_update_round_wise_key: test_avg_loss
29 | count_flops: False
30 | sgdmf:
31 | use: False
32 |
--------------------------------------------------------------------------------
/scripts/example_configs/fedex_flat_search_space.yaml:
--------------------------------------------------------------------------------
1 | arm0:
2 | train.optimizer.lr: 0.01
3 | train.optimizer.momentum: 0.1
4 | arm1:
5 | train.optimizer.lr: 0.01
6 | train.optimizer.momentum: 0.9
7 | arm2:
8 | train.optimizer.lr: 0.02
9 | train.optimizer.momentum: 0.2
10 | arm3:
11 | train.optimizer.lr: 0.02
12 | train.optimizer.momentum: 0.8
13 | arm4:
14 | train.optimizer.lr: 0.04
15 | train.optimizer.momentum: 0.1
16 | arm5:
17 | train.optimizer.lr: 0.04
18 | train.optimizer.momentum: 0.7
19 | arm6:
20 | train.optimizer.lr: 0.08
21 | train.optimizer.momentum: 0.3
22 | arm7:
23 | train.optimizer.lr: 0.08
24 | train.optimizer.momentum: 0.8
25 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/openml_lr.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | splits: [0.8, 0.1, 0.1]
20 | splitter: 'lda'
21 | splitter_args: [{'alpha': 0.5}]
22 | model:
23 | type: lr
24 | out_channels: 2
25 | optimizer:
26 | lr: 0.0001
27 | weight_decay: 0.0
28 | criterion:
29 | type: CrossEntropyLoss
30 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/openml_mlp.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | splits: [0.8, 0.1, 0.1]
20 | splitter: 'lda'
21 | splitter_args: [{'alpha': 0.5}]
22 | model:
23 | type: mlp
24 | out_channels: 2
25 | optimizer:
26 | lr: 0.0001
27 | weight_decay: 0.0
28 | criterion:
29 | type: CrossEntropyLoss
30 |
--------------------------------------------------------------------------------
/scripts/attack_exp_scripts/privacy_attack/PIA_toy.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | seed: 12345
4 | federate:
5 | mode: standalone
6 | total_round_num: 10
7 | sample_client_num: 5
8 | client_num: 10
9 | data:
10 | root: data/
11 | type: toy
12 | splits: [0.6,0.2,0.2]
13 | subsample: 0.0001
14 | dataloader:
15 | batch_size: 1
16 | model:
17 | type: lr
18 | hidden: 2048
19 | out_channels: 62
20 | train:
21 | local_update_steps: 2
22 | optimizer:
23 | lr: 0.01
24 | weight_decay: 0.0
25 | trainer:
26 | type: general
27 | eval:
28 | freq: 10
29 | metrics: ['acc', 'correct']
30 | attack:
31 | attack_method: PassivePIA
32 | classifier_PIA: svm
--------------------------------------------------------------------------------
/federatedscope/core/gRPC_server.py:
--------------------------------------------------------------------------------
1 | import queue
2 | from collections import deque
3 |
4 | from federatedscope.core.proto import gRPC_comm_manager_pb2, \
5 | gRPC_comm_manager_pb2_grpc
6 |
7 |
class gRPCComServeFunc(gRPC_comm_manager_pb2_grpc.gRPCComServeFuncServicer):
    """gRPC servicer that buffers incoming messages in an in-memory queue."""
    def __init__(self):
        # deque provides thread-safe O(1) append/popleft for the gRPC
        # worker threads feeding this buffer.
        self.msg_queue = deque()

    def sendMessage(self, request, context):
        """RPC endpoint: enqueue the incoming message and acknowledge it."""
        self.msg_queue.append(request)

        return gRPC_comm_manager_pb2.MessageResponse(msg='ACK')

    def receive(self):
        """Block until a message is available, then pop and return it.

        NOTE(review): this is a busy-wait that spins a CPU core while the
        queue is empty; a blocking ``queue.Queue.get`` (the ``queue`` module
        is already imported above) would avoid that — confirm no caller
        depends on ``msg_queue`` being a deque before changing.
        """
        while len(self.msg_queue) == 0:
            continue
        msg = self.msg_queue.popleft()
        return msg
22 |
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_gcn_fullbatch_on_dblpnew.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 7
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | total_round_num: 400
10 | data:
11 | root: data/
12 | type: dblp_conf
13 | splits: [0.5, 0.2, 0.3]
14 | dataloader:
15 | type: pyg
16 | batch_size: 1
17 | model:
18 | type: gcn
19 | hidden: 1024
20 | out_channels: 4
21 | task: node
22 | train:
23 | optimizer:
24 | lr: 0.05
25 | weight_decay: 0.0005
26 | criterion:
27 | type: CrossEntropyLoss
28 | trainer:
29 | type: nodefullbatch_trainer
30 | eval:
31 | metrics: ['acc', 'correct']
32 |
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/local_gnn_node_fullbatch_citation.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | make_global_eval: True
8 | client_num: 5
9 | total_round_num: 400
10 | method: 'local'
11 | data:
12 | root: data/
13 | type: cora
14 | splitter: 'louvain'
15 | dataloader:
16 | type: pyg
17 | batch_size: 1
18 | model:
19 | type: gcn
20 | hidden: 64
21 | dropout: 0.5
22 | out_channels: 7
23 | task: node
24 | train:
25 | optimizer:
26 | lr: 0.05
27 | weight_decay: 0.0005
28 | type: SGD
29 | criterion:
30 | type: CrossEntropyLoss
31 | trainer:
32 | type: graphfullbatch
33 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/31@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 31@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/3@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 3@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/53@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 53@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 4
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/3@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 3@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/scripts/example_configs/sha_wrap_fedex_arm.yaml:
--------------------------------------------------------------------------------
1 | outdir: 'sha_wrap_fedex'
2 | use_gpu: True
3 | federate:
4 | mode: 'standalone'
5 | total_round_num: 20
6 | make_global_eval: False
7 | client_num: 5
8 | share_local_model: False
9 | online_aggr: False
10 | use_diff: True
11 | trainer:
12 | type: 'general'
13 | eval:
14 | freq: 10
15 | data:
16 | type: 'toy'
17 | model:
18 | type: 'lr'
19 | fedopt:
20 | use: True
21 | hpo:
22 | scheduler: wrap_sha
23 | #num_workers: 1
24 | num_workers: 0
25 | init_cand_num: 5
26 | ss: 'scripts/example_configs/sha_wrap_fedex_ss_table.yaml'
27 | table:
28 | num: 4
29 | sha:
30 | budgets: [2, 2]
31 | fedex:
32 | use: True
33 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/12@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 12@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 10
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/3917@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 3917@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/7592@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 7592@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/9952@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9952@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/9977@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9977@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/9981@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9981@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 9
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/12@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 12@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 10
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/31@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 31@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/53@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 53@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 4
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/hfl_fedavg_standalone_on_netflix.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | client_num: 480189
9 | online_aggr: True
10 | share_local_model: True
11 | sample_client_rate: 0.0001
12 | data:
13 | root: data/
14 | type: HFLNetflix
15 | dataloader:
16 | type: mf
17 | model:
18 | type: HMFNet
19 | hidden: 10
20 | train:
21 | local_update_steps: 50
22 | optimizer:
23 | lr: 1.
24 | criterion:
25 | type: MSELoss
26 | trainer:
27 | type: mftrainer
28 | eval:
29 | freq: 100
30 | metrics: []
31 | best_res_update_round_wise_key: test_avg_loss
32 | count_flops: False
--------------------------------------------------------------------------------
/federatedscope/nlp/baseline/fedavg_lstm_on_subreddit.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 10
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | sample_client_num: 10
9 | data:
10 | root: data/
11 | type: subreddit
12 | subsample: 1.0
13 | dataloader:
14 | batch_size: 5
15 | model:
16 | type: lstm
17 | in_channels: 10000
18 | out_channels: 10000
19 | hidden: 256
20 | embed_size: 200
21 | dropout: 0.0
22 | train:
23 | local_update_steps: 10
24 | optimizer:
25 | lr: 8.0
26 | weight_decay: 0.0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: nlptrainer
31 | eval:
32 | freq: 10
33 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/scripts/flit_exp_scripts/run_fedprox_cls.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../..
4 |
5 | cudaid=$1
6 | dataset=$2
7 | trainer=$3
8 | mu=$4
9 | alpha=$5
10 |
11 | if [ ! -d "out" ];then
12 | mkdir out
13 | fi
14 |
15 | for k in {1..3}
16 | do
17 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, mu=${mu}, alpha=${alpha} starts..."
18 | python federatedscope/main.py --cfg federatedscope/gfl/flitplus/fedalgo_cls.yaml device ${cudaid} data.type ${dataset} trainer.type ${trainer} fedprox.use True fedprox.mu ${mu} flitplus.alpha ${alpha} seed ${k} >>out/${trainer}_on_${dataset}_k${k}_mu${mu}_alpha${alpha}.log 2>&1
19 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, mu=${mu}, alpha=${alpha} ends."
20 | done
21 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/10101@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 10101@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/146212@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146212@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 7
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/146606@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146606@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/146818@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146818@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/146821@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146821@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 4
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/146822@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146822@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 7
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/14965@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 14965@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/167119@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 167119@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 3
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/167120@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 167120@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/168911@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 168911@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/lr/168912@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 168912@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: lr
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/10101@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 10101@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/146212@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146212@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 7
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/146606@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146606@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/146818@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146818@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/146821@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146821@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 4
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/146822@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 146822@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 7
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/14965@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 14965@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/167119@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 167119@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 3
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/167120@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 167120@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/168911@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 168911@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/168912@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 168912@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/3917@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 3917@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/7592@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 7592@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/9952@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9952@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/9977@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9977@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 2
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/mlp/9981@openml.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: 'standalone'
7 | total_round_num: 250
8 | batch_or_epoch: 'epoch'
9 | client_num: 5
10 | share_local_model: True
11 | online_aggr: True
12 | trainer:
13 | type: 'general'
14 | eval:
15 | freq: 1
16 | metrics: ['acc', 'correct', 'f1']
17 | split: ['train', 'val', 'test']
18 | data:
19 | type: 9981@openml
20 | splits: [0.8, 0.1, 0.1]
21 | splitter: 'lda'
22 | splitter_args: [{'alpha': 0.5}]
23 | model:
24 | type: mlp
25 | out_channels: 9
26 | optimizer:
27 | lr: 0.0001
28 | weight_decay: 0.0
29 | criterion:
30 | type: CrossEntropyLoss
31 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.tree_based_models.trainer.trainer \
2 | import VerticalTrainer
3 | from federatedscope.vertical_fl.tree_based_models.trainer.\
4 | random_forest_trainer import RandomForestTrainer
5 | from federatedscope.vertical_fl.tree_based_models.trainer.\
6 | feature_order_protected_trainer import createFeatureOrderProtectedTrainer
7 | from federatedscope.vertical_fl.tree_based_models.trainer.\
8 | label_protected_trainer import createLabelProtectedTrainer
9 |
10 | __all__ = [
11 | 'VerticalTrainer', 'RandomForestTrainer',
12 | 'createFeatureOrderProtectedTrainer', 'createLabelProtectedTrainer'
13 | ]
14 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/ditto/ditto_lr_on_synthetic.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 1
6 | federate:
7 | mode: standalone
8 | total_round_num: 200
9 | sample_client_rate: 0.2
10 | client_num: 300
11 | data:
12 | root: data/
13 | type: synthetic
14 | subsample: 1.0
15 | personalization:
16 | local_update_steps: 30
17 | lr: 0.1
18 | regular_weight: 0.1
19 | model:
20 | type: lr
21 | out_channels: 2
22 | train:
23 | local_update_steps: 30
24 | optimizer:
25 | lr: 0.1
26 | weight_decay: 0.0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: nlptrainer
31 | eval:
32 | freq: 10
33 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/federatedscope/autotune/baseline/vfl_ss.yaml:
--------------------------------------------------------------------------------
1 | train.optimizer.lr:
2 | type: float
3 | lower: 0.01
4 | upper: 1.0
5 | log: True
6 | model.num_of_trees:
7 | type: int
8 | lower: 3
9 | upper: 5
10 | vertical.algo:
11 | type: cate
12 | choices: ['lr', 'xgb']
13 | feat_engr.type:
14 | type: cate
15 | choices: ['', 'min_max_norm', 'instance_norm', 'standardization', 'log_transform', 'uniform_binning', 'quantile_binning', 'correlation_filter', 'variance_filter', 'iv_filter']
16 | condition1:
17 | type: equal
18 | child: model.num_of_trees
19 | parent: vertical.algo
20 | value: 'xgb'
21 | condition2:
22 | type: equal
23 | child: train.optimizer.lr
24 | parent: vertical.algo
25 | value: 'lr'
26 |
--------------------------------------------------------------------------------
/scripts/distributed_scripts/run_distributed_conv_femnist.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Run distributed mode with ConvNet-2 on FEMNIST..."
6 |
7 | ### server
8 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_femnist_server.yaml &
9 | sleep 2
10 |
11 | # clients
12 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_femnist_client_1.yaml &
13 | sleep 2
14 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_femnist_client_2.yaml &
15 | sleep 2
16 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_femnist_client_3.yaml &
17 |
18 |
--------------------------------------------------------------------------------
/federatedscope/attack/trainer/PIA_trainer.py:
--------------------------------------------------------------------------------
1 | from typing import Type
2 |
3 | from federatedscope.core.trainers import GeneralTorchTrainer
4 | from federatedscope.attack.auxiliary.utils import get_data_property
5 |
6 |
def wrap_ActivePIATrainer(
        base_trainer: Type[GeneralTorchTrainer]) -> Type[GeneralTorchTrainer]:
    """Wrap a trainer for the active property-inference attack (PIA).

    Copies the attack's loss-mixing coefficient ``alpha_prop_loss`` from the
    trainer's config into its context so the PIA hooks can read it.

    Args:
        base_trainer: the trainer to be wrapped in place.

    Returns:
        The same trainer instance, now carrying ``ctx.alpha_prop_loss``.
    """
    base_trainer.ctx.alpha_prop_loss = base_trainer._cfg.attack.alpha_prop_loss
    # NOTE(review): the PIA hooks defined below are never registered here --
    # confirm whether register_hook calls are missing from this wrapper.
    # Bug fix: the signature promises the wrapped trainer, but the original
    # implicitly returned None, breaking the `trainer = wrap_X(trainer)`
    # convention used by the other attack wrappers.
    return base_trainer
10 |
11 |
def hood_on_batch_start_get_prop(ctx):
    # Hook (batch start): extract the property label of the current data
    # batch and stash it on the context for the PIA loss hook to consume.
    # NOTE(review): "hood" looks like a typo for "hook"; kept as-is because
    # renaming would break any external references -- confirm and rename in
    # a coordinated change.
    ctx.prop = get_data_property(ctx.data_batch)
14 |
15 |
def hook_on_batch_forward_add_PIA_loss(ctx):
    """Hook (batch forward): blend the property-inference loss into the
    task loss as a convex combination weighted by ``ctx.alpha_prop_loss``.
    """
    alpha = ctx.alpha_prop_loss
    property_loss = ctx.criterion(ctx.y_prob, ctx.prop)
    ctx.loss_batch = alpha * ctx.loss_batch + (1 - alpha) * property_loss
19 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/fedem/fedem_lr_on_synthetic.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 1
6 | federate:
7 | mode: standalone
8 | total_round_num: 200
9 | sample_client_rate: 0.2
10 | client_num: 300
11 | data:
12 | root: data/
13 | type: synthetic
14 | subsample: 1.0
15 | personalization:
16 | local_update_steps: 30
17 | lr: 0.01
18 | model:
19 | model_num_per_trainer: 3
20 | type: lr
21 | out_channels: 2
22 | train:
23 | local_update_steps: 30
24 | optimizer:
25 | lr: 0.01
26 | weight_decay: 0.0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: nlptrainer
31 | eval:
32 | freq: 10
33 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_sage_minibatch_on_dblpnew.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | total_round_num: 400
10 | data:
11 | root: data/
12 | type: dblp_conf
13 | dataloader:
14 | type: graphsaint-rw
15 | batch_size: 256
16 | model:
17 | type: sage
18 | hidden: 1024
19 | out_channels: 4
20 | task: node
21 | train:
22 | local_update_steps: 16
23 | optimizer:
24 | lr: 0.05
25 | weight_decay: 0.0005
26 | type: SGD
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: nodeminibatch_trainer
31 | eval:
32 | metrics: ['acc', 'correct']
33 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/hfl-sgdmf_fedavg_standalone_on_movielens1m.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 1000
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | client_num: 5
9 | data:
10 | root: data/
11 | type: HFLMovieLens1M
12 | dataloader:
13 | type: mf
14 | theta: -1
15 | model:
16 | type: HMFNet
17 | hidden: 20
18 | train:
19 | local_update_steps: 10
20 | optimizer:
21 | lr: 1.
22 | criterion:
23 | type: MSELoss
24 | trainer:
25 | type: mftrainer
26 | eval:
27 | freq: 2000
28 | metrics: []
29 | best_res_update_round_wise_key: test_avg_loss
30 | count_flops: False
31 | sgdmf:
32 | use: True
33 | epsilon: 2.
34 | delta: 0.5
35 | R: 5.
36 |
--------------------------------------------------------------------------------
/federatedscope/nlp/baseline/fedavg_lstm_on_shakespeare.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 10
5 | federate:
6 | mode: standalone
7 | total_round_num: 1000
8 | sample_client_rate: 0.2
9 | data:
10 | root: data/
11 | type: shakespeare
12 | subsample: 0.2
13 | splits: [0.6,0.2,0.2]
14 | model:
15 | type: lstm
16 | in_channels: 80
17 | out_channels: 80
18 | embed_size: 8
19 | hidden: 256
20 | dropout: 0.0
21 | train:
22 | local_update_steps: 1
23 | batch_or_epoch: epoch
24 | optimizer:
25 | lr: 0.8
26 | weight_decay: 0.0
27 | criterion:
28 | type: character_loss
29 | trainer:
30 | type: nlptrainer
31 | eval:
32 | freq: 10
33 | metrics: ['acc', 'correct']
34 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/linear_model/baseline/vertical_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | total_round_num: 30
8 | model:
9 | type: lr
10 | use_bias: False
11 | train:
12 | optimizer:
13 | lr: 0.5
14 | data:
15 | root: data/
16 | type: adult
17 | splits: [1.0, 0.0]
18 | args: [{normalization: False, standardization: True}]
19 | dataloader:
20 | type: raw
21 | batch_size: 50
22 | criterion:
23 | type: CrossEntropyLoss
24 | trainer:
25 | type: none
26 | vertical:
27 | use: True
28 | dims: [7, 14]
29 | algo: 'lr'
30 | key_size: 256
31 | eval:
32 | freq: 5
33 | best_res_update_round_wise_key: test_loss
34 |
--------------------------------------------------------------------------------
/doc/source/mf.rst:
--------------------------------------------------------------------------------
1 | Federated Matrix Factorization Module References
2 | ================================================
3 |
4 | federatedscope.mf.dataset
5 | -----------------------
6 |
7 | .. automodule:: federatedscope.mf.dataset
8 | :members:
9 | :private-members:
10 |
11 | federatedscope.mf.model
12 | -----------------------
13 |
14 | .. automodule:: federatedscope.mf.model
15 | :members:
16 | :private-members:
17 |
18 | federatedscope.mf.dataloader
19 | -----------------------
20 |
21 | .. automodule:: federatedscope.mf.dataloader
22 | :members:
23 | :private-members:
24 |
25 | federatedscope.mf.trainer
26 | -----------------------
27 |
28 | .. automodule:: federatedscope.mf.trainer
29 | :members:
30 | :private-members:
31 |
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_gcn_minibatch_on_hiv.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: True
9 | total_round_num: 400
10 | client_num: 5
11 | data:
12 | root: data/
13 | type: hiv
14 | splitter: scaffold
15 | dataloader:
16 | type: pyg
17 | model:
18 | type: gcn
19 | hidden: 64
20 | out_channels: 2
21 | task: graph
22 | train:
23 | local_update_steps: 16
24 | optimizer:
25 | lr: 0.25
26 | weight_decay: 0.0005
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: graphminibatch_trainer
31 | eval:
32 | freq: 5
33 | metrics: ['acc', 'correct', 'roc_auc']
34 |
--------------------------------------------------------------------------------
/federatedscope/nlp/dataset/preprocess/get_embs.py:
--------------------------------------------------------------------------------
"""Build ``embs.json`` (vocabulary + embedding matrix) from a GloVe-style
plain-text embedding file: one token followed by its float vector per line.
"""
import argparse
import json

parser = argparse.ArgumentParser()

parser.add_argument(
    '-f',
    help='path to .txt file containing word embedding information;',
    type=str,
    default='glove.6B.300d.txt')

args = parser.parse_args()

with open(args.f, 'r') as inf:
    # One entry per line: [token, component_1, ..., component_d].
    entries = [line.split() for line in inf]
vocab = [entry[0] for entry in entries]
emb_floats = [[float(value) for value in entry[1:]] for entry in entries]
# Append a zero vector to stand in for unknown words.  Its dimension is
# inferred from the data instead of being hard-coded to 300, so non-300d
# embedding files (e.g. glove.6B.100d.txt) still yield a consistent matrix;
# fall back to 300 (the default file's dimension) for an empty input.
dim = len(emb_floats[0]) if emb_floats else 300
emb_floats.append([0.0] * dim)
js = {'vocab': vocab, 'emba': emb_floats}
with open('embs.json', 'w') as ouf:
    json.dump(js, ouf)
24 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/rf_feature_gathering_on_abalone.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: random_forest
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 4
13 | data:
14 | root: data/
15 | type: abalone
16 | splits: [0.8, 0.2]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: RegressionMSELoss
22 | trainer:
23 | type: verticaltrainer
24 | vertical:
25 | use: True
26 | dims: [4, 8]
27 | algo: 'rf'
28 | data_size_for_debug: 2000
29 | feature_subsample_ratio: 0.5
30 | eval:
31 | freq: 3
32 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/rf_feature_gathering_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: random_forest
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 5
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | vertical:
25 | use: True
26 | dims: [7, 14]
27 | algo: 'rf'
28 | data_size_for_debug: 2000
29 | feature_subsample_ratio: 0.8
30 | eval:
31 | freq: 3
32 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | doc/source/conf.py
4 | scripts/*
5 | materials/*
6 | tests/*
7 | benchmark/B-FHTL/*
8 | benchmark/pFL-Bench/*
9 | demo/*
10 | federatedscope/core/proto/gRPC_comm_manager_pb2.py
11 | max-line-length = 79
12 | inline-quotes = "
13 | ignore =
14 | C408
15 | E121
16 | E123
17 | E126
18 | E226
19 | E24
20 | E402
21 | E704
22 | E712
23 | E713
24 | E722
25 | E731
26 | F401
27 | F403
28 | F405
29 | F841
30 | W503
31 | W504
32 | W605
33 | I
34 | N
35 | B001
36 | B002
37 | B003
38 | B004
39 | B005
40 | B007
41 | B008
42 | B009
43 | B010
44 | B011
45 | B012
46 | B013
47 | B014
48 | B015
49 | B016
50 | B017
51 | avoid-escape = no
52 |
--------------------------------------------------------------------------------
/doc/source/gfl.rst:
--------------------------------------------------------------------------------
1 | Federated Graph Learning Module References
2 | ==========================================
3 |
4 | federatedscope.gfl.dataset
5 | -----------------------
6 |
7 | .. automodule:: federatedscope.gfl.dataset
8 | :members:
9 | :private-members:
10 |
11 | federatedscope.gfl.dataloader
12 | -----------------------
13 |
14 | .. automodule:: federatedscope.gfl.dataloader
15 | :members:
16 | :private-members:
17 |
18 | federatedscope.gfl.model
19 | -----------------------
20 |
21 | .. automodule:: federatedscope.gfl.model
22 | :members:
23 | :private-members:
24 |
25 | federatedscope.gfl.trainer
26 | -----------------------
27 |
28 | .. automodule:: federatedscope.gfl.trainer
29 | :members:
30 | :private-members:
31 |
--------------------------------------------------------------------------------
/scripts/flit_exp_scripts/run_fedvat_cls.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../..
4 |
5 | cudaid=$1
6 | dataset=$2
7 | trainer=$3
8 | weightReg=$4
9 | alpha=$5
10 |
11 | if [ ! -d "out" ];then
12 | mkdir out
13 | fi
14 |
15 | for k in {1..3}
16 | do
17 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, weight=${weightReg}, alpha=${alpha} starts..."
18 | python federatedscope/main.py --cfg federatedscope/gfl/flitplus/fedalgo_cls.yaml device ${cudaid} data.type ${dataset} trainer.type ${trainer} flitplus.weightReg ${weightReg} flitplus.alpha ${alpha} seed ${k} >>out/${trainer}_on_${dataset}_k${k}_weight${weightReg}_alpha${alpha}.log 2>&1
19 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, weight=${weightReg}, alpha=${alpha} ends."
20 | done
21 |
--------------------------------------------------------------------------------
/scripts/flit_exp_scripts/run_flitplus_cls.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ../..
4 |
5 | cudaid=$1
6 | dataset=$2
7 | trainer=$3
8 | lambdavat=$4
9 | alpha=$5
10 |
11 | if [ ! -d "out" ];then
12 | mkdir out
13 | fi
14 |
15 | for k in {1..3}
16 | do
17 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, lambda=${lambdavat}, alpha=${alpha} starts..."
18 | python federatedscope/main.py --cfg federatedscope/gfl/flitplus/fedalgo_cls.yaml device ${cudaid} data.type ${dataset} trainer.type ${trainer} flitplus.lambdavat ${lambdavat} flitplus.alpha ${alpha} seed ${k} >>out/${trainer}_on_${dataset}_k${k}_lambda${lambdavat}_alpha${alpha}.log 2>&1
19 | echo "k=${k}, Trainer=${trainer}, data=${dataset}, lambda=${lambdavat}, alpha=${alpha} ends."
20 | done
21 |
--------------------------------------------------------------------------------
/federatedscope/mf/baseline/vfl-sgdmf_fedavg_standalone_on_movielens1m.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 200
5 | federate:
6 | mode: standalone
7 | total_round_num: 20
8 | client_num: 5
9 | data:
10 | root: data/
11 | type: VFLMovieLens1M
12 | dataloader:
13 | type: mf
14 | theta: -1
15 | batch_size: 8
16 | model:
17 | type: VMFNet
18 | hidden: 20
19 | train:
20 | local_update_steps: 50
21 | optimizer:
22 | lr: 0.5
23 | criterion:
24 | type: MSELoss
25 | trainer:
26 | type: mftrainer
27 | eval:
28 | freq: 2000
29 | metrics: []
30 | best_res_update_round_wise_key: test_avg_loss
31 | count_flops: False
32 | sgdmf:
33 | use: True
34 | epsilon: 4.
35 | delta: 0.75
36 | R: 5.
37 |
--------------------------------------------------------------------------------
/scripts/example_configs/sha_wrap_fedex.yaml:
--------------------------------------------------------------------------------
1 | outdir: 'sha_wrap_fedex'
2 | use_gpu: True
3 | federate:
4 | mode: 'standalone'
5 | total_round_num: 20
6 | make_global_eval: False
7 | client_num: 5
8 | share_local_model: True
9 | online_aggr: True
10 | use_diff: True
11 | trainer:
12 | type: 'general'
13 | eval:
14 | freq: 10
15 | data:
16 | type: 'toy'
17 | model:
18 | type: 'lr'
19 | hpo:
20 | scheduler: sha
21 | num_workers: 1
22 | init_cand_num: 5
23 | ss: scripts/example_configs/sha_wrap_fedex_ss.yaml
24 | sha:
25 | budgets: [2, 4]
26 | fedex:
27 | use: True
28 | ss: 'scripts/example_configs/fedex_flat_search_space.yaml'
29 | # ss: 'scripts/example_configs/fedex_grid_search_space.yaml'
30 | diff: True
31 |
--------------------------------------------------------------------------------
/doc/source/nlp.rst:
--------------------------------------------------------------------------------
1 | Federated Natural Language Processing Module References
2 | =======================================================
3 |
4 | federatedscope.nlp.dataset
5 | -----------------------
6 |
7 | .. automodule:: federatedscope.nlp.dataset
8 | :members:
9 | :private-members:
10 |
11 | federatedscope.nlp.dataloader
12 | -----------------------
13 |
14 | .. automodule:: federatedscope.nlp.dataloader
15 | :members:
16 | :private-members:
17 |
18 | federatedscope.nlp.model
19 | -----------------------
20 |
21 | .. automodule:: federatedscope.nlp.model
22 | :members:
23 | :private-members:
24 |
25 | federatedscope.nlp.trainer
26 | -----------------------
27 |
28 | .. automodule:: federatedscope.nlp.trainer
29 | :members:
30 | :private-members:
31 |
--------------------------------------------------------------------------------
/.github/workflows/pre-commit.yml:
--------------------------------------------------------------------------------
1 | name: Pre-commit (Required)
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | run:
7 | runs-on: ${{ matrix.os }}
8 | strategy:
9 | fail-fast: True
10 | matrix:
11 | os: [ubuntu-latest]
12 | env:
13 | OS: ${{ matrix.os }}
14 | PYTHON: '3.9'
15 | steps:
16 | - uses: actions/checkout@master
17 | - name: Setup Python
18 | uses: actions/setup-python@master
19 | with:
20 | python-version: 3.9
21 | - name: Install dependencies
22 | run: |
23 | pip install pre-commit
24 | pre-commit install
25 | - name: Pre-commit starts
26 | run: |
27 | pre-commit run --all-files
28 | [ $? -eq 1 ] && exit 1 || echo "Passed"
29 |
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/supervised_local_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | federate:
4 | mode: standalone
5 | total_round_num: 50
6 | client_num: 5
7 | sample_client_rate: 1.0
8 | method: local
9 | data:
10 | root: 'data'
11 | type: 'Cifar4LP'
12 | batch_size: 256
13 | splitter: 'lda'
14 | splitter_args: [{'alpha': 0.5}]
15 | num_workers: 4
16 | model:
17 | type: 'supervised_local'
18 | train:
19 | local_update_steps: 1
20 | batch_or_epoch: 'epoch'
21 | optimizer:
22 | lr: 0.1
23 | momentum: 0.9
24 | weight_decay: 0.0
25 | early_stop:
26 | patience: 0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: general
31 | eval:
32 | freq: 2
33 | metrics: ['acc']
34 | split: ['val', 'test']
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/repro_exp/graph_level/args_multi_graph_fedalgo.sh:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------- #
2 | # FedOpt
3 | # ---------------------------------------------------------------------- #
4 |
5 | # mol
6 | bash run_multi_opt.sh 5 mol gcn 0.25 16 &
7 |
8 | bash run_multi_opt.sh 7 mol gin 0.25 4 &
9 |
10 | bash run_multi_opt.sh 5 mol gat 0.25 16 &
11 |
12 | # ---------------------------------------------------------------------- #
13 | # FedProx
14 | # ---------------------------------------------------------------------- #
15 |
16 | # mol
17 | bash run_multi_prox.sh 7 mol gcn 0.25 16 &
18 |
19 | bash run_multi_prox.sh 5 mol gin 0.01 4 &
20 |
21 | bash run_multi_prox.sh 7 mol gat 0.25 16 &
22 |
23 |
24 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/pfedme/pfedme_lr_on_synthetic.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 1
6 | federate:
7 | mode: standalone
8 | total_round_num: 200
9 | sample_client_rate: 0.2
10 | client_num: 300
11 | data:
12 | root: data/
13 | type: synthetic
14 | batch_size: 64
15 | subsample: 1.0
16 | personalization:
17 | K: 3
18 | beta: 1
19 | local_update_steps: 30
20 | lr: 0.5
21 | regular_weight: 1
22 | model:
23 | type: lr
24 | out_channels: 2
25 | train:
26 | local_update_steps: 30
27 | optimizer:
28 | lr: 0.5
29 | weight_decay: 0.0
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: nlptrainer
34 | eval:
35 | freq: 10
36 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/unpretrained_linearprob_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | federate:
4 | mode: standalone
5 | total_round_num: 50
6 | client_num: 5
7 | sample_client_rate: 1.0
8 | method: local
9 | data:
10 | root: 'data'
11 | type: 'Cifar4LP'
12 | batch_size: 256
13 | splitter: 'lda'
14 | splitter_args: [{'alpha': 0.5}]
15 | num_workers: 4
16 | model:
17 | type: 'SimCLR_linear'
18 | train:
19 | local_update_steps: 1
20 | batch_or_epoch: 'epoch'
21 | optimizer:
22 | lr: 0.1
23 | momentum: 0.9
24 | weight_decay: 0.0
25 | early_stop:
26 | patience: 0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: general
31 | eval:
32 | freq: 2
33 | metrics: ['acc']
34 | split: ['val', 'test']
--------------------------------------------------------------------------------
/federatedscope/nlp/baseline/fedavg_lr_on_twitter.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | sample_client_num: 10
9 | share_local_model: True
10 | online_aggr: True
11 | data:
12 | root: data/
13 | type: twitter
14 | subsample: 0.005
15 | dataloader:
16 | batch_size: 5
17 | model:
18 | type: lr
19 | out_channels: 2
20 | dropout: 0.0
21 | train:
22 | local_update_steps: 10
23 | optimizer:
24 | lr: 0.0003
25 | weight_decay: 0.0
26 | criterion:
27 | type: CrossEntropyLoss
28 | trainer:
29 | type: nlptrainer
30 | eval:
31 | freq: 1
32 | metrics: ['acc', 'correct', 'f1']
33 | split: [ 'train' ]
34 | best_res_update_round_wise_key: 'train_loss'
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/gcn/cora.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | local_update_steps: 1
11 | total_round_num: 500
12 | share_local_model: True
13 | online_aggr: True
14 | data:
15 | root: data/
16 | type: cora
17 | splitter: 'louvain'
18 | batch_size: 1
19 | model:
20 | type: gcn
21 | hidden: 64
22 | dropout: 0.5
23 | out_channels: 7
24 | task: node
25 | optimizer:
26 | lr: 0.25
27 | weight_decay: 0.0005
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: nodefullbatch_trainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['test', 'val', 'train']
--------------------------------------------------------------------------------
/federatedscope/gfl/flitplus/fedalgo_cls.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | federate:
4 | mode: 'standalone'
5 | make_global_eval: True
6 | total_round_num: 30
7 | client_num: 4
8 | sample_client_num: 3
9 | data:
10 | root: data/
11 | splitter: scaffold_lda
12 | transform: ['AddSelfLoops']
13 | splitter_args: [{'alpha': 0.1}]
14 | dataloader:
15 | type: pyg
16 | batch_size: 64
17 | model:
18 | type: mpnn
19 | hidden: 64
20 | task: graph
21 | out_channels: 2
22 | flitplus:
23 | tmpFed: 0.5
24 | factor_ema: 0.8
25 | train:
26 | local_update_steps: 333
27 | optimizer:
28 | type: 'Adam'
29 | lr: 0.0001
30 | weight_decay: 0.00001
31 | criterion:
32 | type: CrossEntropyLoss
33 | eval:
34 | freq: 50
35 | metrics: ['roc_auc']
36 |
--------------------------------------------------------------------------------
/scripts/wide_valley_exp_scripts/run_fedentsgd_on_cifar10.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | lda_alpha=$1
4 | cudaid=$2
5 | gamma=$3
6 | lr=$4
7 | eps=$5
8 | alpha=$6
9 | annealing=$7
10 |
11 |
12 | echo $lda_alpha
13 | echo $cudaid
14 | echo $gamma
15 | echo $lr
16 | echo $eps
17 | echo $alpha
18 | echo $annealing
19 |
20 | for (( i=0; i<5; i++ ))
21 | do
22 | CUDA_VISIBLE_DEVICES="${cudaid}" python federatedscope/main.py --cfg scripts/wide_valley_exp_scripts/fedentsgd_on_cifar10.yaml seed $i data.splitter_args "[{'alpha': ${lda_alpha}}]" trainer.local_entropy.gamma $gamma fedopt.optimizer.lr 1.0 fedopt.annealing $annealing trainer.local_entropy.eps $eps trainer.local_entropy.alpha $alpha train.optimizer.lr $lr expname fedentsgd_${lda_alpha}_${gamma}_${eps}_${annealing}_${i}
23 | done
24 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/gcn/citeseer.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | local_update_steps: 1
11 | total_round_num: 500
12 | share_local_model: True
13 | online_aggr: True
14 | data:
15 | root: data/
16 | type: citeseer
17 | splitter: 'louvain'
18 | batch_size: 1
19 | model:
20 | type: gcn
21 | hidden: 64
22 | dropout: 0.5
23 | out_channels: 6
24 | task: node
25 | optimizer:
26 | lr: 0.25
27 | weight_decay: 0.0005
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: nodefullbatch_trainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['test', 'val', 'train']
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/gcn/pubmed.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | local_update_steps: 1
11 | total_round_num: 500
12 | share_local_model: True
13 | online_aggr: True
14 | data:
15 | root: data/
16 | type: pubmed
17 | splitter: 'louvain'
18 | batch_size: 1
19 | model:
20 | type: gcn
21 | hidden: 64
22 | dropout: 0.5
23 | out_channels: 3
24 | task: node
25 | optimizer:
26 | lr: 0.25
27 | weight_decay: 0.0005
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: nodefullbatch_trainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['test', 'val', 'train']
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_gnn_node_fullbatch_citation.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | total_round_num: 400
11 | data:
12 | root: data/
13 | type: cora
14 | splitter: 'louvain'
15 | dataloader:
16 | type: pyg
17 | batch_size: 1
18 | model:
19 | type: gcn
20 | hidden: 64
21 | dropout: 0.5
22 | out_channels: 7
23 | task: node
24 | train:
25 | local_update_steps: 4
26 | optimizer:
27 | lr: 0.25
28 | weight_decay: 0.0005
29 | type: SGD
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: nodefullbatch_trainer
34 | eval:
35 | metrics: ['acc', 'correct']
36 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_feature_gathering_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [7, 14]
31 | algo: 'xgb'
32 | data_size_for_debug: 2000
33 | eval:
34 | freq: 3
35 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/mini_graph_dc/fedavg.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | total_round_num: 400
10 | share_local_model: False
11 | data:
12 | root: data/
13 | type: mini-graph-dc
14 | dataloader:
15 | type: pyg
16 | model:
17 | task: graph
18 | type: gin
19 | hidden: 64
20 | personalization:
21 | local_param: ['encoder_atom', 'encoder', 'clf']
22 | train:
23 | batch_or_epoch: epoch
24 | local_update_steps: 1
25 | optimizer:
26 | type: SGD
27 | trainer:
28 | type: graphminibatch_trainer
29 | eval:
30 | freq: 1
31 | metrics: ['acc', 'correct']
32 | count_flops: False
33 | split: ['train', 'val', 'test']
34 |
--------------------------------------------------------------------------------
/federatedscope/gfl/fedsageplus/fedsageplus_on_cora.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | federate:
4 | mode: standalone
5 | make_global_eval: True
6 | client_num: 3
7 | total_round_num: 100
8 | method: fedsageplus
9 | train:
10 | batch_or_epoch: epoch
11 | data:
12 | root: data/
13 | type: 'cora'
14 | splitter: 'louvain'
15 | dataloader:
16 | type: pyg
17 | batch_size: 1
18 | model:
19 | type: sage
20 | hidden: 64
21 | dropout: 0.5
22 | out_channels: 7
23 | fedsageplus:
24 | num_pred: 5
25 | gen_hidden: 64
26 | hide_portion: 0.5
27 | fedgen_epoch: 20
28 | loc_epoch: 1
29 | a: 1.0
30 | b: 1.0
31 | c: 1.0
32 | criterion:
33 | type: 'CrossEntropyLoss'
34 | trainer:
35 | type: nodefullbatch_trainer
36 | eval:
37 | metrics: ['acc', 'correct']
38 |
--------------------------------------------------------------------------------
/federatedscope/tabular/dataloader/quadratic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def load_quadratic_dataset(config):
    """Build a synthetic quadratic dataset with one curvature matrix per client.

    Client ``i`` receives the diagonal curvature ``0.02 * ratio**(i - 1) * I``,
    where ``ratio`` is chosen so that curvatures are geometrically spaced
    between ``min_curv`` and ``max_curv`` across ``client_num`` clients.

    Returns:
        (data_dict, config): ``data_dict`` maps the 1-based client id to a
        dict with keys ``'train'``/``'val'``/``'test'``, each holding a
        single ``(curvature_matrix, 0.0)`` sample.
    """
    dim = config.data.quadratic.dim
    num_clients = config.federate.client_num
    # Geometric ratio so curvatures span [min_curv, max_curv] over clients.
    ratio = np.exp(
        np.log(config.data.quadratic.max_curv /
               config.data.quadratic.min_curv) / (num_clients - 1))
    data_dict = dict()
    for cid in range(1, num_clients + 1):
        # TODO: enable sphere
        curv = 0.02 * ratio**(cid - 1) * np.identity(dim)
        # TODO: enable non-zero minimizer, i.e., provide a shift
        # astype() copies, so each split gets its own float32 array,
        # matching the original per-split copies.
        data_dict[cid] = {
            split: [(curv.astype(np.float32), .0)]
            for split in ('train', 'val', 'test')
        }
    return data_dict, config
20 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/gbdt_feature_gathering_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: gbdt_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for the gbdt model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [7, 14]
31 | algo: 'gbdt'
32 | data_size_for_debug: 2000
33 | eval:
34 | freq: 3
35 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_feature_gathering_on_abalone.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: abalone
16 | splits: [0.8, 0.2]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: RegressionMSELoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [4, 8]
31 | algo: 'xgb'
32 | data_size_for_debug: 2000
33 | eval:
34 | freq: 5
35 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/federatedscope/cv/baseline/fedavg_convnet2_on_celeba.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 10
5 | federate:
6 | mode: standalone
7 | total_round_num: 100
8 | sample_client_num: 10
9 | data:
10 | root: data/
11 | type: celeba
12 | splits: [0.6,0.2,0.2]
13 | subsample: 0.1
14 | transform: [['ToTensor'], ['Normalize', {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}]]
15 | dataloader:
16 | batch_size: 5
17 | model:
18 | type: convnet2
19 | hidden: 2048
20 | out_channels: 2
21 | dropout: 0.0
22 | train:
23 | local_update_steps: 10
24 | optimizer:
25 | lr: 0.001
26 | weight_decay: 0.0
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: cvtrainer
31 | eval:
32 | freq: 10
33 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/scripts/example_configs/fed_node_cls.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | total_round_num: 400
11 | data:
12 | root: data/
13 | type: cora
14 | splitter: 'louvain'
15 | dataloader:
16 | batch_size: 1
17 | model:
18 | type: gcn
19 | hidden: 64
20 | dropout: 0.5
21 | out_channels: 7
22 | optimizer:
23 | weight_decay: 0.0005
24 | type: SGD
25 | criterion:
26 | type: CrossEntropyLoss
27 | trainer:
28 | type: nodefullbatch_trainer
29 | eval:
30 | metrics: ['acc', 'correct']
31 | hpo:
32 | scheduler: sha
33 | larger_better: True
34 | metric: 'server_global_eval.test_acc'
35 | sha:
36 | budgets: [1,3,9]
37 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/fedem/fedem_lstm_on_shakespeare.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | seed: 1
4 | federate:
5 | mode: standalone
6 | total_round_num: 1000
7 | sample_client_rate: 0.2
8 | data:
9 | root: data/
10 | type: shakespeare
11 | subsample: 0.2
12 | splits: [0.6,0.2,0.2]
13 | model:
14 | model_num_per_trainer: 3
15 | type: lstm
16 | in_channels: 80
17 | out_channels: 80
18 | embed_size: 8
19 | hidden: 256
20 | personalization:
21 | local_update_steps: 1
22 | lr: 1.5
23 | train:
24 | batch_or_epoch: epoch
25 | local_update_steps: 1
26 | optimizer:
27 | lr: 1.5
28 | weight_decay: 0.0
29 | criterion:
30 | type: character_loss
31 | trainer:
32 | type: nlptrainer
33 | eval:
34 | freq: 10
35 | metrics: ['acc', 'correct']
36 |
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_gcn_fullbatch_on_kg.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | total_round_num: 400
10 | client_num: 5
11 | data:
12 | root: data/
13 | type: wn18
14 | splitter: rel_type
15 | pre_transform: ['Constant', {'value':1.0, 'cat':False}]
16 | dataloader:
17 | type: pyg
18 | model:
19 | type: gcn
20 | hidden: 64
21 | out_channels: 18
22 | task: link
23 | train:
24 | local_update_steps: 16
25 | optimizer:
26 | lr: 0.25
27 | weight_decay: 0.0005
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: linkfullbatch_trainer
32 | eval:
33 | freq: 5
34 | metrics: ['hits@1', 'hits@5', 'hits@10']
35 |
--------------------------------------------------------------------------------
/federatedscope/mf/model/model_builder.py:
--------------------------------------------------------------------------------
def get_mfnet(model_config, data_shape):
    """Return the MF model according to model configs.

    Arguments:
        model_config: the model related parameters
        data_shape (int): the input shape of the model

    Returns:
        A ``VMFNet`` when ``model_config.type`` is ``'vmfnet'`` (any case),
        otherwise an ``HMFNet``. In the VMFNet case ``data_shape`` is used
        as the item count; otherwise it is used as the user count.
    """
    if model_config.type.lower() == 'vmfnet':
        from federatedscope.mf.model.model import VMFNet
        model_cls = VMFNet
        num_user, num_item = model_config.num_user, data_shape
    else:
        from federatedscope.mf.model.model import HMFNet
        model_cls = HMFNet
        num_user, num_item = data_shape, model_config.num_item
    return model_cls(num_user=num_user,
                     num_item=num_item,
                     num_hidden=model_config.hidden)
18 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | data:
11 | root: data/
12 | type: femnist
13 | splits: [0.6,0.2,0.2]
14 | subsample: 0.05
15 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
16 | dataloader:
17 | batch_size: 10
18 | model:
19 | type: convnet2
20 | hidden: 2048
21 | out_channels: 62
22 | train:
23 | local_update_steps: 1
24 | batch_or_epoch: epoch
25 | optimizer:
26 | lr: 0.01
27 | weight_decay: 0.0
28 | grad:
29 | grad_clip: 5.0
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: cvtrainer
34 | eval:
35 | freq: 10
36 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/repro_exp/hpo/run_hpo.sh:
--------------------------------------------------------------------------------
set -e

# Usage: run_hpo.sh <cuda_device_id> <dataset_name>
# Sweeps HPO budget r over several sampled-client counts, 5 seeds each.
cudaid=$1
dataset=$2

cd ../../../../..

# Ensure both output dirs exist (replaces the two if-then-mkdir blocks);
# -p is a no-op when the directory is already there.
mkdir -p "hpo_${dataset}" hpo

rs=(1 2 4 8)
samples=(1 2 4 5)

for sample in "${samples[@]}"
do
    for r in "${rs[@]}"
    do
        # Repeat each (sample, r) configuration with 5 different seeds.
        for k in {1..5}
        do
            python federatedscope/hpo.py --cfg federatedscope/gfl/baseline/fedavg_gnn_node_fullbatch_citation.yaml federate.sample_client_num "${sample}" device "${cudaid}" data.type "${dataset}" hpo.r "${r}" seed "$k" >>"hpo/hpo_on_${dataset}_${r}_sample${sample}.log" 2>&1
            # Clean per-trial artifacts between runs; -f keeps `set -e`
            # from aborting when the directory is empty (unmatched glob).
            rm -f "hpo_${dataset}"/*
        done
    done
done
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/gcn/cora_prox.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | total_round_num: 500
11 | join_in_info: ['num_sample']
12 | data:
13 | root: data/
14 | type: cora
15 | splitter: 'louvain'
16 | batch_size: 1
17 | model:
18 | type: gcn
19 | hidden: 64
20 | dropout: 0.5
21 | out_channels: 7
22 | task: node
23 | criterion:
24 | type: CrossEntropyLoss
25 | train:
26 | local_update_steps: 1
27 | optimizer:
28 | lr: 0.25
29 | weight_decay: 0.0005
30 | trainer:
31 | type: nodefullbatch_trainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['test', 'val', 'train']
36 | fedprox:
37 | use: True
38 | mu: 5.0
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_on_cSBM.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | early_stop:
4 | patience: 200
5 | improve_indicator_mode: mean
6 | # monitoring: ['dissim']
7 | federate:
8 | mode: standalone
9 | total_round_num: 400
10 | data:
11 | root: data/
12 | type: 'csbm'
13 | #type: 'csbm_data_feb_07_2022-00:19'
14 | cSBM_phi: [0.1, 0.5, 0.9]
15 | dataloader:
16 | type: pyg
17 | batch_size: 1
18 | model:
19 | type: gpr
20 | hidden: 256
21 | out_channels: 2
22 | task: node
23 | #personalization:
24 | #local_param: ['prop1']
25 | train:
26 | local_update_steps: 2
27 | optimizer:
28 | lr: 0.5
29 | weight_decay: 0.0005
30 | type: SGD
31 | criterion:
32 | type: CrossEntropyLoss
33 | trainer:
34 | type: nodefullbatch_trainer
35 | eval:
36 | metrics: ['acc', 'correct']
37 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_feature_gathering_on_adult_by_he_eval.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [7, 14]
31 | algo: 'xgb'
32 | eval_protection: 'he'
33 | data_size_for_debug: 2000
34 | eval:
35 | freq: 3
36 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/fedbn.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 10
10 | batch_or_epoch: 'epoch'
11 | total_round_num: 400
12 | share_local_model: False
13 | data:
14 | root: data/
15 | type: fs_contest_data
16 | splitter: ooxx
17 | model:
18 | type: gin
19 | hidden: 64
20 | out_channels: 0
21 | task: graph
22 | personalization:
23 | local_param: [ 'encoder_atom', 'encoder', 'clf', 'norms' ] # pre, post + FedBN
24 | optimizer:
25 | lr: 0.05
26 | weight_decay: 0.0005
27 | type: SGD
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: graphminibatch_trainer
32 | eval:
33 | freq: 5
34 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/hpo/fedbn_gnn_minibatch_on_multi_task.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 1
10 | total_round_num: 400
11 | share_local_model: False
12 | data:
13 | root: data/
14 | type: fs_contest_data
15 | splitter: ooxx
16 | model:
17 | type: gin
18 | hidden: 64
19 | out_channels: 0
20 | task: graph
21 | personalization:
22 | local_param: [ 'encoder_atom', 'encoder', 'clf', 'norms' ] # pre, post + FedBN
23 | optimizer:
24 | lr: 0.25
25 | weight_decay: 0.0005
26 | type: SGD
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: graphminibatch_trainer
31 | eval:
32 | freq: 5
33 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/federatedscope/organizer/cfg_client.py:
--------------------------------------------------------------------------------
# ---------------------------------------------------------------------- #
# Lobby related (global variable stored in Redis)
# ---------------------------------------------------------------------- #
# Task-queue settings (Celery-style names: broker_url, result_backend,
# task_serializer, ...). Both broker and result backend point at the same
# Redis host, db 0.
server_ip = '172.17.138.149'
broker_url = f'redis://{server_ip}:6379/0'
# NOTE(review): no explicit port here, so the client default (6379) is
# presumably used — confirm it matches the broker URL above.
result_backend = f'redis://{server_ip}/0'

# Exchange task payloads and results as JSON only.
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
timezone = 'Europe/Oslo'
enable_utc = True

# ---------------------------------------------------------------------- #
# ECS ssh related (To verify and launch the virtual environment)
# ---------------------------------------------------------------------- #
# Remote environment name / working directory and the expected
# FederatedScope version on the ECS machine.
env_name = 'test_org'
root_path = 'test_org'
fs_version = '0.2.0'
20 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/gbdt_feature_gathering_on_abalone.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: gbdt_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 4
13 | data:
14 | root: data/
15 | type: abalone
16 | splits: [0.8, 0.2]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: RegressionMSELoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for the gbdt model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [4, 8]
31 | algo: 'gbdt'
32 | data_size_for_debug: 2000
33 | feature_subsample_ratio: 0.5
34 | eval:
35 | freq: 3
36 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/cross_device/twitter.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | federate:
6 | mode: standalone
7 | total_round_num: 500
8 | sample_client_rate: 0.01
9 | make_global_eval: True
10 | merge_test_data: True
11 | share_local_model: True
12 | online_aggr: True
13 | data:
14 | root: data/
15 | type: twitter
16 | batch_size: 5
17 | subsample: 0.005
18 | num_workers: 0
19 | model:
20 | type: lr
21 | out_channels: 2
22 | dropout: 0.0
23 | train:
24 | local_update_steps: 10
25 | optimizer:
26 | lr: 0.0003
27 | weight_decay: 0.0
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: nlptrainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: [ 'test' ]
36 | best_res_update_round_wise_key: 'test_loss'
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/fedsimclr_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | federate:
4 | mode: standalone
5 | total_round_num: 100
6 | client_num: 5
7 | sample_client_rate: 1.0
8 | method: FedAvg
9 | save_to: '../SimCLR_on_Cifar4CL_global_lr0.03_lus10_rn100epoch_repairsave.ckpt'
10 | data:
11 | root: 'data'
12 | type: 'Cifar4CL'
13 | batch_size: 512
14 | splitter: 'lda'
15 | splitter_args: [{'alpha': 0.1}]
16 | num_workers: 4
17 | model:
18 | type: 'SimCLR'
19 | train:
20 | local_update_steps: 10
21 | batch_or_epoch: 'epoch'
22 | optimizer:
23 | lr: 0.03
24 | momentum: 0.9
25 | weight_decay: 0.0001
26 | early_stop:
27 | patience: 0
28 | criterion:
29 | type: 'NT_xentloss'
30 | trainer:
31 | type: 'cltrainer'
32 | eval:
33 | freq: 5
34 | metrics: ['loss']
35 | split: ['val', 'test']
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_gin_minibatch_on_cikmcup.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | total_round_num: 100
10 | share_local_model: False
11 | data:
12 | root: data/
13 | type: cikmcup
14 | dataloader:
15 | type: pyg
16 | model:
17 | type: gin
18 | hidden: 64
19 | personalization:
20 | local_param: ['encoder_atom', 'encoder', 'clf']
21 | train:
22 | batch_or_epoch: epoch
23 | local_update_steps: 1
24 | optimizer:
25 | weight_decay: 0.0005
26 | type: SGD
27 | trainer:
28 | type: graphminibatch_trainer
29 | eval:
30 | freq: 5
31 | metrics: ['imp_ratio']
32 | report: ['avg']
33 | best_res_update_round_wise_key: val_imp_ratio
34 | count_flops: False
35 | base: 0.
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/isolated_gin_minibatch_on_cikmcup.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: standalone
8 | method: local
9 | make_global_eval: False
10 | total_round_num: 10
11 | share_local_model: False
12 | data:
13 | batch_size: 64
14 | root: data/
15 | type: cikmcup
16 | dataloader:
17 | type: pyg
18 | model:
19 | type: gin
20 | hidden: 64
21 | personalization:
22 | local_param: ['encoder_atom', 'encoder', 'clf']
23 | train:
24 | batch_or_epoch: epoch
25 | local_update_steps: 21
26 | optimizer:
27 | weight_decay: 0.0005
28 | type: SGD
29 | trainer:
30 | type: graphminibatch_trainer
31 | eval:
32 | freq: 5
33 | report: ['avg']
34 | best_res_update_round_wise_key: val_loss
35 | count_flops: False
36 |
--------------------------------------------------------------------------------
/scripts/example_configs/femnist_global_train.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | method: global
11 | train:
12 | local_update_steps: 1
13 | batch_or_epoch: epoch
14 | optimizer:
15 | lr: 0.01
16 | weight_decay: 0.0
17 | data:
18 | root: data/
19 | type: femnist
20 | splits: [0.6,0.2,0.2]
21 | subsample: 0.05
22 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
23 | dataloader:
24 | batch_size: 10
25 | model:
26 | type: convnet2
27 | hidden: 2048
28 | out_channels: 62
29 | grad:
30 | grad_clip: 5.0
31 | criterion:
32 | type: CrossEntropyLoss
33 | trainer:
34 | type: cvtrainer
35 | eval:
36 | freq: 10
37 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/fedavg.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 4
10 | batch_or_epoch: 'epoch'
11 | total_round_num: 400
12 | share_local_model: False
13 | data:
14 | root: data/
15 | type: fs_contest_data
16 | splitter: ooxx
17 | model:
18 | type: gin
19 | hidden: 64
20 | out_channels: 0
21 | task: graph
22 | personalization:
23 | local_param: ['encoder_atom', 'encoder', 'clf'] # to handle size-different pre & post layers
24 | optimizer:
25 | lr: 0.5
26 | weight_decay: 0.0005
27 | type: SGD
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: graphminibatch_trainer
32 | eval:
33 | freq: 5
34 | metrics: ['acc', 'correct']
35 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/hpo/fedavg_gnn_minibatch_on_multi_task.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 1
10 | total_round_num: 400
11 | share_local_model: False
12 | data:
13 | root: data/
14 | type: fs_contest_data
15 | splitter: ooxx
16 | model:
17 | type: gin
18 | hidden: 64
19 | out_channels: 0
20 | task: graph
21 | personalization:
22 | local_param: ['encoder_atom', 'encoder', 'clf'] # to handle size-different pre & post layers
23 | optimizer:
24 | lr: 0.5
25 | weight_decay: 0.0005
26 | type: SGD
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: graphminibatch_trainer
31 | eval:
32 | freq: 5
33 | metrics: ['acc', 'correct']
34 |
--------------------------------------------------------------------------------
/doc/source/cv.rst:
--------------------------------------------------------------------------------
1 | Federated Computer Vision Module References
2 | ===========================================
3 |
4 | federatedscope.cv.dataset
5 | -------------------------
6 |
7 | .. automodule:: federatedscope.cv.dataset.leaf
8 | :members:
9 | :private-members:
10 |
11 | .. automodule:: federatedscope.cv.dataset.leaf_cv
12 | :members:
13 | :private-members:
14 |
15 | federatedscope.cv.dataloader
16 | ----------------------------
17 |
18 | .. automodule:: federatedscope.cv.dataloader
19 | :members:
20 | :private-members:
21 |
22 | federatedscope.cv.model
23 | -----------------------
24 |
25 | .. automodule:: federatedscope.cv.model
26 | :members:
27 | :private-members:
28 |
29 | federatedscope.cv.trainer
30 | -------------------------
31 |
32 | .. automodule:: federatedscope.cv.trainer
33 | :members:
34 | :private-members:
35 |
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/fedgc_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | seed: 1
3 | device: 2
4 | federate:
5 | mode: standalone
6 | total_round_num: 10
7 | client_num: 5
8 | share_local_model: False
9 | online_aggr: True
10 | sample_client_rate: 1.0
11 | method: fedgc
12 | save_to: 'test.ckpt'
13 | data:
14 | root: 'data'
15 | type: 'Cifar4CL'
16 | batch_size: 512
17 | splitter: 'lda'
18 | splitter_args: [{'alpha': 0.1}]
19 | num_workers: 4
20 | model:
21 | type: 'SimCLR'
22 | train:
23 | local_update_steps: 5
24 | batch_or_epoch: 'batch'
25 | optimizer:
26 | lr: 0.05
27 | momentum: 0.9
28 | weight_decay: 0.0001
29 | early_stop:
30 | patience: 0
31 | criterion:
32 | type: 'NT_xentloss'
33 | trainer:
34 | type: 'cltrainer'
35 | eval:
36 | freq: 5
37 | metrics: ['loss']
38 | split: ['val', 'test']
--------------------------------------------------------------------------------
/federatedscope/nlp/hetero_tasks/metric/cnndm.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from federatedscope.register import register_metric
4 | from federatedscope.nlp.metric.rouge.utils import test_rouge
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 |
def load_cnndm_metrics(ctx, **kwargs):
    """Compute ROUGE metrics for CNN/DailyMail summarization outputs.

    Runs ``test_rouge`` over the prediction/target files recorded on the
    trainer context (using ``<outdir>/temp`` as scratch space) and keeps
    only the ROUGE-1/2/L F-scores from the returned dict.
    """
    scratch_dir = os.path.join(ctx.cfg.outdir, 'temp')
    all_scores = test_rouge(scratch_dir, ctx.pred_path, ctx.tgt_path)
    wanted = {'rouge_1_f_score', 'rouge_2_f_score', 'rouge_l_f_score'}
    return {
        name: score
        for name, score in all_scores.items() if name in wanted
    }
18 |
19 |
def call_cnndm_metric(types):
    """Return the cnndm metric triple when requested, else None.

    The triple is ``(name, builder, the_larger_the_better)``; a larger
    ROUGE score is better, hence the ``True`` flag.
    """
    if 'cnndm' not in types:
        return None
    return 'cnndm', load_cnndm_metrics, True


register_metric('cnndm', call_cnndm_metric)
27 |
--------------------------------------------------------------------------------
/federatedscope/cv/baseline/fedavg_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | data:
11 | root: data/
12 | type: femnist
13 | splits: [0.6,0.2,0.2]
14 | subsample: 0.05
15 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
16 | dataloader:
17 | batch_size: 10
18 | model:
19 | type: convnet2
20 | hidden: 2048
21 | out_channels: 62
22 | dropout: 0.0
23 | train:
24 | local_update_steps: 1
25 | batch_or_epoch: epoch
26 | optimizer:
27 | lr: 0.01
28 | weight_decay: 0.0
29 | grad:
30 | grad_clip: 5.0
31 | criterion:
32 | type: CrossEntropyLoss
33 | trainer:
34 | type: cvtrainer
35 | eval:
36 | freq: 10
37 | metrics: ['acc', 'correct']
38 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/ditto.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | method: Ditto
9 | make_global_eval: False
10 | local_update_steps: 2
11 | total_round_num: 400
12 | share_local_model: False
13 | data:
14 | root: data/
15 | type: fs_contest_data
16 | splitter: ooxx
17 | model:
18 | type: gin
19 | hidden: 64
20 | out_channels: 0
21 | task: graph
22 | personalization:
23 | regular_weight: 0.1
24 | local_param: ['encoder_atom', 'encoder', 'clf'] # to handle size-different pre & post layers
25 | optimizer:
26 | lr: 0.5
27 | weight_decay: 0.0005
28 | type: SGD
29 | criterion:
30 | type: CrossEntropyLoss
31 | trainer:
32 | type: graphminibatch_trainer
33 | eval:
34 | freq: 5
35 | metrics: ['acc', 'correct']
36 |
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/bert/cola@huggingface_datasets.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 1
3 | federate:
4 | mode: standalone
5 | local_update_steps: 1
6 | total_round_num: 40
7 | batch_or_epoch: 'epoch'
8 | client_num: 5
9 | share_local_model: True
10 | online_aggr: True
11 | data:
12 | root: 'glue'
13 | type: 'cola@huggingface_datasets'
14 | args: [{'max_len': 128}]
15 | batch_size: 128
16 | splitter: 'lda'
17 | splitter_args: [{'alpha': 0.5}]
18 | num_workers: 0
19 | model:
20 | type: 'google/bert_uncased_L-2_H-128_A-2@transformers'
21 | task: 'SequenceClassification'
22 | out_channels: 2
23 | optimizer:
24 | lr: 0.0001
25 | weight_decay: 0.0
26 | criterion:
27 | type: 'CrossEntropyLoss'
28 | trainer:
29 | type: 'nlptrainer'
30 | eval:
31 | freq: 1
32 | metrics: ['acc', 'correct', 'f1']
33 | split: ['val', 'train']
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/bert/sst2@huggingface_datasets.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | federate:
4 | mode: standalone
5 | local_update_steps: 1
6 | total_round_num: 40
7 | batch_or_epoch: 'epoch'
8 | client_num: 5
9 | share_local_model: True
10 | online_aggr: True
11 | data:
12 | root: 'glue'
13 | type: 'sst2@huggingface_datasets'
14 | args: [{'max_len': 512}]
15 | batch_size: 128
16 | splitter: 'lda'
17 | splitter_args: [{'alpha': 0.5}]
18 | num_workers: 0
19 | model:
20 | type: 'google/bert_uncased_L-2_H-128_A-2@transformers'
21 | task: 'SequenceClassification'
22 | out_channels: 2
23 | optimizer:
24 | lr: 0.3
25 | weight_decay: 0.0
26 | criterion:
27 | type: 'CrossEntropyLoss'
28 | trainer:
29 | type: 'nlptrainer'
30 | eval:
31 | freq: 1
32 | metrics: ['acc', 'correct', 'f1']
33 | split: ['val', 'train']
--------------------------------------------------------------------------------
/federatedscope/core/splitters/graph/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.core.splitters.graph.louvain_splitter import \
2 |     LouvainSplitter
3 | from federatedscope.core.splitters.graph.random_splitter import RandomSplitter
4 | from federatedscope.core.splitters.graph.reltype_splitter import \
5 |     RelTypeSplitter
6 | from federatedscope.core.splitters.graph.scaffold_splitter import \
7 |     ScaffoldSplitter
8 | from federatedscope.core.splitters.graph.randchunk_splitter import \
9 |     RandChunkSplitter
10 | # Analysis helper and the LDA-based scaffold splitter live in their own modules.
11 | from federatedscope.core.splitters.graph.analyzer import Analyzer
12 | from federatedscope.core.splitters.graph.scaffold_lda_splitter import \
13 |     ScaffoldLdaSplitter
14 | # Public API of this subpackage; keep in sync with the imports above.
15 | __all__ = [
16 |     'LouvainSplitter', 'RandomSplitter', 'RelTypeSplitter', 'ScaffoldSplitter',
17 |     'RandChunkSplitter', 'Analyzer', 'ScaffoldLdaSplitter'
18 | ]
19 |
--------------------------------------------------------------------------------
/benchmark/pFL-Bench/FEMNIST-s02/fedavg_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: -1
3 | early_stop:
4 | patience: 5
5 | seed: 1
6 | federate:
7 | mode: standalone
8 | local_update_steps: 1
9 | batch_or_epoch: epoch
10 | total_round_num: 1000
11 | sample_client_rate: 0.2
12 | unseen_clients_rate: 0.2
13 | data:
14 | root: data/
15 | type: femnist
16 | splits: [0.6,0.2,0.2]
17 | batch_size: 32
18 | subsample: 0.05
19 | num_workers: 0
20 | transform: [['ToTensor'], ['Normalize', {'mean': [0.1307], 'std': [0.3081]}]]
21 | model:
22 | type: convnet2
23 | hidden: 2048
24 | out_channels: 62
25 | dropout: 0.0
26 | optimizer:
27 | lr: 0.01
28 | weight_decay: 0.0
29 | grad_clip: 5.0
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: cvtrainer
34 | eval:
35 | freq: 10
36 | metrics: ['acc', 'correct']
37 |
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/supervised_fedavg_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | federate:
4 | mode: standalone
5 | total_round_num: 100
6 | client_num: 5
7 | sample_client_rate: 1.0
8 | share_local_model: True
9 | online_aggr: True
10 | method: FedAvg
11 | save_to: '../test_supervised.ckpt'
12 | data:
13 | root: 'data'
14 | type: 'Cifar4LP'
15 | batch_size: 256
16 | splitter: 'lda'
17 | splitter_args: [{'alpha': 0.1}]
18 | num_workers: 4
19 | model:
20 | type: 'supervised_fedavg'
21 | train:
22 | local_update_steps: 3
23 | batch_or_epoch: 'epoch'
24 | optimizer:
25 | lr: 0.03
26 | momentum: 0.9
27 | weight_decay: 0.0
28 | early_stop:
29 | patience: 0
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: general
34 | eval:
35 | freq: 10
36 | metrics: ['acc']
37 | split: ['val', 'test']
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/fedprox.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 4
10 | total_round_num: 400
11 | share_local_model: False
12 | data:
13 | root: data/
14 | type: fs_contest_data
15 | splitter: ooxx
16 | model:
17 | type: gin
18 | hidden: 64
19 | out_channels: 0
20 | task: graph
21 | personalization:
22 | local_param: ['encoder_atom', 'encoder', 'clf'] # to handle size-different pre & post layers
23 | optimizer:
24 | lr: 0.5
25 | weight_decay: 0.0005
26 | type: SGD
27 | criterion:
28 | type: CrossEntropyLoss
29 | trainer:
30 | type: graphminibatch_trainer
31 | eval:
32 | freq: 5
33 | metrics: ['acc', 'correct', 'loss_regular']
34 | fedprox:
35 | use: True
36 | mu: 0.5
--------------------------------------------------------------------------------
/benchmark/FedHPOBench/scripts/gcn/cora_dp.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | make_global_eval: True
9 | client_num: 5
10 | total_round_num: 500
11 | join_in_info: ['num_sample']
12 | data:
13 | root: data/
14 | type: cora
15 | splitter: 'louvain'
16 | batch_size: 1
17 | model:
18 | type: gcn
19 | hidden: 64
20 | dropout: 0.5
21 | out_channels: 7
22 | task: node
23 | criterion:
24 | type: CrossEntropyLoss
25 | train:
26 | local_update_steps: 1
27 | optimizer:
28 | lr: 0.25
29 | weight_decay: 0.0005
30 | trainer:
31 | type: nodefullbatch_trainer
32 | eval:
33 | freq: 1
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['test', 'val', 'train']
36 | nbafl:
37 | use: True
38 | mu: 0.0
39 | w_clip: 0.1
40 | epsilon: 20
41 | constant: 1
--------------------------------------------------------------------------------
/federatedscope/gfl/baseline/fedavg_wpsn_on_cSBM.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | early_stop:
4 | patience: 200
5 | improve_indicator_mode: mean
6 | # monitoring: ['dissim']
7 | federate:
8 | mode: standalone
9 | total_round_num: 400
10 | data:
11 | root: data/
12 | type: 'csbm'
13 | #type: 'csbm_data_feb_05_2022-19:23'
14 | cSBM_phi: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
15 | dataloader:
16 | type: pyg
17 | model:
18 | type: gpr
19 | hidden: 256
20 | out_channels: 2
21 | task: node
22 | personalization:
23 | local_param: ['prop1']
24 | train:
25 | local_update_steps: 2
26 | optimizer:
27 | lr: 0.5
28 | weight_decay: 0.0005
29 | type: SGD
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: nodeminibatch_trainer
34 | finetune:
35 | local_update_steps: 2
36 | eval:
37 | metrics: ['acc', 'correct']
38 |
--------------------------------------------------------------------------------
/federatedscope/gfl/gcflplus/gcflplus_on_multi_task.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | federate:
4 | mode: standalone
5 | total_round_num: 100
6 | method: gcflplus
7 | data:
8 | root: data/
9 | type: graph_multi_domain_mix
10 | pre_transform: ['Constant', {'value':1.0, 'cat':False}]
11 | dataloader:
12 | type: pyg
13 | model:
14 | type: gin
15 | hidden: 64
16 | dropout: 0.5
17 | out_channels: 0
18 | task: graph
19 | gcflplus:
20 | EPS_1: 0.05
21 | EPS_2: 0.1
22 | seq_length: 5
23 | standardize: False
24 | train:
25 | batch_or_epoch: epoch
26 | optimizer:
27 | lr: 0.5
28 | weight_decay: 0.0005
29 | type: SGD
30 | criterion:
31 | type: CrossEntropyLoss
32 | personalization:
33 | local_param: ['encoder_atom', 'encoder', 'clf']
34 | trainer:
35 | type: graphminibatch_trainer
36 | eval:
37 | metrics: ['acc', 'correct']
38 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/fedbn/fedbn_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 1
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | data:
11 | root: data/
12 | type: femnist
13 | splits: [0.6,0.2,0.2]
14 | subsample: 0.05
15 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
16 | model:
17 | type: convnet2
18 | hidden: 2048
19 | out_channels: 62
20 | personalization:
21 | local_param: [ 'bn', 'norms' ] # FedBN
22 | train:
23 | batch_or_epoch: epoch
24 | local_update_steps: 1
25 | optimizer:
26 | lr: 0.1
27 | weight_decay: 0.0
28 | grad:
29 | grad_clip: 5.0
30 | criterion:
31 | type: CrossEntropyLoss
32 | trainer:
33 | type: cvtrainer
34 | eval:
35 | freq: 10
36 | metrics: ['acc', 'correct']
37 |
--------------------------------------------------------------------------------
/.github/workflows/sphinx.yml:
--------------------------------------------------------------------------------
1 | name: "API Reference"
2 | on:
3 | pull_request:
4 | types: [opened, synchronize, edited]
5 |
6 | jobs:
7 | docs:
8 | if: true == contains(github.event.pull_request.title, 'DOC')
9 | runs-on: ubuntu-latest
10 | timeout-minutes: 20
11 | env:
12 | OS: ${{ matrix.os }}
13 | PYTHON: '3.9'
14 | steps:
15 | - uses: actions/checkout@master
16 | - name: Setup Python ${{ matrix.python-version }}
17 | uses: actions/setup-python@master
18 | with:
19 | python-version: ${{ matrix.python-version }}
20 | - name: Generate Documentation
21 | uses: ammaraskar/sphinx-action@master
22 | with:
23 | docs-folder: "doc/"
24 | - name: Upload Documentation
25 | uses: actions/upload-artifact@v3
26 | with:
27 | name: APIReference
28 | path: doc/build/html/
29 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_label_scattering_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | mode: 'label_scattering'
31 | dims: [7, 14]
32 | algo: 'xgb'
33 | data_size_for_debug: 2000
34 | protect_object: 'grad_and_hess'
35 | protect_method: 'he'
36 | eval:
37 | freq: 3
38 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/.github/release-drafter.yml:
--------------------------------------------------------------------------------
1 | name-template: 'v$RESOLVED_VERSION 🌈'
2 | tag-template: 'v$RESOLVED_VERSION'
3 | categories:
4 | - title: '🚀 Features'
5 | labels:
6 | - 'feature'
7 | - 'enhancement'
8 | - 'Feature'
9 | - title: '🐛 Bug Fixes'
10 | labels:
11 | - 'fix'
12 | - 'bugfix'
13 | - 'bug'
14 | - 'hotfix'
15 | - title: '🧰 Maintenance'
16 | label: 'chore'
17 | exclude-labels:
18 | - 'skip-changelog'
19 | change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
20 | change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks.
21 | version-resolver:
22 | major:
23 | labels:
24 | - 'major'
25 | minor:
26 | labels:
27 | - 'minor'
28 | patch:
29 | labels:
30 | - 'patch'
31 | default: patch
32 | template: |
33 | ## Changes
34 |
35 | $CHANGES
36 |
37 |
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/fedbn_ft.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 10
10 | batch_or_epoch: 'epoch'
11 | total_round_num: 400
12 | share_local_model: False
13 | data:
14 | root: data/
15 | type: fs_contest_data
16 | splitter: ooxx
17 | model:
18 | type: gin
19 | hidden: 64
20 | out_channels: 0
21 | task: graph
22 | personalization:
23 | local_param: [ 'encoder_atom', 'encoder', 'clf', 'norms' ] # pre, post + FedBN
24 | optimizer:
25 | lr: 0.01
26 | weight_decay: 0.0005
27 | type: SGD
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: graphminibatch_trainer
32 | finetune:
33 | before_eval: True
34 | steps: 5
35 | eval:
36 | freq: 5
37 | metrics: ['acc', 'correct']
--------------------------------------------------------------------------------
/doc/source/attack.rst:
--------------------------------------------------------------------------------
1 | Attack Module References
2 | ========================
3 |
4 | federatedscope.attack.privacy_attacks
5 | -------------------------------------------
6 |
7 | .. automodule:: federatedscope.attack.privacy_attacks
8 | :members:
9 | :private-members:
10 |
11 |
12 | federatedscope.attack.worker_as_attacker
13 | -------------------------------------------
14 |
15 | .. automodule:: federatedscope.attack.worker_as_attacker
16 | :members:
17 | :private-members:
18 |
19 | federatedscope.attack.auxiliary
20 | --------------------------------
21 |
22 | .. automodule:: federatedscope.attack.auxiliary
23 | :members:
24 | :private-members:
25 |
26 | federatedscope.attack.trainer
27 | ---------------------------------
28 |
29 | .. automodule:: federatedscope.attack.trainer
30 | :members:
31 | :private-members:
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/federatedscope/nlp/baseline/fedavg_bert_on_sst2.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 2
3 | federate:
4 | mode: standalone
5 | total_round_num: 40
6 | client_num: 5
7 | share_local_model: True
8 | online_aggr: True
9 | sample_client_rate: 1.0
10 | data:
11 | root: 'glue'
12 | type: 'sst2@huggingface_datasets'
13 | args: [{'max_len': 512}]
14 | splitter: 'lda'
15 | splitter_args: [{'alpha': 0.5}]
16 | dataloader:
17 | batch_size: 128
18 | model:
19 | type: 'google/bert_uncased_L-2_H-128_A-2@transformers'
20 | task: 'SequenceClassification'
21 | out_channels: 2
22 | train:
23 | local_update_steps: 1
24 | batch_or_epoch: 'epoch'
25 | optimizer:
26 | lr: 0.0001
27 | weight_decay: 0.0
28 | criterion:
29 | type: 'CrossEntropyLoss'
30 | trainer:
31 | type: 'nlptrainer'
32 | eval:
33 | freq: 2
34 | metrics: ['acc', 'correct', 'f1']
35 | split: ['val', 'train']
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/worker/__init__.py:
--------------------------------------------------------------------------------
1 | from federatedscope.vertical_fl.tree_based_models.worker.TreeClient import \
2 |     TreeClient
3 | from federatedscope.vertical_fl.tree_based_models.worker.TreeServer import \
4 |     TreeServer
5 | from federatedscope.vertical_fl.tree_based_models.worker.train_wrapper import \
6 |     wrap_server_for_train, wrap_client_for_train
7 | from federatedscope.vertical_fl.tree_based_models.worker.evaluation_wrapper \
8 |     import wrap_server_for_evaluation, wrap_client_for_evaluation
9 | from federatedscope.vertical_fl.tree_based_models.worker.he_evaluation_wrapper\
10 |     import wrap_client_for_he_evaluation
11 | # Public API of this subpackage; keep in sync with the imports above.
12 | __all__ = [
13 |     'TreeServer', 'TreeClient', 'wrap_server_for_train',
14 |     'wrap_client_for_train', 'wrap_server_for_evaluation',
15 |     'wrap_client_for_evaluation', 'wrap_client_for_he_evaluation'
16 | ]
17 |
--------------------------------------------------------------------------------
/scripts/distributed_scripts/run_distributed_lr.sh:
--------------------------------------------------------------------------------
1 | set -e
2 |
3 | cd ..
4 |
5 | echo "Test distributed mode with LR..."
6 |
7 | python scripts/distributed_scripts/gen_data.py
8 |
9 | ### server owns global test data
10 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_server.yaml &
11 | ### server doesn't own data
12 | # python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_server_no_data.yaml &
13 | sleep 2
14 |
15 | # clients
16 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_client_1.yaml &
17 | sleep 2
18 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_client_2.yaml &
19 | sleep 2
20 | python federatedscope/main.py --cfg scripts/distributed_scripts/distributed_configs/distributed_client_3.yaml &
21 |
22 | # Wait for the server and all client processes to finish; without this the
23 | # script exits immediately, orphaning the backgrounded FL processes and
24 | # discarding their exit status (set -e cannot see background jobs).
25 | wait
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/ditto/ditto_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 2
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | data:
11 | root: data/
12 | type: femnist
13 | splits: [0.6,0.2,0.2]
14 | subsample: 0.05
15 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
16 | model:
17 | type: convnet2
18 | hidden: 2048
19 | out_channels: 62
20 | personalization:
21 | local_update_steps: 3
22 | lr: 0.5
23 | regular_weight: 0.1
24 | train:
25 | batch_or_epoch: epoch
26 | local_update_steps: 3
27 | optimizer:
28 | lr: 0.5
29 | weight_decay: 0.0
30 | grad:
31 | grad_clip: 5.0
32 | criterion:
33 | type: CrossEntropyLoss
34 | trainer:
35 | type: cvtrainer
36 | eval:
37 | freq: 10
38 | metrics: ['acc', 'correct']
39 |
--------------------------------------------------------------------------------
/scripts/personalization_exp_scripts/fedem/fedem_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 5
5 | seed: 2
6 | federate:
7 | mode: standalone
8 | total_round_num: 300
9 | sample_client_rate: 0.2
10 | data:
11 | root: data/
12 | type: femnist
13 | splits: [0.6,0.2,0.2]
14 | subsample: 0.05
15 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
16 | model:
17 | model_num_per_trainer: 3
18 | type: convnet2
19 | hidden: 2048
20 | out_channels: 62
21 | personalization:
22 | local_update_steps: 1
23 | lr: 0.5
24 | train:
25 | batch_or_epoch: epoch
26 | local_update_steps: 1
27 | optimizer:
28 | lr: 0.5
29 | weight_decay: 0.0
30 | grad:
31 | grad_clip: 5.0
32 | criterion:
33 | type: CrossEntropyLoss
34 | trainer:
35 | type: cvtrainer
36 | eval:
37 | freq: 10
38 | metrics: ['acc', 'correct']
39 |
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_feature_gathering_op_boost_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [7, 14]
31 | algo: 'xgb'
32 | protect_object: 'feature_order'
33 | protect_method: 'op_boost'
34 | protect_args: [{'algo': 'global'}]
35 | data_size_for_debug: 2000
36 | eval:
37 | freq: 3
38 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/benchmark/B-FHTL/scripts/Graph-DC/fedavg_ft.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 20
5 | improve_indicator_mode: mean
6 | federate:
7 | mode: 'standalone'
8 | make_global_eval: False
9 | local_update_steps: 4
10 | batch_or_epoch: 'epoch'
11 | total_round_num: 400
12 | share_local_model: False
13 | data:
14 | root: data/
15 | type: fs_contest_data
16 | splitter: ooxx
17 | model:
18 | type: gin
19 | hidden: 64
20 | out_channels: 0
21 | task: graph
22 | personalization:
23 | local_param: ['encoder_atom', 'encoder', 'clf'] # to handle size-different pre & post layers
24 | optimizer:
25 | lr: 0.1
26 | weight_decay: 0.0005
27 | type: SGD
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: graphminibatch_trainer
32 | finetune:
33 | before_eval: True
34 | steps: 5
35 | eval:
36 | freq: 5
37 | metrics: ['acc', 'correct']
38 |
--------------------------------------------------------------------------------
/federatedscope/cl/baseline/fedcontrastlearning_linearprob_on_cifar10.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | seed: 1
3 | device: 2
4 | federate:
5 | mode: standalone
6 | total_round_num: 50
7 | client_num: 1
8 | sample_client_rate: 1.0
9 | method: global
10 | restore_from: '../test_supervised.ckpt'
11 | data:
12 | root: 'data'
13 | type: 'Cifar4LP'
14 | batch_size: 256
15 | splitter: 'lda'
16 | splitter_args: [{'alpha': 0.5}]
17 | num_workers: 4
18 | model:
19 | type: 'SimCLR_linear'
20 | train:
21 | local_update_steps: 1
22 | batch_or_epoch: 'epoch'
23 | optimizer:
24 | lr: 0.01
25 | momentum: 0.9
26 | weight_decay: 0.0
27 | scheduler:
28 | type: CosineAnnealingLR
29 | T_max: 50
30 | early_stop:
31 | patience: 0
32 | criterion:
33 | type: CrossEntropyLoss
34 | trainer:
35 | type: 'lptrainer'
36 | eval:
37 | freq: 5
38 | metrics: ['acc']
39 | split: ['val', 'test']
--------------------------------------------------------------------------------
/federatedscope/vertical_fl/tree_based_models/baseline/xgb_feature_gathering_dp_on_adult.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: False
2 | device: 0
3 | backend: torch
4 | federate:
5 | mode: standalone
6 | client_num: 2
7 | model:
8 | type: xgb_tree
9 | lambda_: 0.1
10 | gamma: 0
11 | num_of_trees: 10
12 | max_tree_depth: 3
13 | data:
14 | root: data/
15 | type: adult
16 | splits: [1.0, 0.0]
17 | dataloader:
18 | type: raw
19 | batch_size: 2000
20 | criterion:
21 | type: CrossEntropyLoss
22 | trainer:
23 | type: verticaltrainer
24 | train:
25 | optimizer:
26 | # learning rate for xgb model
27 | eta: 0.5
28 | vertical:
29 | use: True
30 | dims: [7, 14]
31 | algo: 'xgb'
32 | protect_object: 'feature_order'
33 | protect_method: 'dp'
34 | protect_args: [{'bucket_num': 100, 'epsilon':10}]
35 | data_size_for_debug: 2000
36 | eval:
37 | freq: 3
38 | best_res_update_round_wise_key: test_loss
--------------------------------------------------------------------------------
/scripts/attack_exp_scripts/privacy_attack/CRA_fedavg_convnet2_on_femnist.yaml:
--------------------------------------------------------------------------------
1 | use_gpu: True
2 | device: 0
3 | early_stop:
4 | patience: 100
5 | seed: 12345
6 | federate:
7 | mode: standalone
8 | total_round_num: 1000
9 | sample_client_num: 20
10 | client_num: 10
11 | data:
12 | root: data/
13 | type: femnist
14 | splits: [0.6,0.2,0.2]
15 | subsample: 0.0001
16 | transform: [['ToTensor'], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]
17 | dataloader:
18 | batch_size: 10
19 | model:
20 | type: convnet2
21 | hidden: 2048
22 | out_channels: 62
23 | train:
24 | local_update_steps: 50
25 | optimizer:
26 | lr: 0.01
27 | weight_decay: 0.0
28 | criterion:
29 | type: CrossEntropyLoss
30 | trainer:
31 | type: cvtrainer
32 | eval:
33 | freq: 10
34 | metrics: ['acc', 'correct']
35 | attack:
36 | attack_method: gan_attack
37 | attacker_id: 5
38 | target_label_ind: 3
39 |
--------------------------------------------------------------------------------
/scripts/example_configs/pfedhpo/cifar/run.sh:
--------------------------------------------------------------------------------
1 | R=400
2 | E=cifar_exp
3 | P=cifar
4 | D=0
5 | s=12345
6 | mkdir $E/s$s
7 | CUDA_VISIBLE_DEVICES=$D python federatedscope/main.py --cfg scripts/example_configs/pfedhpo/$P/pfedhpo.yaml hpo.pfedhpo.train_fl True hpo.pfedhpo.train_anchor True federate.sample_client_rate 1.0 federate.total_round_num $R seed $s outdir $E/s$s hpo.working_folder $E/s$s/working device 0
8 | CUDA_VISIBLE_DEVICES=$D python federatedscope/main.py --cfg scripts/example_configs/pfedhpo/$P/pfedhpo.yaml hpo.pfedhpo.train_fl False hpo.pfedhpo.target_fl_total_round $R seed $s outdir $E/s$s hpo.working_folder $E/s$s/working device 0
9 | CUDA_VISIBLE_DEVICES=$D python federatedscope/main.py --cfg scripts/example_configs/pfedhpo/$P/pfedhpo.yaml hpo.pfedhpo.train_fl True federate.total_round_num $R seed $s outdir $E/s$s hpo.working_folder $E/s$s/working device 0
10 | #rm -rf $E/s$s/working/temp_model_round_*
--------------------------------------------------------------------------------