├── .gitignore ├── LICENSE ├── README.md ├── cluster_logs ├── corruption_logs │ └── .gitkeep ├── deepens_amoebanet │ └── .gitkeep ├── deepens_darts │ └── .gitkeep ├── deepens_rs │ └── .gitkeep ├── evaluate │ └── .gitkeep ├── nes_re │ └── .gitkeep └── nes_rs │ └── .gitkeep ├── cluster_scripts ├── darts │ ├── c100_icml_rebuttal │ │ ├── plot_data.sh │ │ ├── sbatch_scripts │ │ │ ├── amoeba_esa.sh │ │ │ ├── anchor_hyper.sh │ │ │ ├── darts_esa.sh │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_darts_anchor.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── evaluate_ensembles_nas.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ ├── nes_rs.sh │ │ │ ├── nes_rs_esa.sh │ │ │ └── submit.sh │ │ └── start_grid.sh │ ├── cifar10 │ │ ├── plot_data.sh │ │ └── sbatch_scripts │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ └── nes_rs.sh │ ├── cifar100 │ │ ├── plot_data.sh │ │ ├── sbatch_scripts │ │ │ ├── anchor_hyper.sh │ │ │ ├── darts_hyper.sh │ │ │ ├── darts_nes.sh │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_darts_anchor.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ ├── nes_rs.sh │ │ │ └── submit.sh │ │ └── sbatch_scripts_hyper │ │ │ ├── deepens_darts.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── hyperens.sh │ │ │ ├── joint.sh │ │ │ ├── nes_cell.sh │ │ │ ├── nes_depth_width.sh │ │ │ └── nes_re.sh │ ├── cifar10_nips21 │ │ ├── plot_data.sh │ │ ├── sbatch_scripts-49k │ │ │ ├── amoeba_esa.sh │ │ │ ├── anchor_hyper.sh │ │ │ ├── darts_esa.sh │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_amoeba_50k.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_darts_50k.sh │ │ │ ├── deepens_darts_anchor.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── ensembles_from_pools_50k.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── evaluate_ensembles_50k.sh │ │ │ ├── evaluate_ensembles_oneshot.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ ├── nes_re_50k.sh │ │ │ ├── nes_rs.sh │ │ │ ├── nes_rs_50k.sh │ │ │ ├── nes_rs_esa.sh │ │ │ ├── nes_rs_oneshot.sh │ │ │ ├── random_mutations.sh │ │ │ ├── submit.sh │ │ │ └── train_oneshot.sh │ │ ├── sbatch_scripts │ │ │ ├── amoeba_esa.sh │ │ │ ├── anchor_hyper.sh │ │ │ ├── darts_esa.sh │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_amoeba_50k.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_darts_50k.sh │ │ │ ├── deepens_darts_anchor.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── ensembles_from_pools_50k.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── evaluate_ensembles_50k.sh │ │ │ ├── evaluate_ensembles_oneshot.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ ├── nes_re_50k.sh │ │ │ ├── nes_rs.sh │ │ │ ├── nes_rs_50k.sh │ │ │ ├── nes_rs_darts.sh │ │ │ ├── nes_rs_esa.sh │ │ │ ├── nes_rs_oneshot.sh │ │ │ ├── random_mutations.sh │ │ │ ├── submit.sh │ │ │ └── train_oneshot.sh │ │ ├── sbatch_scripts_hyper │ │ │ ├── deepens_darts.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── hyperens.sh │ │ │ ├── joint.sh │ │ │ ├── nes_cell.sh │ │ │ ├── nes_depth_width.sh │ │ │ └── nes_re.sh │ │ └── sbatch_scripts_hyper_2 │ │ │ ├── deepens_darts.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── 
hyperens.sh │ │ │ ├── joint.sh │ │ │ ├── nes_cell.sh │ │ │ ├── nes_depth_width.sh │ │ │ └── nes_re.sh │ ├── fmnist │ │ ├── plot_data.sh │ │ └── sbatch_scripts │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ └── nes_rs.sh │ ├── tiny │ │ ├── plot_data.sh │ │ ├── sbatch_scripts │ │ │ ├── amoeba_esa.sh │ │ │ ├── anchor_hyper.sh │ │ │ ├── darts_esa.sh │ │ │ ├── deepens_amoeba.sh │ │ │ ├── deepens_darts.sh │ │ │ ├── deepens_darts_anchor.sh │ │ │ ├── deepens_rs.sh │ │ │ ├── ensembles_from_pools.sh │ │ │ ├── evaluate_ensembles.sh │ │ │ ├── evaluate_ensembles_nas.sh │ │ │ ├── get_incumbents_rs.sh │ │ │ ├── nes_re.sh │ │ │ ├── nes_rs.sh │ │ │ ├── nes_rs_esa.sh │ │ │ └── submit.sh │ │ └── start_grid.sh │ └── tiny_nips21 │ │ ├── plot_data.sh │ │ ├── sbatch_scripts │ │ ├── amoeba_esa.sh │ │ ├── anchor_hyper.sh │ │ ├── darts_esa.sh │ │ ├── deepens_amoeba.sh │ │ ├── deepens_darts.sh │ │ ├── deepens_darts_anchor.sh │ │ ├── deepens_rs.sh │ │ ├── ensembles_from_pools.sh │ │ ├── evaluate_ensembles.sh │ │ ├── evaluate_ensembles_nas.sh │ │ ├── get_incumbents_rs.sh │ │ ├── nes_re.sh │ │ ├── nes_rs.sh │ │ ├── nes_rs_esa.sh │ │ └── submit.sh │ │ └── start_grid.sh ├── generate_corrupted.sh ├── launcher.config ├── launchers.sh └── nb201 │ ├── cifar10 │ ├── plot_data.sh │ └── sbatch_scripts │ │ ├── deepens_darts.sh │ │ ├── deepens_gdas.sh │ │ ├── deepens_gm.sh │ │ ├── deepens_rs.sh │ │ ├── ensembles_from_pools.sh │ │ ├── evaluate_ensembles.sh │ │ ├── get_incumbents_rs.sh │ │ ├── nes_re.sh │ │ └── nes_rs.sh │ ├── cifar100 │ ├── plot_data.sh │ └── sbatch_scripts │ │ ├── deepens_darts.sh │ │ ├── deepens_gdas.sh │ │ ├── deepens_gm.sh │ │ ├── deepens_rs.sh │ │ ├── ensembles_from_pools.sh │ │ ├── evaluate_ensembles.sh │ │ ├── get_incumbents_rs.sh │ │ ├── nes_re.sh │ │ └── nes_rs.sh │ └── imagenet │ ├── plot_data.sh │ └── sbatch_scripts │ ├── deepens_darts.sh │ ├── deepens_gdas.sh │ ├── deepens_gm.sh │ ├── deepens_rs.sh │ ├── ensembles_from_pools.sh │ ├── evaluate_ensembles.sh │ ├── get_incumbents_rs.sh │ ├── nes_re.sh │ └── nes_rs.sh ├── data ├── cifar10-C │ └── frost_overlays │ │ ├── frost1.png │ │ ├── frost2.png │ │ ├── frost3.png │ │ ├── frost4.jpg │ │ ├── frost5.jpg │ │ └── frost6.jpg ├── corruptions.py └── generate_corrupted.py ├── experiments └── .gitkeep ├── figures ├── fmnist.png └── nes_re.png ├── nes ├── __init__.py ├── darts │ ├── baselearner_train │ │ ├── __init__.py │ │ ├── genotypes.py │ │ ├── model.py │ │ ├── model_imagenet.py │ │ ├── oneshot │ │ │ ├── darts_wrapper_discrete.py │ │ │ └── model_search.py │ │ ├── operations.py │ │ ├── space_encoding │ │ │ └── configspace.json │ │ ├── train.py │ │ └── utils.py │ ├── re │ │ ├── __init__.py │ │ ├── re_master.py │ │ └── re_sampler.py │ ├── scripts │ │ ├── darts_re.py │ │ ├── darts_rs.py │ │ ├── darts_rs_2.py │ │ ├── mutation_tsne.py │ │ ├── run_nes_re.py │ │ ├── run_nes_rs.py │ │ ├── train_deepens_baselearner.py │ │ └── tune_anchor.py │ └── worker.py ├── ensemble_selection │ ├── __init__.py │ ├── config.py │ ├── containers.py │ ├── create_baselearners.py │ ├── ensembles_from_pools.py │ ├── esas.py │ ├── evaluate_ensembles.py │ ├── plot_data.py │ ├── rs_incumbents.py │ └── utils.py ├── nasbench201 │ ├── __init__.py │ ├── re │ │ └── re_sampler.py │ ├── scripts │ │ ├── run_nes_re.py │ │ ├── run_nes_rs.py │ │ └── train_deepens_baselearner.py │ └── worker.py ├── plots │ ├── make_fig7_c10.py │ ├── make_plot_50k.py │ 
├── make_plot_esas.py │ ├── make_plot_master.py │ ├── make_plot_neurips.py │ ├── make_table.py │ ├── plot_data.py │ ├── plot_data_1.py │ ├── plot_icml_rebuttal.py │ └── radar_plot.py └── utils │ ├── __init__.py │ ├── configs_to_genotype.py │ ├── data_loaders.py │ └── nb201 │ ├── DownsampledImageNet.py │ ├── __init__.py │ ├── api_utils.py │ ├── config_utils │ ├── __init__.py │ ├── attention_args.py │ ├── basic_args.py │ ├── cls_init_args.py │ ├── cls_kd_args.py │ ├── configure_utils.py │ ├── pruning_args.py │ ├── random_baseline.py │ ├── search_args.py │ ├── search_single_args.py │ └── share_args.py │ ├── configs │ ├── arch_to_id.pkl │ ├── check_if_3_seeds.py │ ├── cifar-split.txt │ ├── cifar10.pkl │ ├── cifar100-test-split.txt │ ├── cifar100.pkl │ ├── find_minimum.py │ ├── gen_3seed_data.py │ ├── gen_arch_id.py │ ├── imagenet-16-120-test-split.txt │ ├── imagenet.pkl │ └── optimal.txt │ └── models │ ├── CifarDenseNet.py │ ├── CifarResNet.py │ ├── CifarWideResNet.py │ ├── ImageNet_MobileNetV2.py │ ├── ImageNet_ResNet.py │ ├── SharedUtils.py │ ├── __init__.py │ ├── cell_infers │ ├── __init__.py │ ├── cells.py │ ├── nasnet_cifar.py │ └── tiny_network.py │ ├── cell_operations.py │ ├── cell_searchs │ ├── __init__.py │ ├── _test_module.py │ ├── generic_model.py │ ├── genotypes.py │ ├── search_cells.py │ ├── search_model_darts.py │ ├── search_model_darts_nasnet.py │ ├── search_model_enas.py │ ├── search_model_enas_utils.py │ ├── search_model_gdas.py │ ├── search_model_gdas_nasnet.py │ ├── search_model_random.py │ ├── search_model_setn.py │ └── search_model_setn_nasnet.py │ ├── clone_weights.py │ ├── initialization.py │ ├── shape_infers │ ├── InferCifarResNet.py │ ├── InferCifarResNet_depth.py │ ├── InferCifarResNet_width.py │ ├── InferImagenetResNet.py │ ├── InferMobileNetV2.py │ ├── InferTinyCellNet.py │ ├── __init__.py │ └── shared_utils.py │ └── shape_searchs │ ├── SearchCifarResNet.py │ ├── SearchCifarResNet_depth.py │ ├── SearchCifarResNet_width.py │ ├── SearchImagenetResNet.py │ ├── SearchSimResNet_width.py │ ├── SoftSelect.py │ ├── __init__.py │ ├── generic_size_tiny_cell_model.py │ └── test.py └── requirements.txt /cluster_logs/corruption_logs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/corruption_logs/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/deepens_amoebanet/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/deepens_amoebanet/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/deepens_darts/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/deepens_darts/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/deepens_rs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/deepens_rs/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/evaluate/.gitkeep: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/evaluate/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/nes_re/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/nes_re/.gitkeep -------------------------------------------------------------------------------- /cluster_logs/nes_rs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/cluster_logs/nes_rs/.gitkeep -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | #source activate python36 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/make_plot_master.py \ 10 | --Ms 2 3 5 7 10 15 \ 11 | --methods nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet deepens_darts_anchor \ 12 | --save_dir experiments/tiny/outputs/plots-new \ 13 | --load_plotting_data_dir experiments/tiny/outputs/plotting_data \ 14 | --dataset tiny \ 15 | --run run_1 run_2 run_3 run_4 run_5 \ 16 | --plot_type ensemble_size 17 | #PYTHONPATH=. python nes/ensemble_selection/plot_data.py \ 18 | #--Ms 2 3 5 7 10 15 \ 19 | #--methods nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet darts_esa amoebanet_esa nes_rs_esa\ 20 | #--save_dir experiments/tiny/outputs/plots \ 21 | #--load_plotting_data_dir experiments/tiny/outputs/plotting_data \ 22 | #--dataset tiny \ 23 | #--run run_1 run_2 run_3 run_4 run_5 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/amoeba_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-amoebanet # sets the job name. 
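# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): the training
# scripts in this directory share one SLURM array pattern -- here
# "#SBATCH -a 0-399" spawns 400 independent tasks, and each task trains
# a single base learner, reusing $SLURM_ARRAY_TASK_ID as its --seed_id
# (and, in the random-search scripts, also as its --arch_id). Submitting
# the whole array, or re-running one task by overriding the range on the
# command line, would look like this (task index 17 is illustrative):
#
#   sbatch cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/amoeba_esa.sh
#   sbatch -a 17 cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/amoeba_esa.sh
# ------------------------------------------------------------------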
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/amoebanet_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme amoebanet_esa \ 24 | --train_amoebanet --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 10 #0-100 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_hyper # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $1 --arch_id $1 \ 20 | --working_directory "experiments-anchor/tiny_2" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 23 | --train_darts --global_seed $1 --batch_size 128 --anchor \ 24 | --lr 0.025 --wd 0.0 --anch_coeff $2 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/darts_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J darts-esa # sets the job name. 
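# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): anchor_hyper.sh
# above takes two positional arguments -- $1 is reused as --seed_id,
# --arch_id and --global_seed, and $2 is forwarded to --anch_coeff -- so
# a small sweep over the anchored-regularisation strength could be
# launched as below. The coefficient values are illustrative; the
# committed deepens_darts_anchor.sh scripts use 0.1 (Tiny ImageNet) and
# 0.4 (CIFAR-100).
#
#   for coeff in 0.1 0.2 0.4; do
#       sbatch cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/anchor_hyper.sh 1 $coeff
#   done
# ------------------------------------------------------------------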
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/darts_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme darts_esa \ 24 | --train_darts --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_amoebanet/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_amoebanet \ 22 | --train_amoebanet --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-15 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_deepens-darts # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts_anchor/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts_anchor \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 24 | --anchor --anch_coeff 0.1 --wd 0.0 25 | 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -a 1-3 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | 8 | # Activate virtual environment 9 | source activate python37 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M 10 \ 13 | --pool_name $1 \ 14 | --save_dir experiments-rebuttal/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir "experiments/cifar100_low/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 16 | --dataset cifar100 \ 17 | --validation_size $2 18 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%x.%A-%a.%N.o 3 | #SBATCH -e ./cluster_logs/evaluate/%x.%A-%a.%N.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -a 1-3 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | 8 | 
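# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): this script and
# ensembles_from_pools.sh above are parameterised by two positional
# arguments -- $1 names the base-learner pool/method and $2 is forwarded
# to --validation_size -- while the array range "-a 1-3" selects
# run_1..run_3. A typical submission therefore looks like the line
# below; the validation-set size 5000 is an illustrative value only.
#
#   sbatch cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/evaluate_ensembles.sh nes_re 5000
# ------------------------------------------------------------------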
# Activate virtual environment 9 | source activate python37 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M 10 \ 13 | --method $1 \ 14 | --save_dir experiments-rebuttal/cifar100/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments/cifar100_low/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --incumbents_dir experiments/cifar100_low/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 17 | --load_bsls_dir "experiments/cifar100_low/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 18 | --load_ens_chosen_dir experiments-rebuttal/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset cifar100 \ 20 | --validation_size $2 21 | 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/evaluate_ensembles_nas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | # -a 0-14 # should be 5 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 6 | 7 | # Activate virtual environment 8 | source activate python37 9 | 10 | # mapping from slurm task ID to parameters for python call. 11 | # . cluster_scripts/launcher.config 12 | # IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 13 | # IFS=' ' read -r -a arr <<< "${grid[*]}" 14 | # IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 15 | 16 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 17 | --M "$2" \ 18 | --method $1 \ 19 | --save_dir experiments/tiny/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 20 | --nes_rs_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --incumbents_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 22 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 23 | --load_ens_chosen_dir experiments/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 24 | --dataset tiny 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu37,dlcgpu26 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -a 4-5 8 | #SBATCH -J get_incumbents_rs # sets the job name. 
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 14 | 15 | # Activate virtual environment 16 | source venv/bin/activate 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 20 | --save_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --load_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 22 | --pool_name nes_rs \ 23 | --dataset tiny 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 5 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J imn-nes-re # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 19 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 20 | --num_iterations 200 --batch_size 128 --num_epochs 100 --population_size 50 --sample_size 10 \ 21 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 22 | --nic_name eth0 --working_directory experiments/tiny/baselearners/nes_re \ 23 | --global_seed $1 --scheme nes_re --dataset tiny 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199%100 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu37,dlcgpu26 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J imn-nes-rs # sets the job name. 
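# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): both nes_re.sh
# above and nes_rs.sh below take the global seed as their only
# positional argument (--global_seed $1), i.e. one full NES run per
# seed. The plotting script in this directory averages over
# run_1..run_5, so a launch over five seeds would look like:
#
#   for seed in 1 2 3 4 5; do
#       sbatch cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/nes_re.sh $seed
#       sbatch cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/nes_rs.sh $seed
#   done
# ------------------------------------------------------------------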
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments/tiny/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset tiny --num_epochs 100 --batch_size 128 --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine --global_seed $1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/sbatch_scripts/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | algs=(nes_rs nes_re deepens_rs) 4 | nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | 6 | #for m in {2,3,5,7,10,15} 7 | #do 8 | #for alg in ${algs[@]} 9 | #do 10 | ##scancel -n ${m}-${alg} 11 | #sbatch --bosch -J ${m}-${alg} -a 1-5 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $alg $m 12 | #echo ${m}-${alg} 13 | #done 14 | #for nasalg in ${nasalgs[@]} 15 | #do 16 | ##scancel -n ${m}-${alg} 17 | #sbatch --bosch -J ${m}-${nasalg} -a 1 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 18 | #echo ${m}-${nasalg} 19 | #done 20 | #done 21 | 22 | for m in {2,3,5,7,10,15} 23 | do 24 | sbatch -J ${m}-dartsesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh darts_esa $m 25 | sbatch -J ${m}-amoebaesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh amoebanet_esa $m 26 | sbatch -J ${m}-dartsesa -a 3 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh nes_rs_esa $m 27 | done 28 | 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/c100_icml_rebuttal/start_grid.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scheduler="cosine step" 4 | layers="8 11 14" 5 | channels="16 36 48" 6 | lrs=$(awk 'BEGIN{for(i=0.025;i<=0.1;i*=2)print i}') 7 | 8 | 9 | for sch in $scheduler; do 10 | for l in $layers; do 11 | for c in $channels; do 12 | for lr in $lrs; do 13 | sbatch cluster_scripts/tiny/eval_clip.sh $sch $l $c $lr 14 | echo submitted job $sch $l $c $lr 15 | done 16 | done 17 | done 18 | done 19 | 20 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | . cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | source venv/bin/activate 8 | 9 | PYTHONPATH=.
python nes/ensemble_selection/plot_data.py \ 10 | --Ms "${ens_sizes[@]}" \ 11 | --methods nes_rs nes_re deepens_darts deepens_amoebanet deepens_rs \ 12 | --save_dir experiments/cifar10/outputs/plots \ 13 | --load_plotting_data_dir experiments/cifar10/outputs/plotting_data \ 14 | --dataset cifar10 15 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/cifar10/baselearners/deepens_amoebanet/" --dataset cifar10 --num_epochs 100 --scheme deepens_amoebanet --train_amoebanet 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/cifar10/baselearners/deepens_darts/" --dataset cifar10 --num_epochs 100 --scheme deepens_darts --train_darts 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name. 
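# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): this
# deepens_rs.sh script (continuing below) trains a deep ensemble for a
# single architecture passed as $1, which is looked up among the
# random_archs sampled by nes_rs. The incumbent architecture ids are
# produced by get_incumbents_rs.sh (incumbents.txt), so one plausible
# way to launch all of them -- assuming one id per line in that file --
# is a loop such as:
#
#   while read -r arch_id; do
#       sbatch cluster_scripts/darts/cifar10/sbatch_scripts/deepens_rs.sh "$arch_id"
#   done < experiments/cifar10/outputs/deepens_rs/incumbents.txt
# ------------------------------------------------------------------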
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/cifar10/baselearners/deepens_rs/" --dataset cifar10 --num_epochs 100 --scheme deepens_rs --arch_path "experiments/cifar10/baselearners/nes_rs/random_archs" 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 6 | #SBATCH -a 0-5 # should be 2 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | # mapping from slurm task ID to parameters for python call. 12 | . cluster_scripts/launcher.config 13 | IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${pools[*]}"}) ) 14 | IFS=' ' read -r -a arr <<< "${grid[*]}" 15 | IFS=+ read M pool_name <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 16 | 17 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 18 | --M "$M" \ 19 | --pool_name "$pool_name" \ 20 | --save_dir experiments/cifar10/ensembles_selected/ \ 21 | --load_bsls_dir "experiments/cifar10/baselearners/$pool_name" \ 22 | --dataset cifar10 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J eval_ens # sets the job name. If not specified, the file name will be used as job name 6 | #SBATCH -a 0-14 # should be 5 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | # mapping from slurm task ID to parameters for python call. 12 | . 
cluster_scripts/launcher.config 13 | IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 14 | IFS=' ' read -r -a arr <<< "${grid[*]}" 15 | IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 16 | 17 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 18 | --M "$M" \ 19 | --method "$method" \ 20 | --save_dir experiments/cifar10/outputs/plotting_data/ \ 21 | --nes_rs_bsls_dir experiments/cifar10/baselearners/nes_rs/ \ 22 | --incumbents_dir experiments/cifar10/outputs/deepens_rs/incumbents.txt \ 23 | --load_bsls_dir "experiments/cifar10/baselearners/$method" \ 24 | --load_ens_chosen_dir experiments/cifar10/ensembles_selected/ \ 25 | --dataset cifar10 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 6 | 7 | # Info 8 | echo "Workingdir: $PWD" 9 | echo "Started at $(date)" 10 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 11 | 12 | # Activate virtual environment 13 | source venv/bin/activate 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 17 | --save_dir experiments/cifar10/outputs/deepens_rs/ \ 18 | --load_bsls_dir experiments/cifar10/baselearners/nes_rs \ 19 | --pool_name nes_rs \ 20 | --dataset cifar10 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 18 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 19 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 20 | --nic_name eth0 --working_directory experiments/cifar10/baselearners/nes_re \ 21 | --seed 1 --scheme nes_re --dataset cifar10 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. 
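# ------------------------------------------------------------------
# Editor's sketch (not part of the original repository): the cifar10
# scripts above source cluster_scripts/launcher.config, which is not
# shown in this dump. From the way it is consumed it must define bash
# arrays roughly like the following; the exact values are an assumption,
# chosen to match the comments "-a 0-5" (2 pools x 3 sizes) and
# "-a 0-14" (5 methods x 3 sizes):
#
#   ens_sizes=(3 5 10)
#   pools=(nes_rs nes_re)
#   methods=(nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet)
#
# With IFS=',' the brace expansion {"${ens_sizes[*]}"}+{"${pools[*]}"}
# becomes {3,5,10}+{nes_rs,nes_re}, i.e. the cross product
#   3+nes_rs 3+nes_re 5+nes_rs 5+nes_re 10+nes_rs 10+nes_re
# and "IFS=+ read M pool_name" then splits the pair selected by
# $SLURM_ARRAY_TASK_ID into the ensemble size and the pool name.
# ------------------------------------------------------------------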
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments/cifar10/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar10 --num_epochs 100 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | #source activate python36 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/make_plot_master.py \ 10 | --Ms 2 3 5 7 10 15 20 30\ 11 | --methods nes_rs deepens_darts deepens_amoebanet darts_rs nes_re deepens_darts_anchor\ 12 | --save_dir experiments/cifar100_low/outputs/plots-new \ 13 | --load_plotting_data_dir experiments/cifar100_low/outputs/plotting_data \ 14 | --dataset cifar100 \ 15 | --run run_1 run_2 run_3 \ 16 | --plot_type ensemble_size 17 | #PYTHONPATH=. python nes/ensemble_selection/plot_data_1.py \ 18 | #--Ms 2 3 5 7 10 15 20 30\ 19 | #--methods nes_rs deepens_darts darts_hyper joint darts_rs nes_re \ 20 | #--save_dir experiments_hyper/cifar100/outputs/plots \ 21 | #--load_plotting_data_dir experiments_hyper/cifar100/outputs/plotting_data \ 22 | #--dataset cifar100 \ 23 | #--run run_1 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-6 #0-100 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_hyper # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID --arch_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments-anchor/" \ 20 | --dataset cifar100 --num_epochs 100 --scheme deepens_darts \ 21 | --n_layers 8 --init_channels 16 --scheduler cosine \ 22 | --train_darts --global_seed 1 --anchor \ 23 | --lr 0.025 --wd 0.0 --anch_coeff 0.$SLURM_ARRAY_TASK_ID 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/darts_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-100 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/darts_hyper/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_hyper/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J darts-hyper # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/cifar100/baselearners/darts_hyper/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme darts_hyper \ 24 | --global_seed 1 --hyperensemble 25 | 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/darts_nes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-100 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J darts-rs # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/cifar100/baselearners/darts_rs/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme darts_rs \ 24 | --global_seed 1 --nes 25 | 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/cifar100/baselearners/deepens_amoebanet/" \ 20 | --dataset cifar100 --num_epochs 100 --scheme deepens_amoebanet \ 21 | --train_amoebanet --global_seed $1 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/cifar100_low/baselearners/deepens_darts/" \ 21 | --dataset cifar100 --num_epochs 100 --scheme deepens_darts \ 22 | --train_darts --global_seed 1 23 | 24 | # Done 25 | echo "DONE" 26 | echo "Finished at $(date)" 27 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_deepens-darts # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/cifar100_low/baselearners/deepens_darts_anchor/" \ 20 | --dataset cifar100 --num_epochs 100 --scheme deepens_darts_anchor \ 21 | --train_darts --global_seed 1 --batch_size 100 \ 22 | --n_layers 8 --init_channels 16 --scheduler cosine \ 23 | --anchor --lr 0.025 --wd 0.0 --anch_coeff 0.4 24 | #--anchor --lr 0.03830591641545368 --wd 1.0221751529451768e-05 --anch_coeff 0.4189053283865941 25 | 26 | 27 | # .1,- .6 28 | 29 | # Done 30 | echo "DONE" 31 | echo "Finished at $(date)" 32 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name.
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/cifar100/baselearners/deepens_rs/" \ 20 | --dataset cifar100 --num_epochs 100 --scheme deepens_rs \ 21 | --arch_path "experiments/cifar100/baselearners/nes_rs/run_$1/random_archs" \ 22 | --global_seed $1 23 | 24 | # Done 25 | echo "DONE" 26 | echo "Finished at $(date)" 27 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p ml_gpu-rtx2080 5 | #SBATCH -a 1 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Activate virtual environment 10 | source activate python36 11 | 12 | # mapping from slurm task ID to parameters for python call. 13 | #. cluster_scripts/launcher.config 14 | #IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${pools[*]}"}) ) 15 | #IFS=' ' read -r -a arr <<< "${grid[*]}" 16 | #IFS=+ read M pool_name <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 17 | 18 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 19 | --M "$2" \ 20 | --pool_name $1 \ 21 | --save_dir experiments/cifar100_low/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 22 | --load_bsls_dir "experiments/cifar100_low/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 23 | --dataset cifar100 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%x.%A-%a.%N.o 3 | #SBATCH -e ./cluster_logs/evaluate/%x.%A-%a.%N.e 4 | #SBATCH -p bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | 7 | # Activate virtual environment 8 | source activate python37 9 | 10 | # mapping from slurm task ID to parameters for python call. 11 | #. 
cluster_scripts/launcher.config 12 | #IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 13 | #IFS=' ' read -r -a arr <<< "${grid[*]}" 14 | #IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 15 | 16 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 17 | --M "$2" \ 18 | --method "$1" \ 19 | --save_dir experiments/cifar100_low/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 20 | --nes_rs_bsls_dir experiments/cifar100_low/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --incumbents_dir experiments/cifar100_low/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 22 | --load_bsls_dir "experiments/cifar100_low/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 23 | --load_ens_chosen_dir experiments/cifar100_low/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 24 | --dataset cifar100 25 | 26 | 27 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 6 | 7 | # Info 8 | echo "Workingdir: $PWD" 9 | echo "Started at $(date)" 10 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 11 | 12 | # Activate virtual environment 13 | source venv/bin/activate 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 17 | --save_dir experiments/cifar100/outputs/deepens_rs/ \ 18 | --load_bsls_dir experiments/cifar100/baselearners/nes_rs \ 19 | --pool_name nes_rs \ 20 | --dataset cifar100 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 18 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 19 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 20 | --nic_name eth0 --working_directory experiments/cifar100/baselearners/nes_re \ 21 | --global_seed $1 --scheme nes_re --dataset cifar100 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments/cifar100/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar100 --num_epochs 100 --global_seed $1 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | algs=(nes_rs nes_re deepens_rs) 4 | nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | 6 | for m in {2,3,5,7,10,15,20,30} 7 | do 8 | for alg in ${algs[@]} 9 | do 10 | #scancel -n ${m}-${alg} 11 | sbatch --bosch -J ${m}-${alg} -a 1-3 cluster_scripts/cifar100/sbatch_scripts/evaluate_ensembles.sh $alg $m 12 | echo ${m}-${alg} 13 | done 14 | for nasalg in ${nasalgs[@]} 15 | do 16 | #scancel -n ${m}-${alg} 17 | sbatch --bosch -J ${m}-${nasalg} -a 1 cluster_scripts/cifar100/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 18 | echo ${m}-${nasalg} 19 | done 20 | done 21 | 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-14 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-darts # sets the job name. 
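# ------------------------------------------------------------------
# Editor's note (not part of the original repository): the nested loop
# in cifar100/sbatch_scripts/submit.sh above expands to one evaluation
# job per (ensemble size, method) pair -- 8 sizes x 3 search-based
# methods over runs 1-3, plus 8 sizes x 3 fixed-architecture baselines
# over run 1. Its first and last iterations are equivalent to:
#
#   sbatch --bosch -J 2-nes_rs -a 1-3 cluster_scripts/cifar100/sbatch_scripts/evaluate_ensembles.sh nes_rs 2
#   sbatch --bosch -J 30-deepens_darts_anchor -a 1 cluster_scripts/cifar100/sbatch_scripts/evaluate_ensembles.sh deepens_darts_anchor 30
# ------------------------------------------------------------------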
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar100/baselearners/deepens_darts/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme deepens_darts --global_seed 1 \ 24 | --lr 0.03704869432849922 --wd 0.001842328053779474 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "$2" \ 13 | --pool_name $1 \ 14 | --save_dir experiments_hyper/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir "experiments_hyper/cifar100/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 16 | --dataset cifar100 17 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J eval_ens # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "$2" \ 13 | --method "$1" \ 14 | --save_dir experiments_hyper/cifar100/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments_hyper/cifar100/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --load_bsls_dir "experiments_hyper/cifar100/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 17 | --load_ens_chosen_dir experiments_hyper/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar100 19 | 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/hyperens.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_hyper/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_hyper/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar100/baselearners/darts_hyper/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme darts_hyper \ 24 | --global_seed 1 --hyperensemble 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/joint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/joint/%A-%a.o 7 | #SBATCH -e ./cluster_logs/joint/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar100/baselearners/joint/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme joint \ 24 | --global_seed 1 --hyperensemble --nes_cell 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/nes_cell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-rs # sets the job name. 
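# --- Pipeline sketch for the *_hyper scripts (illustrative) ---
# hyperens.sh builds a pool that varies only training hyperparameters
# (--hyperensemble), while joint.sh varies both the architecture and the
# hyperparameters (--hyperensemble --nes_cell); deepens_darts.sh above instead
# fixes the tuned lr/wd and varies only the seed. The resulting pools are then fed
# to the selection and evaluation steps, e.g. (paths assumed from the current layout):
#   sbatch cluster_scripts/darts/cifar100/sbatch_scripts_hyper/ensembles_from_pools.sh darts_hyper 10
#   sbatch cluster_scripts/darts/cifar100/sbatch_scripts_hyper/evaluate_ensembles.sh darts_hyper 10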
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar100/baselearners/nes_rs/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme nes_rs \ 24 | --global_seed 1 --nes_cell \ 25 | --lr 0.03704869432849922 --wd 0.001842328053779474 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/nes_depth_width.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-d-w # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar100/baselearners/darts_rs/" \ 23 | --dataset cifar100 --num_epochs 100 --scheme darts_rs \ 24 | --global_seed 1 --nes_depth_width \ 25 | --lr 0.03704869432849922 --wd 0.001842328053779474 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar100/sbatch_scripts_hyper/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | source activate python36 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_re.py --array_id \ 17 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 18 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 19 | --nic_name eth0 --working_directory experiments_hyper/cifar100/baselearners/nes_re \ 20 | --global_seed 1 --scheme nes_re --dataset cifar100 \ 21 | --lr 0.03704869432849922 --wd 0.001842328053779474 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | #source activate python36 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/plot_data_1.py \ 10 | --Ms 2 3 5 7 10 15\ 11 | --methods nes_rs deepens_darts darts_hyper darts_rs joint nes_re \ 12 | --save_dir experiments_hyper/cifar10/outputs/plots \ 13 | --load_plotting_data_dir experiments_hyper/cifar10/outputs/plotting_data \ 14 | --dataset cifar10 \ 15 | --run run_1 16 | 17 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/amoeba_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 119-199 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 6 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J amoeba-esa # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 14 | 15 | # Activate virtual environment 16 | #source venv/bin/activate 17 | source activate python37 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/amoebanet_esa/" --dataset cifar10 --num_epochs 100 --scheme amoebanet_esa --train_amoebanet 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-6 #0-100 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_hyper # sets the job name. 
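# --- Plotting step (illustrative note) ---
# Once evaluate_ensembles.sh has written outputs/plotting_data for every
# (method, M) pair, plot_data.sh above is run once, without an array job, to
# aggregate the curves; the --Ms and --methods lists it passes to the plotting
# script should match the pairs that were actually evaluated:
#   bash cluster_scripts/darts/cifar10_nips21/plot_data.sh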
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID --arch_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments-anchor/c10" \ 20 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts \ 21 | --n_layers 8 --init_channels 16 --scheduler cosine \ 22 | --train_darts --global_seed 1 --anchor \ 23 | --lr 0.025 --wd 0.0 --anch_coeff 0.$SLURM_ARRAY_TASK_ID 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/darts_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 146-199 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J darts-esa # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/darts_esa/" --dataset cifar10 --num_epochs 100 --scheme darts_esa --train_darts 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. 
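# --- Note on the anchored-ensembles sweep (illustrative) ---
# anchor_hyper.sh above sweeps the anchored-regularization coefficient through the
# array index (--anch_coeff 0.$SLURM_ARRAY_TASK_ID with -a 1-6, i.e. 0.1 ... 0.6);
# the 0.4 hardcoded in deepens_darts_anchor.sh further below corresponds to one
# point of that grid.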
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-49k/cifar10/baselearners/deepens_amoebanet/" --dataset cifar10 --num_epochs 100 --scheme deepens_amoebanet --train_amoebanet --n_datapoints 49000 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_amoeba_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-49k/cifar10/baselearners/deepens_amoebanet_50k/" --dataset cifar10 --num_epochs 100 --scheme deepens_amoebanet_50k --train_amoebanet --full_train 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-49k/cifar10/baselearners/deepens_darts/" --dataset cifar10 --num_epochs 100 --scheme deepens_darts --train_darts --n_datapoints 49000 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_darts_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-49k/cifar10/baselearners/deepens_darts_50k/" --dataset cifar10 --num_epochs 100 --scheme deepens_darts_50k --train_darts --full_train 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_deepens-darts # sets the job name. 
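# --- Note on the 49k vs. 50k variants (illustrative) ---
# The *-49k scripts train base learners on 49,000 CIFAR-10 images
# (--n_datapoints 49000), presumably keeping the remaining 1,000 as held-out data
# for validation and ensemble selection, whereas the *_50k scripts retrain the
# chosen models on the full 50,000-image training set (--full_train), e.g.:
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_darts.sh
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_darts_50k.sh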
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/cifar10_low/baselearners/deepens_darts_anchor/" \ 20 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts_anchor \ 21 | --train_darts --global_seed 1 --batch_size 100 \ 22 | --n_layers 8 --init_channels 16 --scheduler cosine \ 23 | --anchor --lr 0.025 --wd 0.0 --anch_coeff 0.4 24 | #--anchor --lr 0.03830591641545368 --wd 1.0221751529451768e-05 --anch_coeff 0.4189053283865941 25 | 26 | 27 | # .1,- .6 28 | 29 | # Done 30 | echo "DONE" 31 | echo "Finished at $(date)" 32 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -p bosch_gpu-rtx2080 4 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-rs # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_rs/" --dataset cifar10 --num_epochs 100 --scheme deepens_rs --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -p alldlc_gpu-rtx2080 6 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 7 | #SBATCH -a 1 8 | #SBATCH -J ens_from_pool # sets the job name. 
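# --- Usage sketch for deepens_rs.sh (illustrative) ---
# deepens_rs.sh above trains seeds 0-29 of a single architecture: $1 is the arch id
# to deep-ensemble (e.g. the incumbent reported by get_incumbents_rs.sh) and $2 is
# the global seed of the NES-RS run whose random_archs directory it reads. For
# instance, using the arch id noted for seed 1 in nes_rs_esa.sh further below:
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/deepens_rs.sh 164 1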
If not specified, the file name will be used as job name 9 | 10 | # Activate virtual environment 11 | source activate python37 12 | 13 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 14 | --M $2 \ 15 | --pool_name $1 \ 16 | --save_dir experiments-49k/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 17 | --load_bsls_dir experiments-49k/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar10 \ 19 | --esa beam_search \ 20 | --arch_id 0 # used only for DeepEns (RS) + ESA 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/ensembles_from_pools_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -p alldlc_gpu-rtx2080 6 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 7 | #SBATCH -a 1-3 8 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Activate virtual environment 11 | source activate python37 12 | 13 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 14 | --M $2 \ 15 | --pool_name $1 \ 16 | --save_dir experiments-nips21/cifar10/ensembles_selected_50k/run_$SLURM_ARRAY_TASK_ID \ 17 | --load_bsls_dir experiments-nips21/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar10 \ 19 | --esa $4 \ 20 | --arch_id $3 # used only for DeepEns (RS) + ESA 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 6 | #SBATCH -a 1-3 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | 9 | # Activate virtual environment 10 | source activate python37 11 | 12 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 13 | --M $2 \ 14 | --method $1 \ 15 | --save_dir experiments-nips21/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 16 | --nes_rs_bsls_dir experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 17 | --incumbents_dir experiments-nips21/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 18 | --load_bsls_dir "experiments-nips21/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 19 | --load_ens_chosen_dir experiments-nips21/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 20 | --dataset cifar10 \ 21 | --esa $3 \ 22 | --arch_id 0 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/evaluate_ensembles_oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 6 | #SBATCH -a 1-3 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | 9 | # Activate virtual environment 10 | source activate python37 11 | 12 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 13 | --M $1 \ 14 | --method nes_rs_oneshot \ 15 | --save_dir 
experiments-nips21/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 16 | --nes_rs_bsls_dir experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 17 | --incumbents_dir experiments-nips21/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 18 | --load_bsls_dir "experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID" \ 19 | --load_ens_chosen_dir experiments-nips21/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 20 | --dataset cifar10 \ 21 | --esa beam_search \ 22 | --arch_id 0 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -a 3 #1-3 7 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 20 | --save_dir experiments-nips21/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --load_bsls_dir experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 22 | --pool_name nes_rs \ 23 | --dataset cifar10 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
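# --- Note on the DeepEns (RS) flow (illustrative) ---
# get_incumbents_rs.sh above runs rs_incumbents.py over the nes_rs pool of each run
# and writes the incumbent architecture ids (incumbents.txt) under
# outputs/deepens_rs/run_<seed>/; that file is what evaluate_ensembles.sh later
# consumes through --incumbents_dir, e.g.:
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/get_incumbents_rs.sh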
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 19 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 20 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 21 | --nic_name eth0 --working_directory experiments-49k/cifar10/baselearners/nes_re \ 22 | --global_seed $1 --scheme nes_re --dataset cifar10 --n_datapoints 49000 23 | 24 | # Done 25 | echo "DONE" 26 | echo "Finished at $(date)" 27 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_re_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 4 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J nes_re_50k # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $1 --working_directory "experiments-nips21/cifar10/baselearners/nes_re_50k/" --dataset cifar10 --num_epochs 80 --scheme nes_re_50k --arch_path "experiments-nips21/cifar10/baselearners/nes_re/run_${2}/sampled_configs" --global_seed $2 --full_train 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments-49k/cifar10/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar10 --num_epochs 100 --global_seed $1 --n_datapoints 49000 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_rs_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 4 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J nes_rs_50k # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $1 --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_50k/" --dataset cifar10 --num_epochs 80 --scheme nes_rs_50k --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 --full_train 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_rs_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 6 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J nes-rs-esa # sets the job name. 
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | source activate python37 17 | 18 | # seed 1: 164 19 | # seed 2: 47 20 | # seed 3: 48 21 | 22 | # Arrayjob 23 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_esa/" --dataset cifar10 --num_epochs 100 --scheme nes_rs_esa --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_rs_oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/nes_rs_oneshot/%A-%a.o 6 | #SBATCH -e ./cluster_logs/nes_rs_oneshot/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J oneshot-nes # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | #source venv/bin/activate 17 | source activate python37 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_oneshot_bsl.py --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_oneshot/" --dataset cifar10 --num_epochs 100 --scheme nes_rs_oneshot --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${1}/random_archs" --global_seed $1 --only_predict --oneshot --saved_model "nes/randomNAS_release/oneshot_model" 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/random_mutations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-5,11-19,21-29,31-39,41-49,51-59,110-120,210-220,310-320,410-420,510-520 3 | #SBATCH -p bosch_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/rand_mutations/%A-%a.o 6 | #SBATCH -e ./cluster_logs/rand_mutations/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J mutations # sets the job name. 
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/random_mutations/" --dataset cifar10 --num_epochs 100 --scheme rs_mutations --arch_path "experiments-nips21/cifar10/baselearners/random_mutations/run_1/random_archs" --global_seed 1 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #algs=(nes_rs nes_re deepens_rs) 4 | #nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | nasalgs=(deepens_darts_anchor) 6 | 7 | for m in {2,3,5,7,10,15,20,30} 8 | do 9 | #for alg in ${algs[@]} 10 | #do 11 | ##scancel -n ${m}-${alg} 12 | #sbatch --bosch -J ${m}-${alg} -a 1-3 cluster_scripts/cifar10/sbatch_scripts/evaluate_ensembles.sh $alg $m 13 | #echo ${m}-${alg} 14 | #done 15 | for nasalg in ${nasalgs[@]} 16 | do 17 | #scancel -n ${m}-${alg} 18 | sbatch -J ${m}-${nasalg} -a 1 cluster_scripts/cifar10/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 19 | echo ${m}-${nasalg} 20 | done 21 | done 22 | 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/train_oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1 3 | #SBATCH -o ./cluster_logs/nes_rs_oneshot/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs_oneshot/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J rsws # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/randomNAS_release/searchers/random_weight_share.py --save_dir nes/randomNAS_release/oneshot_model 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/amoeba_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 119-199 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 6 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J amoeba-esa # sets the job name. 
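# --- One-shot workflow (illustrative) ---
# train_oneshot.sh above trains a single weight-sharing model and saves it under
# nes/randomNAS_release/oneshot_model; nes_rs_oneshot.sh then only evaluates the 400
# sampled architectures with that shared model (--only_predict --oneshot), so no
# per-architecture training is needed:
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/train_oneshot.sh
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts-49k/nes_rs_oneshot.sh 1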
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 14 | 15 | # Activate virtual environment 16 | #source venv/bin/activate 17 | source activate python37 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/amoebanet_esa/" --dataset cifar10 --num_epochs 100 --scheme amoebanet_esa --train_amoebanet 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-6 #0-100 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_hyper # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID --arch_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments-anchor/c10" \ 20 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts \ 21 | --n_layers 8 --init_channels 16 --scheduler cosine \ 22 | --train_darts --global_seed 1 --anchor \ 23 | --lr 0.025 --wd 0.0 --anch_coeff 0.$SLURM_ARRAY_TASK_ID 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/darts_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 146-199 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J darts-esa # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/darts_esa/" --dataset cifar10 --num_epochs 100 --scheme darts_esa --train_darts 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_amoebanet/" --dataset cifar10 --num_epochs 100 --scheme deepens_amoebanet --train_amoebanet 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_amoeba_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_amoebanet_50k/" --dataset cifar10 --num_epochs 80 --scheme deepens_amoebanet_50k --train_amoebanet --full_train 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_darts/" --dataset cifar10 --num_epochs 100 --scheme deepens_darts --train_darts 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_darts_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_darts_50k/" --dataset cifar10 --num_epochs 80 --scheme deepens_darts_50k --train_darts --full_train 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J anch_deepens-darts # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source activate python37 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/cifar10_low/baselearners/deepens_darts_anchor/" \ 20 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts_anchor \ 21 | --train_darts --global_seed 1 --batch_size 100 \ 22 | --n_layers 8 --init_channels 16 --scheduler cosine \ 23 | --anchor --lr 0.025 --wd 0.0 --anch_coeff 0.4 24 | #--anchor --lr 0.03830591641545368 --wd 1.0221751529451768e-05 --anch_coeff 0.4189053283865941 25 | 26 | 27 | # .1,- .6 28 | 29 | # Done 30 | echo "DONE" 31 | echo "Finished at $(date)" 32 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -p bosch_gpu-rtx2080 4 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-rs # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/deepens_rs/" --dataset cifar10 --num_epochs 100 --scheme deepens_rs --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -p alldlc_gpu-rtx2080 6 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 7 | #SBATCH -a 1-3 8 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Activate virtual environment 11 | source activate python37 12 | # conda activate python37 13 | 14 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 15 | --M $2 \ 16 | --pool_name $1 \ 17 | --save_dir experiments-nips21/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 18 | --load_bsls_dir experiments-nips21/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset cifar10 \ 20 | --esa $3 \ 21 | --arch_id 0 \ 22 | --diversity_strength $4 # used only for esa = beam_search_with_div 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/ensembles_from_pools_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -p alldlc_gpu-rtx2080 6 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 7 | #SBATCH -a 1-3 8 | #SBATCH -J ens_from_pool # sets the job name. 
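# --- Usage sketch for the nips21 pool-selection step (illustrative) ---
# This ensembles_from_pools.sh takes four positional arguments: $1 = pool name,
# $2 = ensemble size M, $3 = ensemble selection algorithm (esa), and
# $4 = diversity strength, which is only read when $3 = beam_search_with_div, e.g.:
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts/ensembles_from_pools.sh nes_rs 10 beam_search_with_div 1.0
# (the 1.0 is a placeholder value, not one taken from the experiments)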
If not specified, the file name will be used as job name 9 | 10 | # Activate virtual environment 11 | source activate python37 12 | 13 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 14 | --M $2 \ 15 | --pool_name $1 \ 16 | --save_dir experiments-nips21/cifar10/ensembles_selected_50k/run_$SLURM_ARRAY_TASK_ID \ 17 | --load_bsls_dir experiments-nips21/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar10 \ 19 | --esa $4 \ 20 | --arch_id $3 # used only for DeepEns (RS) + ESA 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42,dlcgpu43 6 | #SBATCH -a 1-3 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | 9 | # Activate virtual environment 10 | source activate python37 11 | 12 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 13 | --M $2 \ 14 | --method $1 \ 15 | --save_dir experiments-nips21/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 16 | --nes_rs_bsls_dir experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 17 | --incumbents_dir experiments-nips21/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 18 | --load_bsls_dir "experiments-nips21/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 19 | --load_ens_chosen_dir experiments-nips21/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 20 | --dataset cifar10 \ 21 | --esa $3 \ 22 | --arch_id 0 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -a 3 #1-3 7 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 20 | --save_dir experiments-nips21/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --load_bsls_dir experiments-nips21/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 22 | --pool_name nes_rs \ 23 | --dataset cifar10 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 19 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 20 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 21 | --nic_name eth0 --working_directory experiments-nips21/cifar10/baselearners/nes_re \ 22 | --global_seed $1 --scheme nes_re --dataset cifar10 23 | 24 | # Done 25 | echo "DONE" 26 | echo "Finished at $(date)" 27 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_re_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 4 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J nes_re_50k # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $1 --working_directory "experiments-nips21/cifar10/baselearners/nes_re_50k/" --dataset cifar10 --num_epochs 80 --scheme nes_re_50k --arch_path "experiments-nips21/cifar10/baselearners/nes_re/run_${2}/sampled_configs" --global_seed $2 --full_train 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 387,390,391,392,394,397 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments-nips21/cifar10/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar10 --num_epochs 100 --global_seed $1 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_50k.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 4 | #SBATCH -o ./cluster_logs/deepens_darts_50k/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts_50k/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J nes_rs_50k # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | #source venv/bin/activate 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $1 --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_50k/" --dataset cifar10 --num_epochs 80 --scheme nes_rs_50k --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 --full_train 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/nes_rs_oneshot/%A-%a.o 6 | #SBATCH -e ./cluster_logs/nes_rs_oneshot/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J oneshot-nes # sets the job name. 
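# Example submission (a sketch; $1 below is the global seed of the nes_rs run whose random_archs
# directory is read — the value 1 is illustrative, and the script assumes the repository root as
# the working directory):
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_darts.sh 1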
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | #source venv/bin/activate 17 | source activate python37 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_oneshot_bsl.py --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_darts/" --dataset cifar10 --num_epochs 100 --scheme nes_rs_darts --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${1}/random_archs" --global_seed $1 --only_predict --oneshot --saved_model "/home/zelaa/playground/darts/cnn/search-darts_model-20210809-163644" 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 6 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J nes-rs-esa # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | source activate python37 17 | 18 | # seed 1: 164 19 | # seed 2: 47 20 | # seed 3: 48 21 | 22 | # Arrayjob 23 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_esa/" --dataset cifar10 --num_epochs 100 --scheme nes_rs_esa --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${2}/random_archs" --global_seed $2 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/nes_rs_oneshot/%A-%a.o 6 | #SBATCH -e ./cluster_logs/nes_rs_oneshot/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J oneshot-nes # sets the job name. 
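# Example submission (a sketch; as in the other one-shot script, $1 is the global seed of the nes_rs
# run providing random_archs, e.g. 1 — an illustrative value only):
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts/nes_rs_oneshot.sh 1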
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | #source venv/bin/activate 17 | source activate python37 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/darts/scripts/run_nes_rs.py --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/nes_rs_oneshot/" --dataset cifar10 --num_epochs 100 --scheme nes_rs_oneshot --arch_path "experiments-nips21/cifar10/baselearners/nes_rs/run_${1}/random_archs" --global_seed $1 --only_predict --oneshot --saved_model "nes/randomNAS_release/oneshot_model" 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/random_mutations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-5,11-19,21-29,31-39,41-49,51-59,110-120,210-220,310-320,410-420,510-520 3 | #SBATCH -p bosch_gpu-rtx2080 4 | #SBATCH -x dlcgpu15,dlcgpu02,dlcgpu42 5 | #SBATCH -o ./cluster_logs/rand_mutations/%A-%a.o 6 | #SBATCH -e ./cluster_logs/rand_mutations/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J mutations # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments-nips21/cifar10/baselearners/random_mutations/" --dataset cifar10 --num_epochs 100 --scheme rs_mutations --arch_path "experiments-nips21/cifar10/baselearners/random_mutations/run_1/random_archs" --global_seed 1 20 | 21 | # Done 22 | echo "DONE" 23 | echo "Finished at $(date)" 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #algs=(nes_rs nes_re deepens_rs) 4 | #nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | nasalgs=(deepens_darts_anchor) 6 | 7 | for m in {2,3,5,7,10,15,20,30} 8 | do 9 | #for alg in ${algs[@]} 10 | #do 11 | ##scancel -n ${m}-${alg} 12 | #sbatch --bosch -J ${m}-${alg} -a 1-3 cluster_scripts/cifar10/sbatch_scripts/evaluate_ensembles.sh $alg $m 13 | #echo ${m}-${alg} 14 | #done 15 | for nasalg in ${nasalgs[@]} 16 | do 17 | #scancel -n ${m}-${alg} 18 | sbatch -J ${m}-${nasalg} -a 1 cluster_scripts/cifar10/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 19 | echo ${m}-${nasalg} 20 | done 21 | done 22 | 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts/train_oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1 3 | #SBATCH -o 
./cluster_logs/nes_rs_oneshot/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs_oneshot/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J rsws # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | #source venv/bin/activate 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/randomNAS_release/searchers/random_weight_share.py --save_dir nes/randomNAS_release/oneshot_model 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-14 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/deepens_darts/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts --global_seed 1 \ 24 | --lr 0.02350287483028898 --wd 0.002570114417927049 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "$2" \ 13 | --pool_name $1 \ 14 | --save_dir experiments_hyper/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir "experiments_hyper/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 16 | --dataset cifar10 17 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J eval_ens # sets the job name. 
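# Example submission (a sketch; $1 is the pool/method name and $2 the ensemble size M — both
# "nes_rs" and 10 are illustrative values, not prescribed by the script):
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/evaluate_ensembles.sh nes_rs 10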
If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "$2" \ 13 | --method "$1" \ 14 | --save_dir experiments_hyper/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments_hyper/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --load_bsls_dir "experiments_hyper/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 17 | --load_ens_chosen_dir experiments_hyper/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar10 19 | 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/hyperens.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_hyper/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_hyper/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/darts_hyper/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme darts_hyper \ 24 | --global_seed 1 --hyperensemble 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/joint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/joint/%A-%a.o 7 | #SBATCH -e ./cluster_logs/joint/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. 
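# Example submission (a sketch; this array job takes no positional arguments, since the global seed
# and the --hyperensemble/--nes_cell flags are fixed in the call below):
#   sbatch cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/joint.sh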
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/joint/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme joint \ 24 | --global_seed 1 --hyperensemble --nes_cell 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/nes_cell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/nes_rs/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme nes_rs \ 24 | --global_seed 1 --nes_cell \ 25 | --lr 0.02350287483028898 --wd 0.002570114417927049 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/nes_depth_width.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-d-w # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/darts_rs/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme darts_rs \ 24 | --global_seed 1 --nes_depth_width \ 25 | --lr 0.02350287483028898 --wd 0.002570114417927049 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | source activate python36 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_re.py --array_id \ 17 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 18 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 19 | --nic_name eth0 --working_directory experiments_hyper/cifar10/baselearners/nes_re \ 20 | --global_seed 1 --scheme nes_re --dataset cifar10 \ 21 | --lr 0.02350287483028898 --wd 0.002570114417927049 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-14 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/deepens_darts/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme deepens_darts --global_seed 1 \ 24 | --lr 0.02350287483028898 --wd 0.002570114417927049 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "$2" \ 13 | --pool_name $1 \ 14 | --save_dir experiments_hyper/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir "experiments_hyper/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 16 | --dataset cifar10 17 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J eval_ens # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "$2" \ 13 | --method "$1" \ 14 | --save_dir experiments_hyper/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments_hyper/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --load_bsls_dir "experiments_hyper/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 17 | --load_ens_chosen_dir experiments_hyper/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 18 | --dataset cifar10 19 | 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/hyperens.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_hyper/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_hyper/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs_2.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper_2/cifar10/baselearners/darts_hyper/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme darts_hyper \ 24 | --global_seed 1 --hyperensemble 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/joint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/joint/%A-%a.o 7 | #SBATCH -e ./cluster_logs/joint/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J hyperens # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs_2.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper_2/cifar10/baselearners/joint/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme joint \ 24 | --global_seed 1 --hyperensemble --nes_cell 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/nes_cell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-rs # sets the job name. 
If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/nes_rs/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme nes_rs \ 24 | --global_seed 1 --nes_cell \ 25 | --lr 0.02350287483028898 --wd 0.002570114417927049 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/nes_depth_width.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 6 | #SBATCH -o ./cluster_logs/darts_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/darts_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J nes-d-w # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source activate python36 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_rs.py \ 21 | --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments_hyper/cifar10/baselearners/darts_rs/" \ 23 | --dataset cifar10 --num_epochs 100 --scheme darts_rs \ 24 | --global_seed 1 --nes_depth_width \ 25 | --lr 0.02350287483028898 --wd 0.002570114417927049 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/cifar10_nips21/sbatch_scripts_hyper_2/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | source activate python36 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/optimizers/scripts/darts_re.py --array_id \ 17 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 18 | --num_iterations 400 --num_epochs 100 --population_size 50 --sample_size 10 \ 19 | --nic_name eth0 --working_directory experiments_hyper/cifar10/baselearners/nes_re \ 20 | --global_seed 1 --scheme nes_re --dataset cifar10 \ 21 | --lr 0.02350287483028898 --wd 0.002570114417927049 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | . cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | source venv/bin/activate 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/plot_data.py \ 10 | --Ms "${ens_sizes[@]}" \ 11 | --methods nes_rs nes_re deepens_darts deepens_amoebanet deepens_rs \ 12 | --save_dir experiments/fmnist/outputs/plots \ 13 | --load_plotting_data_dir experiments/fmnist/outputs/plotting_data \ 14 | --dataset fmnist 15 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/fmnist/baselearners/deepens_amoebanet/" --dataset fmnist --num_epochs 15 --scheme deepens_amoebanet --train_amoebanet 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/fmnist/baselearners/deepens_darts/" --dataset fmnist --num_epochs 15 --scheme deepens_darts --train_darts 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/fmnist/baselearners/deepens_rs/" --dataset fmnist --num_epochs 15 --scheme deepens_rs --arch_path "experiments/fmnist/baselearners/nes_rs/random_archs" 18 | 19 | # Done 20 | echo "DONE" 21 | echo "Finished at $(date)" 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 6 | #SBATCH -a 0-5 # should be 2 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | # mapping from slurm task ID to parameters for python call. 12 | . cluster_scripts/launcher.config 13 | IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${pools[*]}"}) ) 14 | IFS=' ' read -r -a arr <<< "${grid[*]}" 15 | IFS=+ read M pool_name <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 16 | 17 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 18 | --M "$M" \ 19 | --pool_name "$pool_name" \ 20 | --save_dir experiments/fmnist/ensembles_selected/ \ 21 | --load_bsls_dir "experiments/fmnist/baselearners/$pool_name" \ 22 | --dataset fmnist 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J eval_ens # sets the job name. 
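# The mapping below turns SLURM_ARRAY_TASK_ID into an (M, method) pair: the brace expansion builds the
# cross product of ens_sizes and methods from cluster_scripts/launcher.config, and IFS=+ splits the
# selected entry. As a sketch, assuming ens_sizes=(3 5 10) and methods=(nes_rs nes_re deepens_darts
# deepens_amoebanet deepens_rs), task id 0 gives M=3/nes_rs and task id 14 gives M=10/deepens_rs;
# launcher.config, not these assumed values, is the source of truth.
#   Example submission: sbatch cluster_scripts/darts/fmnist/sbatch_scripts/evaluate_ensembles.sh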
If not specified, the file name will be used as job name 6 | #SBATCH -a 0-14 # should be 5 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | # mapping from slurm task ID to parameters for python call. 12 | . cluster_scripts/launcher.config 13 | IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 14 | IFS=' ' read -r -a arr <<< "${grid[*]}" 15 | IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 16 | 17 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 18 | --M "$M" \ 19 | --method "$method" \ 20 | --save_dir experiments/fmnist/outputs/plotting_data/ \ 21 | --nes_rs_bsls_dir experiments/fmnist/baselearners/nes_rs/ \ 22 | --incumbents_dir experiments/fmnist/outputs/deepens_rs/incumbents.txt \ 23 | --load_bsls_dir "experiments/fmnist/baselearners/$method" \ 24 | --load_ens_chosen_dir experiments/fmnist/ensembles_selected/ \ 25 | --dataset fmnist 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 6 | 7 | # Info 8 | echo "Workingdir: $PWD" 9 | echo "Started at $(date)" 10 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 11 | 12 | # Activate virtual environment 13 | source venv/bin/activate 14 | 15 | # Arrayjob 16 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 17 | --save_dir experiments/fmnist/outputs/deepens_rs/ \ 18 | --load_bsls_dir experiments/fmnist/baselearners/nes_rs \ 19 | --pool_name nes_rs \ 20 | --dataset fmnist 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 18 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 19 | --num_iterations 400 --num_epochs 15 --population_size 50 --sample_size 10 \ 20 | --nic_name eth0 --working_directory experiments/fmnist/baselearners/nes_re \ 21 | --seed 1 --scheme nes_re --dataset fmnist 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/darts/fmnist/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py \ 18 | --working_directory=experiments/fmnist/baselearners/nes_rs --arch_id \ 19 | $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset fmnist \ 20 | --num_epochs 15 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | #source activate python36 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/make_plot_master.py \ 10 | --Ms 2 3 5 7 10 15 \ 11 | --methods nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet deepens_darts_anchor \ 12 | --save_dir experiments/tiny/outputs/plots-new \ 13 | --load_plotting_data_dir experiments/tiny/outputs/plotting_data \ 14 | --dataset tiny \ 15 | --run run_1 run_2 run_3 run_4 run_5 \ 16 | --plot_type ensemble_size 17 | #PYTHONPATH=. 
python nes/ensemble_selection/plot_data.py \ 18 | #--Ms 2 3 5 7 10 15 \ 19 | #--methods nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet darts_esa amoebanet_esa nes_rs_esa\ 20 | #--save_dir experiments/tiny/outputs/plots \ 21 | #--load_plotting_data_dir experiments/tiny/outputs/plotting_data \ 22 | #--dataset tiny \ 23 | #--run run_1 run_2 run_3 run_4 run_5 24 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/amoeba_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/amoebanet_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme amoebanet_esa \ 24 | --train_amoebanet --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 10 #0-100 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_hyper # sets the job name. 
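# Example submission (a sketch; $1 is reused below as arch id, seed id and global seed, and $2 is the
# anchored-ensembles coefficient — 0.1 mirrors the value in deepens_darts_anchor.sh and is only
# illustrative):
#   sbatch cluster_scripts/darts/tiny/sbatch_scripts/anchor_hyper.sh 1 0.1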
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $1 --arch_id $1 \ 20 | --working_directory "experiments-anchor/tiny_2" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 23 | --train_darts --global_seed $1 --batch_size 128 --anchor \ 24 | --lr 0.025 --wd 0.0 --anch_coeff $2 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/darts_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J darts-esa # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/darts_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme darts_esa \ 24 | --train_darts --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-amoebanet # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_amoebanet/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_amoebanet \ 22 | --train_amoebanet --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-15 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts_anchor/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts_anchor \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 24 | --anchor --anch_coeff 0.1 --wd 0.0 25 | 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p ml_gpu-rtx2080 5 | #SBATCH -a 1 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Activate virtual environment 10 | #source venv/bin/activate 11 | source activate python36 12 | 13 | # mapping from slurm task ID to parameters for python call. 14 | #. cluster_scripts/launcher.config 15 | #IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${pools[*]}"}) ) 16 | #IFS=' ' read -r -a arr <<< "${grid[*]}" 17 | #IFS=+ read M pool_name <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 18 | 19 | if [ "$1" = "nes_rs_esa" ]; then 20 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 21 | --M "$2" \ 22 | --pool_name $1 \ 23 | --save_dir experiments/tiny/ensembles_selected/run_3 \ 24 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_3" \ 25 | --dataset tiny 26 | else 27 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 28 | --M "$2" \ 29 | --pool_name $1 \ 30 | --save_dir experiments/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 31 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 32 | --dataset tiny 33 | fi 34 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | # -a 0-14 # should be 5 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 6 | 7 | # Activate virtual environment 8 | source activate python37 9 | 10 | # mapping from slurm task ID to parameters for python call. 11 | # . 
cluster_scripts/launcher.config 12 | # IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 13 | # IFS=' ' read -r -a arr <<< "${grid[*]}" 14 | # IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 15 | 16 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 17 | --M "$2" \ 18 | --method $1 \ 19 | --save_dir experiments/tiny/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 20 | --nes_rs_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --incumbents_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 22 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 23 | --load_ens_chosen_dir experiments/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 24 | --dataset tiny 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu37,dlcgpu26 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -a 4-5 8 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 14 | 15 | # Activate virtual environment 16 | source venv/bin/activate 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 20 | --save_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --load_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 22 | --pool_name nes_rs \ 23 | --dataset tiny 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 5 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J imn-nes-re # sets the job name. 
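# Example submission (a sketch; the 20 array workers run NES-RE on Tiny ImageNet in parallel and the
# single positional argument is the global seed — 1 is an illustrative value):
#   sbatch cluster_scripts/darts/tiny/sbatch_scripts/nes_re.sh 1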
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 19 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 20 | --num_iterations 200 --batch_size 128 --num_epochs 100 --population_size 50 --sample_size 10 \ 21 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 22 | --nic_name eth0 --working_directory experiments/tiny/baselearners/nes_re \ 23 | --global_seed $1 --scheme nes_re --dataset tiny 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199%100 3 | #SBATCH -p alldlc_gpu-rtx2080 4 | #SBATCH -x dlcgpu37,dlcgpu26 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 7 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J imn-nes-rs # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments/tiny/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset tiny --num_epochs 100 --batch_size 128 --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine --global_seed $1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/nes_rs_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-199 3 | #SBATCH -p bosch_gpu-rtx2080,ml_gpu-rtx2080 4 | #SBATCH -c 4 5 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 6 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J nes-rs-esa # sets the job name. 
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # run this using the following loop: 16 | # for arch_id in $(cat < experiments/tiny/outputs/deepens_rs/run_1/incumbents.txt); do sbatch -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/deepens_rs.sh $arch_id 1; done 17 | 18 | # Activate virtual environment 19 | source activate python36 20 | 21 | # Arrayjob 22 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py --arch_id 7 --seed_id $SLURM_ARRAY_TASK_ID --working_directory "experiments/tiny/baselearners/nes_rs_esa/" --dataset tiny --num_epochs 100 --scheme nes_rs_esa --arch_path "experiments/tiny/baselearners/nes_rs/run_3/random_archs" --global_seed 3 --batch_size 128 --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 23 | 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/sbatch_scripts/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | algs=(nes_rs nes_re deepens_rs) 4 | nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | 6 | #for m in {2,3,5,7,10,15} 7 | #do 8 | #for alg in ${algs[@]} 9 | #do 10 | ##scancel -n ${m}-${alg} 11 | #sbatch --bosch -J ${m}-${alg} -a 1-5 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $alg $m 12 | #echo ${m}-${alg} 13 | #done 14 | #for nasalg in ${nasalgs[@]} 15 | #do 16 | ##scancel -n ${m}-${alg} 17 | #sbatch --bosch -J ${m}-${nasalg} -a 1 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 18 | #echo ${m}-${nasalg} 19 | #done 20 | #done 21 | 22 | for m in {2,3,5,7,10,15} 23 | do 24 | sbatch -J ${m}-dartsesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh darts_esa $m 25 | sbatch -J ${m}-amoebaesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh amoebanet_esa $m 26 | sbatch -J ${m}-dartsesa -a 3 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh nes_rs_esa $m 27 | done 28 | 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny/start_grid.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scheduler="cosine step" 4 | layers="8 11 14" 5 | channels="16 36 48" 6 | lrs=$(awk 'BEGIN{for(i=0.025;i<=0.1;i*=2)print i}') 7 | 8 | 9 | for sch in $scheduler; do 10 | for l in $layers; do 11 | for c in $channels; do 12 | for lr in $lrs; do 13 | sbatch cluster_scripts/tiny/eval_clip.sh $sch $l $c $lr 14 | echo submmited job $sch $l $c $lr 15 | done 16 | done 17 | done 18 | done 19 | 20 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | #source activate python36 8 | 9 | PYTHONPATH=. 
python nes/ensemble_selection/make_plot_esas.py \ 10 | --Ms 2 3 5 7 10 15 \ 11 | --methods nes_rs nes_re deepens_rs deepens_darts deepens_amoebanet darts_esa amoebanet_esa nes_rs_esa\ 12 | --save_dir experiments-nips21/tiny/outputs/plots/${1} \ 13 | --load_plotting_data_dir experiments-nips21/tiny/outputs/plotting_data \ 14 | --dataset tiny \ 15 | --run run_1 run_2 run_3 \ 16 | --esa $1 17 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/amoeba_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J deepens-amoebanet # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/amoebanet_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme amoebanet_esa \ 24 | --train_amoebanet --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/anchor_hyper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 10 #0-100 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_hyper # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $1 --arch_id $1 \ 20 | --working_directory "experiments-anchor/tiny_2" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 23 | --train_darts --global_seed $1 --batch_size 128 --anchor \ 24 | --lr 0.025 --wd 0.0 --anch_coeff $2 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/darts_esa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 3 | #SBATCH -x dlcgpu05,dlcgpu26,dlcgpu37,dlcgpu15 4 | #SBATCH -a 0-399 5 | #SBATCH -c 4 6 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 7 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 8 | #SBATCH --gres=gpu:1 # reserves GPUs 9 | #SBATCH -J darts-esa # sets the job name. If not specified, the file name will be used as job name 10 | 11 | # Info 12 | echo "Workingdir: $PWD" 13 | echo "Started at $(date)" 14 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 21 | --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory "experiments/tiny/baselearners/darts_esa/" \ 23 | --dataset tiny --num_epochs 100 --scheme darts_esa \ 24 | --train_darts --global_seed 1 --batch_size 128 \ 25 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 26 | 27 | 28 | # Done 29 | echo "DONE" 30 | echo "Finished at $(date)" 31 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/deepens_amoeba.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_amoebanet/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_amoebanet/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-amoebanet # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_amoebanet/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_amoebanet \ 22 | --train_amoebanet --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-29 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J deepens-darts # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source venv/bin/activate 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine 24 | 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/deepens_darts_anchor.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-15 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 5 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J anch_deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/train_deepens_baselearner.py \ 19 | --seed_id $SLURM_ARRAY_TASK_ID \ 20 | --working_directory "experiments/tiny/baselearners/deepens_darts_anchor/" \ 21 | --dataset tiny --num_epochs 100 --scheme deepens_darts_anchor \ 22 | --train_darts --global_seed 1 --batch_size 128 \ 23 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 24 | --anchor --anch_coeff 0.1 --wd 0.0 25 | 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH -a 1-3 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | 8 | # Activate virtual environment 9 | source activate python37 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M $2 \ 13 | --pool_name $1 \ 14 | --save_dir experiments-nips21/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 16 | --dataset tiny \ 17 | --esa $3 18 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%x.%A-%a.%N.o 3 | #SBATCH -e ./cluster_logs/evaluate/%x.%A-%a.%N.e 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH -a 1-3 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | 8 | # Activate virtual environment 9 | source activate python37 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M $2 \ 13 | --method $1 \ 14 | --save_dir experiments-nips21/tiny/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --incumbents_dir experiments-nips21/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 17 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 18 | --load_ens_chosen_dir experiments-nips21/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset tiny \ 20 | --esa $3 21 | 22 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/evaluate_ensembles_nas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | # -a 0-14 # should be 5 x (number of ensemble sizes, i.e. length of ens_sizes in launcher.config) - 1 6 | 7 | # Activate virtual environment 8 | source activate python37 9 | 10 | # mapping from slurm task ID to parameters for python call. 11 | # . 
cluster_scripts/launcher.config 12 | # IFS=',' grid=( $(eval echo {"${ens_sizes[*]}"}+{"${methods[*]}"}) ) 13 | # IFS=' ' read -r -a arr <<< "${grid[*]}" 14 | # IFS=+ read M method <<< "${arr[$SLURM_ARRAY_TASK_ID]}" 15 | 16 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 17 | --M "$2" \ 18 | --method $1 \ 19 | --save_dir experiments/tiny/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 20 | --nes_rs_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --incumbents_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 22 | --load_bsls_dir "experiments/tiny/baselearners/$1/run_$SLURM_ARRAY_TASK_ID" \ 23 | --load_ens_chosen_dir experiments/tiny/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 24 | --dataset tiny 25 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 3 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 4 | #SBATCH -p alldlc_gpu-rtx2080 5 | #SBATCH -x dlcgpu37,dlcgpu26 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -a 4-5 8 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 14 | 15 | # Activate virtual environment 16 | source venv/bin/activate 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 20 | --save_dir experiments/tiny/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 21 | --load_bsls_dir experiments/tiny/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 22 | --pool_name nes_rs \ 23 | --dataset tiny 24 | 25 | # Done 26 | echo "DONE" 27 | echo "Finished at $(date)" 28 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-20 3 | #SBATCH -c 4 4 | #SBATCH -t 6-00:00 5 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 6 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 7 | #SBATCH --gres=gpu:1 # reserves GPUs 8 | #SBATCH -J imn-nes-re # sets the job name. 
If not specified, the file name will be used as job name 9 | 10 | # Info 11 | echo "Workingdir: $PWD" 12 | echo "Started at $(date)" 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 14 | 15 | # Activate virtual environment 16 | source activate python37 17 | 18 | # Arrayjob 19 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_re.py --array_id \ 20 | $SLURM_ARRAY_TASK_ID --total_num_workers=20 \ 21 | --num_iterations 400 --batch_size 128 --num_epochs 100 --population_size 50 --sample_size 10 \ 22 | --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine \ 23 | --nic_name eth0 --working_directory experiments/tiny/baselearners/nes_re \ 24 | --global_seed $1 --scheme nes_re --dataset tiny 25 | 26 | # Done 27 | echo "DONE" 28 | echo "Finished at $(date)" 29 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -c 4 4 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 5 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 6 | #SBATCH --gres=gpu:1 # reserves GPUs 7 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 8 | 9 | # Info 10 | echo "Workingdir: $PWD" 11 | echo "Started at $(date)" 12 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION, on gpu $SLURMD_NODENAME" 13 | 14 | # Activate virtual environment 15 | source activate python37 16 | 17 | # Arrayjob 18 | PYTHONPATH=$PWD python nes/optimizers/scripts/run_nes_rs.py --working_directory=experiments-nips21/tiny/baselearners/nes_rs --arch_id $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset tiny --num_epochs 100 --batch_size 128 --n_layers 8 --init_channels 36 --grad_clip --lr 0.1 --scheduler cosine --global_seed $1 19 | 20 | # Done 21 | echo "DONE" 22 | echo "Finished at $(date)" 23 | -------------------------------------------------------------------------------- /cluster_scripts/darts/tiny_nips21/sbatch_scripts/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | algs=(nes_rs nes_re deepens_rs) 4 | nasalgs=(deepens_darts deepens_amoebanet deepens_darts_anchor) 5 | 6 | #for m in {2,3,5,7,10,15} 7 | #do 8 | #for alg in ${algs[@]} 9 | #do 10 | ##scancel -n ${m}-${alg} 11 | #sbatch --bosch -J ${m}-${alg} -a 1-5 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $alg $m 12 | #echo ${m}-${alg} 13 | #done 14 | #for nasalg in ${nasalgs[@]} 15 | #do 16 | ##scancel -n ${m}-${alg} 17 | #sbatch --bosch -J ${m}-${nasalg} -a 1 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles.sh $nasalg $m 18 | #echo ${m}-${nasalg} 19 | #done 20 | #done 21 | 22 | for m in {2,3,5,7,10,15} 23 | do 24 | sbatch -J ${m}-dartsesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh darts_esa $m 25 | sbatch -J ${m}-amoebaesa -a 1 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh amoebanet_esa $m 26 | sbatch -J ${m}-dartsesa -a 3 -p alldlc_gpu-rtx2080 cluster_scripts/tiny/sbatch_scripts/evaluate_ensembles_nas.sh nes_rs_esa $m 27 | done 28 | 29 | -------------------------------------------------------------------------------- 
/cluster_scripts/darts/tiny_nips21/start_grid.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | scheduler="cosine step" 4 | layers="8 11 14" 5 | channels="16 36 48" 6 | lrs=$(awk 'BEGIN{for(i=0.025;i<=0.1;i*=2)print i}') 7 | 8 | 9 | for sch in $scheduler; do 10 | for l in $layers; do 11 | for c in $channels; do 12 | for lr in $lrs; do 13 | sbatch cluster_scripts/tiny/eval_clip.sh $sch $l $c $lr 14 | echo submitted job $sch $l $c $lr 15 | done 16 | done 17 | done 18 | done 19 | 20 | -------------------------------------------------------------------------------- /cluster_scripts/generate_corrupted.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH --gres gpu:1 5 | #SBATCH -a 0-18 6 | # 7 | # redirect the output/error to some files 8 | #SBATCH -o ./cluster_logs/corruption_logs/%A-%a.o 9 | #SBATCH -e ./cluster_logs/corruption_logs/%A-%a.e 10 | # 11 | 12 | source venv/bin/activate 13 | PYTHONPATH=$PWD python data/generate_corrupted.py $SLURM_ARRAY_TASK_ID 14 | -------------------------------------------------------------------------------- /cluster_scripts/launcher.config: -------------------------------------------------------------------------------- 1 | declare -a ens_sizes=("5" "10" "20") 2 | declare -a pools=("nes_rs" "nes_re") 3 | declare -a methods=("nes_rs" "nes_re" "deepens_rs" "deepens_darts" "deepens_amoebanet") 4 | 5 | -------------------------------------------------------------------------------- /cluster_scripts/launchers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/plots/%A-%a.o 3 | #SBATCH -e ./cluster_logs/plots/%A-%a.e 4 | #SBATCH -a 1 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J plotting 7 | 8 | # Activate virtual environment 9 | source activate python36 10 | 11 | PYTHONPATH=. python nes/ensemble_selection/make_plot_master.py \ 12 | --Ms 2 3 5 7 10 15 \ 13 | --methods nes_rs nes_re deepens_darts darts_hyper darts_rs \ 14 | --save_dir experiments_hyper/cifar$1/outputs/plots \ 15 | --load_plotting_data_dir experiments_hyper/cifar$1/outputs/plotting_data \ 16 | --dataset cifar$1 \ 17 | --run run_1 \ 18 | --plot_type $2 19 | 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | source activate pt1.3 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/plot_data.py \ 10 | --Ms "3" \ 11 | --methods nes_rs deepens_rs deepens_gdas deepens_minimum \ 12 | --save_dir experiments-nb201/cifar10/outputs/plots \ 13 | --load_plotting_data_dir experiments-nb201/cifar10/outputs/plotting_data \ 14 | --dataset cifar10 \ 15 | --run run_1 run_2 run_3 16 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name.
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar10/baselearners/deepens_darts/" \ 20 | --dataset cifar10 --scheme deepens_darts --train_darts --global_seed 1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/deepens_gdas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_gdas/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_gdas/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gdas # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar10/baselearners/deepens_gdas/" \ 20 | --dataset cifar10 --global_seed 1 --scheme deepens_gdas --train_gdas 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/deepens_gm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_minima/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_minima/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gm # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar10/baselearners/deepens_minimum/" \ 20 | --dataset cifar10 --global_seed 1 --scheme deepens_minimum --train_global_optima 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # run this using the following loop: 14 | # for arch_id in $(cat < experiments-nb201/imagenet/outputs/deepens_rs/run_1/incumbents.txt); do sbatch -p ml_gpu-rtx2080 cluster_scripts/nb201_imagenet/sbatch_scripts/deepens_rs.sh $arch_id 1; done 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 21 | --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory experiments/nb201/cifar10/baselearners/deepens_rs/ \ 23 | --dataset cifar10 --scheme deepens_rs \ 24 | --arch_path experiments/nb201/cifar10/baselearners/nes_rs/run_${2}/random_archs \ 25 | --global_seed $2 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 4 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "3" \ 13 | --pool_name $1 \ 14 | --save_dir experiments/nb201/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir experiments/nb201/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 16 | --dataset cifar10 --device -1 17 | 18 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J eval_ens # sets the job name. 
If not specified, the file name will be used as job name 6 | #SBATCH -a 1-3 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "3" \ 13 | --method $1 \ 14 | --save_dir experiments/nb201/cifar10/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments/nb201/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --incumbents_dir experiments/nb201/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 17 | --load_bsls_dir experiments/nb201/cifar10/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --load_ens_chosen_dir experiments/nb201/cifar10/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset cifar10 20 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 18 | --save_dir experiments-nb201/cifar10/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 19 | --load_bsls_dir experiments-nb201/cifar10/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 20 | --pool_name nes_rs \ 21 | --dataset cifar10 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_re_nb201.py \ 18 | --num_iterations 400 --population_size 50 --sample_size 10 \ 19 | --working_directory experiments/nb201/cifar10/baselearners/nes_re --severity_list "0 5" \ 20 | --global_seed $SLURM_ARRAY_TASK_ID --scheme nes_re --dataset cifar10 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar10/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_rs.py \ 18 | --working_directory=experiments/nb201/cifar10/baselearners/nes_rs --arch_id \ 19 | $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar10 \ 20 | --global_seed $1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | source activate pt1.3 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/plot_data.py \ 10 | --Ms "3" \ 11 | --methods nes_rs nes_re deepens_rs deepens_gdas deepens_minimum \ 12 | --save_dir experiments-nb201/cifar100/outputs/plots \ 13 | --load_plotting_data_dir experiments-nb201/cifar100/outputs/plotting_data \ 14 | --dataset cifar100 \ 15 | --run run_1 run_2 run_3 16 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar100/baselearners/deepens_darts/" \ 20 | --dataset cifar100 --scheme deepens_darts --train_darts --global_seed 1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/deepens_gdas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_gdas/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_gdas/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gdas # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar100/baselearners/deepens_gdas/" \ 20 | --dataset cifar100 --global_seed 1 --scheme deepens_gdas --train_gdas 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/deepens_gm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_minima/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_minima/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gm # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/cifar100/baselearners/deepens_minimum/" \ 20 | --dataset cifar100 --global_seed 1 --scheme deepens_minimum --train_global_optima 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # run this using the following loop: 14 | # for arch_id in $(cat < experiments-nb201/imagenet/outputs/deepens_rs/run_1/incumbents.txt); do sbatch -p ml_gpu-rtx2080 cluster_scripts/nb201_imagenet/sbatch_scripts/deepens_rs.sh $arch_id 1; done 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 21 | --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory experiments/nb201/cifar100/baselearners/deepens_rs/ \ 23 | --dataset cifar100 --scheme deepens_rs \ 24 | --arch_path experiments/nb201/cifar100/baselearners/nes_rs/run_${2}/random_archs \ 25 | --global_seed $2 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 4 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "3" \ 13 | --pool_name $1 \ 14 | --save_dir experiments/nb201/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir experiments/nb201/cifar100/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 16 | --dataset cifar100 --device -1 17 | 18 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J eval_ens # sets the job name. 
If not specified, the file name will be used as job name 6 | #SBATCH -a 1-3 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "3" \ 13 | --method $1 \ 14 | --save_dir experiments/nb201/cifar100/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments/nb201/cifar100/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --incumbents_dir experiments/nb201/cifar100/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 17 | --load_bsls_dir experiments/nb201/cifar100/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --load_ens_chosen_dir experiments/nb201/cifar100/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset cifar100 20 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 18 | --save_dir experiments-nb201/cifar100/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 19 | --load_bsls_dir experiments-nb201/cifar100/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 20 | --pool_name nes_rs \ 21 | --dataset cifar100 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_re_nb201.py \ 18 | --num_iterations 400 --population_size 50 --sample_size 10 \ 19 | --working_directory experiments/nb201/cifar100/baselearners/nes_re --severity_list "0 5" \ 20 | --global_seed $SLURM_ARRAY_TASK_ID --scheme nes_re --dataset cifar100 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/cifar100/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_rs.py \ 18 | --working_directory=experiments/nb201/cifar100/baselearners/nes_rs --arch_id \ 19 | $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset cifar100 \ 20 | --global_seed $1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/plot_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # source config 4 | #. cluster_scripts/launcher.config 5 | 6 | # Activate virtual environment 7 | source activate python37 8 | 9 | PYTHONPATH=. python nes/ensemble_selection/plot_data_nb201.py \ 10 | --Ms "3" \ 11 | --methods nes_rs deepens_rs nes_re deepens_minimum deepens_gdas \ 12 | --save_dir experiments-nb201/imagenet/outputs/plots \ 13 | --load_plotting_data_dir experiments-nb201/imagenet/outputs/plotting_data \ 14 | --dataset imagenet \ 15 | --run run_1 run_2 run_3 16 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/deepens_darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_darts/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_darts/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-darts # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/imagenet/baselearners/deepens_darts/" \ 20 | --dataset imagenet --scheme deepens_darts --train_darts --global_seed 1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/deepens_gdas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_gdas/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_gdas/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gdas # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/imagenet/baselearners/deepens_gdas/" \ 20 | --dataset imagenet --global_seed 1 --scheme deepens_gdas --train_gdas 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/deepens_gm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_minima/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_minima/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-gm # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 18 | --seed_id $SLURM_ARRAY_TASK_ID \ 19 | --working_directory "experiments/nb201/imagenet/baselearners/deepens_minimum/" \ 20 | --dataset imagenet --global_seed 1 --scheme deepens_minimum --train_global_optima 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/deepens_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-2 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J deepens-rs # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # run this using the following loop: 14 | # for arch_id in $(cat < experiments-nb201/imagenet/outputs/deepens_rs/run_1/incumbents.txt); do sbatch -p ml_gpu-rtx2080 cluster_scripts/nb201_imagenet/sbatch_scripts/deepens_rs.sh $arch_id 1; done 15 | 16 | # Activate virtual environment 17 | source venv/bin/activate 18 | 19 | # Arrayjob 20 | PYTHONPATH=$PWD python nes/nasbench201/scripts/train_deepens_baselearner.py \ 21 | --arch_id $1 --seed_id $SLURM_ARRAY_TASK_ID \ 22 | --working_directory experiments/nb201/imagenet/baselearners/deepens_rs/ \ 23 | --dataset imagenet --scheme deepens_rs \ 24 | --arch_path experiments/nb201/imagenet/baselearners/nes_rs/run_${2}/random_archs \ 25 | --global_seed $2 26 | 27 | # Done 28 | echo "DONE" 29 | echo "Finished at $(date)" 30 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/ensembles_from_pools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 4 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J ens_from_pool # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/ensembles_from_pools.py \ 12 | --M "3" \ 13 | --pool_name $1 \ 14 | --save_dir experiments/nb201/imagenet/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 15 | --load_bsls_dir experiments/nb201/imagenet/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 16 | --dataset imagenet --device -1 17 | 18 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/evaluate_ensembles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -o ./cluster_logs/evaluate/%A-%a.o 3 | #SBATCH -e ./cluster_logs/evaluate/%A-%a.e 4 | #SBATCH --gres=gpu:1 # reserves GPUs 5 | #SBATCH -J eval_ens # sets the job name. 
If not specified, the file name will be used as job name 6 | #SBATCH -a 1-3 7 | 8 | # Activate virtual environment 9 | source venv/bin/activate 10 | 11 | PYTHONPATH=$PWD python nes/ensemble_selection/evaluate_ensembles.py \ 12 | --M "3" \ 13 | --method $1 \ 14 | --save_dir experiments/nb201/imagenet/outputs/plotting_data/run_$SLURM_ARRAY_TASK_ID \ 15 | --nes_rs_bsls_dir experiments/nb201/imagenet/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 16 | --incumbents_dir experiments/nb201/imagenet/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID/incumbents.txt \ 17 | --load_bsls_dir experiments/nb201/imagenet/baselearners/$1/run_$SLURM_ARRAY_TASK_ID \ 18 | --load_ens_chosen_dir experiments/nb201/imagenet/ensembles_selected/run_$SLURM_ARRAY_TASK_ID \ 19 | --dataset imagenet 20 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/get_incumbents_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/deepens_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/deepens_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J get_incumbents_rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/ensemble_selection/rs_incumbents.py \ 18 | --save_dir experiments-nb201/imagenet/outputs/deepens_rs/run_$SLURM_ARRAY_TASK_ID \ 19 | --load_bsls_dir experiments-nb201/imagenet/baselearners/nes_rs/run_$SLURM_ARRAY_TASK_ID \ 20 | --pool_name nes_rs \ 21 | --dataset imagenet 22 | 23 | # Done 24 | echo "DONE" 25 | echo "Finished at $(date)" 26 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/nes_re.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 1-3 3 | #SBATCH -o ./cluster_logs/nes_re/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_re/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-re # sets the job name. 
If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_re_nb201.py \ 18 | --num_iterations 400 --population_size 50 --sample_size 10 \ 19 | --working_directory experiments/nb201/imagenet/baselearners/nes_re --severity_list "0 5" \ 20 | --global_seed $SLURM_ARRAY_TASK_ID --scheme nes_re --dataset imagenet 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /cluster_scripts/nb201/imagenet/sbatch_scripts/nes_rs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -a 0-399 3 | #SBATCH -o ./cluster_logs/nes_rs/%A-%a.o 4 | #SBATCH -e ./cluster_logs/nes_rs/%A-%a.e 5 | #SBATCH --gres=gpu:1 # reserves GPUs 6 | #SBATCH -J nes-rs # sets the job name. If not specified, the file name will be used as job name 7 | 8 | # Info 9 | echo "Workingdir: $PWD" 10 | echo "Started at $(date)" 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 12 | 13 | # Activate virtual environment 14 | source venv/bin/activate 15 | 16 | # Arrayjob 17 | PYTHONPATH=$PWD python nes/nasbench201/scripts/run_nes_rs.py \ 18 | --working_directory=experiments/nb201/imagenet/baselearners/nes_rs --arch_id \ 19 | $SLURM_ARRAY_TASK_ID --seed_id $SLURM_ARRAY_TASK_ID --dataset imagenet \ 20 | --global_seed $1 21 | 22 | # Done 23 | echo "DONE" 24 | echo "Finished at $(date)" 25 | -------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost1.png -------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost2.png -------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost3.png -------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost4.jpg -------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost5.jpg 
-------------------------------------------------------------------------------- /data/cifar10-C/frost_overlays/frost6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/data/cifar10-C/frost_overlays/frost6.jpg -------------------------------------------------------------------------------- /experiments/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/experiments/.gitkeep -------------------------------------------------------------------------------- /figures/fmnist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/figures/fmnist.png -------------------------------------------------------------------------------- /figures/nes_re.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/figures/nes_re.png -------------------------------------------------------------------------------- /nes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/__init__.py -------------------------------------------------------------------------------- /nes/darts/baselearner_train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/darts/baselearner_train/__init__.py -------------------------------------------------------------------------------- /nes/darts/re/__init__.py: -------------------------------------------------------------------------------- 1 | from .re_master import RegularizedEvolution 2 | -------------------------------------------------------------------------------- /nes/ensemble_selection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/ensemble_selection/__init__.py -------------------------------------------------------------------------------- /nes/nasbench201/__init__.py: -------------------------------------------------------------------------------- 1 | from .worker import NB201Worker 2 | -------------------------------------------------------------------------------- /nes/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/__init__.py -------------------------------------------------------------------------------- /nes/utils/configs_to_genotype.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | from nes.optimizers.baselearner_train.utils import parse_config 5 | from nes.optimizers.cluster_worker import REWorker 6 | 7 | run_id = sys.argv[1] 8 | 9 | path = os.path.join('experiments-nips21/cifar10/baselearners/nes_re', run_id, 10 | "configs.json") 11 | 12 | with open(path) as f: 13 | configs = [eval(x[:-1]) for x in f.readlines()] 14 | 15 | config_space = REWorker.get_configspace() 16 | 17 | save_dir = 
os.path.join('experiments-nips21/cifar10/baselearners/nes_re', 18 | run_id, 19 | 'sampled_configs') 20 | 21 | if not os.path.exists(save_dir): 22 | os.makedirs(save_dir, exist_ok=True) 23 | 24 | for config in configs: 25 | config_id = config[0][0] 26 | config_arch = config[1] 27 | 28 | genotype = parse_config(config_arch, config_space) 29 | print(config_id) 30 | print(genotype) 31 | 32 | with open(os.path.join(save_dir, "arch_%d.txt"%config_id), "w") as f: 33 | f.write("%s"%(str(genotype))) 34 | -------------------------------------------------------------------------------- /nes/utils/nb201/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/nb201/__init__.py -------------------------------------------------------------------------------- /nes/utils/nb201/config_utils/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | from .configure_utils import load_config, dict2config, configure2str 5 | from .basic_args import obtain_basic_args 6 | from .attention_args import obtain_attention_args 7 | from .random_baseline import obtain_RandomSearch_args 8 | from .cls_kd_args import obtain_cls_kd_args 9 | from .cls_init_args import obtain_cls_init_args 10 | from .search_single_args import obtain_search_single_args 11 | from .search_args import obtain_search_args 12 | # for network pruning 13 | from .pruning_args import obtain_pruning_args 14 | -------------------------------------------------------------------------------- /nes/utils/nb201/config_utils/cls_init_args.py: -------------------------------------------------------------------------------- 1 | import random, argparse 2 | from .share_args import add_shared_args 3 | 4 | def obtain_cls_init_args(): 5 | parser = argparse.ArgumentParser(description='Train a classification model on typical image classification datasets.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) 6 | parser.add_argument('--resume' , type=str, help='Resume path.') 7 | parser.add_argument('--init_model' , type=str, help='The initialization model path.') 8 | parser.add_argument('--model_config', type=str, help='The path to the model configuration') 9 | parser.add_argument('--optim_config', type=str, help='The path to the optimizer configuration') 10 | parser.add_argument('--procedure' , type=str, help='The procedure basic prefix.') 11 | parser.add_argument('--init_checkpoint', type=str, help='The checkpoint path to the initial model.') 12 | add_shared_args( parser ) 13 | # Optimization options 14 | parser.add_argument('--batch_size', type=int, default=2, help='Batch size for training.') 15 | args = parser.parse_args() 16 | 17 | if args.rand_seed is None or args.rand_seed < 0: 18 | args.rand_seed = random.randint(1, 100000) 19 | assert args.save_dir is not None, 'save-path argument can not be None' 20 | return args 21 | -------------------------------------------------------------------------------- /nes/utils/nb201/config_utils/share_args.py: -------------------------------------------------------------------------------- 1 | import os, sys, time, random, argparse 2 | 3 | def add_shared_args( parser ): 4 | # Data Generation 5 | parser.add_argument('--dataset', type=str, help='The dataset name.') 6 | 
parser.add_argument('--data_path', type=str, help='The path to the dataset.') 7 | parser.add_argument('--cutout_length', type=int, help='The cutout length; a negative value disables cutout.') 8 | # Printing 9 | parser.add_argument('--print_freq', type=int, default=100, help='print frequency (default: 100)') 10 | parser.add_argument('--print_freq_eval', type=int, default=100, help='print frequency (default: 100)') 11 | # Checkpoints 12 | parser.add_argument('--eval_frequency', type=int, default=1, help='evaluation frequency (default: 1)') 13 | parser.add_argument('--save_dir', type=str, help='Folder to save checkpoints and log.') 14 | # Acceleration 15 | parser.add_argument('--workers', type=int, default=8, help='number of data loading workers (default: 8)') 16 | # Random Seed 17 | parser.add_argument('--rand_seed', type=int, default=-1, help='manual seed') 18 | -------------------------------------------------------------------------------- /nes/utils/nb201/configs/arch_to_id.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/nb201/configs/arch_to_id.pkl -------------------------------------------------------------------------------- /nes/utils/nb201/configs/check_if_3_seeds.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | PATH = '/data/aad/image_datasets/nb201_new/NAS-BENCH-102-4-v1.0-archive/' 4 | 5 | 6 | def get_id(arch): 7 | with open('arch_to_id.pkl', 'rb') as f: 8 | data = pickle.load(f) 9 | return data[arch] 10 | 11 | def check_seed(arch_id): 12 | with open('imagenet.pkl', 'rb') as f: 13 | d = pickle.load(f) 14 | return arch_id in d 15 | 16 | if __name__ == '__main__': 17 | 18 | #pcdarts 19 | #arch = '|nor_conv_3x3~0|+|avg_pool_3x3~0|nor_conv_3x3~1|+|skip_connect~0|avg_pool_3x3~1|avg_pool_3x3~2|' 20 | arch = '|none~0|+|skip_connect~0|none~1|+|skip_connect~0|none~1|nor_conv_1x1~2|' 21 | print(get_id(arch)[0]) 22 | 23 | print(check_seed(get_id(arch)[0])) 24 | 25 | 26 | -------------------------------------------------------------------------------- /nes/utils/nb201/configs/cifar10.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/nb201/configs/cifar10.pkl -------------------------------------------------------------------------------- /nes/utils/nb201/configs/cifar100.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/nb201/configs/cifar100.pkl -------------------------------------------------------------------------------- /nes/utils/nb201/configs/gen_arch_id.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import pickle 4 | from tqdm import tqdm 5 | import sys 6 | import multiprocessing 7 | 8 | PATH = '/data/aad/image_datasets/nb201_new/NAS-BENCH-102-4-v1.0-archive/' 9 | 10 | sorted_by_idx = sorted(os.listdir(PATH)) 11 | 12 | 13 | def save_pkl(list_of_ids, pkl_name): 14 | with open(pkl_name + '.pkl', 'ab') as f: 15 | pickle.dump(list_of_ids, f, protocol=pickle.HIGHEST_PROTOCOL) 16 | 17 | d_dict = dict() 18 | 19 | 20 | def multiprocessing_func(start, end): 21 | for m in tqdm(sorted_by_idx[start: end]): 22 | xdata = torch.load(PATH + m) 23 | key = xdata['full']['arch_str'] 24 |
d_dict[key] = (xdata['full']['arch_index'], m) 25 | 26 | 27 | if __name__ == '__main__': 28 | #processes = [] 29 | #for i in [(0, 2500), (2500, 5000), (5000, 7500), (7500, 10000), (10000, 30 | # 12500), 31 | # (12500, len(sorted_by_idx))]: 32 | # p = multiprocessing.Process(target=multiprocessing_func, args=(i[0], 33 | # i[1])) 34 | # processes.append(p) 35 | # p.start() 36 | 37 | #for process in processes: 38 | # process.join() 39 | multiprocessing_func(0, len(sorted_by_idx)) 40 | save_pkl(d_dict, 'arch_to_id') 41 | -------------------------------------------------------------------------------- /nes/utils/nb201/configs/imagenet.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nes/37c8f331dc6a135371e3e0a0594f779ccb9cb1b3/nes/utils/nb201/configs/imagenet.pkl -------------------------------------------------------------------------------- /nes/utils/nb201/configs/optimal.txt: -------------------------------------------------------------------------------- 1 | {'cifar10-valid': 14174, 'cifar100': 13934, 'ImageNet16-120': 3621} -------------------------------------------------------------------------------- /nes/utils/nb201/models/SharedUtils.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def additive_func(A, B): 9 | assert A.dim() == B.dim() and A.size(0) == B.size(0), '{:} vs {:}'.format(A.size(), B.size()) 10 | C = min(A.size(1), B.size(1)) 11 | if A.size(1) == B.size(1): 12 | return A + B 13 | elif A.size(1) < B.size(1): 14 | out = B.clone() 15 | out[:,:C] += A 16 | return out 17 | else: 18 | out = A.clone() 19 | out[:,:C] += B 20 | return out 21 | 22 | 23 | def change_key(key, value): 24 | def func(m): 25 | if hasattr(m, key): 26 | setattr(m, key, value) 27 | return func 28 | 29 | 30 | def parse_channel_info(xstring): 31 | blocks = xstring.split(' ') 32 | blocks = [x.split('-') for x in blocks] 33 | blocks = [[int(_) for _ in x] for x in blocks] 34 | return blocks 35 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/cell_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from nes.utils.nb201.models.cell_infers.tiny_network import TinyNetwork 5 | from nes.utils.nb201.models.cell_infers.nasnet_cifar import NASNetonCIFAR 6 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/cell_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | # The macro structure is defined in NAS-Bench-201 5 | from .search_model_darts import TinyNetworkDarts 6 | from .search_model_gdas import TinyNetworkGDAS 7 | from .search_model_setn import TinyNetworkSETN 8 | from .search_model_enas import TinyNetworkENAS 9 | from .search_model_random import TinyNetworkRANDOM 10 | from .generic_model import GenericNAS201Model 11 | from 
.genotypes import Structure as CellStructure, architectures as CellArchitectures 12 | # NASNet-based macro structure 13 | from .search_model_gdas_nasnet import NASNetworkGDAS 14 | from .search_model_darts_nasnet import NASNetworkDARTS 15 | 16 | 17 | nas201_super_nets = {'DARTS-V1': TinyNetworkDarts, 18 | "DARTS-V2": TinyNetworkDarts, 19 | "GDAS": TinyNetworkGDAS, 20 | "SETN": TinyNetworkSETN, 21 | "ENAS": TinyNetworkENAS, 22 | "RANDOM": TinyNetworkRANDOM, 23 | "generic": GenericNAS201Model} 24 | 25 | nasnet_super_nets = {"GDAS": NASNetworkGDAS, 26 | "DARTS": NASNetworkDARTS} 27 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/cell_searchs/_test_module.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | import torch 5 | from search_model_enas_utils import Controller 6 | 7 | def main(): 8 | controller = Controller(6, 4) 9 | predictions = controller() 10 | 11 | if __name__ == '__main__': 12 | main() 13 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/initialization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | def initialize_resnet(m): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 8 | if m.bias is not None: 9 | nn.init.constant_(m.bias, 0) 10 | elif isinstance(m, nn.BatchNorm2d): 11 | nn.init.constant_(m.weight, 1) 12 | if m.bias is not None: 13 | nn.init.constant_(m.bias, 0) 14 | elif isinstance(m, nn.Linear): 15 | nn.init.normal_(m.weight, 0, 0.01) 16 | nn.init.constant_(m.bias, 0) 17 | 18 | 19 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/shape_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from .InferCifarResNet_width import InferWidthCifarResNet 5 | from .InferImagenetResNet import InferImagenetResNet 6 | from .InferCifarResNet_depth import InferDepthCifarResNet 7 | from .InferCifarResNet import InferCifarResNet 8 | from .InferMobileNetV2 import InferMobileNetV2 9 | from .InferTinyCellNet import DynamicShapeTinyNet -------------------------------------------------------------------------------- /nes/utils/nb201/models/shape_infers/shared_utils.py: -------------------------------------------------------------------------------- 1 | def parse_channel_info(xstring): 2 | blocks = xstring.split(' ') 3 | blocks = [x.split('-') for x in blocks] 4 | blocks = [[int(_) for _ in x] for x in blocks] 5 | return blocks 6 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/shape_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | from .SearchCifarResNet_width import SearchWidthCifarResNet 5 | from .SearchCifarResNet_depth import SearchDepthCifarResNet 6 | from .SearchCifarResNet 
import SearchShapeCifarResNet 7 | from .SearchSimResNet_width import SearchWidthSimResNet 8 | from .SearchImagenetResNet import SearchShapeImagenetResNet 9 | from .generic_size_tiny_cell_model import GenericNAS301Model 10 | -------------------------------------------------------------------------------- /nes/utils/nb201/models/shape_searchs/test.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | import torch 5 | import torch.nn as nn 6 | from SoftSelect import ChannelWiseInter 7 | 8 | 9 | if __name__ == '__main__': 10 | 11 | tensors = torch.rand((16, 128, 7, 7)) 12 | 13 | for oc in range(200, 210): 14 | out_v1 = ChannelWiseInter(tensors, oc, 'v1') 15 | out_v2 = ChannelWiseInter(tensors, oc, 'v2') 16 | assert (out_v1 == out_v2).any().item() == 1 17 | for oc in range(48, 160): 18 | out_v1 = ChannelWiseInter(tensors, oc, 'v1') 19 | out_v2 = ChannelWiseInter(tensors, oc, 'v2') 20 | assert (out_v1 == out_v2).any().item() == 1 21 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Cython 2 | ConfigSpace 3 | dill 4 | hpbandster 5 | opencv-python 6 | scikit-image 7 | Wand 8 | tqdm 9 | numpy==1.22.0 10 | torch==1.4.0 11 | torchvision==0.4.2 12 | --------------------------------------------------------------------------------
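The sbatch scripts shown above all activate a virtual environment at ./venv before launching their Python entry points, and requirements.txt pins the packages they expect. The following is a minimal setup and submission sketch only, not a documented procedure from this repository: the interpreter invocation and the Python version are assumptions, the venv directory name is taken from the "source venv/bin/activate" line in the cluster scripts, and nes_rs.sh reads the global seed from its first positional argument.

# Sketch only (assumed to be run from the repository root).
python -m venv venv                # creates the ./venv that the sbatch scripts source
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements.txt    # installs the pinned dependencies listed above

# Assumed usage: submit the NES-RS array job for NAS-Bench-201 with --dataset imagenet, passing global seed 1.
sbatch cluster_scripts/nb201/imagenet/sbatch_scripts/nes_rs.sh 1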