├── .gitignore ├── LICENSE ├── README.md ├── cluster_scripts ├── bohb_one_shot_scripts │ ├── bohb-darts.sh │ ├── start_exp.sh │ └── warmstart.sh ├── start-bohb.sh ├── start-hb.sh ├── start-smac.sh ├── start-tpe.sh └── start.sh ├── data └── .gitkeep ├── experiments ├── analysis │ ├── experiment_database.py │ ├── plot_results.py │ └── utils.py └── bohb_logs │ └── .gitkeep ├── nasbench_analysis ├── __init__.py ├── architecture_inductive_bias │ ├── baseline_script.sh │ ├── model_search.py │ └── train.py ├── eval_darts_one_shot_model_in_nasbench.py ├── eval_random_search_ws_in_nasbench.py ├── evaluate_correlation.sh ├── evaluate_correlation_enas.sh ├── evaluate_one_shot_models.py ├── evaluate_one_shot_models_enas.py ├── search_spaces │ ├── __init__.py │ ├── search_space.py │ ├── search_space_1.py │ ├── search_space_2.py │ └── search_space_3.py ├── single_architecture_training │ ├── baseline_script.sh │ ├── baseline_script_nasbench.sh │ ├── baseline_script_sgdr.sh │ ├── model_search.py │ ├── train.py │ ├── train_nasbench_like.py │ └── train_sgdr.py └── utils.py └── optimizers ├── __init__.py ├── bohb └── run_bohb.py ├── bohb_one_shot ├── __init__.py ├── custom_bohb │ ├── __init__.py │ ├── bohb.py │ ├── bohb_gen.py │ └── utils.py ├── master.py ├── plots │ ├── incumbents.py │ ├── result.py │ ├── uncertanty_plots.py │ └── util.py └── worker.py ├── darts ├── __init__.py ├── architect.py ├── cluster_scripts │ ├── search_space_1 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh │ ├── search_space_2 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh │ └── search_space_3 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh ├── genotypes.py ├── model.py ├── model_search.py ├── operations.py ├── train.py ├── train_imagenet.py ├── train_search.py ├── train_search_bohb.py ├── utils.py └── visualize.py ├── enas ├── __init__.py ├── cluster_scripts │ ├── search_space_1 │ │ └── baseline_script.sh │ ├── search_space_2 │ │ └── baseline_script.sh │ └── search_space_3 │ │ └── baseline_script.sh ├── data.py ├── enas.py ├── enas_child.py ├── micro_controller.py └── utils.py ├── gdas ├── __init__.py ├── architect.py ├── cluster_scripts │ ├── search_space_1 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── cutout.sh │ │ ├── gdas_evaluation.sh │ │ ├── gdas_evaluation_cutout.sh │ │ ├── gdas_evaluation_warmstarting.sh │ │ ├── learning_rate_eval.sh │ │ └── weight_decay_script.sh │ ├── search_space_2 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── cutout.sh │ │ ├── gdas_evaluation.sh │ │ ├── gdas_evaluation_cutout.sh │ │ ├── gdas_evaluation_warmstarting.sh │ │ ├── learning_rate_eval.sh │ │ └── weight_decay_script.sh │ └── search_space_3 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── cutout.sh │ │ ├── gdas_evaluation.sh │ │ ├── gdas_evaluation_cutout.sh │ │ ├── gdas_evaluation_cutout_unrolled.sh │ │ ├── gdas_evaluation_warmstarting.sh │ │ ├── learning_rate_eval.sh │ │ └── weight_decay_script.sh 
├── model_search.py ├── train_search.py └── train_search_bohb.py ├── hyperband └── run_hyperband.py ├── pc_darts ├── cluster_scripts │ ├── search_space_1 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh │ ├── search_space_2 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh │ └── search_space_3 │ │ ├── baseline_script.sh │ │ ├── baseline_script_trans.sh │ │ ├── baseline_warmstart_script.sh │ │ ├── cutout.sh │ │ ├── learning_rate_eval.sh │ │ ├── second_order_script.sh │ │ ├── second_order_script_cutout.sh │ │ └── weight_decay_script.sh ├── model_search.py ├── train_search.py └── train_search_bohb.py ├── random_search_with_weight_sharing ├── __init__.py ├── cluster_scripts │ ├── search_space_1 │ │ └── baseline_script.sh │ ├── search_space_2 │ │ └── baseline_script.sh │ └── search_space_3 │ │ └── baseline_script.sh ├── darts_wrapper_discrete.py └── random_weight_share.py ├── regularized_evolution └── run_regularized_evolution.py ├── reinforce └── run_reinforce.py ├── smac └── run_smac.py ├── tpe └── run_tpe.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | search-*/ 103 | eval-*/ 104 | runs/ 105 | data/ 106 | *.tfrecord 107 | *.png 108 | *.pdf 109 | *.pkl 110 | *.obj 111 | *bohb_output 112 | logs/ 113 | experiments/cluster_logs/bohb_logs 114 | experiments/discrete_optimizers 115 | */__pycache__/ 116 | 117 | # Swap 118 | [._]*.s[a-v][a-z] 119 | [._]*.sw[a-p] 120 | [._]s[a-v][a-z] 121 | [._]sw[a-p] 122 | 123 | # Session 124 | Session.vim 125 | 126 | # Temporary 127 | .netrwhist 128 | *~ 129 | # Auto-generated tag files 130 | tags 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## NAS-Bench-1Shot1: Benchmarking and Dissecting One-shot Neural Architecture Search 2 | 3 | To run, e.g., DARTS on search space 1, execute the following from the root of the repository: 4 | 5 | `PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=0 --save=baseline --search_space=1 --epochs=50` 6 | 7 | To evaluate the one-shot architectures on NAS-Bench-101, download NAS-Bench-101 and insert the path to it in `nasbench_analysis/eval_darts_one_shot_model_in_nasbench.py`. 8 | 9 | Then run the following for evaluation: 10 | `PYTHONPATH=$PWD python nasbench_analysis/eval_darts_one_shot_model_in_nasbench.py` 11 | 12 | The NAS-Bench-101 test and validation errors of the searched architectures are written to the run directory and can then be analyzed using the experiment database, as demonstrated in `experiments/analysis/plot_results.py`. 13 | -------------------------------------------------------------------------------- /cluster_scripts/bohb_one_shot_scripts/bohb-darts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH --array=1-16 7 | # 8 | # the execution will use the current directory for execution (important for relative paths) 9 | #SBATCH -D .
10 | # 11 | # redirect the output/error to some files 12 | #SBATCH -o ./experiments/bohb_logs/logs/%A-%a.o 13 | #SBATCH -e ./experiments/bohb_logs/logs/%A-%a.e 14 | # 15 | # 16 | source activate tensorflow-stable 17 | python optimizers/bohb_one_shot/master.py --array_id $SLURM_ARRAY_TASK_ID --total_num_workers 16 --num_iterations 64 --run_id $SLURM_ARRAY_JOB_ID --working_directory ./experiments/bohb_output/cs$3 --min_budget 25 --max_budget 100 --space $1 --algorithm $2 --cs $3 --seed $4 18 | -------------------------------------------------------------------------------- /cluster_scripts/bohb_one_shot_scripts/start_exp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | spaces='1 2 3' 4 | algs='darts gdas pc_darts' 5 | cs='1 2 3' 6 | seed='1 2 3' 7 | 8 | for s in $spaces; do 9 | for a in $algs; do 10 | for c in $cs; do 11 | for sd in $seed; do 12 | sbatch -J ${s}_${c}_${sd}_${a} cluster_scripts/bohb_one_shot_scripts/bohb-darts.sh $s $a $c $sd 13 | echo submitted job: space $s, $a, cs $c, seed $sd 14 | done 15 | done 16 | done 17 | done 18 | -------------------------------------------------------------------------------- /cluster_scripts/bohb_one_shot_scripts/warmstart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH --array=1-16 7 | #SBATCH -J bohb-pc_darts 8 | # 9 | # the execution will use the current directory for execution (important for relative paths) 10 | #SBATCH -D . 11 | # 12 | # redirect the output/error to some files 13 | #SBATCH -o ./logs/%A-%a.o 14 | #SBATCH -e ./logs/%A-%a.e 15 | # 16 | # 17 | source activate tensorflow-stable 18 | python src/warmstart.py --array_id $SLURM_ARRAY_TASK_ID --total_num_workers 16 --num_iterations 64 --run_id $SLURM_ARRAY_JOB_ID --working_directory ./bohb_warm --previous_dir ./bohb_output/search_space_3/pc_darts/run3703171-seed1 --min_budget 25 --max_budget 100 --seed 1 --space 3 --algorithm pc_darts 19 | -------------------------------------------------------------------------------- /cluster_scripts/start-bohb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH -a 0-499 7 | #SBATCH -J bohb-nasbench 8 | # 9 | # the execution will use the current directory for execution (important for relative paths) 10 | #SBATCH -D . 11 | # 12 | # redirect the output/error to some files 13 | #SBATCH -o ./experiments/cluster_logs/%A_%a.o 14 | #SBATCH -e ./experiments/cluster_logs/%A_%a.e 15 | # 16 | # 17 | 18 | source activate tensorflow-stable 19 | PYTHONPATH=$PWD python optimizers/bohb/run_bohb.py --seed $SLURM_ARRAY_TASK_ID --search_space $1 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/start-hb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH -a 0-499 7 | #SBATCH -J hyperband 8 | # 9 | # the execution will use the current directory for execution (important for relative paths) 10 | #SBATCH -D . 
11 | # 12 | # redirect the output/error to some files 13 | #SBATCH -o ./experiments/cluster_logs/%A_%a.o 14 | #SBATCH -e ./experiments/cluster_logs/%A_%a.e 15 | # 16 | # 17 | 18 | source activate tensorflow-stable 19 | PYTHONPATH=$PWD python optimizers/hyperband/run_hyperband.py --seed $SLURM_ARRAY_TASK_ID --search_space $1 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/start-smac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p ml_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH -a 0-499 7 | #SBATCH -J smac 8 | # 9 | # the execution will use the current directory for execution (important for relative paths) 10 | #SBATCH -D . 11 | # 12 | # redirect the output/error to some files 13 | #SBATCH -o ./experiments/cluster_logs/%A_%a.o 14 | #SBATCH -e ./experiments/cluster_logs/%A_%a.e 15 | # 16 | # 17 | 18 | source activate tensorflow-stable 19 | PYTHONPATH=$PWD python optimizers/smac/run_smac.py --seed $SLURM_ARRAY_TASK_ID --search_space $1 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/start-tpe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # submit to the right queue 4 | #SBATCH -p bosch_gpu-rtx2080 5 | #SBATCH --gres gpu:1 6 | #SBATCH -a 0-499 7 | #SBATCH -J tpe-nasbench 8 | # 9 | # the execution will use the current directory for execution (important for relative paths) 10 | #SBATCH -D . 11 | # 12 | # redirect the output/error to some files 13 | #SBATCH -o ./experiments/cluster_logs/%A_%a.o 14 | #SBATCH -e ./experiments/cluster_logs/%A_%a.e 15 | # 16 | # 17 | 18 | source activate tensorflow-stable 19 | PYTHONPATH=$PWD python optimizers/tpe/run_tpe.py --seed $SLURM_ARRAY_TASK_ID --search_space $1 20 | 21 | -------------------------------------------------------------------------------- /cluster_scripts/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | method="tpe bohb hb smac" 4 | 5 | for m in $method; do 6 | for space in {1..3}; do 7 | sbatch cluster_scripts/start-${m}.sh $space 8 | done 9 | done 10 | -------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/data/.gitkeep -------------------------------------------------------------------------------- /experiments/analysis/utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import glob 3 | import os 4 | import pickle 5 | import re 6 | 7 | import numpy as np 8 | import scipy.stats as stats 9 | 10 | from nasbench_analysis.eval_darts_one_shot_model_in_nasbench import natural_keys 11 | 12 | 13 | def parse_log(path): 14 | f = open(os.path.join(path, 'log.txt'), 'r') 15 | # Read in the relevant information 16 | train_accuracies = [] 17 | valid_accuracies = [] 18 | for line in f: 19 | if 'train_acc' in line: 20 | train_accuracies.append(line) 21 | if 'valid_acc' in line: 22 | valid_accuracies.append(line) 23 | 24 | valid_error = [[1 - 1 / 100 * float(re.search('valid_acc ([-+]?[0-9]*\.?[0-9]+)', line).group(1))] for line in 25 | valid_accuracies] 26 | train_error = [[1 - 1 / 100 * float(re.search('train_acc 
([-+]?[0-9]*\.?[0-9]+)', line).group(1))] for line in 27 | train_accuracies] 28 | 29 | return valid_error, train_error 30 | 31 | 32 | def compute_spearman_correlation_top_1000(one_shot_test_error, nb_test_error): 33 | sort_by_one_shot = lambda os, nb: [[y, x] for (y, x) in sorted(zip(os, nb), key=lambda pair: pair[0])] 34 | correlation_at_epoch = [] 35 | for one_shot_test_error_on_epoch in one_shot_test_error: 36 | sorted_by_os_error = np.array(sort_by_one_shot(one_shot_test_error_on_epoch[0], nb_test_error)) 37 | correlation_at_epoch.append( 38 | stats.spearmanr(sorted_by_os_error[:, 0][:1000], sorted_by_os_error[:, 1][:1000]).correlation) 39 | return correlation_at_epoch 40 | 41 | 42 | def compute_spearman_correlation(one_shot_test_error, nb_test_error): 43 | correlation_at_epoch = [] 44 | for one_shot_test_error_on_epoch in one_shot_test_error: 45 | correlation_at_epoch.append(stats.spearmanr(one_shot_test_error_on_epoch[0], nb_test_error).correlation) 46 | return correlation_at_epoch 47 | 48 | 49 | def read_in_correlation(path, config): 50 | correlation_files = glob.glob(os.path.join(path, 'correlation_*.obj')) 51 | # If no correlation files available 52 | if len(correlation_files) == 0: 53 | return None, None 54 | else: 55 | read_file_list_with_pickle = lambda file_list: [pickle.load(open(file, 'rb')) for file in file_list] 56 | correlation_files.sort(key=natural_keys) 57 | 58 | one_shot_test_errors = glob.glob(os.path.join(path, 'one_shot_test_errors_*')) 59 | one_shot_test_errors.sort(key=natural_keys) 60 | one_shot_test_errors = read_file_list_with_pickle(one_shot_test_errors) 61 | 62 | if config['search_space'] == '1': 63 | nb_test_errors_per_epoch = pickle.load( 64 | open('experiments/analysis/data/test_errors_per_epoch_ss1.obj', 'rb')) 65 | elif config['search_space'] == '2': 66 | nb_test_errors_per_epoch = pickle.load( 67 | open('experiments/analysis/data/test_errors_per_epoch_ss2.obj', 'rb')) 68 | elif config['search_space'] == '3': 69 | nb_test_errors_per_epoch = pickle.load( 70 | open('experiments/analysis/data/test_errors_per_epoch_ss3.obj', 'rb')) 71 | else: 72 | raise ValueError('Unknown search space') 73 | correlation_per_epoch_total = { 74 | epoch: compute_spearman_correlation(one_shot_test_errors, nb_test_errors_at_epoch) for 75 | epoch, nb_test_errors_at_epoch in nb_test_errors_per_epoch.items()} 76 | 77 | correlation_per_epoch_top = { 78 | epoch: compute_spearman_correlation_top_1000(one_shot_test_errors, nb_test_errors_at_epoch) for 79 | epoch, nb_test_errors_at_epoch in nb_test_errors_per_epoch.items()} 80 | 81 | return collections.OrderedDict(sorted(correlation_per_epoch_total.items())), collections.OrderedDict( 82 | sorted(correlation_per_epoch_top.items())) 83 | 84 | -------------------------------------------------------------------------------- /experiments/bohb_logs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/experiments/bohb_logs/.gitkeep -------------------------------------------------------------------------------- /nasbench_analysis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/nasbench_analysis/__init__.py -------------------------------------------------------------------------------- /nasbench_analysis/architecture_inductive_bias/baseline_script.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p meta_gpu-ti # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (10GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-2700 # array size 7 | #SBATCH --gres=gpu:1 # reserves one GPU 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J IND_BIAS # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (receive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {0..100} 25 | do 26 | for arch_idx in {0..30} 27 | do 28 | # Job to perform 29 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python nasbench_analysis/architecture_inductive_bias/train.py --seed=${seed} --save=independent --search_space=3 --layers=9 --init_channels=16 --arch_idx=${arch_idx} --num_linear_layers=2 31 | exit $? 32 | fi 33 | let gpu_counter+=1 34 | done 35 | done 36 | 37 | 38 | # Print some Information about the end-time to STDOUT 39 | echo "DONE"; 40 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/eval_random_search_ws_in_nasbench.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import json 3 | import os 4 | import pickle 5 | 6 | import numpy as np 7 | from nasbench import api 8 | 9 | from nasbench_analysis.utils import INPUT, OUTPUT, CONV1X1, NasbenchWrapper, upscale_to_nasbench_format, natural_keys 10 | 11 | 12 | def get_directory_list(path): 13 | """Find directories containing config.json files""" 14 | directory_list = [] 15 | # return nothing if path is a file 16 | if os.path.isfile(path): 17 | return [] 18 | # add dir to directory list if it contains .json files 19 | if len([f for f in os.listdir(path) if f == 'config.json' or 'sample_val_architecture' in f]) > 0: 20 | directory_list.append(path) 21 | for d in os.listdir(path): 22 | new_path = os.path.join(path, d) 23 | if os.path.isdir(new_path): 24 | directory_list += get_directory_list(new_path) 25 | return directory_list 26 | 27 | 28 | def eval_random_ws_model(config, model): 29 | model_list = pickle.load(open(model, 'rb')) 30 | adjacency_matrix, node_list = model_list[0][0] 31 | if int(config['search_space']) == int('1'): 32 | adjacency_matrix = upscale_to_nasbench_format(adjacency_matrix) 33 | node_list = [INPUT, *node_list, CONV1X1, OUTPUT] 34 | elif int(config['search_space']) == int('2'): 35 | adjacency_matrix = upscale_to_nasbench_format(adjacency_matrix) 36 | node_list = [INPUT, *node_list, CONV1X1, OUTPUT] 37 | elif int(config['search_space']) == int('3'): 38 | node_list = [INPUT, *node_list, OUTPUT] 39 | else: 40 | raise ValueError('Unknown search space') 41 | 42 | # Convert the 
adjacency matrix in format for nasbench 43 | adjacency_list = adjacency_matrix.astype(np.int).tolist() 44 | model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list) 45 | # Query nasbench 46 | data = nasbench.query(model_spec) 47 | valid_error, test_error = [], [] 48 | for item in data: 49 | test_error.append(1 - item['test_accuracy']) 50 | valid_error.append(1 - item['validation_accuracy']) 51 | return test_error, valid_error 52 | 53 | 54 | def eval_directory(path): 55 | """Evaluates all one-shot architecture methods in the directory.""" 56 | # Read in config 57 | with open(os.path.join(path, 'config.json')) as fp: 58 | config = json.load(fp) 59 | # Accumulate all one-shot models 60 | random_ws_archs = glob.glob(os.path.join(path, 'full_val_architecture_epoch_*.obj')) 61 | # Sort them by date 62 | random_ws_archs.sort(key=natural_keys) 63 | # Eval all models on nasbench 64 | test_errors = [] 65 | valid_errors = [] 66 | for model in random_ws_archs: 67 | test, valid = eval_random_ws_model(config=config, model=model) 68 | test_errors.append(test) 69 | valid_errors.append(valid) 70 | 71 | with open(os.path.join(path, 'one_shot_validation_errors.obj'), 'wb') as fp: 72 | pickle.dump(valid_errors, fp) 73 | 74 | with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp: 75 | pickle.dump(test_errors, fp) 76 | 77 | 78 | def main(): 79 | for directory in get_directory_list("experiments/enas/"): 80 | try: 81 | eval_directory(directory) 82 | except Exception as e: 83 | print('error', e, directory) 84 | 85 | 86 | if __name__ == '__main__': 87 | nasbench = NasbenchWrapper( 88 | dataset_file='/home/siemsj/projects/darts_weight_sharing_analysis/nasbench_analysis/nasbench_data/108_e/nasbench_full.tfrecord') 89 | main() 90 | -------------------------------------------------------------------------------- /nasbench_analysis/evaluate_correlation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 30000 # memory pool for all cores 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis/ # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J correlation # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for epoch in 5 15 25 35 45 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python nasbench_analysis/evaluate_one_shot_models.py --epoch=${epoch} --model_path=/home/siemsj/projects/darts_weight_sharing_analysis/experiments/random_ws/ss_20191019-025022_3_1 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/evaluate_correlation_enas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 30000 # memory pool for all cores 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis/ # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J correlation # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for epoch in 5 15 25 35 45 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python nasbench_analysis/evaluate_one_shot_models_enas.py --epoch=${epoch} --model_path=/home/siemsj/projects/darts_weight_sharing_analysis/experiments/enas/ss_20191109-203733_3_2 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/search_spaces/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/nasbench_analysis/search_spaces/__init__.py -------------------------------------------------------------------------------- /nasbench_analysis/single_architecture_training/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p meta_gpu-ti # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-100 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J INDY_TRAINING # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {100..200} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python nasbench_analysis/single_architecture_training/train.py --seed=${seed} --save=independent --search_space=3 --epochs=50 --layers=9 --init_channels=32 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/single_architecture_training/baseline_script_nasbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p meta_gpu-ti # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 0-10 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J INDY_TRAINING # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {0..10} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python nasbench_analysis/single_architecture_training/train_nasbench_like.py --seed=${seed} --save=independent --search_space=3 --layers=3 --init_channels=16 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/single_architecture_training/baseline_script_sgdr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p meta_gpu-ti # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-100 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J INDY_SGDR # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {0..100} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python nasbench_analysis/single_architecture_training/train_sgdr.py --seed=${seed} --save=independent --search_space=3 --epochs=50 --layers=9 --init_channels=16 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /nasbench_analysis/single_architecture_training/model_search.py: -------------------------------------------------------------------------------- 1 | from optimizers.darts.model_search import Network, MixedOp, ChoiceBlock, Cell 2 | from optimizers.darts.operations import * 3 | 4 | 5 | class MixedOpIndependentTraining(MixedOp): 6 | def __init__(self, *args, **kwargs): 7 | super(MixedOpIndependentTraining, self).__init__(*args, **kwargs) 8 | 9 | def forward(self, x, weights): 10 | cpu_weights = weights.tolist() 11 | clist = [] 12 | for j, cpu_weight in enumerate(cpu_weights): 13 | if abs(cpu_weight) > 1e-10: 14 | clist.append(weights[j] * self._ops[j](x)) 15 | assert len(clist) > 0, 'invalid length : {:}'.format(cpu_weights) 16 | return sum(clist) 17 | 18 | 19 | class ChoiceBlockIndependent(ChoiceBlock): 20 | """ 21 | Adapted to match Figure 3 in: 22 | Bender, Gabriel, et al. "Understanding and simplifying one-shot architecture search." 23 | International Conference on Machine Learning. 2018. 
24 | """ 25 | 26 | def __init__(self, C_in): 27 | super(ChoiceBlockIndependent, self).__init__(C_in) 28 | # Use the independent-training MixedOp instead of the DARTS MixedOp 29 | self.mixed_op = MixedOpIndependentTraining(C_in, stride=1) 30 | 31 | 32 | class CellIndependent(Cell): 33 | 34 | def __init__(self, steps, C_prev, C, layer, search_space): 35 | super(CellIndependent, self).__init__(steps, C_prev, C, layer, search_space) 36 | # Create the choice blocks. 37 | self._choice_blocks = nn.ModuleList() 38 | for i in range(self._steps): 39 | # Use the independent-training choice block instead of the DARTS choice block 40 | choice_block = ChoiceBlockIndependent(C_in=C) 41 | self._choice_blocks.append(choice_block) 42 | 43 | 44 | class NetworkIndependent(Network): 45 | 46 | def __init__(self, C, num_classes, layers, criterion, output_weights, search_space, steps=4): 47 | super(NetworkIndependent, self).__init__(C, num_classes, layers, criterion, output_weights, search_space, 48 | steps=steps) 49 | 50 | # Override the cells module list of DARTS with the independent-training variants 51 | self.cells = nn.ModuleList() 52 | C_curr = C 53 | C_prev = C_curr 54 | for i in range(layers): 55 | if i in [layers // 3, 2 * layers // 3]: 56 | # Double the number of channels after each down-sampling step 57 | # Down-sample in forward method 58 | C_curr *= 2 59 | 60 | cell = CellIndependent(steps, C_prev, C_curr, layer=i, search_space=search_space) 61 | self.cells += [cell] 62 | C_prev = C_curr 63 | -------------------------------------------------------------------------------- /optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/__init__.py -------------------------------------------------------------------------------- /optimizers/bohb_one_shot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/bohb_one_shot/__init__.py -------------------------------------------------------------------------------- /optimizers/bohb_one_shot/custom_bohb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/bohb_one_shot/custom_bohb/__init__.py -------------------------------------------------------------------------------- /optimizers/bohb_one_shot/custom_bohb/bohb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import math 4 | import copy 5 | import logging 6 | import numpy as np 7 | import ConfigSpace as CS 8 | 9 | from hpbandster.core.master import Master 10 | from hpbandster.optimizers.iterations import SuccessiveHalving 11 | #from hpbandster.optimizers.config_generators.bohb import BOHB as CG_BOHB 12 | from custom_bohb.bohb_gen import CG_BOHB_CUSTOM as CG_BOHB 13 | 14 | class BOHB(Master): 15 | def __init__(self, configspace = None, 16 | eta=3, min_budget=0.01, max_budget=1, 17 | min_points_in_model = None, top_n_percent=15, 18 | num_samples = 64, random_fraction=1/3, bandwidth_factor=3, 19 | min_bandwidth=1e-3, start_from_default=False, 20 | **kwargs ): 21 | 22 | 23 | if configspace is None: 24 | raise ValueError("You have to provide a valid ConfigSpace object") 25 | 26 | cg = CG_BOHB( configspace = configspace, 27 | min_points_in_model = 
min_points_in_model, 28 | top_n_percent=top_n_percent, 29 | num_samples = num_samples, 30 | random_fraction=random_fraction, 31 | bandwidth_factor=bandwidth_factor, 32 | min_bandwidth = min_bandwidth, 33 | start_from_default=start_from_default 34 | ) 35 | 36 | super().__init__(config_generator=cg, **kwargs) 37 | 38 | # Hyperband related stuff 39 | self.eta = eta 40 | self.min_budget = min_budget 41 | self.max_budget = max_budget 42 | 43 | # precompute some HB stuff 44 | self.max_SH_iter = -int(np.log(min_budget/max_budget)/np.log(eta)) + 1 45 | self.budgets = max_budget * np.power(eta, -np.linspace(self.max_SH_iter-1, 0, self.max_SH_iter)) 46 | 47 | self.config.update({ 48 | 'eta' : eta, 49 | 'min_budget' : min_budget, 50 | 'max_budget' : max_budget, 51 | 'budgets' : self.budgets, 52 | 'max_SH_iter': self.max_SH_iter, 53 | 'min_points_in_model' : min_points_in_model, 54 | 'top_n_percent' : top_n_percent, 55 | 'num_samples' : num_samples, 56 | 'random_fraction' : random_fraction, 57 | 'bandwidth_factor' : bandwidth_factor, 58 | 'min_bandwidth': min_bandwidth 59 | }) 60 | 61 | def get_next_iteration(self, iteration, iteration_kwargs={}): 62 | # number of 'SH rungs' 63 | s = self.max_SH_iter - 1 - (iteration%self.max_SH_iter) 64 | # number of configurations in that bracket 65 | n0 = int(np.floor((self.max_SH_iter)/(s+1)) * self.eta**s) 66 | ns = [max(int(n0*(self.eta**(-i))), 1) for i in range(s+1)] 67 | 68 | return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=self.budgets[(-s-1):], config_sampler=self.config_generator.get_config, **iteration_kwargs)) 69 | -------------------------------------------------------------------------------- /optimizers/bohb_one_shot/custom_bohb/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import pickle 4 | import json 5 | import threading 6 | import subprocess 7 | 8 | import Pyro4.naming 9 | from netifaces import ifaddresses, AF_INET 10 | 11 | 12 | def nic_name_to_host(nic_name=None): 13 | """ translates the name of a network card into a valid host name""" 14 | 15 | def get_nic_name_from_system(): 16 | process = subprocess.Popen("ip route get 8.8.8.8".split(), 17 | stdout=subprocess.PIPE) 18 | output = process.stdout.read().decode() 19 | s = re.search(r'dev\s*(\S+)', output) 20 | return s.group(1) 21 | 22 | # if the network card name is not a valid one, an exception will be raised 23 | # and the method get_nic_name_from_system will discover a valid card name 24 | try: 25 | host = ifaddresses(nic_name).setdefault(AF_INET, [{'addr': 'No IP addr'}] )[0]['addr'] 26 | # ValueError if the nic_name is not correct 27 | # TypeError if nic_name is None 28 | except (ValueError, TypeError) as e: 29 | nic_name = get_nic_name_from_system() 30 | host = ifaddresses(nic_name).setdefault(AF_INET, [{'addr': 'No IP addr'}] )[0]['addr'] 31 | 32 | return(host) 33 | 34 | 35 | class NameServer(object): 36 | """ 37 | The nameserver serves as a phonebook-like lookup table for your workers. Unique names are created so the workers 38 | can work in parallel and register their results without creating race conditions. The implementation uses 39 | `PYRO4 `_ as a backend and this class is basically a wrapper. 
40 | """ 41 | def __init__(self, run_id, working_directory=None, host=None, port=0, nic_name=None): 42 | """ 43 | Parameters 44 | ---------- 45 | run_id: str 46 | unique run_id associated with the HPB run 47 | working_directory: str 48 | path to the working directory of the HPB run to store the nameservers credentials. 49 | If None, no config file will be written. 50 | host: str 51 | the hostname to use for the nameserver 52 | port: int 53 | the port to be used. Default (=0) means a random port 54 | nic_name: str 55 | name of the network interface to use (only used if host is not given) 56 | """ 57 | self.run_id = run_id 58 | self.host = host 59 | self.nic_name = nic_name 60 | self.port = port 61 | self.dir = working_directory 62 | self.conf_fn = None 63 | self.pyro_ns = None 64 | 65 | 66 | 67 | def start(self): 68 | """ 69 | starts a Pyro4 nameserver in a separate thread 70 | 71 | Returns 72 | ------- 73 | tuple (str, int): 74 | the host name and the used port 75 | """ 76 | 77 | if self.host is None: 78 | if self.nic_name is None: 79 | self.host = 'localhost' 80 | else: 81 | self.host = nic_name_to_host(self.nic_name) 82 | 83 | uri, self.pyro_ns, _ = Pyro4.naming.startNS(host=self.host, port=self.port) 84 | 85 | self.host, self.port = self.pyro_ns.locationStr.split(':') 86 | self.port = int(self.port) 87 | 88 | thread = threading.Thread(target=self.pyro_ns.requestLoop, name='Pyro4 nameserver started by HpBandSter') 89 | thread.start() 90 | 91 | if not self.dir is None: 92 | os.makedirs(self.dir, exist_ok=True) 93 | self.conf_fn = os.path.join(self.dir, 'HPB_run_%s_pyro.pkl'%self.run_id) 94 | 95 | with open(self.conf_fn, 'wb') as fh: 96 | pickle.dump((self.host, self.port), fh) 97 | 98 | return(self.host, self.port) 99 | 100 | 101 | def shutdown(self): 102 | """ 103 | clean shutdown of the nameserver and the config file (if written) 104 | """ 105 | if not self.pyro_ns is None: 106 | self.pyro_ns.shutdown() 107 | self.pyro_ns = None 108 | 109 | if not self.conf_fn is None: 110 | os.remove(self.conf_fn) 111 | self.conf_fn = None 112 | 113 | 114 | def __del__(self): 115 | self.shutdown() 116 | -------------------------------------------------------------------------------- /optimizers/darts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/darts/__init__.py -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline --search_space=1 --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=1 --epochs=50 --weight_decay=0.00017949567554327632 --cutout --cutout_prob=0.576809112569184 --learning_rate=0.0011765562944021056 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=0 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=1 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=2 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=4 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=5 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=cutout --search_space=1 --cutout --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_LR_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=learning_rate --learning_rate=${lr} --search_space=1 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order --search_space=1 --unrolled --epochs=25 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order_cutout --search_space=1 --unrolled --cutout --epochs=100 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_1/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for wd in "1e-4" "3e-4" "9e-4" "27e-4" "81e-4" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=1 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline --search_space=2 --epochs=25 29 | exit $? 
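# "baseline" here denotes first-order DARTS (no --unrolled flag) for 25 epochs on search
# space 2, again dispatching one seed per array task.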
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=2 --epochs=50 --weight_decay=0.00017949567554327632 --cutout --cutout_prob=0.576809112569184 --learning_rate=0.0011765562944021056 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=0 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=1 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=2 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=4 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=5 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 55 | exit $? 56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=cutout --search_space=2 --cutout --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_LR_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=learning_rate --learning_rate=${lr} --search_space=2 29 | exit $? 
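# Only two learning rates (0.25 and 0.0025) are evaluated here, all at a fixed seed of 3,
# although the array is sized 1-5; tasks 3-5 therefore fall through the loop without
# launching a run.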
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order --search_space=2 --unrolled --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order_cutout --search_space=2 --unrolled --cutout --epochs=100 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_2/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=2 30 | exit $? 
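# This sweep covers four weight-decay values (1e-4, 9e-4, 27e-4, 81e-4) under a 1-5 array,
# so the fifth task does nothing; the search-space-1 variant additionally includes 3e-4.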
31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline --search_space=3 --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=3 --epochs=50 --weight_decay=0.00017949567554327632 --cutout --cutout_prob=0.576809112569184 --learning_rate=0.0011765562944021056 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=0 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=1 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=2 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=4 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 49 | exit $? 
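# Unlike the seed-loop scripts, the warm-start runs spell out one if-block per array task,
# covering seeds 0-5 with --warm_start_epochs=20 (presumably weight-only training before
# the architecture parameters are optimized) and cutout enabled.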
50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=5 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 55 | exit $? 56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=cutout --search_space=3 --cutout --epochs=25 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_LR_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=12 --save=learning_rate --learning_rate=${lr} --search_space=3 --unrolled 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order --search_space=3 --unrolled --epochs=25 29 | exit $? 
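# --unrolled selects the second-order approximation of the DARTS architecture gradient,
# which is why these runs are saved under the name second_order.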
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=${seed} --save=second_order_cutout --search_space=3 --unrolled --cutout --epochs=100 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/cluster_scripts/search_space_3/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/darts/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=3 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/darts/genotypes.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') 4 | 5 | PRIMITIVES = [ 6 | 'maxpool3x3', 7 | 'conv3x3-bn-relu', 8 | 'conv1x1-bn-relu' 9 | ] 10 | 11 | NASNet = Genotype( 12 | normal=[ 13 | ('sep_conv_5x5', 1), 14 | ('sep_conv_3x3', 0), 15 | ('sep_conv_5x5', 0), 16 | ('sep_conv_3x3', 0), 17 | ('avg_pool_3x3', 1), 18 | ('skip_connect', 0), 19 | ('avg_pool_3x3', 0), 20 | ('avg_pool_3x3', 0), 21 | ('sep_conv_3x3', 1), 22 | ('skip_connect', 1), 23 | ], 24 | normal_concat=[2, 3, 4, 5, 6], 25 | reduce=[ 26 | ('sep_conv_5x5', 1), 27 | ('sep_conv_7x7', 0), 28 | ('max_pool_3x3', 1), 29 | ('sep_conv_7x7', 0), 30 | ('avg_pool_3x3', 1), 31 | ('sep_conv_5x5', 0), 32 | ('skip_connect', 3), 33 | ('avg_pool_3x3', 2), 34 | ('sep_conv_3x3', 2), 35 | ('max_pool_3x3', 1), 36 | ], 37 | reduce_concat=[4, 5, 6], 38 | ) 39 | 40 | AmoebaNet = Genotype( 41 | normal=[ 42 | ('avg_pool_3x3', 0), 43 | ('max_pool_3x3', 1), 44 | ('sep_conv_3x3', 0), 45 | ('sep_conv_5x5', 2), 46 | ('sep_conv_3x3', 0), 47 | ('avg_pool_3x3', 3), 48 | ('sep_conv_3x3', 1), 49 | ('skip_connect', 1), 50 | ('skip_connect', 0), 51 | ('avg_pool_3x3', 1), 52 | ], 53 | normal_concat=[4, 5, 6], 54 | reduce=[ 55 | ('avg_pool_3x3', 0), 56 | ('sep_conv_3x3', 1), 57 | ('max_pool_3x3', 0), 58 | ('sep_conv_7x7', 2), 59 | ('sep_conv_7x7', 0), 60 | ('avg_pool_3x3', 1), 61 | ('max_pool_3x3', 0), 62 | ('max_pool_3x3', 1), 63 | ('conv_7x1_1x7', 0), 64 | ('sep_conv_3x3', 5), 65 | ], 66 | reduce_concat=[3, 4, 6] 67 | ) 68 | 69 | DARTS_V1 = Genotype( 70 | normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), 71 | ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], 72 | reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), 73 | ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) 74 | DARTS_V2 = Genotype( 75 | normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), 76 | ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], 77 | reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), 78 | ('skip_connect', 2), 
('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) 79 | 80 | DARTS = DARTS_V2 81 | -------------------------------------------------------------------------------- /optimizers/darts/visualize.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from optimizers.darts import genotypes  # needed so the eval('genotypes.<ARCH_NAME>') lookup below can resolve 3 | from graphviz import Digraph 4 | 5 | 6 | def plot(genotype, filename): 7 | g = Digraph( 8 | format='pdf', 9 | edge_attr=dict(fontsize='20', fontname="times"), 10 | node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', 11 | penwidth='2', fontname="times"), 12 | engine='dot') 13 | g.body.extend(['rankdir=LR']) 14 | 15 | g.node("c_{k-2}", fillcolor='darkseagreen2') 16 | g.node("c_{k-1}", fillcolor='darkseagreen2') 17 | assert len(genotype) % 2 == 0 18 | steps = len(genotype) // 2 19 | 20 | for i in range(steps): 21 | g.node(str(i), fillcolor='lightblue') 22 | 23 | for i in range(steps): 24 | for k in [2 * i, 2 * i + 1]: 25 | op, j = genotype[k] 26 | if j == 0: 27 | u = "c_{k-2}" 28 | elif j == 1: 29 | u = "c_{k-1}" 30 | else: 31 | u = str(j - 2) 32 | v = str(i) 33 | g.edge(u, v, label=op, fillcolor="gray") 34 | 35 | g.node("c_{k}", fillcolor='palegoldenrod') 36 | for i in range(steps): 37 | g.edge(str(i), "c_{k}", fillcolor="gray") 38 | 39 | g.render(filename, view=True) 40 | 41 | 42 | if __name__ == '__main__': 43 | if len(sys.argv) != 2: 44 | print("usage:\n python {} ARCH_NAME".format(sys.argv[0])) 45 | sys.exit(1) 46 | 47 | genotype_name = sys.argv[1] 48 | try: 49 | genotype = eval('genotypes.{}'.format(genotype_name)) 50 | except AttributeError: 51 | print("{} is not specified in genotypes.py".format(genotype_name)) 52 | sys.exit(1) 53 | 54 | plot(genotype.normal, "normal") 55 | plot(genotype.reduce, "reduction") 56 | -------------------------------------------------------------------------------- /optimizers/enas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/enas/__init__.py -------------------------------------------------------------------------------- /optimizers/enas/cluster_scripts/search_space_1/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 30000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J ENAS_NASBENCH # sets the job name.
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD" 15 | echo "Started at $(date)" 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12}; do 25 | # Job to perform 26 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 27 | PYTHONPATH=$PWD python optimizers/enas/enas.py --seed=${seed} --search_space=1 --epochs=100 28 | exit $? 29 | fi 30 | 31 | let gpu_counter+=1 32 | done 33 | # Print some Information about the end-time to STDOUT 34 | echo "DONE" 35 | echo "Finished at $(date)" 36 | -------------------------------------------------------------------------------- /optimizers/enas/cluster_scripts/search_space_2/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 30000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J ENAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD" 15 | echo "Started at $(date)" 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12}; do 25 | # Job to perform 26 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 27 | PYTHONPATH=$PWD python optimizers/enas/enas.py --seed=${seed} --search_space=2 --epochs=100 28 | exit $? 
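# The ENAS searches run for 100 epochs and request 30GB of memory, three times the 10GB
# used by the DARTS scripts above; dispatch is again one seed (1-12) per array task.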
29 | fi 30 | 31 | let gpu_counter+=1 32 | done 33 | # Print some Information about the end-time to STDOUT 34 | echo "DONE" 35 | echo "Finished at $(date)" 36 | -------------------------------------------------------------------------------- /optimizers/enas/cluster_scripts/search_space_3/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 30000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J ENAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD" 15 | echo "Started at $(date)" 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION" 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12}; do 25 | # Job to perform 26 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 27 | PYTHONPATH=$PWD python optimizers/enas/enas.py --seed=${seed} --search_space=3 --epochs=100 28 | exit $? 
29 | fi 30 | 31 | let gpu_counter+=1 32 | done 33 | # Print some Information about the end-time to STDOUT 34 | echo "DONE" 35 | echo "Finished at $(date)" 36 | -------------------------------------------------------------------------------- /optimizers/enas/data.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import DataLoader, SubsetRandomSampler 2 | from torchvision import transforms 3 | from torchvision.datasets import CIFAR10 4 | 5 | MEAN = [0.4914, 0.4822, 0.4465] 6 | STD = [0.2023, 0.1994, 0.2010] 7 | 8 | 9 | def get_loaders(args): 10 | train_transform = transforms.Compose([ 11 | transforms.RandomCrop(32, padding=4), 12 | transforms.RandomHorizontalFlip(), 13 | transforms.ToTensor(), 14 | transforms.Normalize( 15 | mean=MEAN, 16 | std=STD, 17 | ), 18 | ]) 19 | train_dataset = CIFAR10( 20 | root=args.data, 21 | train=True, 22 | download=True, 23 | transform=train_transform, 24 | ) 25 | 26 | indices = list(range(len(train_dataset))) 27 | 28 | train_loader = DataLoader( 29 | train_dataset, 30 | batch_size=args.batch_size, 31 | sampler=SubsetRandomSampler(indices[:-5000]), 32 | pin_memory=True, 33 | num_workers=2, 34 | ) 35 | 36 | reward_loader = DataLoader( 37 | train_dataset, 38 | batch_size=args.batch_size, 39 | sampler=SubsetRandomSampler(indices[-5000:]), 40 | pin_memory=True, 41 | num_workers=2, 42 | ) 43 | 44 | valid_transform = transforms.Compose([ 45 | transforms.ToTensor(), 46 | transforms.Normalize( 47 | mean=MEAN, 48 | std=STD, 49 | ), 50 | ]) 51 | valid_dataset = CIFAR10( 52 | root=args.data, 53 | train=False, 54 | download=False, 55 | transform=valid_transform, 56 | ) 57 | 58 | valid_loader = DataLoader( 59 | valid_dataset, 60 | batch_size=args.batch_size, 61 | shuffle=False, 62 | pin_memory=True, 63 | num_workers=2, 64 | ) 65 | 66 | # repeat_train_loader = RepeatedDataLoader(train_loader) 67 | repeat_reward_loader = RepeatedDataLoader(reward_loader) 68 | repeat_valid_loader = RepeatedDataLoader(valid_loader) 69 | 70 | return train_loader, repeat_reward_loader, repeat_valid_loader 71 | 72 | 73 | class RepeatedDataLoader(): 74 | def __init__(self, data_loader): 75 | self.data_loader = data_loader 76 | self.data_iter = self.data_loader.__iter__() 77 | 78 | def __len__(self): 79 | return len(self.data_loader) 80 | 81 | def next_batch(self): 82 | try: 83 | batch = self.data_iter.__next__() 84 | except StopIteration: 85 | self.data_iter = self.data_loader.__iter__() 86 | batch = self.data_iter.__next__() 87 | return batch 88 | -------------------------------------------------------------------------------- /optimizers/enas/utils.py: -------------------------------------------------------------------------------- 1 | import collections 2 | from collections import defaultdict 3 | 4 | import numpy as np 5 | import torch 6 | 7 | Node = collections.namedtuple('Node', ['id', 'name']) 8 | 9 | 10 | class keydefaultdict(defaultdict): 11 | def __missing__(self, key): 12 | if self.default_factory is None: 13 | raise KeyError(key) 14 | else: 15 | ret = self[key] = self.default_factory(key) 16 | return ret 17 | 18 | 19 | def get_variable(inputs, cuda=False, **kwargs): 20 | if type(inputs) in [list, np.ndarray]: 21 | inputs = torch.Tensor(inputs) 22 | if cuda: 23 | out = torch.Tensor(inputs.cuda(), **kwargs) 24 | else: 25 | out = torch.Tensor(inputs, **kwargs) 26 | return out 27 | -------------------------------------------------------------------------------- /optimizers/gdas/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/gdas/__init__.py -------------------------------------------------------------------------------- /optimizers/gdas/architect.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Variable 3 | from torch import autograd 4 | 5 | from optimizers.darts.architect import Architect 6 | 7 | 8 | def _concat(xs): 9 | return torch.cat([x.view(-1) for x in xs]) 10 | 11 | 12 | class ArchitectGDAS(Architect): 13 | 14 | def __init__(self, model, args): 15 | self.grad_clip = args.grad_clip 16 | super(ArchitectGDAS, self).__init__(model, args) 17 | 18 | def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled): 19 | self.optimizer.zero_grad() 20 | if unrolled: 21 | self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer) 22 | else: 23 | self._backward_step(input_valid, target_valid) 24 | 25 | # Add gradient clipping for gdas because gumbel softmax leads to gradients with high magnitude 26 | torch.nn.utils.clip_grad_norm(self.model.arch_parameters(), self.grad_clip) 27 | self.optimizer.step() 28 | 29 | def _compute_unrolled_model(self, input, target, eta, network_optimizer): 30 | loss = self.model._loss(input, target) 31 | theta = _concat(self.model.parameters()).data 32 | # Changes to reflect that for unused ops there will be no gradient and this needs to be handled 33 | dtheta = _concat( 34 | [grad_i + self.network_weight_decay * theta_i if grad_i is not None else self.network_weight_decay * theta_i 35 | for grad_i, theta_i in 36 | zip(torch.autograd.grad(loss, self.model.parameters(), allow_unused=True), self.model.parameters())]) 37 | 38 | try: 39 | moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_( 40 | self.network_momentum) 41 | except: 42 | moment = torch.zeros_like(dtheta) 43 | unrolled_model = self._construct_model_from_theta(theta.sub(eta, moment + dtheta)) 44 | return unrolled_model 45 | 46 | def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer): 47 | unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer) 48 | unrolled_loss = unrolled_model._loss(input_valid, target_valid) 49 | 50 | unrolled_loss.backward() 51 | dalpha = [v.grad for v in unrolled_model.arch_parameters()] 52 | # Changes to reflect that for unused ops there will be no gradient and this needs to be handled 53 | vector = [v.grad.data if v.grad is not None else torch.zeros_like(v) for v in unrolled_model.parameters()] 54 | implicit_grads = self._hessian_vector_product(vector, input_train, target_train) 55 | 56 | for g, ig in zip(dalpha, implicit_grads): 57 | g.data.sub_(eta, ig.data) 58 | 59 | for v, g in zip(self.model.arch_parameters(), dalpha): 60 | if v.grad is None: 61 | v.grad = Variable(g.data) 62 | else: 63 | v.grad.data.copy_(g.data) 64 | -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time
(D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order --search_space=1 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=baseline_trans --search_space=1 --epochs=50 --weight_decay=1.0685329756580793e-05 --cutout --cutout_prob=0.3118341613188339 --learning_rate=0.854962095974854 29 | exit $? 
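# The *_trans scripts rerun the search for 50 epochs with a fixed hyperparameter setting
# (weight decay, cutout probability, learning rate) that appears to be transferred from a
# separate hyperparameter-optimization run; only seeds 1-6 are swept, so array tasks 7-12
# fall through the loop.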
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order_cutout --search_space=1 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/gdas_evaluation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=1 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=1 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=1 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=1 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=1 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=1 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=1 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=1 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=1 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=1 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=1 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=1 81 | exit $? 
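# This evaluation script enumerates twelve specific seeds (10 and 14-24) in explicit
# if-blocks, one per array task, rather than looping over a contiguous range.
# It is typically submitted as a job array, e.g.:
#   sbatch optimizers/gdas/cluster_scripts/search_space_1/gdas_evaluation.sh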
82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/gdas_evaluation_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=10 --search_space=1 --cutout 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=14 --search_space=1 --cutout 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=15 --search_space=1 --cutout 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=16 --search_space=1 --cutout 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=17 --search_space=1 --cutout 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=18 --search_space=1 --cutout 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=19 --search_space=1 --cutout 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=20 --search_space=1 --cutout 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=23 --search_space=1 --cutout 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=24 --search_space=1 --cutout 71 | exit $? 
72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=21 --search_space=1 --cutout 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=22 --search_space=1 --cutout 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/gdas_evaluation_warmstarting.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=1 --warm_start_epochs=20 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=1 --warm_start_epochs=20 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=1 --warm_start_epochs=20 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=1 --warm_start_epochs=20 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=1 --warm_start_epochs=20 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=1 --warm_start_epochs=20 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=1 --warm_start_epochs=20 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=1 --warm_start_epochs=20 61 | exit $? 
62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=1 --warm_start_epochs=20 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=1 --warm_start_epochs=20 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=1 --warm_start_epochs=20 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=1 --warm_start_epochs=20 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=0 --save=learning_rate --learning_rate=${lr} --search_space=1 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_1/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (10GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves one GPU 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (receive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=1 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (10GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves one GPU 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order --search_space=2 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=baseline_trans --search_space=2 --epochs=50 --weight_decay=1.0685329756580793e-05 --cutout --cutout_prob=0.3118341613188339 --learning_rate=0.854962095974854 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order_cutout --search_space=2 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/gdas_evaluation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=2 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=2 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=2 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=2 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=2 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=2 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=2 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=2 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=2 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=2 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=2 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=2 81 | exit $? 
82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/gdas_evaluation_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=10 --search_space=2 --cutout 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=14 --search_space=2 --cutout 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=15 --search_space=2 --cutout 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=16 --search_space=2 --cutout 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=17 --search_space=2 --cutout 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=18 --search_space=2 --cutout 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=19 --search_space=2 --cutout 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=20 --search_space=2 --cutout 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=23 --search_space=2 --cutout 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=24 --search_space=2 --cutout 71 | exit $? 
72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=21 --search_space=2 --cutout 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=22 --search_space=2 --cutout 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/gdas_evaluation_warmstarting.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=2 --warm_start_epochs=20 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=2 --warm_start_epochs=20 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=2 --warm_start_epochs=20 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=2 --warm_start_epochs=20 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=2 --warm_start_epochs=20 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=2 --warm_start_epochs=20 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=2 --warm_start_epochs=20 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=2 --warm_start_epochs=20 61 | exit $? 
62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=2 --warm_start_epochs=20 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=2 --warm_start_epochs=20 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=2 --warm_start_epochs=20 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=2 --warm_start_epochs=20 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=2 --save=learning_rate --learning_rate=${lr} --search_space=2 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_2/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=2 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order --search_space=3 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=baseline_trans --search_space=3 --epochs=50 --weight_decay=1.0685329756580793e-05 --cutout --cutout_prob=0.3118341613188339 --learning_rate=0.854962095974854 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {13..25} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=${seed} --save=first_order_cutout --search_space=3 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/gdas_evaluation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=3 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=3 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=3 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=3 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=3 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=3 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=3 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=3 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=3 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=3 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=3 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=3 81 | exit $? 
82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/gdas_evaluation_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=10 --search_space=3 --cutout 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=14 --search_space=3 --cutout 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=15 --search_space=3 --cutout 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=16 --search_space=3 --cutout 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=17 --search_space=3 --cutout 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=18 --search_space=3 --cutout 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=19 --search_space=3 --cutout 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=20 --search_space=3 --cutout 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=23 --search_space=3 --cutout 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=24 --search_space=3 --cutout 71 | exit $? 
72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=21 --search_space=3 --cutout 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order_cutout --seed=22 --search_space=3 --cutout 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/gdas_evaluation_cutout_unrolled.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=10 --search_space=3 --cutout --unrolled 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=14 --search_space=3 --cutout --unrolled 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=15 --search_space=3 --cutout --unrolled 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=16 --search_space=3 --cutout --unrolled 41 | exit $? 42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=17 --search_space=3 --cutout --unrolled 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=18 --search_space=3 --cutout --unrolled 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=19 --search_space=3 --cutout --unrolled 56 | exit $? 
57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=20 --search_space=3 --cutout --unrolled 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=23 --search_space=3 --cutout --unrolled 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=24 --search_space=3 --cutout --unrolled 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=21 --search_space=3 --cutout --unrolled 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=second_order_cutout --seed=22 --search_space=3 --cutout --unrolled 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/gdas_evaluation_warmstarting.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J GDAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | 23 | # Job to perform 24 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 25 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=10 --search_space=3 --warm_start_epochs=20 26 | exit $? 27 | fi 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=14 --search_space=3 --warm_start_epochs=20 31 | exit $? 32 | fi 33 | 34 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 35 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=15 --search_space=3 --warm_start_epochs=20 36 | exit $? 37 | fi 38 | 39 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 40 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=16 --search_space=3 --warm_start_epochs=20 41 | exit $? 
42 | fi 43 | 44 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 45 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=17 --search_space=3 --warm_start_epochs=20 46 | exit $? 47 | fi 48 | 49 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 50 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=18 --search_space=3 --warm_start_epochs=20 51 | exit $? 52 | fi 53 | 54 | if [ 7 -eq $SLURM_ARRAY_TASK_ID ]; then 55 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=19 --search_space=3 --warm_start_epochs=20 56 | exit $? 57 | fi 58 | 59 | if [ 8 -eq $SLURM_ARRAY_TASK_ID ]; then 60 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=20 --search_space=3 --warm_start_epochs=20 61 | exit $? 62 | fi 63 | 64 | if [ 9 -eq $SLURM_ARRAY_TASK_ID ]; then 65 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=23 --search_space=3 --warm_start_epochs=20 66 | exit $? 67 | fi 68 | 69 | if [ 10 -eq $SLURM_ARRAY_TASK_ID ]; then 70 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=24 --search_space=3 --warm_start_epochs=20 71 | exit $? 72 | fi 73 | 74 | if [ 11 -eq $SLURM_ARRAY_TASK_ID ]; then 75 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=21 --search_space=3 --warm_start_epochs=20 76 | exit $? 77 | fi 78 | 79 | if [ 12 -eq $SLURM_ARRAY_TASK_ID ]; then 80 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --save=first_order --seed=22 --search_space=3 --warm_start_epochs=20 81 | exit $? 82 | fi 83 | # Print some Information about the end-time to STDOUT 84 | echo "DONE"; 85 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=13 --save=learning_rate --learning_rate=${lr} --search_space=3 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/gdas/cluster_scripts/search_space_3/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/gdas/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=3 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline --search_space=1 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=1 --epochs=50 --weight_decay=0.00011084497550078689 --cutout --cutout_prob=0.43720581909871464 --learning_rate=0.48710801282480837 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=baseline --search_space=1 --warm_start_epochs=20 --cutout 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=cutout --search_space=1 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=learning_rate --learning_rate=${lr} --search_space=1 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled --unrolled --search_space=1 --epochs=100 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled --unrolled --search_space=1 --epochs=100 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled --unrolled --search_space=1 --epochs=100 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled --unrolled --search_space=1 --epochs=100 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled --unrolled --search_space=1 --epochs=100 49 | exit $? 
50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled --unrolled --search_space=1 --epochs=100 55 | exit $? 56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled_cutout --unrolled --cutout --search_space=1 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled_cutout --unrolled --cutout --search_space=1 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled_cutout --unrolled --cutout --search_space=1 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled_cutout --unrolled --cutout --search_space=1 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled_cutout --unrolled --cutout --search_space=1 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled_cutout --unrolled --cutout --search_space=1 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_1/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=weight_decay --weight_decay=${wd} --search_space=1 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline --search_space=2 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=2 --epochs=50 --weight_decay=0.00011084497550078689 --cutout --cutout_prob=0.43720581909871464 --learning_rate=0.48710801282480837 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=baseline --search_space=2 --warm_start_epochs=20 --cutout 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=cutout --search_space=2 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=learning_rate --learning_rate=${lr} --search_space=2 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled --unrolled --search_space=2 --epochs=100 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled --unrolled --search_space=2 --epochs=100 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled --unrolled --search_space=2 --epochs=100 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled --unrolled --search_space=2 --epochs=100 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled --unrolled --search_space=2 --epochs=100 49 | exit $? 
50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled --unrolled --search_space=2 --epochs=100 55 | exit $? 56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled_cutout --unrolled --cutout --search_space=2 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled_cutout --unrolled --cutout --search_space=2 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled_cutout --unrolled --cutout --search_space=2 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled_cutout --unrolled --cutout --search_space=2 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled_cutout --unrolled --cutout --search_space=2 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled_cutout --unrolled --cutout --search_space=2 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_2/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=weight_decay --weight_decay=${wd} --search_space=2 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline --search_space=3 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/baseline_script_trans.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..6} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=baseline_trans --search_space=3 --epochs=50 --weight_decay=0.00011084497550078689 --cutout --cutout_prob=0.43720581909871464 --learning_rate=0.48710801282480837 29 | exit $? 
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/baseline_warmstart_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=baseline --search_space=3 --warm_start_epochs=20 --cutout 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=${seed} --save=cutout --search_space=3 --cutout 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/learning_rate_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J PC_DARTS_LR_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for lr in "0.25" "0.0025" 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=12 --save=learning_rate --learning_rate=${lr} --search_space=3 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | 35 | # Print some Information about the end-time to STDOUT 36 | echo "DONE"; 37 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/second_order_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled --unrolled --search_space=3 --epochs=100 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled --unrolled --search_space=3 --epochs=100 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled --unrolled --search_space=3 --epochs=100 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled --unrolled --search_space=3 --epochs=100 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled --unrolled --search_space=3 --epochs=100 49 | exit $? 
50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled --unrolled --search_space=3 --epochs=100 55 | exit $? 56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/second_order_script_cutout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-6 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts/cnn # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1_0_1 21 | 22 | # Job to perform 23 | if [ 1 -eq $SLURM_ARRAY_TASK_ID ]; then 24 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=0 --save=unrolled_cutout --unrolled --cutout --search_space=3 25 | exit $? 26 | fi 27 | 28 | 29 | if [ 2 -eq $SLURM_ARRAY_TASK_ID ]; then 30 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=unrolled_cutout --unrolled --cutout --search_space=3 31 | exit $? 32 | fi 33 | 34 | 35 | if [ 3 -eq $SLURM_ARRAY_TASK_ID ]; then 36 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=2 --save=unrolled_cutout --unrolled --cutout --search_space=3 37 | exit $? 38 | fi 39 | 40 | 41 | if [ 4 -eq $SLURM_ARRAY_TASK_ID ]; then 42 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=3 --save=unrolled_cutout --unrolled --cutout --search_space=3 43 | exit $? 44 | fi 45 | 46 | 47 | if [ 5 -eq $SLURM_ARRAY_TASK_ID ]; then 48 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=4 --save=unrolled_cutout --unrolled --cutout --search_space=3 49 | exit $? 50 | fi 51 | 52 | 53 | if [ 6 -eq $SLURM_ARRAY_TASK_ID ]; then 54 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=5 --save=unrolled_cutout --unrolled --cutout --search_space=3 55 | exit $? 
56 | fi 57 | # Print some Information about the end-time to STDOUT 58 | echo "DONE"; 59 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/pc_darts/cluster_scripts/search_space_3/weight_decay_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-5 # array size 7 | #SBATCH --nice=10 ## priority 8 | #SBATCH --gres=gpu:1 # reserves four GPUs 9 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 10 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 12 | #SBATCH -J DARTS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 13 | # #SBATCH --mail-type=END,FAIL # (recive mails about end and timeouts/crashes of your job) 14 | # Print some information about the job to STDOUT 15 | echo "Workingdir: $PWD"; 16 | echo "Started at $(date)"; 17 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 18 | 19 | # Activate conda environment 20 | source ~/.bashrc 21 | conda activate pytorch1.3 22 | 23 | gpu_counter=1 24 | 25 | for wd in "1e-4" "9e-4" "27e-4" "81e-4" 26 | do 27 | # Job to perform 28 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 29 | PYTHONPATH=$PWD python optimizers/pc_darts/train_search.py --seed=1 --save=weight_decay --weight_decay=${wd} --search_space=3 30 | exit $? 31 | fi 32 | 33 | let gpu_counter+=1 34 | done 35 | 36 | # Print some Information about the end-time to STDOUT 37 | echo "DONE"; 38 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/random_search_with_weight_sharing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/nasbench-1shot1/cb0f0ec61ce8b37f8e2d0f2728c3ef0b9ec81bf1/optimizers/random_search_with_weight_sharing/__init__.py -------------------------------------------------------------------------------- /optimizers/random_search_with_weight_sharing/cluster_scripts/search_space_1/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (8GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves four GPUs 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J RANDOMNAS_NASBENCH # sets the job name. 
If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (receive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/random_search_with_weight_sharing/random_weight_share.py --seed=${seed} --search_space=1 --epochs=100 29 | exit $? 30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/random_search_with_weight_sharing/cluster_scripts/search_space_2/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p gpu_tesla-P100 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (10GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves one GPU 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J RANDOMNAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (receive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/random_search_with_weight_sharing/random_weight_share.py --seed=${seed} --search_space=2 --epochs=100 29 | exit $?
30 | fi 31 | 32 | let gpu_counter+=1 33 | done 34 | # Print some Information about the end-time to STDOUT 35 | echo "DONE"; 36 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /optimizers/random_search_with_weight_sharing/cluster_scripts/search_space_3/baseline_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 # partition (queue) 3 | #SBATCH --mem 10000 # memory pool for all cores (10GB) 4 | #SBATCH -t 11-00:00 # time (D-HH:MM) 5 | #SBATCH -c 2 # number of cores 6 | #SBATCH -a 1-12 # array size 7 | #SBATCH --gres=gpu:1 # reserves one GPU 8 | #SBATCH -D /home/siemsj/projects/darts_weight_sharing_analysis # Change working_dir 9 | #SBATCH -o log/log_$USER_%Y-%m-%d.out # STDOUT (the folder log has to be created prior to running or this won't work) 10 | #SBATCH -e log/err_$USER_%Y-%m-%d.err # STDERR (the folder log has to be created prior to running or this won't work) 11 | #SBATCH -J RANDOMNAS_NASBENCH # sets the job name. If not specified, the file name will be used as job name 12 | # #SBATCH --mail-type=END,FAIL # (receive mails about end and timeouts/crashes of your job) 13 | # Print some information about the job to STDOUT 14 | echo "Workingdir: $PWD"; 15 | echo "Started at $(date)"; 16 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 17 | 18 | # Activate conda environment 19 | source ~/.bashrc 20 | conda activate pytorch1.3 21 | 22 | gpu_counter=1 23 | 24 | for seed in {1..12} 25 | do 26 | # Job to perform 27 | if [ $gpu_counter -eq $SLURM_ARRAY_TASK_ID ]; then 28 | PYTHONPATH=$PWD python optimizers/random_search_with_weight_sharing/random_weight_share.py --seed=${seed} --search_space=3 --epochs=100 29 | exit $?
--------------------------------------------------------------------------------
/optimizers/tpe/run_tpe.py:
--------------------------------------------------------------------------------
import os
import pickle
import argparse
import logging
import numpy as np
import ConfigSpace

logging.basicConfig(level=logging.INFO)

from copy import deepcopy
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from nasbench import api

from nasbench_analysis.search_spaces.search_space_1 import SearchSpace1
from nasbench_analysis.search_spaces.search_space_2 import SearchSpace2
from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3
from nasbench_analysis.utils import INPUT, OUTPUT, CONV1X1, CONV3X3, MAXPOOL3X3


parser = argparse.ArgumentParser()
parser.add_argument('--run_id', default=0, type=int, nargs='?',
                    help='unique number to identify this run')
parser.add_argument('--search_space', default=None, type=str, nargs='?',
                    help='specifies the benchmark')
parser.add_argument('--n_iters', default=280, type=int, nargs='?',
                    help='number of iterations for optimization method')
parser.add_argument('--output_path', default="./experiments", type=str, nargs='?',
                    help='specifies the path where the results will be saved')
parser.add_argument('--data_dir',
                    default="nasbench_analysis/nasbench_data/108_e/nasbench_only108.tfrecord",
                    type=str, nargs='?', help='specifies the path to the nasbench data')
parser.add_argument('--seed', default=0, type=int,
                    help='random seed')
parser.add_argument('--n_repetitions', default=500, type=int,
                    help='number of repetitions')
args = parser.parse_args()

nasbench = api.NASBench(args.data_dir)

output_path = os.path.join(args.output_path, "discrete_optimizers", 'TPE')
os.makedirs(output_path, exist_ok=True)

if args.search_space is None:
    spaces = [1, 2, 3]
else:
    spaces = [int(args.search_space)]


def objective_function(config):
    """Evaluate one architecture; hyperopt minimizes 1 - validation accuracy."""
    config_copy = deepcopy(config)
    c = ConfigSpace.Configuration(cs, values=config_copy)
    y, cost = search_space.objective_function(nasbench, c, budget=108)
    return {
        'config': config_copy,
        'loss': 1 - float(y),
        'cost': cost,
        'status': STATUS_OK
    }


for space in spaces:
    print('##### Search Space {} #####'.format(space))
    search_space = eval('SearchSpace{}()'.format(space))
    cs = search_space.get_configuration_space()

    # Translate the ConfigSpace categorical hyperparameters into a hyperopt space.
    hyperopt_space = {h.name: hp.choice(h.name, h.choices) for h in
                      cs.get_hyperparameters()}

    # for seed in range(args.n_repetitions):
    print('##### Seed {} #####'.format(args.seed))
    # Set random seed
    np.random.seed(args.seed)

    trials = Trials()
    best = fmin(objective_function,
                space=hyperopt_space,
                algo=tpe.suggest,
                max_evals=args.n_iters,
                trials=trials)

    with open(os.path.join(output_path,
                           'algo_{}_{}_ssp_{}_seed_{}.obj'.format('TPE',
                                                                  args.run_id,
                                                                  space,
                                                                  args.seed)), 'wb') as fh:
        pickle.dump(search_space.run_history, fh)

    # Report the best test regret found in this run.
    print(min([1 - arch.test_accuracy - search_space.test_min_error
               for arch in search_space.run_history]))
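run_tpe.py stores the evaluated architectures (search_space.run_history) in one pickle file per run, named algo_TPE_<run_id>_ssp_<space>_seed_<seed>.obj under experiments/discrete_optimizers/TPE. A minimal sketch of how such a file could be read back and summarized (the path below is a placeholder following that naming scheme, and the repository has to be on PYTHONPATH so that the pickled objects can be resolved):

import pickle

# Placeholder path following the naming scheme used by run_tpe.py.
path = 'experiments/discrete_optimizers/TPE/algo_TPE_0_ssp_3_seed_0.obj'

with open(path, 'rb') as fh:
    run_history = pickle.load(fh)  # list of evaluated architectures

# Each entry exposes a test_accuracy attribute (see optimizers/utils.py below),
# so the best test error found during the run is:
print('best test error:', min(1 - arch.test_accuracy for arch in run_history))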
--------------------------------------------------------------------------------
/optimizers/utils.py:
--------------------------------------------------------------------------------
from collections import namedtuple

# Imports required by Model.query_nasbench below.
import ConfigSpace

from nasbench import api

from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3
from nasbench_analysis.utils import INPUT, OUTPUT, CONV1X1

Architecture = namedtuple('Architecture', ['adjacency_matrix', 'node_list'])


class Model(object):
    """A class representing a model.

    It holds two attributes: `arch` (the simulated architecture) and `accuracy`
    (the simulated accuracy / fitness). See Appendix C for an introduction to
    this toy problem.

    In the real case of neural networks, `arch` would instead hold the
    architecture of the normal and reduction cells of a neural network and
    accuracy would instead be the result of training the neural net and
    evaluating it on the validation set.

    We do not include test accuracies here as they are not used by the algorithm
    in any way. In the case of real neural networks, the test accuracy is only
    used for the purpose of reporting / plotting final results.

    In the context of evolutionary algorithms, a model is often referred to as
    an "individual".

    Attributes: (as in the original code)
      arch: the architecture as an int representing a bit-string of length `DIM`.
        As a result, the integers are required to be less than `2**DIM`. They
        can be visualized as strings of 0s and 1s by calling `print(model)`,
        where `model` is an instance of this class.
      accuracy: the simulated validation accuracy. This is the sum of the
        bits in the bit-string, divided by DIM to produce a value in the
        interval [0.0, 1.0]. After that, a small amount of Gaussian noise is
        added with mean 0.0 and standard deviation `NOISE_STDEV`. The resulting
        number is clipped to within [0.0, 1.0] to produce the final validation
        accuracy of the model. A given model will have a fixed validation
        accuracy but two models that have the same architecture will generally
        have different validation accuracies due to this noise. In the context
        of evolutionary algorithms, this is often known as the "fitness".
    """

    def __init__(self):
        self.arch = None
        self.validation_accuracy = None
        self.test_accuracy = None
        self.training_time = None
        self.budget = None

    def update_data(self, arch, nasbench_data, budget):
        self.arch = arch
        self.validation_accuracy = nasbench_data['validation_accuracy']
        self.test_accuracy = nasbench_data['test_accuracy']
        self.training_time = nasbench_data['training_time']
        self.budget = budget

    def query_nasbench(self, nasbench, sample):
        # NOTE: relies on a `search_space` object being available in the calling
        # module's scope; it is not defined in this file.
        config = ConfigSpace.Configuration(
            search_space.get_configuration_space(), vector=sample
        )
        adjacency_matrix, node_list = search_space.convert_config_to_nasbench_format(config)
        if type(search_space) == SearchSpace3:
            node_list = [INPUT, *node_list, OUTPUT]
        else:
            node_list = [INPUT, *node_list, CONV1X1, OUTPUT]
        adjacency_list = adjacency_matrix.astype(int).tolist()
        model_spec = api.ModelSpec(matrix=adjacency_list, ops=node_list)

        nasbench_data = nasbench.query(model_spec)
        self.arch = Architecture(adjacency_matrix=adjacency_matrix,
                                 node_list=node_list)
        self.validation_accuracy = nasbench_data['validation_accuracy']
        self.test_accuracy = nasbench_data['test_accuracy']
        self.training_time = nasbench_data['training_time']
--------------------------------------------------------------------------------
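For reference, a minimal sketch of how the Model container above could be filled from a NAS-Bench-101 query. The cell (adjacency matrix and operation list) is a hypothetical example, the tfrecord path is the default used by the optimizer scripts, and api.ModelSpec / nasbench.query are the same calls already used in query_nasbench:

import numpy as np
from nasbench import api

from nasbench_analysis.utils import INPUT, OUTPUT, CONV1X1, CONV3X3
from optimizers.utils import Architecture, Model

# Default NAS-Bench-101 tfrecord path used elsewhere in this repository.
nasbench = api.NASBench('nasbench_analysis/nasbench_data/108_e/nasbench_only108.tfrecord')

# Hypothetical cell: the input feeds two parallel ops, both feeding the output.
adjacency_matrix = np.array([[0, 1, 1, 0],
                             [0, 0, 0, 1],
                             [0, 0, 0, 1],
                             [0, 0, 0, 0]])
node_list = [INPUT, CONV3X3, CONV1X1, OUTPUT]

data = nasbench.query(api.ModelSpec(matrix=adjacency_matrix.tolist(), ops=node_list))

model = Model()
model.update_data(arch=Architecture(adjacency_matrix=adjacency_matrix, node_list=node_list),
                  nasbench_data=data,
                  budget=108)
print(model.validation_accuracy, model.test_accuracy, model.training_time)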