├── docs ├── nojekyll ├── buildinfo ├── .nojekyll ├── _sources │ ├── example.rst.txt │ ├── license.rst.txt │ ├── citing.rst.txt │ ├── contributing.rst.txt │ ├── manual.rst.txt │ └── index.rst.txt ├── objects.inv ├── _static │ ├── fonts │ │ ├── Inconsolata.ttf │ │ ├── Lato-Bold.ttf │ │ ├── Lato-Regular.ttf │ │ ├── Inconsolata-Bold.ttf │ │ ├── Lato │ │ │ ├── lato-bold.eot │ │ │ ├── lato-bold.ttf │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-italic.eot │ │ │ ├── lato-italic.ttf │ │ │ ├── lato-italic.woff │ │ │ ├── lato-italic.woff2 │ │ │ ├── lato-regular.eot │ │ │ ├── lato-regular.ttf │ │ │ ├── lato-regular.woff │ │ │ ├── lato-bolditalic.eot │ │ │ ├── lato-bolditalic.ttf │ │ │ ├── lato-bolditalic.woff │ │ │ ├── lato-regular.woff2 │ │ │ └── lato-bolditalic.woff2 │ │ ├── RobotoSlab-Bold.ttf │ │ ├── RobotoSlab-Regular.ttf │ │ ├── Inconsolata-Regular.ttf │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.ttf │ │ ├── fontawesome-webfont.woff │ │ ├── fontawesome-webfont.woff2 │ │ └── RobotoSlab │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ └── roboto-slab-v7-regular.woff2 │ ├── css │ │ └── fonts │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal.woff │ │ │ ├── lato-normal.woff2 │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── lato-bold-italic.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ └── lato-normal-italic.woff2 │ ├── documentation_options.js │ └── js │ │ └── badge_only.js └── .buildinfo ├── naslib ├── __init__.py ├── optimizers │ ├── core │ │ └── __init__.py │ ├── discrete │ │ ├── __init__.py │ │ ├── bp │ │ │ └── __init__.py │ │ ├── ls │ │ │ └── __init__.py │ │ ├── re │ │ │ └── __init__.py │ │ ├── rs │ │ │ └── __init__.py │ │ ├── bananas │ │ │ └── __init__.py │ │ └── npenas │ │ │ └── __init__.py │ ├── oneshot │ │ ├── __init__.py │ │ ├── darts │ │ │ └── __init__.py │ │ ├── drnas │ │ │ └── __init__.py │ │ ├── gdas │ │ │ └── __init__.py │ │ ├── rs_ws │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ │ ├── gsparsity │ │ │ ├── __init__.py │ │ │ ├── gsparse.sh │ │ │ ├── darts_gsparse.sh │ │ │ ├── config.yaml │ │ │ ├── darts_config.yaml │ │ │ └── runner.py │ │ └── oneshot_train │ │ │ └── __init__.py │ └── __init__.py ├── search_spaces │ ├── core │ │ ├── __init__.py │ │ └── query_metrics.py │ ├── darts │ │ ├── __init__.py │ │ └── primitives.py │ ├── nasbench101 │ │ ├── __init__.py │ │ └── primitives.py │ ├── nasbench201 │ │ ├── __init__.py │ │ └── primitives.py │ ├── nasbenchasr │ │ ├── __init__.py │ │ └── conversions.py │ ├── nasbenchnlp │ │ └── __init__.py │ ├── transbench101 │ │ ├── __init__.py │ │ ├── loss.py │ │ └── primitives.py │ ├── __init__.py │ └── hierarchical │ │ └── primitives.py ├── predictors │ ├── gp │ │ ├── gpwl_utils │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── sparse_gp.py │ │ └── var_sparse_gp.py │ ├── lce │ │ ├── __init__.py │ │ └── lce.py │ ├── lce_m │ │ └── __init__.py │ ├── oneshot │ │ └── __init__.py │ ├── bnn │ │ ├── __init__.py │ │ ├── bayesian_linear_reg.py │ │ ├── bohamiann.py │ │ ├── dngo.py │ │ └── bnn_base.py │ ├── trees │ │ ├── __init__.py │ │ ├── 
random_forest.py │ │ └── lgb.py │ ├── utils │ │ ├── build_nets │ │ │ ├── shape_infers │ │ │ │ ├── shared_utils.py │ │ │ │ └── __init__.py │ │ │ ├── cell_infers │ │ │ │ ├── __init__.py │ │ │ │ └── tiny_network.py │ │ │ ├── cell_searchs │ │ │ │ ├── _test_module.py │ │ │ │ └── __init__.py │ │ │ ├── shape_searchs │ │ │ │ ├── __init__.py │ │ │ │ └── test.py │ │ │ ├── initialization.py │ │ │ └── SharedUtils.py │ │ ├── pruners │ │ │ ├── measures │ │ │ │ ├── model_stats.py │ │ │ │ ├── l2_norm.py │ │ │ │ ├── grad_norm.py │ │ │ │ ├── plain.py │ │ │ │ ├── jacov.py │ │ │ │ ├── __init__.py │ │ │ │ └── synflow.py │ │ │ ├── __init__.py │ │ │ └── weight_initializers.py │ │ ├── models │ │ │ └── __init__.py │ │ └── encodings_asr.py │ ├── __init__.py │ └── early_stopping.py ├── data │ ├── nb201_all.pickle │ ├── class_scene_selected.npy │ ├── class_object_selected.npy │ ├── permutations_hamming_max_1000.npy │ └── taskonomydata_mini │ │ └── download_tnb.sh ├── __version__.py ├── utils │ └── __init__.py ├── configs │ └── example_bbo_config.yaml ├── runners │ ├── nas_predictors │ │ ├── discrete_config.yaml │ │ ├── nas_predictor_config.yaml │ │ ├── oneshot_runner.py │ │ └── runner.py │ ├── statistics │ │ ├── statistics_config.yaml │ │ └── runner.py │ ├── nas │ │ ├── discrete_config.yaml │ │ └── runner.py │ ├── predictors │ │ └── predictor_config.yaml │ └── bbo │ │ ├── discrete_config.yaml │ │ └── runner.py └── defaults │ ├── nb201_defaults.yaml │ ├── additional_primitives.py │ ├── config_multi.yaml │ └── darts_defaults.yaml ├── tests ├── __init__.py └── assets │ ├── nb201_test_set_x.npy │ ├── nb201_test_set_y.npy │ ├── nb201_test_set_info.npy │ ├── nb201_test_set_times.npy │ ├── config.yaml │ └── test_predictor.yaml ├── .coveragerc ├── images ├── predictors.png ├── naslib-logo.png └── naslib-overall.png ├── .flake8 ├── setup.cfg ├── scripts ├── nas_predictors │ ├── submit.sh │ ├── submit-oneshot.sh │ ├── slurm_job-nb101.sh │ ├── slurm_job-nb301.sh │ ├── slurm_job-nb201-c100.sh │ ├── slurm_job-nb201-imagenet.sh │ ├── slurm_job-imgnet.sh │ ├── submit-all.sh │ ├── slurm_job-nb201-c10.sh │ ├── oneshot_eval.sh │ ├── run_nb201_npenas_2.sh │ ├── run_nb101_bo.sh │ ├── run_nb101_npenas.sh │ ├── run_darts_npenas.sh │ ├── run_nb201_bo_2.sh │ ├── run_nb201_npenas.sh │ ├── run_darts_bo.sh │ ├── run_nb201_bo.sh │ └── run_im_bo_arber.sh ├── darts │ └── gsparse.sh ├── nasbench201 │ └── gsparse.sh ├── bbo │ ├── scheduler_bosch.sh │ ├── scheduler.sh │ ├── submit_boschgpu_folder.sh │ ├── submit_bosch_folder.sh │ ├── submit_folder.sh │ ├── make_configs_nb101.sh │ ├── make_configs_nb201.sh │ ├── make_configs_nlp.sh │ ├── make_configs_mr.sh │ ├── make_configs_darts.sh │ ├── make_configs_asr.sh │ ├── make_configs_transnb101_macro.sh │ └── make_configs_transnb101_micro.sh ├── predictors │ ├── test.sh │ ├── run_nb201_2.sh │ ├── run_tnb_predictors.sh │ ├── run_nb101.sh │ ├── run_nlp.sh │ ├── run_darts.sh │ ├── run_nb201.sh │ └── run_hpo_test.sh ├── nas │ ├── run_nb311.sh │ ├── run_nbnlp.sh │ ├── run_nb201.sh │ ├── run_nb211.sh │ ├── run_nb201_cifar100.sh │ ├── run_nb201_imagenet16-200.sh │ └── run_nb111.sh └── statistics │ └── run.sh ├── .codecov.yml ├── .github ├── CODEOWNERS └── workflows │ ├── pre-commit.yml │ ├── unit-tests.yml │ ├── scheduled-test.yml │ └── codecov.yml ├── examples ├── run_darts.py └── demo.py ├── requirements.txt ├── .pre-commit-config.yaml ├── setup.py └── .gitignore /docs/nojekyll: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /docs/buildinfo: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /naslib/optimizers/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/darts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/bp/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/ls/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/re/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/rs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/darts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/drnas/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gdas/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/rs_ws/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/naslib/predictors/gp/gpwl_utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/bananas/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/discrete/npenas/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench101/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench201/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchasr/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchnlp/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/oneshot_train/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/_sources/example.rst.txt: -------------------------------------------------------------------------------- 1 | Example 2 | ======= 3 | 4 | 5 | -------------------------------------------------------------------------------- /naslib/predictors/lce/__init__.py: -------------------------------------------------------------------------------- 1 | from .lce import LCEPredictor 2 | -------------------------------------------------------------------------------- /naslib/predictors/lce_m/__init__.py: -------------------------------------------------------------------------------- 1 | from .lce_m import LCEMPredictor 2 | -------------------------------------------------------------------------------- /naslib/predictors/oneshot/__init__.py: -------------------------------------------------------------------------------- 1 | from .oneshot import OneShotPredictor 2 | -------------------------------------------------------------------------------- /docs/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/objects.inv -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = naslib 3 | omit = 4 | naslib/analysis/* 5 | ../venvs/* 6 | 
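7 | # Example invocation (an assumption for illustration; the actual CI invocation may differ): 8 | # coverage run -m unittest discover 9 | # coverage report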
-------------------------------------------------------------------------------- /images/predictors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/images/predictors.png -------------------------------------------------------------------------------- /images/naslib-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/images/naslib-logo.png -------------------------------------------------------------------------------- /images/naslib-overall.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/images/naslib-overall.png -------------------------------------------------------------------------------- /naslib/data/nb201_all.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/naslib/data/nb201_all.pickle -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /tests/assets/nb201_test_set_x.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/tests/assets/nb201_test_set_x.npy -------------------------------------------------------------------------------- /tests/assets/nb201_test_set_y.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/tests/assets/nb201_test_set_y.npy -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /naslib/data/class_scene_selected.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/naslib/data/class_scene_selected.npy -------------------------------------------------------------------------------- /tests/assets/nb201_test_set_info.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/tests/assets/nb201_test_set_info.npy -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- 
/docs/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /naslib/data/class_object_selected.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/naslib/data/class_object_selected.npy -------------------------------------------------------------------------------- /tests/assets/nb201_test_set_times.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/tests/assets/nb201_test_set_times.npy 
-------------------------------------------------------------------------------- /docs/_sources/license.rst.txt: -------------------------------------------------------------------------------- 1 | License 2 | ======= 3 | NASLib is licensed the same way as scikit-learn, namely the 3-clause BSD license. -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.eot: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = E203, E266, E501, W503, F403, F401 3 | max-line-length = 79 4 | max-complexity = 18 5 | select = B,C,E,F,W,T4,B9 6 | -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /naslib/data/permutations_hamming_max_1000.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/naslib/data/permutations_hamming_max_1000.npy -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /naslib/__version__.py: -------------------------------------------------------------------------------- 1 | """Version information.""" 2 | 3 | # The following line *must* be the last in the module, exactly as formatted: 4 | __version__ = "0.1.0" 5 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/__init__.py: -------------------------------------------------------------------------------- 1 | from .dngo import DNGOPredictor 2 | from .bohamiann import BOHAMIANN 3 | from .bayesian_linear_reg import BayesianLinearRegression 4 | -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/heixiaoma/NASLib/Develop/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/_sources/citing.rst.txt: -------------------------------------------------------------------------------- 1 | Citing NASLib 2 | ============= 3 | If you use this code in your own work, please cite :footcite:t:`naslib-2020` and :footcite:t:`white2021powerful`. 4 | 5 | ..
footbibliography:: 6 | -------------------------------------------------------------------------------- /naslib/predictors/trees/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_tree_class import BaseTree 2 | from .lgb import LGBoost 3 | from .ngb import NGBoost 4 | from .xgb import XGBoost 5 | from .random_forest import RandomForestPredictor 6 | -------------------------------------------------------------------------------- /tests/assets/config.yaml: -------------------------------------------------------------------------------- 1 | seed: 12 2 | out_dir: "tmp/test/hallo" 3 | search_space: darts 4 | optimizer: darts 5 | dataset: cifar10 6 | 7 | search: 8 | batch_size: 300 9 | 10 | evaluation: 11 | batch_size: 200 -------------------------------------------------------------------------------- /naslib/predictors/gp/__init__.py: -------------------------------------------------------------------------------- 1 | from .gp_base import BaseGPModel 2 | from .gp import GPPredictor 3 | from .sparse_gp import SparseGPPredictor 4 | from .var_sparse_gp import VarSparseGPPredictor 5 | from .gpwl import GPWLPredictor 6 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | 4 | [mypy] 5 | ignore_missing_imports = True 6 | follow_imports=skip 7 | disallow_untyped_decorators = True 8 | disallow_incomplete_defs = True 9 | disallow_untyped_defs = True -------------------------------------------------------------------------------- /docs/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
3 | config: 5d25f23202db3f00d71b7eeba9ec715c 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_infers/shared_utils.py: -------------------------------------------------------------------------------- 1 | def parse_channel_info(xstring): 2 | blocks = xstring.split(" ") 3 | blocks = [x.split("-") for x in blocks] 4 | blocks = [[int(_) for _ in x] for x in blocks] 5 | return blocks 6 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | predictors=(bananas mlp lgb gcn bonas xgb ngb rf dngo \ 4 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 5 | 6 | for predictor in ${predictors[@]} 7 | do 8 | sbatch -J ${predictor} slurm_job.sh $predictor 9 | done 10 | 11 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from .tiny_network import TinyNetwork 5 | from .nasnet_cifar import NASNetonCIFAR 6 | -------------------------------------------------------------------------------- /naslib/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import ( 2 | iter_flatten, 3 | set_seed, 4 | get_config_from_args, 5 | default_argument_parser, 6 | log_args, 7 | generate_kfold, 8 | cross_validation, 9 | ) 10 | from .logging import setup_logger 11 | from .get_dataset_api import get_dataset_api 12 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | # Allow coverage to decrease by 0.05%. 2 | coverage: 3 | range: 10..95 4 | round: nearest 5 | precision: 2 6 | status: 7 | project: 8 | default: 9 | threshold: 0.05% 10 | 11 | # Don't post a comment on pull requests. 12 | comment: false 13 | 14 | codecov: 15 | branch: Develop 16 | require_ci_to_pass: false 17 | 18 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | 2 | # These owners will be the default owners for everything in 3 | # the repo. Unless a later match takes precedence, they will be requested for 4 | # review when someone opens a pull request. 5 | 6 | * @arberzela @yashsmehta @crwhite14 7 | 8 | # You can have owners for particular type of files (e.g. .py) or for specific subdirectories or 9 | # for specific branches. 
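10 | 11 | # For example (illustrative patterns only, kept commented out so no rule is active): 12 | # *.py @yashsmehta 13 | # /docs/ @crwhite14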
-------------------------------------------------------------------------------- /scripts/darts/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | conda activate mlenv 9 | python runner.py 10 | 11 | end=`date +%s` 12 | runtime=$((end-start)) 13 | 14 | echo Runtime: $runtime 15 | -------------------------------------------------------------------------------- /scripts/nasbench201/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | conda activate mlenv 9 | python runner.py 10 | 11 | end=`date +%s` 12 | runtime=$((end-start)) 13 | 14 | echo Runtime: $runtime 15 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/model_stats.py: -------------------------------------------------------------------------------- 1 | import tensorwatch as tw 2 | 3 | 4 | def get_model_stats(model, 5 | input_tensor_shape, clone_model=True)->tw.ModelStats: 6 | # model stats is doing some hooks so do it last 7 | model_stats = tw.ModelStats(model, input_tensor_shape, 8 | clone_model=clone_model) 9 | return model_stats -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | python runner.py --config-file config.yaml 9 | 10 | end=`date +%s` 11 | runtime=$((end-start)) 12 | 13 | echo Runtime: $runtime 14 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/darts_gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | python runner.py --config-file darts_config.yaml 9 | 10 | end=`date +%s` 11 | runtime=$((end-start)) 12 | 13 | echo Runtime: $runtime 14 | -------------------------------------------------------------------------------- /docs/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0.0.1', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false 12 | }; -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_searchs/_test_module.py: 
-------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | import torch 5 | from search_model_enas_utils import Controller 6 | 7 | 8 | def main(): 9 | controller = Controller(6, 4) 10 | predictions = controller() 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | from .SearchCifarResNet_width import SearchWidthCifarResNet 5 | from .SearchCifarResNet_depth import SearchDepthCifarResNet 6 | from .SearchCifarResNet import SearchShapeCifarResNet 7 | from .SearchSimResNet_width import SearchWidthSimResNet 8 | from .SearchImagenetResNet import SearchShapeImagenetResNet 9 | -------------------------------------------------------------------------------- /naslib/search_spaces/core/query_metrics.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, auto 2 | 3 | 4 | class Metric(Enum): 5 | RAW = auto() 6 | ALL = auto() 7 | 8 | TRAIN_ACCURACY = auto() 9 | VAL_ACCURACY = auto() 10 | TEST_ACCURACY = auto() 11 | 12 | TRAIN_LOSS = auto() 13 | VAL_LOSS = auto() 14 | TEST_LOSS = auto() 15 | 16 | TRAIN_TIME = auto() 17 | VAL_TIME = auto() 18 | TEST_TIME = auto() 19 | 20 | FLOPS = auto() 21 | LATENCY = auto() 22 | PARAMETERS = auto() 23 | EPOCH = auto() 24 | HP = auto() 25 | -------------------------------------------------------------------------------- /tests/assets/test_predictor.yaml: -------------------------------------------------------------------------------- 1 | experiment_type: single 2 | search_space: nasbench201 3 | dataset: cifar10 4 | predictor: mlp 5 | uniform_random: 1 6 | test_size: 10 7 | train_size_single: 20 8 | train_size_list: [5, 8, 14, 24, 42, 71, 121, 205] 9 | fidelity_single: 5 10 | fidelity_list: [1, 2, 3, 5, 7, 9, 13, 19, 26, 37, 52, 73] 11 | out_dir: run 12 | max_hpo_time: 0 13 | seed: 1000 14 | search: 15 | seed: 1000 16 | batch_size: 256 17 | data_size: 25000 18 | cutout: False 19 | cutout_length: 16 20 | cutout_prob: 1.0 21 | train_portion: 0.7 -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from .InferCifarResNet_width import InferWidthCifarResNet 5 | from .InferImagenetResNet import InferImagenetResNet 6 | from .InferCifarResNet_depth import InferDepthCifarResNet 7 | from .InferCifarResNet import InferCifarResNet 8 | from .InferMobileNetV2 import InferMobileNetV2 9 | from .InferTinyCellNet import DynamicShapeTinyNet 10 | -------------------------------------------------------------------------------- /naslib/configs/example_bbo_config.yaml: -------------------------------------------------------------------------------- 1 | config_id: 0 2 | dataset: TIMIT 3 | optimizer: rs 4 | out_dir: 
run_cpu 5 | search: 6 | acq_fn_optimization: random_sampling 7 | acq_fn_type: its 8 | checkpoint_freq: 5000 9 | debug_predictor: false 10 | encoding_type: path 11 | epochs: 200 12 | fidelity: -1 13 | k: 10 14 | max_mutations: 1 15 | num_arches_to_mutate: 5 16 | num_candidates: 200 17 | num_ensemble: 3 18 | num_init: 10 19 | population_size: 50 20 | predictor: var_sparse_gp 21 | predictor_type: bananas 22 | sample_size: 10 23 | search_space: asr 24 | seed: 0 25 | -------------------------------------------------------------------------------- /scripts/bbo/scheduler_bosch.sh: -------------------------------------------------------------------------------- 1 | search_space=$1 2 | bosch_partition=$2 3 | dataset_dir="/home/mehtay/research/NASLib/naslib/configs/bbo/configs_cpu/$search_space/*" 4 | 5 | for optimizer_dir in $dataset_dir/* 6 | do 7 | # echo $config_dir 8 | for config_dir in $optimizer_dir/* 9 | do 10 | echo starting to run ${config_dir} across 10 seeds ... 11 | if [ $bosch_partition == 'gpu' ] 12 | then 13 | sbatch --bosch submit_boschgpu_folder.sh $config_dir 14 | fi 15 | 16 | if [ $bosch_partition == 'cpu' ] 17 | then 18 | sbatch --bosch submit_bosch_folder.sh $config_dir 19 | fi 20 | done 21 | done -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: pre-commit 2 | 3 | on: 4 | push: 5 | branches: [Develop, master] 6 | pull_request: 7 | branches: [Develop, master] 8 | 9 | jobs: 10 | run-all-files: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Setup Python 3.8 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | 19 | - name: Install pre-commit 20 | run: | 21 | pip install pre-commit 22 | pre-commit install 23 | - name: Run pre-commit 24 | run: | 25 | pre-commit run --all-files 26 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/initialization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | def initialize_resnet(m): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") 8 | if m.bias is not None: 9 | nn.init.constant_(m.bias, 0) 10 | elif isinstance(m, nn.BatchNorm2d): 11 | nn.init.constant_(m.weight, 1) 12 | if m.bias is not None: 13 | nn.init.constant_(m.bias, 0) 14 | elif isinstance(m, nn.Linear): 15 | nn.init.normal_(m.weight, 0, 0.01) 16 | nn.init.constant_(m.bias, 0) 17 | -------------------------------------------------------------------------------- /scripts/bbo/scheduler.sh: -------------------------------------------------------------------------------- 1 | search_space=$1 2 | 3 | dataset_dir="/home/mehtay/research/NASLib/naslib/configs/bbo/configs_cpu/$search_space/*" 4 | 5 | for optimizer_dir in $dataset_dir/* 6 | do 7 | # echo $config_dir 8 | for config_dir in $optimizer_dir/* 9 | do 10 | echo starting to run ${config_dir} across 10 seeds ... 11 | sbatch submit_folder.sh $config_dir 12 | 13 | done 14 | done 15 | 16 | # for running default config files separately 17 | # for optimizer_dir in $dataset_dir/* 18 | # do 19 | # echo starting to run $optimizer_dir/config_0 across 10 seeds ...
20 | # sbatch submit_folder.sh $optimizer_dir/config_0 21 | 22 | # done -------------------------------------------------------------------------------- /.github/workflows/unit-tests.yml: -------------------------------------------------------------------------------- 1 | name: Unit Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - Develop 8 | 9 | 10 | jobs: 11 | run-all-files: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Setup Python 3.8 16 | uses: actions/setup-python@v2 17 | with: 18 | python-version: 3.8 19 | 20 | - name: Install naslib 21 | run: | 22 | python -m pip install --upgrade pip setuptools wheel 23 | pip install -e .[test] 24 | 25 | - name: Run tests 26 | run: | 27 | cd tests/ 28 | python -m unittest discover -v 29 | -------------------------------------------------------------------------------- /naslib/search_spaces/__init__.py: -------------------------------------------------------------------------------- 1 | from .simple_cell.graph import SimpleCellSearchSpace 2 | from .darts.graph import DartsSearchSpace 3 | from .nasbench101.graph import NasBench101SearchSpace 4 | from .nasbench201.graph import NasBench201SearchSpace 5 | from .nasbenchnlp.graph import NasBenchNLPSearchSpace 6 | from .nasbenchasr.graph import NasBenchASRSearchSpace 7 | from .natsbenchsize.graph import NATSBenchSizeSearchSpace 8 | from .hierarchical.graph import HierarchicalSearchSpace 9 | from .transbench101.graph import TransBench101SearchSpaceMicro 10 | from .transbench101.graph import TransBench101SearchSpaceMacro 11 | 12 | from .transbench101.api import TransNASBenchAPI 13 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit-oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | optimizers="oneshot rsws" 4 | space="darts nasbench201" 5 | portion="25 5 75 9" 6 | epochs="25 50 100 150" 7 | 8 | for s in $space 9 | do 10 | for o in $optimizers 11 | do 12 | for p in $portion 13 | do 14 | for e in $epochs 15 | do 16 | sbatch -J ${s}\_${o}\_$p\_$e oneshot_eval.sh $s $o $p $e 17 | echo $s $o 0.$p\_$e 18 | done 19 | done 20 | done 21 | done 22 | #for s in $space 23 | #do 24 | #for o in $optimizers 25 | #do 26 | #for p in $portion 27 | #do 28 | #for e in $epochs 29 | #do 30 | #scancel -n ${s}\_${o}\_$p\_$e 31 | #done 32 | #done 33 | #done 34 | #done 35 | -------------------------------------------------------------------------------- /docs/_sources/contributing.rst.txt: -------------------------------------------------------------------------------- 1 | Contributions 2 | ============= 3 | 4 | We appreciate all contributions to NASLib, from bug reports and documentation to new features. If you want to contribute to the code, you can pick an issue from the issue tracker which is marked with "Needs contributor". 5 | 6 | .. note:: 7 | To avoid spending time on duplicate work or features that are unlikely to get merged, it is highly advised that you contact the developers by opening a GitHub issue before starting to work. 8 | 9 | 10 | When developing new features, please create a new branch from the Develop branch. When submitting a pull request, make sure that all tests are still passing.
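11 | 12 | For example, a typical workflow might look like the following sketch (``my-new-feature`` is a placeholder branch name; the install and test commands mirror the repository's CI workflow): 13 | 14 | .. code-block:: bash 15 | 16 | git checkout Develop 17 | git checkout -b my-new-feature 18 | pip install -e .[test] 19 | cd tests/ && python -m unittest discover -v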
-------------------------------------------------------------------------------- /naslib/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .oneshot.darts.optimizer import DARTSOptimizer 2 | from .oneshot.gsparsity.optimizer import GSparseOptimizer 3 | from .oneshot.oneshot_train.optimizer import OneShotNASOptimizer 4 | from .oneshot.rs_ws.optimizer import RandomNASOptimizer 5 | from .oneshot.gdas.optimizer import GDASOptimizer 6 | from .oneshot.drnas.optimizer import DrNASOptimizer 7 | from .discrete.rs.optimizer import RandomSearch 8 | from .discrete.re.optimizer import RegularizedEvolution 9 | from .discrete.ls.optimizer import LocalSearch 10 | from .discrete.bananas.optimizer import Bananas 11 | from .discrete.bp.optimizer import BasePredictor 12 | from .discrete.npenas.optimizer import Npenas 13 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | from torch.nn.modules.loss import _WeightedLoss 4 | 5 | class SoftmaxCrossEntropyWithLogits(_WeightedLoss): # cross-entropy that accepts soft (probability) targets 6 | def __init__(self, weight=None): 7 | super(SoftmaxCrossEntropyWithLogits, self).__init__(weight=None) 8 | self.weight = weight 9 | 10 | def forward(self, input, target): 11 | logits_scaled = torch.log(F.softmax(input, dim=-1) + 0.00001) # log-softmax with a small epsilon for numerical stability 12 | 13 | if self.weight is not None: 14 | loss = -((target * logits_scaled) * self.weight).sum(dim=-1) 15 | else: 16 | loss = -(target * logits_scaled).sum(dim=-1) 17 | return loss.mean() 18 | -------------------------------------------------------------------------------- /naslib/predictors/gp/sparse_gp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pyro 3 | import pyro.contrib.gp as gp 4 | import pyro.distributions as dist 5 | import numpy as np 6 | 7 | from naslib.predictors.gp import GPPredictor 8 | 9 | 10 | class SparseGPPredictor(GPPredictor): 11 | def get_model(self, train_data, **kwargs): 12 | X_train, y_train = train_data 13 | # initialize the kernel and model 14 | pyro.clear_param_store() 15 | kernel = self.kernel(input_dim=X_train.shape[1]) 16 | Xu = torch.arange(10.0) / 2.0 # 10 fixed, evenly spaced initial inducing points 17 | Xu.unsqueeze_(-1) 18 | Xu = Xu.expand(10, X_train.shape[1]).double() # tile the inducing points across every input dimension 19 | gpr = gp.models.SparseGPRegression( 20 | X_train, y_train, kernel, Xu=Xu, jitter=1.0e-5 21 | ) 22 | return gpr 23 | -------------------------------------------------------------------------------- /naslib/predictors/gp/var_sparse_gp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pyro 3 | import pyro.contrib.gp as gp 4 | 5 | from naslib.predictors.gp import GPPredictor 6 | 7 | 8 | class VarSparseGPPredictor(GPPredictor): 9 | def get_model(self, train_data, **kwargs): 10 | X_train, y_train = train_data 11 | # initialize the kernel and model 12 | pyro.clear_param_store() 13 | kernel = self.kernel(input_dim=X_train.shape[1]) 14 | Xu = torch.arange(10.0) / 2.0 # 10 fixed, evenly spaced initial inducing points 15 | Xu.unsqueeze_(-1) 16 | Xu = Xu.expand(10, X_train.shape[1]).double() # tile the inducing points across every input dimension 17 | likelihood = gp.likelihoods.Gaussian() 18 | gpr = gp.models.VariationalSparseGP( 19 | X_train, y_train, kernel, Xu=Xu, likelihood=likelihood, whiten=True 20 | ) 21 | return gpr 22 | --------------------------------------------------------------------------------
/naslib/predictors/utils/build_nets/shape_searchs/test.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | import torch 5 | import torch.nn as nn 6 | from SoftSelect import ChannelWiseInter 7 | 8 | 9 | if __name__ == "__main__": 10 | 11 | tensors = torch.rand((16, 128, 7, 7)) 12 | 13 | for oc in range(200, 210): 14 | out_v1 = ChannelWiseInter(tensors, oc, "v1") 15 | out_v2 = ChannelWiseInter(tensors, oc, "v2") 16 | assert (out_v1 == out_v2).all().item() == 1 # both interpolation variants must agree element-wise 17 | for oc in range(48, 160): 18 | out_v1 = ChannelWiseInter(tensors, oc, "v1") 19 | out_v2 = ChannelWiseInter(tensors, oc, "v2") 20 | assert (out_v1 == out_v2).all().item() == 1 21 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | seed: 0 2 | optimizer: bananas 3 | search_space: nasbench201 4 | dataset: cifar10 5 | out_dir: run 6 | 7 | search: 8 | checkpoint_freq: 1000 9 | epochs: 100 10 | fidelity: -1 11 | 12 | predictor_type: var_sparse_gp 13 | num_init: 10 14 | k: 10 15 | 16 | # BANANAS 17 | num_ensemble: 3 18 | acq_fn_type: its 19 | acq_fn_optimization: random_sampling 20 | encoding_type: adjacency_one_hot 21 | num_arches_to_mutate: 2 22 | max_mutations: 1 23 | num_candidates: 20 24 | 25 | # jacov data loader 26 | batch_size: 256 27 | data_size: 25000 28 | cutout: False 29 | cutout_length: 16 30 | cutout_prob: 1.0 31 | train_portion: 0.7 32 | 33 | # other params 34 | debug_predictor: False 35 | sample_size: 10 36 | population_size: 30 37 | -------------------------------------------------------------------------------- /naslib/runners/statistics/statistics_config.yaml: -------------------------------------------------------------------------------- 1 | 2 | # nasbench101, nasbench201, darts, nlp, transbench101 3 | search_space: nasbench201 4 | 5 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 6 | # transbench101 datasets: class_scene, class_object, 7 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 8 | dataset: cifar10 9 | 10 | # output results to this directory 11 | out_dir: run 12 | # random seed (only important for autocorrelation) 13 | seed: 1000 14 | 15 | # stats that can be computed by iterating through 16 | # all architectures in the search space 17 | run_acc_stats: 1 18 | max_set_size: 1000 19 | 20 | # compute the average nbhd size 21 | run_nbhd_size: 1 22 | max_nbhd_trials: 500 23 | 24 | # autocorrelation parameters 25 | run_autocorr: 1 26 | max_autocorr_trials: 10 27 | autocorr_size: 36 28 | walks: 500 29 | -------------------------------------------------------------------------------- /.github/workflows/scheduled-test.yml: -------------------------------------------------------------------------------- 1 | name: Scheduled Tests 2 | 3 | on: 4 | schedule: 5 | # Every Monday at 7AM UTC 6 | - cron: '0 07 * * 1' 7 | 8 | 9 | jobs: 10 | ubuntu: 11 | 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: [3.8] 16 | fail-fast: false 17 | max-parallel: 2 18 | 19 | steps: 20 | - uses: actions/checkout@v2 21 | with: 22 | ref: Develop 23 | - name: Setup Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | - name: Install naslib 28 | run:
| 29 | python -m pip install --upgrade pip setuptools wheel 30 | pip install -e .[test] 31 | - name: Run tests 32 | run: | 33 | cd tests/ 34 | python -m unittest discover -v 35 | -------------------------------------------------------------------------------- /examples/run_darts.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from naslib.defaults.trainer import Trainer 3 | from naslib.optimizers import DARTSOptimizer, GDASOptimizer, RandomSearch 4 | from naslib.search_spaces import DartsSearchSpace, SimpleCellSearchSpace 5 | 6 | from naslib.utils import set_seed, setup_logger, get_config_from_args 7 | 8 | config = get_config_from_args() # use --help to see the options 9 | set_seed(config.seed) 10 | 11 | logger = setup_logger(config.save + "/log.log") 12 | logger.setLevel(logging.INFO) # default DEBUG is very verbose 13 | 14 | search_space = DartsSearchSpace() # use SimpleCellSearchSpace() for a less heavy search 15 | 16 | optimizer = DARTSOptimizer(config) 17 | optimizer.adapt_search_space(search_space) 18 | 19 | trainer = Trainer(optimizer, config) 20 | trainer.search() # Search for an architecture 21 | trainer.evaluate() # Evaluate the best architecture 22 | -------------------------------------------------------------------------------- /naslib/predictors/__init__.py: -------------------------------------------------------------------------------- 1 | from .predictor import Predictor 2 | from .bonas import BonasPredictor 3 | from .bnn import BayesianLinearRegression, BOHAMIANN, DNGOPredictor 4 | from .early_stopping import EarlyStopping 5 | from .ensemble import Ensemble 6 | from .gcn import GCNPredictor 7 | from .gp import GPPredictor, SparseGPPredictor, VarSparseGPPredictor, GPWLPredictor 8 | from .lce import LCEPredictor 9 | from .lce_m import LCEMPredictor 10 | from .lcsvr import SVR_Estimator 11 | from .mlp import MLPPredictor 12 | from .oneshot import OneShotPredictor 13 | from .seminas import SemiNASPredictor 14 | from .soloss import SoLosspredictor 15 | from .trees import LGBoost, NGBoost, RandomForestPredictor, XGBoost 16 | from .zerocost_v1 import ZeroCostV1 17 | from .zerocost_v2 import ZeroCostV2 18 | from .omni_ngb import OmniNGBPredictor 19 | from .omni_seminas import OmniSemiNASPredictor 20 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: CodeCov 2 | 3 | on: 4 | push: 5 | branches: [Develop, master] 6 | pull_request: 7 | branches: [Develop, master] 8 | 9 | jobs: 10 | run: 11 | runs-on: ubuntu-latest 12 | env: 13 | OS: ubuntu-latest 14 | PYTHON: '3.8' 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Setup Python 3.8 19 | uses: actions/setup-python@master 20 | with: 21 | python-version: 3.8 22 | 23 | - name: 'generate report' 24 | run: | 25 | python -m pip install --upgrade pip setuptools wheel 26 | pip install -e .[test] 27 | pip install coverage 28 | cd tests/ 29 | coverage run -m unittest 30 | 31 | - name: Upload coverage to Codecov 32 | uses: codecov/codecov-action@v1 33 | with: 34 | flags: unittests 35 | fail_ci_if_error: true 36 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/bayesian_linear_reg.py: -------------------------------------------------------------------------------- 1 | # This is an implementation of Bayesian Linear Regression 2 | 3 | from pybnn.bayesian_linear_regression
import BayesianLinearRegression as BLR 4 | from pybnn.bayesian_linear_regression import linear_basis_func, quadratic_basis_func 5 | 6 | from naslib.predictors.bnn.bnn_base import BNN 7 | 8 | 9 | class BayesianLinearRegression(BNN): 10 | def get_model(self, **kwargs): 11 | predictor = BLR( 12 | alpha=1.0, 13 | beta=100, 14 | basis_func=linear_basis_func, 15 | prior=None, 16 | do_mcmc=False, # turn this off for better sample efficiency 17 | n_hypers=20, 18 | chain_length=100, 19 | burnin_steps=100, 20 | ) 21 | return predictor 22 | 23 | def train_model(self, xtrain, ytrain): 24 | self.model.train(xtrain, ytrain, do_optimize=True) 25 | -------------------------------------------------------------------------------- /naslib/defaults/nb201_defaults.yaml: -------------------------------------------------------------------------------- 1 | seed: 99 2 | optimizer: re 3 | dataset: cifar10 4 | out_dir: run 5 | 6 | search: 7 | checkpoint_freq: 5 8 | epochs: 150 9 | 10 | # GDAS 11 | tau_max: 10 12 | tau_min: 0.1 13 | 14 | # RE 15 | sample_size: 10 16 | population_size: 30 17 | 18 | # LS 19 | num_init: 10 20 | 21 | # BANANAS 22 | k: 10 23 | num_ensemble: 3 24 | acq_fn_type: its 25 | acq_fn_optimization: mutation 26 | encoding_type: path 27 | num_arches_to_mutate: 2 28 | max_mutations: 1 29 | num_candidates: 100 30 | 31 | # GSparsity 32 | seed: 50 33 | grad_clip: 0 34 | threshold: 0.000001 35 | weight_decay: 60 36 | learning_rate: 0.001 37 | momentum: 0.8 38 | normalization: div 39 | normalization_exponent: 0.5 40 | batch_size: 64 41 | learning_rate_min: 0.0001 42 | epochs: 100 43 | warm_start_epochs: 0 44 | train_portion: 1.0 45 | data_size: 25000 46 | 47 | -------------------------------------------------------------------------------- /docs/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ConfigSpace==0.4.19 2 | cython==0.29.23 3 | hyperopt==0.2.5 4 | pyyaml==5.4.1 5 | networkx==2.7.1 6 | numpy==1.21.5; python_version <= '3.7' 7 | numpy>=1.22.0; python_version > '3.7' 8 | torch==1.9.0 9 | torchvision==0.10.0 10 | fvcore==0.1.5.post20210630 11 | matplotlib==3.5.1 #3.3.4 #latest version not supported for python 3.6 12 | pandas==1.3.5 #1.1.5 #latest version not supported for python 3.6 13 | pytest==6.2.4 14 | pytest-cov==2.12.1 15 | codecov==2.1.11 16 | coverage==5.5 17 | lightgbm==3.2.1 18 | ngboost==0.3.11 19 | xgboost==1.4.2 20 | 
emcee==3.1.0 21 | pybnn==0.0.5 22 | grakel==0.1.8 23 | pyro-ppl==1.6.0 24 | pre-commit==2.13.0 25 | black==21.7b0 26 | 27 | # additional from setup.py prev 28 | cycler==0.10.0 #matplotlib plots 29 | kiwisolver==1.3.1 30 | iopath==0.1.9 31 | tabulate==0.8.9 32 | tqdm==4.61.1 33 | yacs==0.1.8 34 | scikit-learn==1.0.2 35 | scikit-image==0.19.2 36 | pytorch-msssim==0.2.1 37 | tensorwatch==0.9.1 38 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/config.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar100 2 | seed: 1 3 | search_space: nasbench201 4 | out_dir: /work/dlclarge2/agnihotr-ml/NASLib/naslib/optimizers/oneshot/gsparsity/run 5 | optimizer: gsparsity 6 | 7 | search: 8 | 9 | 10 | #GSparsity 11 | grad_clip: 0 12 | threshold: 0.000001 13 | weight_decay: 120 14 | learning_rate: 0.01 15 | momentum: 0.8 16 | normalization: div 17 | normalization_exponent: 0.5 18 | batch_size: 128 19 | learning_rate_min: 0.0001 20 | epochs: 100 21 | warm_start_epochs: 0 22 | train_portion: 0.95 23 | data_size: 50000 24 | 25 | evaluation: 26 | checkpoint_freq: 30 27 | batch_size: 96 28 | learning_rate: 0.025 29 | learning_rate_min: 0.00 30 | momentum: 0.9 31 | weight_decay: 0.0003 32 | epochs: 600 33 | warm_start_epochs: 0 34 | grad_clip: 5 35 | train_portion: 1. 36 | data_size: 50000 37 | 38 | cutout: True 39 | cutout_length: 16 40 | cutout_prob: 1.0 41 | drop_path_prob: 0.2 42 | auxiliary_weight: 0.4 43 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/darts_config.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar100 2 | seed: 20 3 | search_space: darts 4 | out_dir: /work/dlclarge2/agnihotr-ml/NASLib/naslib/optimizers/oneshot/gsparsity/run 5 | optimizer: gsparsity 6 | 7 | search: 8 | 9 | 10 | #GSparsity 11 | grad_clip: 0 12 | threshold: 0.000001 13 | weight_decay: 60 14 | learning_rate: 0.01 15 | momentum: 0.8 16 | normalization: div 17 | normalization_exponent: 0.5 18 | batch_size: 32 19 | learning_rate_min: 0.0001 20 | epochs: 50 21 | warm_start_epochs: 0 22 | train_portion: 0.9 23 | data_size: 45000 24 | 25 | evaluation: 26 | checkpoint_freq: 30 27 | batch_size: 96 28 | learning_rate: 0.025 29 | learning_rate_min: 0.00 30 | momentum: 0.9 31 | weight_decay: 0.0001 32 | epochs: 600 33 | warm_start_epochs: 0 34 | grad_clip: 5 35 | train_portion: 1. 36 | data_size: 50000 37 | 38 | cutout: True 39 | cutout_length: 16 40 | cutout_prob: 1.0 41 | drop_path_prob: 0.2 42 | auxiliary_weight: 0.4 43 | -------------------------------------------------------------------------------- /naslib/predictors/utils/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================= 15 | 16 | from os.path import dirname, basename, isfile, join 17 | import glob 18 | 19 | modules = glob.glob(join(dirname(__file__), "*.py")) 20 | __all__ = [ 21 | basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") 22 | ] 23 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | from os.path import dirname, basename, isfile, join 17 | import glob 18 | 19 | modules = glob.glob(join(dirname(__file__), "*.py")) 20 | __all__ = [ 21 | basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") 22 | ] 23 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/mirrors-mypy 3 | rev: v0.761 4 | hooks: 5 | - id: mypy 6 | args: [--show-error-codes, 7 | --warn-redundant-casts, 8 | --warn-return-any, 9 | --warn-unreachable, 10 | ] 11 | files: naslib/.* 12 | exclude: naslib/(examples|docs)/.* 13 | 14 | 15 | 16 | - repo: https://gitlab.com/pycqa/flake8 17 | rev: 3.8.3 18 | hooks: 19 | - id: flake8 20 | additional_dependencies: 21 | - flake8-print==3.1.4 22 | - flake8-import-order 23 | name: flake8 naslib 24 | files: naslib/.* 25 | exclude: naslib/(examples|docs|predictors)/.* 26 | 27 | 28 | 29 | - id: flake8 30 | additional_dependencies: 31 | - flake8-print==3.1.4 32 | - flake8-import-order 33 | name: flake8 test 34 | files: tests/.* -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-101/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-101/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 
7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo101_feb03_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | 21 | end=`date +%s` 22 | runtime=$((end-start)) 23 | 24 | echo Runtime: $runtime 25 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 #bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-301/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-301/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo301_feb9_0_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /naslib/defaults/additional_primitives.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from naslib.search_spaces.core.primitives import AbstractPrimitive, Identity 4 | 5 | 6 | class DropPathWrapper(AbstractPrimitive): 7 | """ 8 | A wrapper for the drop path training regularization. 9 | """ 10 | 11 | def __init__(self, op): 12 | super().__init__(locals()) 13 | self.op = op 14 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 15 | 16 | def forward(self, x, edge_data): 17 | x = self.op(x, edge_data) 18 | if ( 19 | edge_data.drop_path_prob > 0.0 20 | and not isinstance(self.op, Identity) 21 | and self.training 22 | ): 23 | keep_prob = 1.0 - edge_data.drop_path_prob 24 | mask = torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob) 25 | mask = mask.to(self.device) 26 | x.div_(keep_prob) 27 | x.mul_(mask) 28 | return x 29 | 30 | def get_embedded_ops(self): 31 | return self.op 32 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/l2_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | from . import measure 17 | from ..p_utils import get_layer_metric_array 18 | 19 | 20 | @measure("l2_norm", copy_net=False, mode="param") 21 | def get_l2_norm_array(net, inputs, targets, mode, split_data=1): 22 | return get_layer_metric_array(net, lambda l: l.weight.norm(), mode=mode) 23 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-c100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-c100/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-c100/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_c100_feb03_0/cifar100/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /scripts/bbo/submit_boschgpu_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 #bosch_cpu-cascadelake # partition (queue) 3 | #SBATCH -t 0-07:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 
7 | #SBATCH --mem=10G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | # python -u runner.py --config-file $1 16 | 17 | for config_file_seed in $1/* 18 | do 19 | echo submitted ${config_file_seed} 20 | python -u runner.py --config-file $config_file_seed 21 | done 22 | 23 | # echo $COMMAND; 24 | # eval $COMMAND; 25 | 26 | echo "DONE"; 27 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-imagenet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-imagenet/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-imagenet/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_feb22_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/SharedUtils.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def additive_func(A, B): 9 | assert A.dim() == B.dim() and A.size(0) == B.size(0), "{:} vs {:}".format( 10 | A.size(), B.size() 11 | ) 12 | C = min(A.size(1), B.size(1)) 13 | if A.size(1) == B.size(1): 14 | return A + B 15 | elif A.size(1) < B.size(1): 16 | out = B.clone() 17 | out[:, :C] += A 18 | return out 19 | else: 20 | out = A.clone() 21 | out[:, :C] += B 22 | return out 23 | 24 | 25 | def change_key(key, value): 26 | def func(m): 27 | if hasattr(m, key): 28 | setattr(m, key, value) 29 | 30 | return func 31 | 32 | 33 | def parse_channel_info(xstring): 34 | blocks = xstring.split(" ") 35 | blocks = [x.split("-") for x in blocks] 36 | blocks = [[int(_) for _ in x] for x in blocks] 37 | return blocks 38 | -------------------------------------------------------------------------------- /scripts/bbo/submit_bosch_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_cpu-cascadelake #bosch_gpu-rtx2080 #bosch_cpu-cascadelake # partition (queue) 3 | #SBATCH -t 0-07:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to 
exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 7 | #SBATCH --mem=7G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | # python -u runner.py --config-file $1 16 | 17 | for config_file_seed in $1/* 18 | do 19 | echo submitted ${config_file_seed} 20 | python -u runner.py --config-file $config_file_seed 21 | done 22 | 23 | # echo $COMMAND; 24 | # eval $COMMAND; 25 | 26 | echo "DONE"; 27 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | # The macro structure is defined in NAS-Bench-201 5 | from .search_model_darts import TinyNetworkDarts 6 | from .search_model_gdas import TinyNetworkGDAS 7 | from .search_model_setn import TinyNetworkSETN 8 | from .search_model_enas import TinyNetworkENAS 9 | from .search_model_random import TinyNetworkRANDOM 10 | from .genotypes import Structure as CellStructure, architectures as CellArchitectures 11 | 12 | # NASNet-based macro structure 13 | from .search_model_gdas_nasnet import NASNetworkGDAS 14 | from .search_model_darts_nasnet import NASNetworkDARTS 15 | 16 | 17 | nas201_super_nets = { 18 | "DARTS-V1": TinyNetworkDarts, 19 | "DARTS-V2": TinyNetworkDarts, 20 | "GDAS": TinyNetworkGDAS, 21 | "SETN": TinyNetworkSETN, 22 | "ENAS": TinyNetworkENAS, 23 | "RANDOM": TinyNetworkRANDOM, 24 | } 25 | 26 | nasnet_super_nets = {"GDAS": NASNetworkGDAS, "DARTS": NASNetworkDARTS} 27 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-imgnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080,alldlc_gpu-rtx2080,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-imagenet/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-imagenet/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 
7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_im_feb4_2_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /naslib/defaults/config_multi.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar10 2 | seed: 10 3 | num_classes: 10 4 | 5 | search: 6 | checkpoint_freq: 10 7 | batch_size: 256 8 | learning_rate: 0.025 9 | learning_rate_min: 0.001 10 | momentum: 0.9 11 | weight_decay: 0.0003 12 | epochs: 1 13 | warm_start_epochs: 0 14 | grad_clip: 5 15 | train_portion: 0.5 16 | data_size: 25000 17 | 18 | cutout: False 19 | cutout_length: 16 20 | cutout_prob: 1.0 21 | drop_path_prob: 0.0 22 | 23 | unrolled: False 24 | arch_learning_rate: 0.0003 25 | arch_weight_decay: 0.001 26 | output_weights: True 27 | 28 | # GDAS 29 | tau_max: 10 30 | tau_min: 0.1 31 | 32 | # RE 33 | sample_size: 10 34 | population_size: 100 35 | 36 | evaluation: 37 | checkpoint_freq: 1 38 | batch_size: 96 39 | learning_rate: 0.025 40 | learning_rate_min: 0.00 41 | momentum: 0.9 42 | weight_decay: 0.0003 43 | epochs: 5 44 | warm_start_epochs: 0 45 | grad_clip: 5 46 | train_portion: 1 47 | data_size: 50000 48 | 49 | cutout: True 50 | cutout_length: 16 51 | cutout_prob: 1.0 52 | drop_path_prob: 0.2 53 | auxiliary_weight: 0.4 54 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/bohamiann.py: -------------------------------------------------------------------------------- 1 | # This is an implementation of the BOHAMIANN predictor from the paper: 2 | # Springenberg et al., 2016. Bayesian Optimization with Robust Bayesian Neural 3 | # Networks 4 | 5 | import torch.nn as nn 6 | from pybnn.bohamiann import Bohamiann, nll, get_default_network 7 | 8 | from naslib.predictors.bnn.bnn_base import BNN 9 | 10 | 11 | class BOHAMIANN(BNN): 12 | def get_model(self, **kwargs): 13 | predictor = Bohamiann( 14 | get_network=get_default_network, 15 | sampling_method="adaptive_sghmc", 16 | use_double_precision=True, 17 | metrics=(nn.MSELoss,), 18 | likelihood_function=nll, 19 | print_every_n_steps=10, 20 | normalize_input=False, 21 | normalize_output=True, 22 | ) 23 | return predictor 24 | 25 | def train_model(self, xtrain, ytrain): 26 | self.model.train( 27 | xtrain, 28 | ytrain, 29 | num_steps=self.num_steps, 30 | num_burn_in_steps=10, 31 | keep_every=5, 32 | lr=1e-2, 33 | verbose=True, 34 | ) 35 | -------------------------------------------------------------------------------- /docs/_sources/manual.rst.txt: -------------------------------------------------------------------------------- 1 | Manual 2 | ====== 3 | 4 | Requirements 5 | ------------ 6 | NASLib has the following requirements: 7 | 8 | * Linux operating system (for example Ubuntu, Mac OS X). 9 | * Python (>=3.7). 10 | * PyTorch. 11 | 12 | 13 | Setting up a virtual environment 14 | -------------------------------- 15 | We recommend setting up a virtual environment: 16 | 17 | .. 
code-block:: console 18 | 19 | python3 -m venv naslib 20 | source naslib/bin/activate 21 | 22 | .. note:: 23 | Make sure you use the latest version of pip: 24 | 25 | .. code-block:: console 26 | 27 | pip install --upgrade pip setuptools wheel 28 | pip install cython 29 | 30 | Setting up NASLib 31 | ----------------- 32 | Clone and install. 33 | If you plan to modify NASLib, consider adding the ``-e`` option to ``pip install``. 34 | 35 | .. code-block:: console 36 | 37 | git clone ... 38 | cd naslib 39 | pip install . 40 | 41 | To validate the installation, you can run the tests: 42 | 43 | .. code-block:: console 44 | 45 | cd tests 46 | coverage run -m unittest discover 47 | 48 | The test coverage can be seen with ``coverage report``. 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #nb101 4 | #predictors101=(bananas mlp lgb gcn xgb ngb rf dngo \ 5 | #bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 6 | 7 | #nb201 8 | predictors201=(bananas feedforward gbdt gcn bonas xgb ngb rf dngo \ 9 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 10 | 11 | #nb301 12 | #predictors301=(bananas mlp lgb bonas xgb ngb rf dngo \ 13 | # bohamiann bayes_lin_reg gp sparse_gp var_sparse_gp nao) 14 | 15 | #for predictor in ${predictors101[@]} 16 | #do 17 | #sbatch -J 101-${predictor} slurm_job-nb101.sh $predictor 18 | #done 19 | 20 | #for predictor in ${predictors201[@]} 21 | #do 22 | #sbatch -J 201-${predictor} slurm_job-nb201-c10.sh $predictor 23 | #sbatch -J c100-201-${predictor} slurm_job-nb201-c100.sh $predictor 24 | #sbatch -J imnet-201-${predictor} slurm_job-nb201-imagenet.sh $predictor 25 | #sbatch -J imnet-201-${predictor} slurm_job-imgnet.sh $predictor 26 | #done 27 | 28 | for predictor in ${predictors201[@]} 29 | do 30 | sbatch -J c10-${predictor} slurm_job-nb201-c10.sh $predictor 31 | #sbatch -J im-${predictor} slurm_job-nb201-imagenet.sh $predictor 32 | done 33 | 34 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/dngo.py: -------------------------------------------------------------------------------- 1 | # This is an implementation of the DNGO predictor from the paper: 2 | # Snoek et al., 2015.
Scalable Bayesian Optimization using DNNs 3 | 4 | from pybnn.dngo import DNGO 5 | 6 | from naslib.predictors.bnn.bnn_base import BNN 7 | 8 | 9 | class DNGOPredictor(BNN): 10 | def get_model(self, **kwargs): 11 | predictor = DNGO( 12 | batch_size=10, 13 | num_epochs=500, 14 | learning_rate=0.01, 15 | adapt_epoch=5000, 16 | n_units_1=50, 17 | n_units_2=50, 18 | n_units_3=50, 19 | alpha=1.0, 20 | beta=1000, 21 | prior=None, 22 | do_mcmc=True, # turn this off for better sample efficiency 23 | n_hypers=20, 24 | chain_length=2000, 25 | burnin_steps=2000, 26 | normalize_input=False, 27 | normalize_output=True, 28 | ) 29 | return predictor 30 | 31 | def train_model(self, xtrain, ytrain): 32 | try: 33 | self.model.train(xtrain, ytrain, do_optimize=True) 34 | except ValueError: 35 | self.model.train(xtrain, ytrain, do_optimize=False) 36 | -------------------------------------------------------------------------------- /scripts/bbo/submit_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #bosch_gpu-rtx2080 #bosch_cpu-cascadelake #bosch_gpu-rtx2080 #mldlc_gpu-rtx2080 #alldlc_gpu-rtx2080 #gpu_tesla-P100 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -t 0-02:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 7 | #SBATCH --mem=10G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | # python -u runner.py --config-file $1 16 | 17 | for config_file_seed in $1/* 18 | do 19 | echo submitted ${config_file_seed} 20 | python -u runner.py --config-file $config_file_seed 21 | done 22 | 23 | # echo $COMMAND; 24 | # eval $COMMAND; 25 | 26 | echo "DONE"; 27 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchasr/conversions.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Sequence 2 | 3 | 4 | # utils to work with nested collections 5 | def recursive_iter(seq): 6 | ''' Iterate over elements in seq recursively (returns only non-sequences) 7 | ''' 8 | if isinstance(seq, Sequence): 9 | for e in seq: 10 | for v in recursive_iter(e): 11 | yield v 12 | else: 13 | yield seq 14 | 15 | 16 | def flatten(seq): 17 | ''' Flatten all nested sequences, returned type is type of ``seq`` 18 | ''' 19 | return list(recursive_iter(seq)) 20 | 21 | 22 | def copy_structure(data, shape): 23 | ''' Put data from ``data`` into nested containers like in ``shape``. 
24 | This can be seen as "unflatten" operation, i.e.: 25 | seq == copy_structure(flatten(seq), seq) 26 | ''' 27 | d_it = recursive_iter(data) 28 | 29 | def copy_level(s): 30 | if isinstance(s, Sequence): 31 | return type(s)(copy_level(ss) for ss in s) 32 | else: 33 | return next(d_it) 34 | return copy_level(shape) 35 | 36 | 37 | def make_compact_immutable(compact): 38 | return tuple([tuple(c) for c in compact]) 39 | 40 | 41 | def make_compact_mutable(compact): 42 | return [list(c) for c in compact] -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-c10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-teslaP100 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-c10/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-c10/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 17-23 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_feb22_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | #python runner.py --config-file bo201_c100_feb01_0/cifar100/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 19 | #python runner.py --config-file bo201_imagenet_feb01_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 20 | 21 | 22 | end=`date +%s` 23 | runtime=$((end-start)) 24 | 25 | echo Runtime: $runtime 26 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nasbench101 27 | dataset=cifar10 28 | 29 | fidelity=-1 30 | epochs=200 31 | predictor=var_sparse_gp 32 | 33 | # trials / seeds: 34 | trials=10 35 | end_seed=$(($start_seed + $trials - 1)) 36 | 37 | # create config files 38 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 39 | do 40 | dataset=${dataset[$i]} 41 | echo $dataset 42 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 43 | do 44 | optimizer=${optimizers[$i]} 45 | python create_configs.py \ 46 | --start_seed $start_seed --trials $trials \ 47 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 48 | --search_space $search_space --optimizer $optimizer \ 49 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 50 | --fidelity $fidelity --epochs $epochs 51 | done 52 | done 53 | 54 | 55 | echo 'configs are ready, check config folder ...' 
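A hedged usage sketch for the nested-collection helpers in naslib/search_spaces/nasbenchasr/conversions.py above (the values are illustrative):

    from naslib.search_spaces.nasbenchasr.conversions import (
        copy_structure, flatten, make_compact_mutable,
    )

    nested = [[1, 2], [3, [4, 5]]]
    flat = flatten(nested)                  # [1, 2, 3, 4, 5]
    rebuilt = copy_structure(flat, nested)  # restores the original nesting
    assert rebuilt == nested                # the round-trip identity from the docstring

    compact = ((0, 1), (2, 3))
    assert make_compact_mutable(compact) == [[0, 1], [2, 3]]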
56 | -------------------------------------------------------------------------------- /scripts/nas_predictors/oneshot_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_oneshot_eval/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_oneshot_eval/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 1 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python oneshot_runner.py --config-file nas_predictor_config.yaml \ 18 | --model-path "run_epochs_size/${3}_${4}/cifar10/nas_predictors/${1}/${2}/$SLURM_ARRAY_TASK_ID/search/model_final.pth" \ 19 | search_space $1 optimizer $2 search.predictor_type $2 \ 20 | seed $SLURM_ARRAY_TASK_ID search.seed $SLURM_ARRAY_TASK_ID \ 21 | search.train_portion 0.$3 search.epochs $4 \ 22 | out_dir run_epochs_size-eval/$3\_$4 23 | 24 | 25 | end=`date +%s` 26 | runtime=$((end-start)) 27 | 28 | echo Runtime: $runtime 29 | -------------------------------------------------------------------------------- /naslib/predictors/early_stopping.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from naslib.predictors.predictor import Predictor 4 | from naslib.search_spaces.core.query_metrics import Metric 5 | 6 | 7 | class EarlyStopping(Predictor): 8 | def __init__(self, metric): 9 | 10 | self.metric = metric 11 | 12 | def query(self, xtest, info): 13 | """ 14 | info: a list of dictionaries which include the learning curve of the 15 | corresponding architecture. 16 | Return the final value on the learning curve 17 | """ 18 | if self.metric in [Metric.VAL_LOSS, Metric.TRAIN_LOSS]: 19 | # invert to get accurate rank correlation 20 | return np.array([-inf["lc"][-1] for inf in info]) 21 | else: 22 | return np.array([inf["lc"][-1] for inf in info]) 23 | 24 | def get_metric(self): 25 | return self.metric 26 | 27 | def get_data_reqs(self): 28 | """ 29 | Returns a dictionary with info about whether the predictor needs 30 | extra info to train/query. 
31 | """ 32 | reqs = { 33 | "requires_partial_lc": True, 34 | "metric": self.metric, 35 | "requires_hyperparameters": False, 36 | "hyperparams": None, 37 | "unlabeled": False, 38 | "unlabeled_factor": 0, 39 | } 40 | return reqs 41 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nasbench201 27 | 28 | dataset=(cifar10 cifar100 ImageNet16-120) 29 | 30 | fidelity=-1 31 | epochs=200 32 | predictor=var_sparse_gp 33 | 34 | # trials / seeds: 35 | trials=10 36 | end_seed=$(($start_seed + $trials - 1)) 37 | 38 | # create config files 39 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 40 | do 41 | dataset=${dataset[$i]} 42 | echo $dataset 43 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 44 | do 45 | optimizer=${optimizers[$i]} 46 | python create_configs.py \ 47 | --start_seed $start_seed --trials $trials \ 48 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 49 | --search_space $search_space --optimizer $optimizer \ 50 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 51 | --fidelity $fidelity --epochs $epochs 52 | done 53 | done 54 | 55 | 56 | echo 'configs are ready, check config folder ...' 57 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nlp.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nlp 27 | 28 | dataset=(LM-task) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 
58 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_mr.sh: -------------------------------------------------------------------------------- 1 | 2 | export OMP_NUM_THREADS=2 3 | # optimizers=(rs) 4 | optimizers=(rs re ls npenas bananas) 5 | 6 | start_seed=$1 7 | if [ -z "$start_seed" ] 8 | then 9 | start_seed=0 10 | fi 11 | 12 | if [[ $optimizers == bananas* ]] 13 | then 14 | acq_fn_optimization=mutation 15 | else 16 | acq_fn_optimization=random_sampling 17 | fi 18 | 19 | # folders: 20 | base_file=naslib 21 | out_dir=run_cpu 22 | 23 | # bbo-bs or predictor-bs 24 | config_type=bbo-bs 25 | 26 | # search space / data: 27 | search_space=mr 28 | 29 | dataset=(seg video 3ddet cls) 30 | 31 | # epoch number to get the values 32 | fidelity=-1 33 | epochs=200 34 | predictor=var_sparse_gp 35 | 36 | # trials / seeds: 37 | trials=10 38 | end_seed=$(($start_seed + $trials - 1)) 39 | 40 | # create config files 41 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 42 | do 43 | dataset=${dataset[$i]} 44 | echo $dataset 45 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 46 | do 47 | optimizer=${optimizers[$i]} 48 | python create_configs.py \ 49 | --start_seed $start_seed --trials $trials \ 50 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 51 | --search_space $search_space --optimizer $optimizer \ 52 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 53 | --fidelity $fidelity --epochs $epochs 54 | done 55 | done 56 | 57 | 58 | echo 'configs are ready, check config folder ...' -------------------------------------------------------------------------------- /scripts/bbo/make_configs_darts.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=darts 27 | 28 | dataset=(cifar10) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | 36 | 37 | # trials / seeds: 38 | trials=10 39 | end_seed=$(($start_seed + $trials - 1)) 40 | 41 | # create config files 42 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 43 | do 44 | dataset=${dataset[$i]} 45 | echo $dataset 46 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 47 | do 48 | optimizer=${optimizers[$i]} 49 | python create_configs.py \ 50 | --start_seed $start_seed --trials $trials \ 51 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 52 | --search_space $search_space --optimizer $optimizer \ 53 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 54 | --fidelity $fidelity --epochs $epochs 55 | done 56 | done 57 | 58 | 59 | echo 'configs are ready, check config folder ...' 
60 | -------------------------------------------------------------------------------- /naslib/runners/nas/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | # random seed 2 | seed: 0 3 | 4 | # re, bananas, npenas, ls, rs 5 | optimizer: re 6 | 7 | # nasbench101, nasbench201, darts, nlp, 8 | # transbench101_micro, transbench101_macro, asr 9 | search_space: nasbench201 10 | 11 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 12 | # transbench101 datasets: class_scene, class_object, 13 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 14 | dataset: cifar10 15 | 16 | # output results to this directory 17 | out_dir: run 18 | 19 | # parameters for the optimizers 20 | search: 21 | # for bohb 22 | budgets: 50000000 23 | checkpoint_freq: 1000 24 | fidelity: 108 25 | 26 | # for all optimizers 27 | epochs: 100 28 | 29 | # for bananas and npenas, choose one predictor 30 | # out of the 16 model-based predictors 31 | predictor_type: var_sparse_gp 32 | 33 | # number of initial architectures 34 | num_init: 10 35 | 36 | # BANANAS 37 | k: 10 38 | num_ensemble: 3 39 | acq_fn_type: its 40 | acq_fn_optimization: mutation 41 | encoding_type: adjacency_one_hot 42 | num_arches_to_mutate: 5 43 | max_mutations: 1 44 | num_candidates: 200 45 | 46 | # jacov data loader 47 | batch_size: 256 48 | data_size: 25000 49 | cutout: False 50 | cutout_length: 16 51 | cutout_prob: 1.0 52 | train_portion: 0.7 53 | 54 | # other params 55 | debug_predictor: False 56 | sample_size: 10 57 | population_size: 30 58 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_asr.sh: -------------------------------------------------------------------------------- 1 | #### NOTE: this script has to be run while being in the parent 'scripts' dir 2 | 3 | export OMP_NUM_THREADS=2 4 | # optimizers=(rs) 5 | optimizers=(rs re ls npenas bananas) 6 | 7 | start_seed=$1 8 | if [ -z "$start_seed" ] 9 | then 10 | start_seed=0 11 | fi 12 | 13 | if [[ $optimizers == bananas* ]] 14 | then 15 | acq_fn_optimization=mutation 16 | else 17 | acq_fn_optimization=random_sampling 18 | fi 19 | 20 | # folders: 21 | out_dir=run_cpu 22 | 23 | # bbo-bs or predictor-bs 24 | config_type=bbo-bs 25 | 26 | # search space / data: 27 | search_space=asr 28 | 29 | dataset=(TIMIT) 30 | 31 | # epoch number to get the values 32 | fidelity=-1 33 | epochs=200 34 | predictor=var_sparse_gp 35 | 36 | # trials / seeds: 37 | trials=10 38 | end_seed=$(($start_seed + $trials - 1)) 39 | 40 | # create config files 41 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 42 | do 43 | dataset=${dataset[$i]} 44 | echo $dataset 45 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 46 | do 47 | optimizer=${optimizers[$i]} 48 | python create_configs.py \ 49 | --start_seed $start_seed --trials $trials \ 50 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 51 | --search_space $search_space --optimizer $optimizer \ 52 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 53 | --fidelity $fidelity --epochs $epochs 54 | done 55 | done 56 | 57 | 58 | echo 'configs are ready, check config folder ...' 
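Configs like the discrete_config.yaml above are normally loaded through NASLib's config utilities, but they are plain YAML and can be inspected directly. A hedged sketch using PyYAML (pinned in requirements.txt); the path assumes the repository root as the working directory:

    import yaml

    with open("naslib/runners/nas/discrete_config.yaml") as f:
        cfg = yaml.safe_load(f)

    print(cfg["optimizer"])                 # 're'
    print(cfg["search"]["predictor_type"])  # 'var_sparse_gp'
    print(cfg["search"]["num_init"])        # 10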
-------------------------------------------------------------------------------- /naslib/search_spaces/darts/primitives.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from ..core.primitives import AbstractPrimitive 5 | 6 | 7 | class FactorizedReduce(AbstractPrimitive): 8 | """ 9 | Factorized reduce as used in ResNet to add some sort 10 | of Identity connection even though the resolution does not 11 | match. 12 | 13 | If the resolution matches it resolves to identity 14 | """ 15 | 16 | def __init__(self, C_in, C_out, stride=1, affine=True, **kwargs): 17 | super().__init__(locals()) 18 | 19 | if stride == 1 and C_in == C_out: 20 | self.is_identity = True 21 | else: 22 | self.is_identity = False 23 | assert C_out % 2 == 0 24 | self.relu = nn.ReLU(inplace=False) 25 | self.conv_1 = nn.Conv2d( 26 | C_in, C_out // 2, 1, stride=2, padding=0, bias=False 27 | ) 28 | self.conv_2 = nn.Conv2d( 29 | C_in, C_out // 2, 1, stride=2, padding=0, bias=False 30 | ) 31 | self.bn = nn.BatchNorm2d(C_out, affine=affine) 32 | 33 | def forward(self, x, edge_data): 34 | if self.is_identity: 35 | return x 36 | else: 37 | x = self.relu(x) 38 | out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1) 39 | out = self.bn(out) 40 | return out 41 | 42 | def get_embedded_ops(self): 43 | return None 44 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_transnb101_macro.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=transbench101_macro 27 | 28 | dataset=(class_scene class_object jigsaw room_layout segmentsemantic normal autoencoder) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 
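A hedged shape check for the FactorizedReduce primitive in darts/primitives.py above (assuming NASLib is importable; the tensor sizes are illustrative):

    import torch
    from naslib.search_spaces.darts.primitives import FactorizedReduce

    op = FactorizedReduce(C_in=16, C_out=32)  # C_in != C_out, so not the identity path
    x = torch.randn(2, 16, 8, 8)
    out = op(x, None)                         # edge_data is unused by this primitive
    assert out.shape == (2, 32, 4, 4)         # spatial dims halved, channels from the two 1x1 convs

    ident = FactorizedReduce(C_in=16, C_out=16, stride=1)
    assert torch.equal(ident(x, None), x)     # resolves to the identity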
58 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_transnb101_micro.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=transbench101_micro 27 | 28 | dataset=(class_scene class_object jigsaw room_layout segmentsemantic normal autoencoder) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 58 | -------------------------------------------------------------------------------- /naslib/search_spaces/hierarchical/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive 4 | 5 | 6 | class ConvBNReLU(AbstractPrimitive): 7 | def __init__(self, C_in, C_out, kernel_size, stride=1, affine=False): 8 | super().__init__(locals()) 9 | pad = 0 if stride == 1 and kernel_size == 1 else 1 10 | self.op = nn.Sequential( 11 | nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=pad, bias=False), 12 | nn.BatchNorm2d(C_out, affine=affine), 13 | nn.ReLU(inplace=False), 14 | ) 15 | 16 | def forward(self, x, edge_data): 17 | return self.op(x) 18 | 19 | def get_embedded_ops(self): 20 | return None 21 | 22 | 23 | class DepthwiseConv(AbstractPrimitive): 24 | """ 25 | Depthwise convolution 26 | """ 27 | 28 | def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): 29 | super().__init__(locals()) 30 | self.op = nn.Sequential( 31 | nn.Conv2d( 32 | C_in, 33 | C_in, 34 | kernel_size=kernel_size, 35 | stride=stride, 36 | padding=padding, 37 | groups=C_in, 38 | bias=False, 39 | ), 40 | nn.BatchNorm2d(C_in, affine=affine), 41 | nn.ReLU(inplace=False), 42 | ) 43 | 44 | def forward(self, x, edge_data): 45 | return self.op(x) 46 | 47 | def get_embedded_ops(self): 48 | return None 49 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench201/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive, ReLUConvBN 4 | 5 | 6 | """ 7 | Code below from NAS-Bench-201 and slightly adapted 8 | @inproceedings{dong2020nasbench201, 9 | title = {NAS-Bench-201: Extending the Scope of
Reproducible Neural Architecture Search}, 10 | author = {Dong, Xuanyi and Yang, Yi}, 11 | booktitle = {International Conference on Learning Representations (ICLR)}, 12 | url = {https://openreview.net/forum?id=HJxyZkBKDr}, 13 | year = {2020} 14 | } 15 | """ 16 | 17 | 18 | class ResNetBasicblock(AbstractPrimitive): 19 | def __init__(self, C_in, C_out, stride, affine=True): 20 | super().__init__(locals()) 21 | assert stride == 1 or stride == 2, "invalid stride {:}".format(stride) 22 | self.conv_a = ReLUConvBN(C_in, C_out, 3, stride) 23 | self.conv_b = ReLUConvBN(C_out, C_out, 3) 24 | if stride == 2: 25 | self.downsample = nn.Sequential( 26 | nn.AvgPool2d(kernel_size=2, stride=2, padding=0), 27 | nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False), 28 | ) 29 | else: 30 | self.downsample = None 31 | 32 | def forward(self, x, edge_data): 33 | basicblock = self.conv_a(x, None) 34 | basicblock = self.conv_b(basicblock, None) 35 | residual = self.downsample(x) if self.downsample is not None else x 36 | return residual + basicblock 37 | 38 | def get_embedded_ops(self): 39 | return None 40 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive, ReLUConvBN 4 | 5 | 6 | """ 7 | Code below from NAS-Bench-201 and slightly adapted 8 | @inproceedings{dong2020nasbench201, 9 | title = {NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search}, 10 | author = {Dong, Xuanyi and Yang, Yi}, 11 | booktitle = {International Conference on Learning Representations (ICLR)}, 12 | url = {https://openreview.net/forum?id=HJxyZkBKDr}, 13 | year = {2020} 14 | } 15 | """ 16 | 17 | 18 | class ResNetBasicblock(AbstractPrimitive): 19 | 20 | def __init__(self, C_in, C_out, stride, affine=True): 21 | super().__init__(locals()) 22 | assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride) 23 | self.conv_a = ReLUConvBN(C_in, C_out, 3, stride) 24 | self.conv_b = ReLUConvBN(C_out, C_out, 3) 25 | if stride == 2: 26 | self.downsample = nn.Sequential( 27 | nn.AvgPool2d(kernel_size=2, stride=2, padding=0), 28 | nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False)) 29 | else: 30 | self.downsample = None 31 | 32 | 33 | def forward(self, x, edge_data): 34 | basicblock = self.conv_a(x, None) 35 | basicblock = self.conv_b(basicblock, None) 36 | residual = self.downsample(x) if self.downsample is not None else x 37 | return residual + basicblock 38 | 39 | 40 | def get_embedded_ops(self): 41 | return None 42 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_npenas_2.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_xgb) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=bo201_feb21 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | search_epochs=500 19 | 20 | # trials / seeds: 21 | trials=100 22 | end_seed=$(($start_seed + $trials - 1)) 23 | save_to_s3=true 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | python $base_file/benchmarks/create_configs.py
--predictor $predictor \ 30 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 31 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 32 | --search_space $search_space --optimizer $optimizer 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /scripts/predictors/test.sh: -------------------------------------------------------------------------------- 1 | predictors=(rf) 2 | experiment_types=(single) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=test 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | 19 | # other variables: 20 | trials=1 21 | end_seed=$(($start_seed + $trials - 1)) 22 | save_to_s3=true 23 | test_size=10 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | experiment_type=${experiment_types[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 31 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 32 | --dataset=$dataset --config_type predictor --search_space $search_space 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/grad_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | # ============================================================================= 15 | 16 | import torch 17 | import torch.nn.functional as F 18 | 19 | import copy 20 | 21 | from . import measure 22 | from ..p_utils import get_layer_metric_array 23 | 24 | 25 | @measure("grad_norm", bn=True) 26 | def get_grad_norm_arr(net, inputs, targets, loss_fn, split_data=1, skip_grad=False): 27 | net.zero_grad() 28 | N = inputs.shape[0] 29 | for sp in range(split_data): 30 | st = sp * N // split_data 31 | en = (sp + 1) * N // split_data 32 | 33 | outputs = net.forward(inputs[st:en]) 34 | loss = loss_fn(outputs, targets[st:en]) 35 | loss.backward() 36 | 37 | grad_norm_arr = get_layer_metric_array( 38 | net, 39 | lambda l: l.weight.grad.norm() 40 | if l.weight.grad is not None 41 | else torch.zeros_like(l.weight), 42 | mode="param", 43 | ) 44 | 45 | return grad_norm_arr 46 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb101_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(bananas mlp lgb gcn xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo101 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench101 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb101_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(bananas mlp lgb gcn xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=np101 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench101 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30
| python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /naslib/runners/statistics/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from naslib.defaults.statistics_evaluator import StatisticsEvaluator 4 | 5 | from naslib.search_spaces import ( 6 | NasBench101SearchSpace, 7 | NasBench201SearchSpace, 8 | DartsSearchSpace, 9 | NasBenchNLPSearchSpace, 10 | TransBench101SearchSpaceMicro, 11 | TransBench101SearchSpaceMacro 12 | ) 13 | 14 | from naslib.utils import utils, setup_logger, get_dataset_api 15 | 16 | 17 | config = utils.get_config_from_args(config_type="statistics") 18 | utils.set_seed(config.seed) 19 | logger = setup_logger(config.save + "/log.log") 20 | logger.setLevel(logging.INFO) 21 | utils.log_args(config) 22 | 23 | supported_search_spaces = { 24 | "nasbench101": NasBench101SearchSpace(), 25 | "nasbench201": NasBench201SearchSpace(), 26 | "darts": DartsSearchSpace(), 27 | "nlp": NasBenchNLPSearchSpace(), 28 | "transbench101_micro": TransBench101SearchSpaceMicro(config.dataset), 29 | "transbench101_macro": TransBench101SearchSpaceMacro(), 30 | } 31 | 32 | """ 33 | If the API did not evaluate *all* architectures in the search space, 34 | set load_labeled=True 35 | """ 36 | load_labeled = True if config.search_space in ["darts", "nlp"] else False 37 | dataset_api = get_dataset_api(config.search_space, config.dataset) 38 | 39 | # initialize the search space 40 | search_space = supported_search_spaces[config.search_space] 41 | 42 | # initialize the StatisticsEvaluator class 43 | statistics_evaluator = StatisticsEvaluator(config=config) 44 | statistics_evaluator.adapt_search_space( 45 | search_space, load_labeled=load_labeled, dataset_api=dataset_api 46 | ) 47 | 48 | # evaluate the statistics 49 | statistics_evaluator.evaluate() 50 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/plain.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | import torch 17 | import torch.nn.functional as F 18 | 19 | from . import measure 20 | from ..p_utils import get_layer_metric_array 21 | 22 | 23 | @measure("plain", bn=True, mode="param") 24 | def compute_plain_per_weight(net, inputs, targets, mode, loss_fn, split_data=1): 25 | 26 | net.zero_grad() 27 | N = inputs.shape[0] 28 | for sp in range(split_data): 29 | st = sp * N // split_data 30 | en = (sp + 1) * N // split_data 31 | 32 | outputs = net.forward(inputs[st:en]) 33 | loss = loss_fn(outputs, targets[st:en]) 34 | loss.backward() 35 | 36 | # select the gradients that we want to use for search/prune 37 | def plain(layer): 38 | if layer.weight.grad is not None: 39 | return layer.weight.grad * layer.weight 40 | else: 41 | return torch.zeros_like(layer.weight) 42 | 43 | grads_abs = get_layer_metric_array(net, plain, mode) 44 | return grads_abs 45 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_darts_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=np301 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=darts 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_bo_2.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(bananas feedforward gbdt gcn bonas xgb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=bo201_feb22 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=cifar10 19 | search_epochs=500
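# note: search_epochs is handed to create_configs.py as --epochs, i.e. the
# search budget (cf. the "for all optimizers" epochs field in the discrete
# configs above); presumably the number of architectures each run may
# evaluate, not a gradient-descent epoch count.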
20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=np201 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_darts_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo301_feb9_0 14 | out_dir=$s3_folder\_$start_seed 15
| 16 | # search space / data: 17 | search_space=darts 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /scripts/predictors/run_nb201_2.sh: -------------------------------------------------------------------------------- 1 | predictors=(xgb_hpo xgb feedforward_hpo feedforward) 2 | experiment_types=(vary_train_size vary_train_size vary_train_size vary_train_size) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=p201_c10_feb21 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | 19 | # other variables: 20 | trials=100 21 | end_seed=$(($start_seed + $trials - 1)) 22 | save_to_s3=true 23 | test_size=200 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | experiment_type=${experiment_types[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 31 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 32 | --dataset=$dataset --config_type predictor --search_space $search_space 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 
9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=bo201_feb22 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=ImageNet16-120 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_im_bo_arber.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(ngb_hp omni nao seminas bananas feedforward gbdt gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo201_im_feb4_2 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=ImageNet16-120 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/nas_predictor_config.yaml: -------------------------------------------------------------------------------- 1 | seed: 0 2 | optimizer: oneshot 3 | search_space: 
darts 4 | dataset: cifar10 5 | out_dir: run 6 | 7 | experiment_type: single 8 | predictor: oneshot 9 | test_size: 200 10 | train_size_list: [8, 12] 11 | train_size_single: 2 12 | fidelity_list: [5] 13 | fidelity_single: 5 14 | 15 | search: 16 | checkpoint_freq: 1000 17 | epochs: 50 18 | fidelity: -1 19 | 20 | # GDAS 21 | tau_max: 10 22 | tau_min: 0.1 23 | 24 | # RE 25 | sample_size: 10 26 | population_size: 30 27 | 28 | # LS 29 | num_init: 10 30 | 31 | # BANANAS 32 | k: 10 33 | num_ensemble: 3 34 | acq_fn_type: its 35 | acq_fn_optimization: mutation 36 | encoding_type: path 37 | num_arches_to_mutate: 2 38 | max_mutations: 1 39 | num_candidates: 100 40 | 41 | # BP 42 | predictor_type: oneshot 43 | debug_predictor: False 44 | 45 | 46 | # additional params 47 | batch_size: 64 48 | learning_rate: 0.025 49 | learning_rate_min: 0.001 50 | momentum: 0.9 51 | weight_decay: 0.0003 52 | warm_start_epochs: 0 53 | grad_clip: 5 54 | train_portion: 0.9 55 | data_size: 25000 56 | 57 | cutout: False 58 | cutout_length: 16 59 | cutout_prob: 1.0 60 | drop_path_prob: 0.0 61 | 62 | unrolled: False 63 | arch_learning_rate: 0.0003 64 | arch_weight_decay: 0.001 65 | output_weights: True 66 | 67 | 68 | evaluation: 69 | checkpoint_freq: 5000 70 | batch_size: 96 71 | learning_rate: 0.025 72 | learning_rate_min: 0.00 73 | momentum: 0.9 74 | weight_decay: 0.0003 75 | epochs: 600 76 | warm_start_epochs: 0 77 | grad_clip: 5 78 | train_portion: 1. 79 | data_size: 50000 80 | 81 | cutout: True 82 | cutout_length: 16 83 | cutout_prob: 1.0 84 | drop_path_prob: 0.2 85 | auxiliary_weight: 0.4 86 | 87 | -------------------------------------------------------------------------------- /naslib/runners/predictors/predictor_config.yaml: -------------------------------------------------------------------------------- 1 | # The experiment type can be single, vary_train_size, vary_fidelity, or vary_both 2 | # single will use train_size_single and fidelity_single 3 | # vary_train_size will use train_size_list and fidelity_single 4 | # vary_fidelity will use train_size_single and fidelity_list 5 | # vary_both will use train_size_list and fidelity_list 6 | experiment_type: single 7 | 8 | # nasbench101, nasbench201, darts, nlp, transbench101_micro 9 | search_space: nasbench201 10 | 11 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 12 | # transbench101 datasets: class_scene, class_object, 13 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 14 | dataset: cifar10 15 | 16 | # one of the 31 predictors in benchmarks/predictors/runner.py 17 | predictor: synflow 18 | 19 | # 0: mutation-based, or 1: uniformly random, train/test sets 20 | uniform_random: 1 21 | 22 | # test set size 23 | test_size: 2 24 | 25 | # size of the training set (used by model-based predictors) 26 | train_size_single: 3 27 | train_size_list: [5, 8, 14, 24, 42, 71, 121, 205] 28 | 29 | # num. epochs to train the test set arches (used by learning curve methods) 30 | fidelity_single: 5 31 | fidelity_list: [1, 2, 3, 5, 7, 9, 13, 19, 26, 37, 52, 73] 32 | 33 | # output results to this directory 34 | out_dir: run 35 | 36 | # maximum number of seconds to run cross-validation (for model-based predictors) 37 | max_hpo_time: 0 38 | 39 | # load the hyperparams from the specified file.
40 | # otherwise, set to None or False 41 | hparams_from_file: predictor_hpo_configs/hpo_config_1.json 42 | 43 | # random seed 44 | seed: 1000 45 | 46 | # these are used by the zero-cost methods 47 | search: 48 | batch_size: 256 49 | data_size: 25000 50 | cutout: False 51 | cutout_length: 16 52 | cutout_prob: 1.0 53 | train_portion: 0.7 54 | -------------------------------------------------------------------------------- /naslib/runners/bbo/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | # random seed 2 | seed: 0 3 | 4 | # re, bananas, npenas, ls, rs 5 | optimizer: re 6 | 7 | # nasbench101, nasbench201, darts, nlp, transbench101, asr 8 | search_space: nasbench201 9 | 10 | # cifar10, cifar100, or ImageNet16-120 (only important for nasbench201) 11 | dataset: cifar10 12 | 13 | # output results to this directory 14 | out_dir: run_boschcpu 15 | 16 | # config id for the experiment 17 | config_id: 0 18 | 19 | # parameters for the optimizers 20 | search: 21 | # for bohb 22 | budgets: 50000000 23 | checkpoint_freq: 1000 24 | fidelity: 108 25 | 26 | # for all optimizers 27 | epochs: 10 28 | 29 | # for bananas and npenas, choose one predictor 30 | # out of the 16 model-based predictors 31 | predictor_type: var_sparse_gp 32 | 33 | # number of initial architectures 34 | num_init: 10 35 | 36 | # BANANAS 37 | k: 10 38 | num_ensemble: 3 39 | acq_fn_type: its 40 | acq_fn_optimization: mutation 41 | encoding_type: adjacency_one_hot 42 | num_arches_to_mutate: 1 43 | max_mutations: 1 44 | num_candidates: 50 45 | 46 | # jacov data loader 47 | batch_size: 256 48 | data_size: 25000 49 | cutout: False 50 | cutout_length: 16 51 | cutout_prob: 1.0 52 | train_portion: 0.7 53 | 54 | # other params 55 | debug_predictor: False 56 | sample_size: 10 57 | population_size: 30 58 | 59 | # copied directly from darts_defaults 60 | evaluation: 61 | checkpoint_freq: 30 62 | batch_size: 96 63 | learning_rate: 0.025 64 | learning_rate_min: 0.00 65 | momentum: 0.9 66 | weight_decay: 0.0003 67 | epochs: 600 68 | warm_start_epochs: 0 69 | grad_clip: 5 70 | train_portion: 1. 
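  # data_size 50000 below is the full CIFAR-10 training set; with
  # train_portion: 1. above, final training presumably holds out no validation split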
71 | data_size: 50000 72 | 73 | cutout: True 74 | cutout_length: 16 75 | cutout_prob: 1.0 76 | drop_path_prob: 0.2 77 | auxiliary_weight: 0.4 78 | -------------------------------------------------------------------------------- /scripts/predictors/run_tnb_predictors.sh: -------------------------------------------------------------------------------- 1 | #predictors=(jacov2 snip synflow fisher grad_norm grasp) 2 | #experiment_types=(single single single single single single) 3 | 4 | predictors=(jacov2 snip synflow fisher grad_norm) 5 | experiment_types=(single single single single single) 6 | 7 | start_seed=$1 8 | if [ -z "$start_seed" ] 9 | then 10 | start_seed=0 11 | fi 12 | 13 | # folders: 14 | base_file=NASLib/naslib 15 | s3_folder=class_scene_zc_dec10_2021 16 | out_dir=$s3_folder\_$start_seed 17 | 18 | # search space / data: 19 | search_space=transbench101_micro 20 | dataset=class_scene 21 | 22 | # other variables: 23 | trials=100 24 | end_seed=$(($start_seed + $trials - 1)) 25 | save_to_s3=true 26 | test_size=100 27 | 28 | # create config files 29 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 30 | do 31 | predictor=${predictors[$i]} 32 | experiment_type=${experiment_types[$i]} 33 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 34 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 35 | --dataset=$dataset --config_type predictor --search_space $search_space 36 | done 37 | 38 | # run experiments 39 | for t in $(seq $start_seed $end_seed) 40 | do 41 | for predictor in ${predictors[@]} 42 | do 43 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 44 | echo ================running $predictor trial: $t ===================== 45 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 46 | done 47 | if [ "$save_to_s3" ] 48 | then 49 | # zip and save to s3 50 | echo zipping and saving to s3 51 | zip -r $out_dir.zip $out_dir 52 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 53 | fi 54 | done 55 | -------------------------------------------------------------------------------- /docs/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. NASLib documentation master file, created by 2 | sphinx-quickstart on Mon Jul 5 20:39:23 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | NASLib Documentation 7 | ======================================================= 8 | NASLib is a Neural Architecture Search (NAS) library. Its purpose is to facilitate NAS research for the community by providing interfaces to several state-of-the-art NAS search spaces. 9 | 10 | .. warning:: 11 | This library is under construction and there is no official release yet. Feel free to play around and have a look, but be aware that the APIs may change until the first release. 12 | 13 | NASLib has been used to run an extensive comparison of 31 performance predictors. The results were published in the paper *How Powerful are Performance Predictors in Neural Architecture Search?* 14 | For more details, take a look at its separate README. 15 | 16 | .. image:: naslib-overview.png 17 | ..
18 | :width: 200px 19 | :height: 100px 20 | :scale: 50 % 21 | :alt: alternate text 22 | :align: right 23 | 24 | Usage 25 | ----- 26 | 27 | :: 28 | 29 | search_space = SimpleCellSearchSpace() 30 | 31 | optimizer = DARTSOptimizer(config) 32 | optimizer.adapt_search_space(search_space) 33 | 34 | trainer = Trainer(optimizer, config) 35 | trainer.search() # Search for an architecture 36 | trainer.evaluate() # Evaluate the best architecture 37 | 38 | .. toctree:: 39 | :maxdepth: 2 40 | :caption: Online Documentation Contents: 41 | 42 | manual 43 | example 44 | license 45 | citing 46 | contributing 47 | 48 | .. 49 | Indices and tables 50 | ================== 51 | 52 | * :ref:`genindex` 53 | * :ref:`modindex` 54 | * :ref:`search` 55 | 56 | -------------------------------------------------------------------------------- /naslib/predictors/utils/encodings_asr.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import logging 3 | 4 | from naslib.search_spaces.nasbenchasr.conversions import flatten 5 | 6 | """ 7 | These are the encoding methods for nas-bench-asr. 8 | The plan is to unify encodings across all search spaces. 9 | Note: this has not been thoroughly tested yet. 10 | """ 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | one_hot_ops = [ 16 | [1, 0, 0, 0, 0, 0], 17 | [0, 1, 0, 0, 0, 0], 18 | [0, 0, 1, 0, 0, 0], 19 | [0, 0, 0, 1, 0, 0], 20 | [0, 0, 0, 0, 1, 0], 21 | [0, 0, 0, 0, 0, 1], 22 | ] 23 | 24 | 25 | def encode_compact(compact): 26 | return flatten(compact) 27 | 28 | 29 | def encode_adjacency_one_hot(compact): 30 | one_hot = [] 31 | for e in flatten(compact): 32 | one_hot = [*one_hot, *one_hot_ops[e]] 33 | return one_hot 34 | 35 | 36 | def encode_seminas_nasbenchasr(compact): 37 | # note: the adjacency matrix is fixed for ASR, 38 | # so the identity matrix can be passed in 39 | dic = { 40 | "num_vertices": 9, 41 | "adjacency": np.identity(9, dtype=np.float32), 42 | "operations": flatten(compact), 43 | "mask": np.array([i < 9 for i in range(9)], dtype=np.float32), 44 | "val_acc": 0.0, 45 | } 46 | return dic 47 | 48 | 49 | def encode_asr(arch, encoding_type='adjacency_one_hot', max_nodes=3, accs=None): 50 | 51 | compact = arch.get_compact() 52 | 53 | if encoding_type == 'adjacency_one_hot': 54 | return encode_adjacency_one_hot(compact) 55 | 56 | elif encoding_type == 'compact': 57 | return encode_compact(compact) 58 | 59 | elif encoding_type == 'seminas': 60 | return encode_seminas_nasbenchasr(compact) 61 | 62 | else: 63 | print('{} is not yet implemented as an encoding type \ 64 | for asr'.format(encoding_type)) 65 | raise NotImplementedError() 66 | -------------------------------------------------------------------------------- /naslib/defaults/darts_defaults.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar10 2 | seed: 99 3 | search_space: nasbench201 4 | out_dir: run 5 | optimizer: darts 6 | 7 | search: 8 | checkpoint_freq: 5 9 | batch_size: 64 10 | learning_rate: 0.025 11 | learning_rate_min: 0.001 12 | momentum: 0.9 13 | weight_decay: 0.0003 14 | epochs: 50 15 | warm_start_epochs: 0 16 | grad_clip: 5 17 | train_portion: 0.5 18 | data_size: 25000 19 | 20 | cutout: False 21 | cutout_length: 16 22 | cutout_prob: 1.0 23 | drop_path_prob: 0.0 24 | 25 | unrolled: False 26 | arch_learning_rate: 0.0003 27 | arch_weight_decay: 0.001 28 | output_weights: True 29 | 30 | fidelity: 200 31 | 32 | # GDAS 33 | tau_max: 10 34 | tau_min: 0.1 35 | 36 | # RE 37 | sample_size: 
10 38 | population_size: 100 39 | 40 | #LS 41 | num_init: 10 42 | 43 | #GSparsity 44 | seed: 50 45 | grad_clip: 0 46 | threshold: 0.000001 47 | weight_decay: 120 48 | learning_rate: 0.01 49 | momentum: 0.8 50 | normalization: div 51 | normalization_exponent: 0.5 52 | batch_size: 256 53 | learning_rate_min: 0.0001 54 | epochs: 100 55 | warm_start_epochs: 0 56 | train_portion: 0.9 57 | data_size: 25000 58 | 59 | 60 | # BANANAS 61 | k: 10 62 | num_ensemble: 3 63 | acq_fn_type: its 64 | acq_fn_optimization: mutation 65 | encoding_type: path 66 | num_arches_to_mutate: 2 67 | max_mutations: 1 68 | num_candidates: 100 69 | 70 | # BasePredictor 71 | predictor_type: var_sparse_gp 72 | debug_predictor: False 73 | 74 | evaluation: 75 | checkpoint_freq: 30 76 | batch_size: 96 77 | learning_rate: 0.025 78 | learning_rate_min: 0.00 79 | momentum: 0.9 80 | weight_decay: 0.0003 81 | epochs: 600 82 | warm_start_epochs: 0 83 | grad_clip: 5 84 | train_portion: 1. 85 | data_size: 50000 86 | 87 | cutout: True 88 | cutout_length: 16 89 | cutout_prob: 1.0 90 | drop_path_prob: 0.2 91 | auxiliary_weight: 0.4 -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/jacov.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | import torch 17 | import numpy as np 18 | 19 | from . 
import measure 20 | 21 | 22 | def get_batch_jacobian(net, x, target, device, split_data): 23 | x.requires_grad_(True) 24 | 25 | N = x.shape[0] 26 | for sp in range(split_data): 27 | st = sp * N // split_data 28 | en = (sp + 1) * N // split_data 29 | y = net(x[st:en]) 30 | y.backward(torch.ones_like(y)) 31 | 32 | jacob = x.grad.detach() 33 | x.requires_grad_(False) 34 | return jacob, target.detach() 35 | 36 | 37 | def eval_score(jacob, labels=None): 38 | corrs = np.corrcoef(jacob) 39 | v, _ = np.linalg.eig(corrs) 40 | k = 1e-5 41 | return -np.sum(np.log(v + k) + 1.0 / (v + k)) 42 | 43 | 44 | @measure("jacov", bn=True) 45 | def compute_jacob_cov(net, inputs, targets, split_data=1, loss_fn=None): 46 | device = inputs.device 47 | # Compute gradients (but don't apply them) 48 | net.zero_grad() 49 | 50 | jacobs, labels = get_batch_jacobian( 51 | net, inputs, targets, device, split_data=split_data 52 | ) 53 | jacobs = jacobs.reshape(jacobs.size(0), -1).cpu().numpy() 54 | 55 | try: 56 | jc = eval_score(jacobs, labels) 57 | except Exception as e: 58 | print(e) 59 | jc = np.nan 60 | 61 | return jc 62 | -------------------------------------------------------------------------------- /scripts/predictors/run_nb101.sh: -------------------------------------------------------------------------------- 1 | predictors=(fisher grad_norm grasp jacov snip synflow \ 2 | bananas bonas gcn mlp nao seminas \ 3 | lgb ngb rf xgb \ 4 | bayes_lin_reg bohamiann dngo \ 5 | gp sparse_gp var_sparse_gp) 6 | 7 | experiment_types=(single single single single single single \ 8 | vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size \ 9 | vary_train_size vary_train_size vary_train_size vary_train_size \ 10 | vary_train_size vary_train_size vary_train_size \ 11 | vary_train_size vary_train_size vary_train_size) 12 | 13 | start_seed=$1 14 | if [ -z "$start_seed" ] 15 | then 16 | start_seed=0 17 | fi 18 | 19 | # folders: 20 | base_file=NASLib/naslib 21 | s3_folder=p101 22 | out_dir=$s3_folder\_$start_seed 23 | 24 | # search space / data: 25 | search_space=nasbench101 26 | dataset=cifar10 27 | 28 | # other variables: 29 | trials=100 30 | end_seed=$(($start_seed + $trials - 1)) 31 | save_to_s3=true 32 | test_size=200 33 | 34 | # create config files 35 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 36 | do 37 | predictor=${predictors[$i]} 38 | experiment_type=${experiment_types[$i]} 39 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 40 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 41 | --dataset=$dataset --config_type predictor --search_space $search_space 42 | done 43 | 44 | # run experiments 45 | for t in $(seq $start_seed $end_seed) 46 | do 47 | for predictor in ${predictors[@]} 48 | do 49 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 50 | echo ================running $predictor trial: $t ===================== 51 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 52 | done 53 | if [ "$save_to_s3" ] 54 | then 55 | # zip and save to s3 56 | echo zipping and saving to s3 57 | zip -r $out_dir.zip $out_dir 58 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 59 | fi 60 | done 61 | -------------------------------------------------------------------------------- /scripts/predictors/run_nlp.sh: -------------------------------------------------------------------------------- 1 | predictors=(lce lce_m 
sotl sotle valacc valloss \ 2 | lcsvr \ 3 | gcn mlp nao seminas \ 4 | lgb ngb rf xgb \ 5 | bayes_lin_reg bohamiann dngo \ 6 | gp sparse_gp var_sparse_gp) 7 | 8 | experiment_types=(vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity \ 9 | vary_both \ 10 | vary_train_size vary_train_size vary_train_size vary_train_size \ 11 | vary_train_size vary_train_size vary_train_size vary_train_size \ 12 | vary_train_size vary_train_size vary_train_size \ 13 | vary_train_size vary_train_size vary_train_size) 14 | 15 | start_seed=$1 16 | if [ -z "$start_seed" ] 17 | then 18 | start_seed=0 19 | fi 20 | 21 | # folders: 22 | base_file=NASLib/naslib 23 | s3_folder=pnlp 24 | out_dir=$s3_folder\_$start_seed 25 | 26 | # search space / data: 27 | search_space=nlp 28 | dataset=ptb 29 | 30 | # other variables: 31 | trials=100 32 | end_seed=$(($start_seed + $trials - 1)) 33 | save_to_s3=true 34 | test_size=200 35 | 36 | # create config files 37 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 38 | do 39 | predictor=${predictors[$i]} 40 | experiment_type=${experiment_types[$i]} 41 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 42 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 43 | --dataset=$dataset --config_type predictor --search_space $search_space 44 | done 45 | 46 | # run experiments 47 | for t in $(seq $start_seed $end_seed) 48 | do 49 | for predictor in ${predictors[@]} 50 | do 51 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 52 | echo ================running $predictor trial: $t ===================== 53 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 54 | done 55 | if [ "$save_to_s3" ] 56 | then 57 | # zip and save to s3 58 | echo zipping and saving to s3 59 | zip -r $out_dir.zip $out_dir 60 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 61 | fi 62 | done 63 | -------------------------------------------------------------------------------- /examples/demo.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | from naslib.defaults.trainer import Trainer 5 | from naslib.optimizers import ( 6 | DARTSOptimizer, 7 | GDASOptimizer, 8 | DrNASOptimizer, 9 | RandomSearch, 10 | RegularizedEvolution, 11 | LocalSearch, 12 | Bananas, 13 | BasePredictor, 14 | ) 15 | 16 | from naslib.search_spaces import ( 17 | DartsSearchSpace, 18 | SimpleCellSearchSpace, 19 | NasBench201SearchSpace, 20 | HierarchicalSearchSpace, 21 | ) 22 | 23 | # from naslib.search_spaces.nasbench101 import graph 24 | 25 | from naslib.utils import utils, setup_logger 26 | 27 | # Read args and config, setup logger 28 | config = utils.get_config_from_args() 29 | utils.set_seed(config.seed) 30 | 31 | logger = setup_logger(config.save + "/log.log") 32 | # logger.setLevel(logging.INFO) # default DEBUG is very verbose 33 | 34 | utils.log_args(config) 35 | 36 | supported_optimizers = { 37 | "darts": DARTSOptimizer(config), 38 | "gdas": GDASOptimizer(config), 39 | "drnas": DrNASOptimizer(config), 40 | "rs": RandomSearch(config), 41 | "re": RegularizedEvolution(config), 42 | "ls": LocalSearch(config), 43 | "bananas": Bananas(config), 44 | "bp": BasePredictor(config), 45 | } 46 | 47 | # Changing the search space is one line of code 48 | search_space = SimpleCellSearchSpace() 49 | # search_space = graph.NasBench101SearchSpace() 50 | # search_space = 
HierarchicalSearchSpace() 51 | # search_space = DartsSearchSpace() 52 | # search_space = NasBench201SearchSpace() 53 | 54 | # Changing the optimizer is one line of code 55 | # optimizer = supported_optimizers[config.optimizer] 56 | optimizer = supported_optimizers["drnas"] 57 | optimizer.adapt_search_space(search_space) 58 | 59 | # Start the search and evaluation 60 | trainer = Trainer(optimizer, config) 61 | 62 | if not config.eval_only: 63 | checkpoint = utils.get_last_checkpoint(config) if config.resume else "" 64 | trainer.search(resume_from=checkpoint) 65 | 66 | checkpoint = utils.get_last_checkpoint(config, search=False) if config.resume else "" 67 | trainer.evaluate(resume_from=checkpoint) 68 | -------------------------------------------------------------------------------- /scripts/nas/run_nb311.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=$HOME/nasbench311/nasbench301:$HOME/nasbench311/NASLib:$PYTHONPATH 2 | export OMP_NUM_THREADS=2 3 | #optimizers=(rs) 4 | optimizers=(re) 5 | #optimizers=(rea_lce) 6 | #optimizers=(rea_svr) 7 | #optimizers=(ls) 8 | #optimizers=(ls_lce) 9 | #optimizers=(ls_svr) 10 | #optimizers=(bananas) 11 | #optimizers=(bananas_svr) 12 | #optimizers=(bananas_lce) 13 | #optimizers=(hb_simple) 14 | #optimizers=(bohb_simple) 15 | #optimizers=(dehb_simple) 16 | predictor=bananas #optional: (gcn xgb) 17 | 18 | start_seed=$1 19 | if [ -z "$start_seed" ] 20 | then 21 | start_seed=0 22 | fi 23 | 24 | if [[ $optimizers == bananas* ]] 25 | then 26 | acq_fn_optimization=mutation 27 | else 28 | acq_fn_optimization=random_sampling 29 | fi 30 | 31 | # folders: 32 | base_file=naslib 33 | s3_folder=nas301 34 | out_dir=$s3_folder\_$start_seed 35 | 36 | # search space / data: 37 | search_space=darts 38 | dataset=cifar10 39 | budgets=5000000 40 | fidelity=97 41 | single_fidelity=20 42 | 43 | # trials / seeds: 44 | trials=30 45 | end_seed=$(($start_seed + $trials - 1)) 46 | 47 | # create config files 48 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 49 | do 50 | optimizer=${optimizers[$i]} 51 | python $base_file/benchmarks/create_configs.py \ 52 | --budgets $budgets --start_seed $start_seed --trials $trials \ 53 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 54 | --search_space $search_space --optimizer $optimizer \ 55 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 56 | --fidelity $fidelity --single_fidelity $single_fidelity 57 | done 58 | 59 | # run experiments 60 | for t in $(seq $start_seed $end_seed) 61 | do 62 | for optimizer in ${optimizers[@]} 63 | do 64 | if [[ $optimizer == bananas* ]] 65 | then 66 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 67 | else 68 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 69 | fi 70 | echo ================running $optimizer trial: $t ===================== 71 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 72 | done 73 | done 74 | -------------------------------------------------------------------------------- /scripts/nas/run_nbnlp.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=$HOME/nasbench311/nasbench301:$HOME/nasbench311/NASLib:$PYTHONPATH 2 | export OMP_NUM_THREADS=2 3 | #optimizers=(rs) 4 | optimizers=(re) 5 | #optimizers=(rea_lce) 6 | #optimizers=(rea_svr) 7 | #optimizers=(ls) 8 | #optimizers=(ls_lce) 9 | #optimizers=(ls_svr) 10 | #optimizers=(bananas) 11 | #optimizers=(bananas_svr) 12 | 
#optimizers=(bananas_lce) 13 | #optimizers=(hb_simple) 14 | predictor=bananas #optional: (gcn xgb) 15 | 16 | start_seed=$1 17 | if [ -z "$start_seed" ] 18 | then 19 | start_seed=0 20 | fi 21 | 22 | if [[ $optimizers == bananas* ]] 23 | then 24 | acq_fn_optimization=mutation 25 | else 26 | acq_fn_optimization=random_sampling 27 | fi 28 | 29 | # folders: 30 | base_file=naslib 31 | s3_folder=nasnlp 32 | out_dir=$s3_folder\_$start_seed 33 | 34 | # search space / data: 35 | search_space=nlp 36 | dataset=ptb 37 | budgets=1000000 38 | fidelity=49 39 | single_fidelity=10 40 | population_size=20 41 | num_init=20 42 | 43 | # trials / seeds: 44 | trials=30 45 | end_seed=$(($start_seed + $trials - 1)) 46 | 47 | # create config files 48 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 49 | do 50 | optimizer=${optimizers[$i]} 51 | python $base_file/benchmarks/create_configs.py \ 52 | --budgets $budgets --start_seed $start_seed --trials $trials \ 53 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 54 | --search_space $search_space --optimizer $optimizer \ 55 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 56 | --fidelity $fidelity --single_fidelity $single_fidelity \ 57 | --population_size $population_size --num_init $num_init 58 | done 59 | 60 | # run experiments 61 | for t in $(seq $start_seed $end_seed) 62 | do 63 | for optimizer in ${optimizers[@]} 64 | do 65 | if [[ $optimizer == bananas* ]] 66 | then 67 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 68 | else 69 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 70 | fi 71 | echo ================running $optimizer trial: $t ===================== 72 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 73 | done 74 | done -------------------------------------------------------------------------------- /naslib/data/taskonomydata_mini/download_tnb.sh: -------------------------------------------------------------------------------- 1 | # this downloads and renames all the taskonomydata_mini data for TransNAS-Bench-101 2 | 3 | datasets=(wainscott tolstoy klickitat pinesdale stockman beechwood coffeen corozal \ 4 | benevolence eagan forkland hanson hiteman ihlen lakeville lindenwood \ 5 | marstons merom newfields pomaria shelbyville uvalda) 6 | 7 | # download all rgb files 8 | for dataset in ${datasets[@]} 9 | do 10 | file=$dataset\_rgb.tar 11 | filepath=http://downloads.cs.stanford.edu/downloads/taskonomy_data/rgb/$file 12 | echo $filepath 13 | cd $dataset 14 | if [ -d "rgb" ] 15 | then 16 | echo rgb exists 17 | else 18 | echo rgb does not exist 19 | wget $filepath 20 | tar -xvf $file 21 | rm $file 22 | fi 23 | cd .. 24 | done 25 | 26 | # download all class_object files 27 | for dataset in ${datasets[@]} 28 | do 29 | file=$dataset\_class_object.tar 30 | filepath=http://downloads.cs.stanford.edu/downloads/taskonomy_data/class_object/$file 31 | echo $filepath 32 | cd $dataset 33 | if [ -d "class_object" ] 34 | then 35 | echo class_object exists 36 | else 37 | echo class_object does not exist 38 | wget $filepath 39 | tar -xvf $file 40 | rm $file 41 | fi 42 | cd .. 
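# The directory check above makes each download idempotent: re-running the
# script skips any building whose rgb/ folder was already extracted, so an
# interrupted run can simply be restarted.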
43 | done 44 | 45 | # download all class_scene files 46 | for dataset in ${datasets[@]} 47 | do 48 | file=$dataset\_class_scene.tar 49 | filepath=http://downloads.cs.stanford.edu/downloads/taskonomy_data/class_scene/$file 50 | echo $filepath 51 | cd $dataset 52 | if [ -d "class_scene" ] 53 | then 54 | echo class_scene exists 55 | else 56 | echo class_scene does not exist 57 | wget $filepath 58 | tar -xvf $file 59 | rm $file 60 | fi 61 | cd .. 62 | done 63 | 64 | # rename all class_places.npy to class_scene.npy 65 | for dataset in ${datasets[@]} 66 | do 67 | echo starting $dataset 68 | for j in $dataset/class_scene/*class_places.npy 69 | do 70 | #echo "$j" 71 | #echo "${j%class_places.npy}class_scene.npy" 72 | mv -- "$j" "${j%class_places.npy}class_scene.npy" 73 | done 74 | done 75 | -------------------------------------------------------------------------------- /naslib/runners/bbo/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from naslib.defaults.trainer import Trainer 4 | from naslib.optimizers import ( 5 | RandomSearch, 6 | Npenas, 7 | RegularizedEvolution, 8 | LocalSearch, 9 | Bananas 10 | ) 11 | 12 | from naslib.search_spaces.core.query_metrics import Metric 13 | from naslib.search_spaces import ( 14 | NasBench101SearchSpace, 15 | NasBench201SearchSpace, 16 | DartsSearchSpace, 17 | NasBenchNLPSearchSpace, 18 | TransBench101SearchSpaceMicro, 19 | TransBench101SearchSpaceMacro, 20 | NasBenchASRSearchSpace 21 | ) 22 | from naslib.utils import utils, setup_logger, get_dataset_api 23 | 24 | from torch.utils.tensorboard import SummaryWriter 25 | 26 | config = utils.get_config_from_args(config_type='bbo-bs') 27 | 28 | logger = setup_logger(config.save + "/log.log") 29 | logger.setLevel(logging.INFO) 30 | 31 | utils.log_args(config) 32 | 33 | writer = SummaryWriter(config.save) 34 | 35 | supported_optimizers = { 36 | 'rs': RandomSearch(config), 37 | 're': RegularizedEvolution(config), 38 | 'bananas': Bananas(config), 39 | 'npenas': Npenas(config), 40 | 'ls': LocalSearch(config), 41 | } 42 | 43 | supported_search_spaces = { 44 | 'nasbench101': NasBench101SearchSpace(), 45 | 'nasbench201': NasBench201SearchSpace(), 46 | 'darts': DartsSearchSpace(), 47 | 'nlp': NasBenchNLPSearchSpace(), 48 | 'transbench101_micro': TransBench101SearchSpaceMicro(config.dataset), 49 | 'transbench101_macro': TransBench101SearchSpaceMacro(), 50 | 'asr': NasBenchASRSearchSpace() 51 | } 52 | 53 | dataset_api = get_dataset_api(config.search_space, config.dataset) 54 | utils.set_seed(config.seed) 55 | 56 | search_space = supported_search_spaces[config.search_space] 57 | 58 | metric = Metric.VAL_ACCURACY if config.search_space == 'darts' else None 59 | 60 | optimizer = supported_optimizers[config.optimizer] 61 | optimizer.adapt_search_space(search_space, dataset_api=dataset_api) 62 | 63 | trainer = Trainer(optimizer, config, lightweight_output=True) 64 | 65 | trainer.search(resume_from="", summary_writer=writer, report_incumbent=False) 66 | trainer.evaluate(resume_from="", dataset_api=dataset_api, metric=metric) 67 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/oneshot_runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import naslib as nl 4 | 5 | from naslib.defaults.predictor_evaluator import PredictorEvaluator 6 | from naslib.defaults.trainer import Trainer 7 | from naslib.optimizers import Bananas, 
OneShotNASOptimizer, RandomNASOptimizer 8 | from naslib.predictors import OneShotPredictor 9 | 10 | from naslib.search_spaces import ( 11 | NasBench101SearchSpace, 12 | NasBench201SearchSpace, 13 | DartsSearchSpace, 14 | ) 15 | from naslib.utils import utils, setup_logger, get_dataset_api 16 | from naslib.utils.utils import get_project_root 17 | 18 | 19 | config = utils.get_config_from_args(config_type="oneshot") 20 | 21 | logger = setup_logger(config.save + "/log.log") 22 | logger.setLevel(logging.INFO) 23 | 24 | utils.log_args(config) 25 | 26 | supported_optimizers = { 27 | "bananas": Bananas(config), 28 | "oneshot": OneShotNASOptimizer(config), 29 | "rsws": RandomNASOptimizer(config), 30 | } 31 | 32 | supported_search_spaces = { 33 | "nasbench101": NasBench101SearchSpace(), 34 | "nasbench201": NasBench201SearchSpace(), 35 | "darts": DartsSearchSpace(), 36 | } 37 | 38 | 39 | # load_labeled = (True if config.search_space == 'darts' else False) 40 | load_labeled = False 41 | dataset_api = get_dataset_api(config.search_space, config.dataset) 42 | utils.set_seed(config.seed) 43 | 44 | search_space = supported_search_spaces[config.search_space] 45 | 46 | optimizer = supported_optimizers[config.optimizer] 47 | optimizer.adapt_search_space(search_space, dataset_api=dataset_api) 48 | 49 | trainer = Trainer(optimizer, config, lightweight_output=True) 50 | 51 | if config.optimizer == "bananas": 52 | trainer.search(resume_from="") 53 | trainer.evaluate(resume_from="", dataset_api=dataset_api) 54 | elif config.optimizer in ["oneshot", "rsws"]: 55 | predictor = OneShotPredictor(config, trainer, model_path=config.model_path) 56 | 57 | predictor_evaluator = PredictorEvaluator(predictor, config=config) 58 | predictor_evaluator.adapt_search_space( 59 | search_space, load_labeled=load_labeled, dataset_api=dataset_api 60 | ) 61 | 62 | # evaluate the predictor 63 | predictor_evaluator.evaluate() 64 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | 17 | available_measures = [] 18 | _measure_impls = {} 19 | 20 | 21 | def measure(name, bn=True, copy_net=True, force_clean=True, **impl_args): 22 | def make_impl(func): 23 | def measure_impl(net_orig, device, *args, **kwargs): 24 | if copy_net: 25 | net = net_orig.get_prunable_copy(bn=bn).to(device) 26 | # set model.train() 27 | else: 28 | net = net_orig 29 | ret = func(net, *args, **kwargs, **impl_args) 30 | if copy_net and force_clean: 31 | import gc 32 | import torch 33 | 34 | del net 35 | torch.cuda.empty_cache() 36 | gc.collect() 37 | return ret 38 | 39 | global _measure_impls 40 | if name in _measure_impls: 41 | raise KeyError(f"Duplicated measure! 
{name}") 42 | available_measures.append(name) 43 | _measure_impls[name] = measure_impl 44 | return func 45 | 46 | return make_impl 47 | 48 | 49 | def calc_measure(name, net, device, *args, **kwargs): 50 | return _measure_impls[name](net, device, *args, **kwargs) 51 | 52 | 53 | def load_all(): 54 | from . import grad_norm 55 | from . import snip 56 | from . import grasp 57 | from . import fisher 58 | from . import jacov 59 | from . import plain 60 | from . import synflow 61 | 62 | 63 | # TODO: should we do that by default? 64 | load_all() 65 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/rs_ws/optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import logging 3 | 4 | from naslib.optimizers import OneShotNASOptimizer 5 | from naslib.search_spaces.darts.conversions import convert_compact_to_genotype 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class RandomNASOptimizer(OneShotNASOptimizer): 11 | """ 12 | Implementation of the Random NAS with weight sharing as in 13 | Li et al. 2019: Random Search and Reproducibility for Neural Architecture Search. 14 | """ 15 | 16 | @staticmethod 17 | def add_alphas(edge): 18 | """ 19 | Function to add the architectural weights to the edges. 20 | """ 21 | len_primitives = len(edge.data.op) 22 | alpha = torch.nn.Parameter( 23 | torch.zeros(size=[len_primitives], requires_grad=False) 24 | ) 25 | edge.data.set("alpha", alpha, shared=True) 26 | 27 | def step(self, data_train, data_val): 28 | input_train, target_train = data_train 29 | input_val, target_val = data_val 30 | 31 | # Update architecture weights by sampling only a random arch and 32 | # setting the alpha values accordingly 33 | self.sample_random_and_update_alphas() 34 | 35 | logits_val = self.graph(input_val) 36 | val_loss = self.loss(logits_val, target_val) 37 | val_loss.backward() 38 | 39 | # Update op weights 40 | self.op_optimizer.zero_grad() 41 | logits_train = self.graph(input_train) 42 | train_loss = self.loss(logits_train, target_train) 43 | train_loss.backward() 44 | if self.grad_clip: 45 | torch.nn.utils.clip_grad_norm_(self.graph.parameters(), self.grad_clip) 46 | self.op_optimizer.step() 47 | 48 | return logits_train, logits_val, train_loss, val_loss 49 | 50 | def sample_random_and_update_alphas(self): 51 | tmp_graph = self.search_space.clone() 52 | tmp_graph.sample_random_architecture() 53 | 54 | if self.graph.get_type() == "nasbench201": 55 | sample = tmp_graph.get_op_indices() 56 | elif self.graph.get_type() == "darts": 57 | sample = convert_compact_to_genotype(tmp_graph.get_compact()) 58 | 59 | self.set_alphas_from_path(sample) 60 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/bnn_base.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import json 4 | 5 | from naslib.predictors.utils.encodings import encode 6 | from naslib.predictors.predictor import Predictor 7 | 8 | 9 | class BNN(Predictor): 10 | def __init__(self, encoding_type="adjacency_one_hot", 11 | ss_type="nasbench201", hparams_from_file=None): 12 | self.encoding_type = encoding_type 13 | self.ss_type = ss_type 14 | self.hparams_from_file=hparams_from_file 15 | 16 | def get_model(self, **kwargs): 17 | return NotImplementedError("Model needs to be defined.") 18 | 19 | def train_model(self, xtrain, ytrain): 20 | return NotImplementedError("Training method not 
defined.") 21 | 22 | def fit(self, xtrain, ytrain, train_info=None, **kwargs): 23 | if self.encoding_type is not None: 24 | _xtrain = np.array( 25 | [ 26 | encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type) 27 | for arch in xtrain 28 | ] 29 | ) 30 | else: 31 | _xtrain = xtrain 32 | _ytrain = np.array(ytrain) 33 | 34 | self.model = self.get_model(**kwargs) 35 | if self.hparams_from_file and self.hparams_from_file not in ['False', 'None'] \ 36 | and os.path.exists(self.hparams_from_file): 37 | self.num_steps = json.load(open(self.hparams_from_file, 'rb'))['bohamiann']['num_steps'] 38 | print('loaded hyperparams from', self.hparams_from_file) 39 | else: 40 | self.num_steps = 100 41 | self.train_model(_xtrain, _ytrain) 42 | 43 | train_pred = self.query(xtrain) 44 | train_error = np.mean(abs(train_pred - _ytrain)) 45 | return train_error 46 | 47 | def query(self, xtest, info=None): 48 | if self.encoding_type is not None: 49 | test_data = np.array( 50 | [ 51 | encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type) 52 | for arch in xtest 53 | ] 54 | ) 55 | else: 56 | test_data = xtest 57 | 58 | m, v = self.model.predict(test_data) 59 | return np.squeeze(m) 60 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import subprocess 4 | from setuptools import setup, find_packages 5 | 6 | # Check for python version 7 | if sys.version_info.major != 3 or sys.version_info.minor < 7 or sys.version_info.minor > 9: 8 | raise ValueError( 9 | 'Unsupported Python version %d.%d.%d found. NASLib requires Python ' 10 | '3.7, 3.8 or 3.9' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro) 11 | ) 12 | 13 | 14 | cwd = os.path.dirname(os.path.abspath(__file__)) 15 | 16 | version_path = os.path.join(cwd, 'naslib', '__version__.py') 17 | with open(version_path) as fh: 18 | version = fh.readlines()[-1].split()[-1].strip("\"'") 19 | 20 | with open("README.md", "r") as f: 21 | long_description = f.read() 22 | 23 | requirements = [] 24 | with open("requirements.txt", "r") as f: 25 | for line in f: 26 | requirements.append(line.strip()) 27 | 28 | #git_nasbench = "git+https://github.com/yashsmehta/nasbench.git@master" 29 | # 30 | #try: 31 | #import nasbench 32 | #except ImportError: 33 | #if '--user' in sys.argv: 34 | #subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade', 35 | #'--user', git_nasbench], check=False) 36 | #else: 37 | #subprocess.run([sys.executable, '-m', 'pip', 'install', '--upgrade', 38 | #git_nasbench], check=False) 39 | 40 | 41 | print('-- Building version ' + version) 42 | print('-- Note: by default installs pytorch-cpu version (1.9.0), update to torch-gpu by following instructions from: https://pytorch.org/get-started/locally/') 43 | 44 | setup( 45 | name='naslib', 46 | version=version, 47 | description='NASLib: A modular and extensible Neural Architecture Search (NAS) library.', 48 | long_description=long_description, 49 | long_description_content_type="text/markdown", 50 | author='AutoML Freiburg', 51 | author_email='zelaa@cs.uni-freiburg.de', 52 | url='https://github.com/automl/NASLib', 53 | license='Apache License 2.0', 54 | classifiers=['Development Status :: 1 - Beta'], 55 | packages=find_packages(), 56 | python_requires='>=3.7', 57 | platforms=['Linux'], 58 | install_requires=requirements, 59 | keywords=['NAS', 'automl'], 60 | test_suite='pytest' 61 | ) 62 | 
-------------------------------------------------------------------------------- /scripts/predictors/run_darts.sh: -------------------------------------------------------------------------------- 1 | predictors=(fisher grad_norm grasp jacov snip synflow \ 2 | lce lce_m sotl sotle valacc valloss \ 3 | lcsvr omni_ngb omni_seminas \ 4 | bananas bonas gcn mlp nao seminas \ 5 | lgb ngb rf xgb \ 6 | bayes_lin_reg bohamiann dngo \ 7 | gp sparse_gp var_sparse_gp) 8 | 9 | experiment_types=(single single single single single single \ 10 | vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity \ 11 | vary_both vary_both vary_both \ 12 | vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size \ 13 | vary_train_size vary_train_size vary_train_size vary_train_size \ 14 | vary_train_size vary_train_size vary_train_size \ 15 | vary_train_size vary_train_size vary_train_size) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 | fi 22 | 23 | # folders: 24 | base_file=NASLib/naslib 25 | s3_folder=p301 26 | out_dir=$s3_folder\_$start_seed 27 | 28 | # search space / data: 29 | search_space=darts 30 | dataset=cifar10 31 | 32 | # other variables: 33 | trials=100 34 | end_seed=$(($start_seed + $trials - 1)) 35 | save_to_s3=true 36 | test_size=100 37 | 38 | # create config files 39 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 40 | do 41 | predictor=${predictors[$i]} 42 | experiment_type=${experiment_types[$i]} 43 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 44 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 45 | --dataset=$dataset --config_type predictor --search_space $search_space 46 | done 47 | 48 | # run experiments 49 | for t in $(seq $start_seed $end_seed) 50 | do 51 | for predictor in ${predictors[@]} 52 | do 53 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 54 | echo ================running $predictor trial: $t ===================== 55 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 56 | done 57 | if [ "$save_to_s3" ] 58 | then 59 | # zip and save to s3 60 | echo zipping and saving to s3 61 | zip -r $out_dir.zip $out_dir 62 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 63 | fi 64 | done 65 | -------------------------------------------------------------------------------- /scripts/predictors/run_nb201.sh: -------------------------------------------------------------------------------- 1 | predictors=(fisher grad_norm grasp jacov snip synflow \ 2 | lce lce_m sotl sotle valacc valloss \ 3 | lcsvr omni_ngb omni_seminas \ 4 | bananas bonas gcn mlp nao seminas \ 5 | lgb ngb rf xgb \ 6 | bayes_lin_reg bohamiann dngo \ 7 | gp sparse_gp var_sparse_gp) 8 | 9 | experiment_types=(single single single single single single \ 10 | vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity vary_fidelity \ 11 | vary_both vary_both vary_both \ 12 | vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size vary_train_size \ 13 | vary_train_size vary_train_size vary_train_size vary_train_size \ 14 | vary_train_size vary_train_size vary_train_size \ 15 | vary_train_size vary_train_size vary_train_size) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 | fi 22 | 23 | # folders: 24 | base_file=NASLib/naslib 25 | s3_folder=p201_im 26 | 
out_dir=$s3_folder\_$start_seed 27 | 28 | # search space / data: 29 | search_space=nasbench201 30 | dataset=ImageNet16-120 31 | 32 | # other variables: 33 | trials=100 34 | end_seed=$(($start_seed + $trials - 1)) 35 | save_to_s3=true 36 | test_size=200 37 | 38 | # create config files 39 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 40 | do 41 | predictor=${predictors[$i]} 42 | experiment_type=${experiment_types[$i]} 43 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 44 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 45 | --dataset=$dataset --config_type predictor --search_space $search_space 46 | done 47 | 48 | # run experiments 49 | for t in $(seq $start_seed $end_seed) 50 | do 51 | for predictor in ${predictors[@]} 52 | do 53 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 54 | echo ================running $predictor trial: $t ===================== 55 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 56 | done 57 | if [ "$save_to_s3" ] 58 | then 59 | # zip and save to s3 60 | echo zipping and saving to s3 61 | zip -r $out_dir.zip $out_dir 62 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 63 | fi 64 | done 65 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import naslib as nl 4 | 5 | from naslib.defaults.predictor_evaluator import PredictorEvaluator 6 | from naslib.defaults.trainer import Trainer 7 | from naslib.optimizers import Bananas, Npenas, OneShotNASOptimizer, RandomNASOptimizer 8 | from naslib.predictors import OneShotPredictor 9 | 10 | from naslib.search_spaces import ( 11 | NasBench101SearchSpace, 12 | NasBench201SearchSpace, 13 | DartsSearchSpace, 14 | NasBenchNLPSearchSpace, 15 | ) 16 | from naslib.utils import utils, setup_logger, get_dataset_api 17 | from naslib.utils.utils import get_project_root 18 | 19 | 20 | config = utils.get_config_from_args(config_type="nas_predictor") 21 | 22 | logger = setup_logger(config.save + "/log.log") 23 | logger.setLevel(logging.INFO) 24 | 25 | utils.log_args(config) 26 | 27 | supported_optimizers = { 28 | "bananas": Bananas(config), 29 | "npenas": Npenas(config), 30 | #'oneshot': OneShotNASOptimizer(config), 31 | #'rsws': RandomNASOptimizer(config), 32 | } 33 | 34 | supported_search_spaces = { 35 | "nasbench101": NasBench101SearchSpace(), 36 | "nasbench201": NasBench201SearchSpace(), 37 | "darts": DartsSearchSpace(), 38 | "nlp": NasBenchNLPSearchSpace(), 39 | } 40 | 41 | 42 | # load_labeled = (True if config.search_space == 'darts' else False) 43 | load_labeled = False 44 | dataset_api = get_dataset_api(config.search_space, config.dataset) 45 | utils.set_seed(config.seed) 46 | 47 | search_space = supported_search_spaces[config.search_space] 48 | 49 | optimizer = supported_optimizers[config.optimizer] 50 | optimizer.adapt_search_space(search_space, dataset_api=dataset_api) 51 | 52 | trainer = Trainer(optimizer, config, lightweight_output=True) 53 | 54 | if config.optimizer in ["bananas", "npenas"]: 55 | trainer.search(resume_from="") 56 | trainer.evaluate(resume_from="", dataset_api=dataset_api) 57 | elif config.optimizer in ["oneshot", "rsws"]: 58 | predictor = OneShotPredictor(config, trainer, model_path=config.model_path) 59 | 60 | 
predictor_evaluator = PredictorEvaluator(predictor, config=config) 61 | predictor_evaluator.adapt_search_space( 62 | search_space, load_labeled=load_labeled, dataset_api=dataset_api 63 | ) 64 | 65 | # evaluate the predictor 66 | predictor_evaluator.evaluate() 67 | -------------------------------------------------------------------------------- /naslib/predictors/trees/random_forest.py: -------------------------------------------------------------------------------- 1 | from sklearn.ensemble import RandomForestRegressor as RF 2 | import numpy as np 3 | import os 4 | import json 5 | 6 | from naslib.predictors.trees import BaseTree 7 | from naslib.predictors.trees.ngb import loguniform 8 | 9 | 10 | class RandomForestPredictor(BaseTree): 11 | @property 12 | def default_hyperparams(self): 13 | # NOTE: Copied from NB301 14 | params = { 15 | "n_estimators": 116, 16 | "max_features": 0.17055852159745608, 17 | "min_samples_leaf": 2, 18 | "min_samples_split": 2, 19 | "bootstrap": False, 20 | #'verbose': -1 21 | } 22 | return params 23 | 24 | def set_random_hyperparams(self): 25 | if self.hyperparams is None: 26 | # evaluate the default config first during HPO 27 | params = self.default_hyperparams.copy() 28 | else: 29 | params = { 30 | "n_estimators": int(loguniform(16, 128)), 31 | "max_features": loguniform(0.1, 0.9), 32 | "min_samples_leaf": int(np.random.choice(19) + 1), 33 | "min_samples_split": int(np.random.choice(18) + 2), 34 | "bootstrap": False, 35 | #'verbose': -1 36 | } 37 | self.hyperparams = params 38 | return params 39 | 40 | def get_dataset(self, encodings, labels=None): 41 | if labels is None: 42 | return encodings 43 | else: 44 | return (encodings, (labels - self.mean) / self.std) 45 | 46 | def train(self, train_data): 47 | X_train, y_train = train_data 48 | model = RF(**self.hyperparams) 49 | return model.fit(X_train, y_train) 50 | 51 | def fit(self, xtrain, ytrain, train_info=None, params=None, **kwargs): 52 | if self.hparams_from_file and self.hparams_from_file not in ['False', 'None'] \ 53 | and os.path.exists(self.hparams_from_file): 54 | self.hyperparams = json.load(open(self.hparams_from_file, 'rb'))['rf'] 55 | print('loaded hyperparams from', self.hparams_from_file) 56 | elif self.hyperparams is None: 57 | self.hyperparams = self.default_hyperparams.copy() 58 | return super(RandomForestPredictor, self).fit(xtrain, ytrain, params, **kwargs) 59 | -------------------------------------------------------------------------------- /scripts/nas/run_nb201.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=1 2 | #optimizers=(rs) 3 | #optimizers=(re) 4 | #optimizers=(rea_lce) 5 | #optimizers=(rea_svr) 6 | optimizers=(ls) 7 | #optimizers=(ls_lce) 8 | #optimizers=(ls_svr) 9 | #optimizers=(bananas) 10 | #optimizers=(bananas_svr) 11 | #optimizers=(bananas_lce) 12 | #optimizers=(hb_simple) 13 | #optimizers=(bohb_simple) 14 | #optimizers=(dehb_simple) 15 | predictor=bananas #optional: (gcn xgb) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 | fi 22 | 23 | if [[ $optimizers == bananas* ]] 24 | then 25 | acq_fn_optimization=mutation 26 | else 27 | acq_fn_optimization=random_sampling 28 | fi 29 | 30 | # folders: 31 | base_file=naslib 32 | s3_folder=nas201 33 | out_dir=$s3_folder\_$start_seed 34 | 35 | # search space / data: 36 | search_space=nasbench201 37 | dataset=cifar10 38 | budgets=400000 39 | fidelity=200 40 | single_fidelity=20 41 | population_size=20 42 | sample_size=10 43 | num_init=20 44 
| num_arches_to_mutate=4 45 | max_mutations=5 46 | 47 | # trials / seeds: 48 | trials=30 49 | end_seed=$(($start_seed + $trials - 1)) 50 | 51 | # create config files 52 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 53 | do 54 | optimizer=${optimizers[$i]} 55 | python $base_file/benchmarks/create_configs.py \ 56 | --budgets $budgets --start_seed $start_seed --trials $trials \ 57 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 58 | --search_space $search_space --optimizer $optimizer \ 59 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 60 | --fidelity $fidelity --single_fidelity $single_fidelity --population_size $population_size \ 61 | --sample_size $sample_size --num_arches_to_mutate $num_arches_to_mutate --max_mutations $max_mutations \ 62 | --num_init $num_init 63 | done 64 | 65 | # run experiments 66 | for t in $(seq $start_seed $end_seed) 67 | do 68 | for optimizer in ${optimizers[@]} 69 | do 70 | if [[ $optimizer == bananas* ]] 71 | then 72 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 73 | else 74 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 75 | fi 76 | echo ================running $optimizer trial: $t ===================== 77 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 78 | done 79 | done -------------------------------------------------------------------------------- /scripts/nas/run_nb211.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=1 2 | #optimizers=(rs) 3 | #optimizers=(re) 4 | #optimizers=(rea_lce) 5 | #optimizers=(rea_svr) 6 | optimizers=(ls) 7 | #optimizers=(ls_lce) 8 | #optimizers=(ls_svr) 9 | #optimizers=(bananas) 10 | #optimizers=(bananas_svr) 11 | #optimizers=(bananas_lce) 12 | #optimizers=(hb_simple) 13 | #optimizers=(bohb_simple) 14 | #optimizers=(dehb_simple) 15 | predictor=bananas #optional: (gcn xgb) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 | fi 22 | 23 | if [[ $optimizers == bananas* ]] 24 | then 25 | acq_fn_optimization=mutation 26 | else 27 | acq_fn_optimization=random_sampling 28 | fi 29 | 30 | # folders: 31 | base_file=naslib 32 | s3_folder=nas201 33 | out_dir=$s3_folder\_$start_seed 34 | 35 | # search space / data: 36 | search_space=nasbench211 37 | dataset=cifar10 38 | budgets=400000 39 | fidelity=200 40 | single_fidelity=20 41 | population_size=20 42 | sample_size=10 43 | num_init=20 44 | num_arches_to_mutate=4 45 | max_mutations=5 46 | 47 | # trials / seeds: 48 | trials=30 49 | end_seed=$(($start_seed + $trials - 1)) 50 | 51 | # create config files 52 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 53 | do 54 | optimizer=${optimizers[$i]} 55 | python $base_file/benchmarks/create_configs.py \ 56 | --budgets $budgets --start_seed $start_seed --trials $trials \ 57 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 58 | --search_space $search_space --optimizer $optimizer \ 59 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 60 | --fidelity $fidelity --single_fidelity $single_fidelity --population_size $population_size \ 61 | --sample_size $sample_size --num_arches_to_mutate $num_arches_to_mutate --max_mutations $max_mutations \ 62 | --num_init $num_init 63 | done 64 | 65 | # run experiments 66 | for t in $(seq $start_seed $end_seed) 67 | do 68 | for optimizer in ${optimizers[@]} 69 | do 70 | if [[ $optimizer == bananas* ]] 71 | then 72 | 
config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 73 | else 74 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 75 | fi 76 | echo ================running $optimizer trial: $t ===================== 77 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 78 | done 79 | done -------------------------------------------------------------------------------- /scripts/nas/run_nb201_cifar100.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=1 2 | #optimizers=(rs) 3 | #optimizers=(re) 4 | #optimizers=(rea_lce) 5 | #optimizers=(rea_svr) 6 | optimizers=(ls) 7 | #optimizers=(ls_lce) 8 | #optimizers=(ls_svr) 9 | #optimizers=(bananas) 10 | #optimizers=(bananas_svr) 11 | #optimizers=(bananas_lce) 12 | #optimizers=(hb_simple) 13 | #optimizers=(bohb_simple) 14 | #optimizers=(dehb_simple) 15 | predictor=bananas #optional: (gcn xgb) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 | fi 22 | 23 | if [[ $optimizers == bananas* ]] 24 | then 25 | acq_fn_optimization=mutation 26 | else 27 | acq_fn_optimization=random_sampling 28 | fi 29 | 30 | # folders: 31 | base_file=naslib 32 | s3_folder=nas201 33 | out_dir=$s3_folder\_$start_seed 34 | 35 | # search space / data: 36 | search_space=nasbench201 37 | dataset=cifar100 38 | budgets=800000 39 | fidelity=200 40 | single_fidelity=20 41 | population_size=20 42 | sample_size=10 43 | num_init=20 44 | num_arches_to_mutate=4 45 | max_mutations=5 46 | 47 | # trials / seeds: 48 | trials=30 49 | end_seed=$(($start_seed + $trials - 1)) 50 | 51 | # create config files 52 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 53 | do 54 | optimizer=${optimizers[$i]} 55 | python $base_file/benchmarks/create_configs.py \ 56 | --budgets $budgets --start_seed $start_seed --trials $trials \ 57 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 58 | --search_space $search_space --optimizer $optimizer \ 59 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 60 | --fidelity $fidelity --single_fidelity $single_fidelity --population_size $population_size \ 61 | --sample_size $sample_size --num_arches_to_mutate $num_arches_to_mutate --max_mutations $max_mutations \ 62 | --num_init $num_init 63 | done 64 | 65 | # run experiments 66 | for t in $(seq $start_seed $end_seed) 67 | do 68 | for optimizer in ${optimizers[@]} 69 | do 70 | if [[ $optimizer == bananas* ]] 71 | then 72 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 73 | else 74 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 75 | fi 76 | echo ================running $optimizer trial: $t ===================== 77 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 78 | done 79 | done -------------------------------------------------------------------------------- /scripts/nas/run_nb201_imagenet16-200.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=1 2 | #optimizers=(rs) 3 | #optimizers=(re) 4 | #optimizers=(rea_lce) 5 | optimizers=(rea_svr) 6 | #optimizers=(ls) 7 | #optimizers=(ls_lce) 8 | #optimizers=(ls_svr) 9 | #optimizers=(bananas) 10 | #optimizers=(bananas_svr) 11 | #optimizers=(bananas_lce) 12 | #optimizers=(hb_simple) 13 | #optimizers=(bohb_simple) 14 | #optimizers=(dehb_simple) 15 | predictor=bananas #optional: (gcn xgb) 16 | 17 | start_seed=$1 18 | if [ -z "$start_seed" ] 19 | then 20 | start_seed=0 21 
| fi 22 | 23 | if [[ $optimizers == bananas* ]] 24 | then 25 | acq_fn_optimization=mutation 26 | else 27 | acq_fn_optimization=random_sampling 28 | fi 29 | 30 | # folders: 31 | base_file=naslib 32 | s3_folder=nas201 33 | out_dir=$s3_folder\_$start_seed 34 | 35 | # search space / data: 36 | search_space=nasbench201 37 | dataset=ImageNet16-120 38 | budgets=1000000 39 | fidelity=200 40 | single_fidelity=20 41 | population_size=20 42 | sample_size=10 43 | num_init=20 44 | num_arches_to_mutate=4 45 | max_mutations=5 46 | 47 | # trials / seeds: 48 | trials=30 49 | end_seed=$(($start_seed + $trials - 1)) 50 | 51 | # create config files 52 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 53 | do 54 | optimizer=${optimizers[$i]} 55 | python $base_file/benchmarks/create_configs.py \ 56 | --budgets $budgets --start_seed $start_seed --trials $trials \ 57 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 58 | --search_space $search_space --optimizer $optimizer \ 59 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 60 | --fidelity $fidelity --single_fidelity $single_fidelity --population_size $population_size \ 61 | --sample_size $sample_size --num_arches_to_mutate $num_arches_to_mutate --max_mutations $max_mutations \ 62 | --num_init $num_init 63 | done 64 | 65 | # run experiments 66 | for t in $(seq $start_seed $end_seed) 67 | do 68 | for optimizer in ${optimizers[@]} 69 | do 70 | if [[ $optimizer == bananas* ]] 71 | then 72 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 73 | else 74 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 75 | fi 76 | echo ================running $optimizer trial: $t ===================== 77 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 78 | done 79 | done -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench101/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive 4 | 5 | 6 | """ 7 | Code below from NAS-Bench-201 and slightly adapted 8 | @inproceedings{dong2020nasbench201, 9 | title = {NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search}, 10 | author = {Dong, Xuanyi and Yang, Yi}, 11 | booktitle = {International Conference on Learning Representations (ICLR)}, 12 | url = {https://openreview.net/forum?id=HJxyZkBKDr}, 13 | year = {2020} 14 | } 15 | """ 16 | 17 | 18 | class ReLUConvBN(AbstractPrimitive): 19 | def __init__(self, C_in, C, kernel_size, stride=1, affine=False): 20 | super().__init__(locals()) 21 | self.kernel_size = kernel_size 22 | pad = 0 if stride == 1 and kernel_size == 1 else 1 23 | self.op = nn.Sequential( 24 | nn.ReLU(inplace=False), 25 | nn.Conv2d(C_in, C, kernel_size, stride=stride, padding=pad, bias=False), 26 | nn.BatchNorm2d(C, affine=affine), 27 | ) 28 | 29 | def forward(self, x, edge_data): 30 | return self.op(x) 31 | 32 | def get_embedded_ops(self): 33 | return None 34 | 35 | @property 36 | def get_op_name(self): 37 | op_name = super().get_op_name 38 | op_name += "{}x{}".format(self.kernel_size, self.kernel_size) 39 | return op_name 40 | 41 | 42 | class ResNetBasicblock(AbstractPrimitive): 43 | def __init__(self, C_in, C_out, stride, affine=True): 44 | super().__init__() 45 | assert stride == 1 or stride == 2, "invalid stride {:}".format(stride) 46 | self.conv_a = ReLUConvBN(C_in, C_out, 3, stride) 47 | self.conv_b = ReLUConvBN(C_out,
C_out, 3) 48 | if stride == 2: 49 | self.downsample = nn.Sequential( 50 | nn.AvgPool2d(kernel_size=2, stride=2, padding=0), 51 | nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False), 52 | ) 53 | else: 54 | self.downsample = None 55 | 56 | def forward(self, x, edge_data): 57 | basicblock = self.conv_a(x, None) 58 | basicblock = self.conv_b(basicblock, None) 59 | residual = self.downsample(x) if self.downsample is not None else x 60 | return residual + basicblock 61 | 62 | def get_embedded_ops(self): 63 | return None 64 | -------------------------------------------------------------------------------- /scripts/nas/run_nb111.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=$HOME/nasbench311/nasbench301:$HOME/nasbench311/NASLib:$PYTHONPATH 2 | export OMP_NUM_THREADS=2 3 | #optimizers=(rs) 4 | optimizers=(re) 5 | #optimizers=(rea_lce) 6 | #optimizers=(rea_svr) 7 | #optimizers=(ls) 8 | #optimizers=(ls_lce) 9 | #optimizers=(ls_svr) 10 | #optimizers=(bananas) 11 | #optimizers=(bananas_svr) 12 | #optimizers=(bananas_lce) 13 | #optimizers=(hb_simple) 14 | #optimizers=(bohb_simple) 15 | #optimizers=(dehb_simple) 16 | predictor=bananas #optional: (gcn xgb) 17 | 18 | start_seed=$1 19 | if [ -z "$start_seed" ] 20 | then 21 | start_seed=0 22 | fi 23 | 24 | if [[ $optimizers == bananas* ]] 25 | then 26 | acq_fn_optimization=mutation 27 | else 28 | acq_fn_optimization=random_sampling 29 | fi 30 | 31 | # folders: 32 | base_file=naslib 33 | s3_folder=nas101 34 | out_dir=$s3_folder\_$start_seed 35 | 36 | # search space / data: 37 | search_space=nasbench101 38 | dataset=cifar10 39 | budgets=560000 40 | fidelity=107 41 | single_fidelity=12 42 | population_size=20 43 | sample_size=10 44 | num_arches_to_mutate=1 45 | max_mutations=1 46 | 47 | # trials / seeds: 48 | trials=30 49 | end_seed=$(($start_seed + $trials - 1)) 50 | 51 | # create config files 52 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 53 | do 54 | optimizer=${optimizers[$i]} 55 | python $base_file/benchmarks/create_configs.py \ 56 | --budgets $budgets --start_seed $start_seed --trials $trials \ 57 | --out_dir $out_dir --dataset=$dataset --config_type nas \ 58 | --search_space $search_space --optimizer $optimizer \ 59 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 60 | --fidelity $fidelity --single_fidelity $single_fidelity --population_size $population_size \ 61 | --sample_size $sample_size --num_arches_to_mutate $num_arches_to_mutate --max_mutations $max_mutations 62 | done 63 | 64 | # run experiments 65 | for t in $(seq $start_seed $end_seed) 66 | do 67 | for optimizer in ${optimizers[@]} 68 | do 69 | if [[ $optimizer == bananas* ]] 70 | then 71 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$predictor\_$t.yaml 72 | else 73 | config_file=$out_dir/$dataset/configs/nas/config\_$optimizer\_$t.yaml 74 | fi 75 | echo ================running $optimizer trial: $t ===================== 76 | python $base_file/benchmarks/nas/runner.py --config-file $config_file 77 | done 78 | done -------------------------------------------------------------------------------- /scripts/predictors/run_hpo_test.sh: -------------------------------------------------------------------------------- 1 | search_spaces=(nlp transbench101_micro transbench101_macro) 2 | 3 | datasets=(penntreebank class_object jigsaw) 4 | 5 | predictors=(rf bohamiann) 6 | 7 | start_hposeed=$1 8 | if [ -z "$start_hposeed" ] 9 | then 10 | start_hposeed=0 11 | fi 12 | 13 | # folders: 14 | 
base_file=NASLib/naslib 15 | save_to_s3=true 16 | s3_folder=predictor_hpo_sep24_test 17 | out_dir=$s3_folder\_$start_hposeed 18 | hpo_config_folder=predictor_hpo 19 | 20 | # there are two types of seeds: 21 | # 'hposeed' for the random hyperparameters 22 | # 'seed' for the randomness within the algorithm 23 | 24 | hpo_trials=10 # 1000 25 | end_hposeed=$(($start_hposeed + $hpo_trials - 1)) 26 | start_seed=0 27 | end_seed=2 # 20 28 | 29 | train_size=10 #100 30 | test_size=20 #200 31 | 32 | for hposeed in $(seq $start_hposeed $end_hposeed) 33 | do 34 | for i in $(seq 0 $((${#search_spaces[@]}-1)) ) 35 | do 36 | search_space=${search_spaces[$i]} 37 | dataset=${datasets[$i]} 38 | for predictor in ${predictors[@]} 39 | do 40 | for seed in $(seq $start_seed $end_seed) 41 | do 42 | # create experiment configs 43 | base_folder=$out_dir/hpo\_$hposeed/$search_space/$dataset 44 | save_folder=$base_folder/$predictor/$seed 45 | config_file=$base_folder/configs/$predictor\_$seed.yaml 46 | python $base_file/benchmarks/generate_predictor_hpo_configs.py --search_space \ 47 | $search_space --dataset $dataset --predictor $predictor --hposeed $hposeed \ 48 | --seed $seed --train_size_single $train_size --test_size $test_size --out_dir $out_dir \ 49 | --save_folder $save_folder --config_file $config_file --hpo_config_folder \ 50 | $hpo_config_folder 51 | 52 | # run the experiment 53 | echo =========running hpo $hposeed $search_space $dataset $predictor trial: $seed ============= 54 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 55 | done 56 | done 57 | done 58 | #if [ "$save_to_s3" ] 59 | #then 60 | # zip and save to s3 61 | #echo zipping and saving to s3 62 | #zip -r $out_dir.zip $out_dir 63 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 64 | #fi 65 | done -------------------------------------------------------------------------------- /naslib/runners/nas/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from naslib.defaults.trainer import Trainer 4 | from naslib.optimizers import ( 5 | RandomSearch, 6 | Npenas, 7 | RegularizedEvolution, 8 | LocalSearch, 9 | Bananas, 10 | DARTSOptimizer, 11 | DrNASOptimizer, 12 | GDASOptimizer 13 | ) 14 | 15 | from naslib.search_spaces import ( 16 | NasBench101SearchSpace, 17 | NasBench201SearchSpace, 18 | DartsSearchSpace, 19 | NasBenchNLPSearchSpace, 20 | TransBench101SearchSpaceMicro, 21 | TransBench101SearchSpaceMacro, 22 | NasBenchASRSearchSpace 23 | ) 24 | 25 | from naslib.utils import utils, setup_logger, get_dataset_api 26 | 27 | from naslib.search_spaces.transbench101.loss import SoftmaxCrossEntropyWithLogits 28 | 29 | config = utils.get_config_from_args(config_type='nas') 30 | 31 | logger = setup_logger(config.save + "/log.log") 32 | logger.setLevel(logging.INFO) 33 | 34 | utils.log_args(config) 35 | 36 | supported_optimizers = { 37 | 'rs': RandomSearch(config), 38 | 're': RegularizedEvolution(config), 39 | 'bananas': Bananas(config), 40 | 'npenas': Npenas(config), 41 | 'ls': LocalSearch(config), 42 | 'darts': DARTSOptimizer(config), 43 | 'drnas': DrNASOptimizer(config), 44 | 'gdas': GDASOptimizer(config), 45 | } 46 | 47 | supported_search_spaces = { 48 | 'nasbench101': NasBench101SearchSpace(), 49 | 'nasbench201': NasBench201SearchSpace(), 50 | 'darts': DartsSearchSpace(), 51 | 'nlp': NasBenchNLPSearchSpace(), 52 | 'transbench101_micro': TransBench101SearchSpaceMicro(config.dataset), 53 | 'transbench101_macro': 
TransBench101SearchSpaceMacro(), 54 | 'asr': NasBenchASRSearchSpace(), 55 | } 56 | 57 | dataset_api = get_dataset_api(config.search_space, config.dataset) 58 | utils.set_seed(config.seed) 59 | 60 | search_space = supported_search_spaces[config.search_space] 61 | 62 | optimizer = supported_optimizers[config.optimizer] 63 | optimizer.adapt_search_space(search_space, dataset_api=dataset_api) 64 | 65 | import torch 66 | 67 | if config.dataset in ['class_object', 'class_scene']: 68 | optimizer.loss = SoftmaxCrossEntropyWithLogits() 69 | elif config.dataset == 'autoencoder': 70 | optimizer.loss = torch.nn.L1Loss() 71 | 72 | 73 | trainer = Trainer(optimizer, config, lightweight_output=True) 74 | 75 | trainer.search(resume_from="") 76 | trainer.evaluate(resume_from="", dataset_api=dataset_api) 77 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_infers/tiny_network.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | import torch.nn as nn 5 | from ..cell_operations import ResNetBasicblock 6 | from .cells import InferCell 7 | 8 | 9 | # The macro structure for architectures in NAS-Bench-201 10 | class TinyNetwork(nn.Module): 11 | def __init__(self, C, N, genotype, num_classes): 12 | super(TinyNetwork, self).__init__() 13 | self._C = C 14 | self._layerN = N 15 | 16 | self.stem = nn.Sequential( 17 | nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C) 18 | ) 19 | 20 | layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N 21 | layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N 22 | 23 | C_prev = C 24 | self.cells = nn.ModuleList() 25 | for index, (C_curr, reduction) in enumerate( 26 | zip(layer_channels, layer_reductions) 27 | ): 28 | if reduction: 29 | cell = ResNetBasicblock(C_prev, C_curr, 2, True) 30 | else: 31 | cell = InferCell(genotype, C_prev, C_curr, 1) 32 | self.cells.append(cell) 33 | C_prev = cell.out_dim 34 | self._Layer = len(self.cells) 35 | 36 | self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True)) 37 | self.global_pooling = nn.AdaptiveAvgPool2d(1) 38 | self.classifier = nn.Linear(C_prev, num_classes) 39 | 40 | def get_message(self): 41 | string = self.extra_repr() 42 | for i, cell in enumerate(self.cells): 43 | string += "\n {:02d}/{:02d} :: {:}".format( 44 | i, len(self.cells), cell.extra_repr() 45 | ) 46 | return string 47 | 48 | def extra_repr(self): 49 | return "{name}(C={_C}, N={_layerN}, L={_Layer})".format( 50 | name=self.__class__.__name__, **self.__dict__ 51 | ) 52 | 53 | def forward(self, inputs): 54 | feature = self.stem(inputs) 55 | for i, cell in enumerate(self.cells): 56 | feature = cell(feature) 57 | 58 | out = self.lastact(feature) 59 | out = self.global_pooling(out) 60 | out = out.view(out.size(0), -1) 61 | logits = self.classifier(out) 62 | 63 | return out, logits 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.DS_Store 2 | 3 | # Log files 4 | *.out 5 | *.err 6 | *.log 7 | *.json 8 | 9 | # Model files 10 | *.pth 11 | 12 | # slurm scripts 13 | slurm_scripts/* 14 | run/ 15 | vscode_remote_debugging/ 16 | 17 | src/* 18 | 19 | # Everything stored in subfolders of benchmark 
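# (i.e. directories two levels below naslib/benchmarks/ -- typically generated
# experiment outputs -- are ignored, while the benchmark scripts themselves
# stay tracked)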
20 | naslib/benchmarks/*/*/ 21 | examples/*/ 22 | 23 | # IDE related 24 | .vscode/ 25 | .idea/ 26 | 27 | # Byte-compiled / optimized / DLL files 28 | __pycache__/ 29 | *.py[cod] 30 | *$py.class 31 | 32 | # C extensions 33 | *.so 34 | 35 | # Distribution / packaging 36 | .Python 37 | env/ 38 | build/ 39 | develop-eggs/ 40 | dist/ 41 | downloads/ 42 | eggs/ 43 | .eggs/ 44 | lib/ 45 | lib64/ 46 | parts/ 47 | sdist/ 48 | var/ 49 | wheels/ 50 | *.egg-info/ 51 | .installed.cfg 52 | *.egg 53 | 54 | # PyInstaller 55 | # Usually these files are written by a python script from a template 56 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 57 | *.manifest 58 | *.spec 59 | 60 | # Installer logs 61 | pip-log.txt 62 | pip-delete-this-directory.txt 63 | 64 | # Unit test / coverage reports 65 | htmlcov/ 66 | .tox/ 67 | .coverage 68 | .coverage.* 69 | .cache 70 | nosetests.xml 71 | coverage.xml 72 | *.cover 73 | .hypothesis/ 74 | 75 | # Translations 76 | *.mo 77 | *.pot 78 | 79 | # Django stuff: 80 | *.log 81 | local_settings.py 82 | 83 | # Flask stuff: 84 | instance/ 85 | .webassets-cache 86 | 87 | # Scrapy stuff: 88 | .scrapy 89 | 90 | # Sphinx documentation 91 | docs/_build/ 92 | 93 | # PyBuilder 94 | target/ 95 | 96 | # Jupyter Notebook 97 | .ipynb_checkpoints 98 | 99 | # pyenv 100 | .python-version 101 | 102 | # celery beat schedule file 103 | celerybeat-schedule 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # dotenv 109 | .env 110 | 111 | # virtualenv 112 | .venv 113 | venv/ 114 | ENV/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | runs/ 129 | data/ 130 | *.tfrecord 131 | *.png 132 | *.pdf 133 | *.pkl 134 | *.obj 135 | *bohb_output 136 | logs/ 137 | run_*/ 138 | final_eval*/ 139 | 140 | # Swap 141 | [._]*.s[a-v][a-z] 142 | [._]*.sw[a-p] 143 | [._]s[a-v][a-z] 144 | [._]sw[a-p] 145 | 146 | # Session 147 | Session.vim 148 | 149 | # Temporary 150 | .netrwhist 151 | *~ 152 | # Auto-generated tag files 153 | tags 154 | -------------------------------------------------------------------------------- /naslib/predictors/lce/lce.py: -------------------------------------------------------------------------------- 1 | # This code is mostly from https://github.com/automl/pylearningcurvepredictor 2 | # pylearningcurvepredictor author: Tobias Domhan, tdomhan 3 | 4 | import numpy as np 5 | 6 | from naslib.predictors.predictor import Predictor 7 | from naslib.predictors.lce.parametric_model import ( 8 | model_name_list, 9 | model_config, 10 | construct_parametric_model, 11 | ) 12 | from naslib.predictors.lce.parametric_ensemble import ParametricEnsemble 13 | 14 | 15 | class LCEPredictor(Predictor): 16 | def __init__(self, metric=None): 17 | self.metric = metric 18 | 19 | def query(self, xtest, info): 20 | 21 | ensemble = ParametricEnsemble( 22 | [construct_parametric_model(model_config, name) for name in model_name_list] 23 | ) 24 | 25 | learning_curves = np.array([np.array(inf["lc"]) / 100 for inf in info]) 26 | trained_epochs = len(info[0]["lc"]) 27 | 28 | if self.ss_type == "nasbench201": 29 | final_epoch = 200 30 | default_guess = 85.0 31 | N = 300 32 | elif self.ss_type == "darts": 33 | final_epoch = 98 34 | default_guess = 93.0 35 | N = 1000 36 | elif self.ss_type == "nlp": 37 | final_epoch = 50 38 | default_guess = 94.83 39 | N = 1000 40 | else: 41 | raise NotImplementedError() 42 | 43 | 
predictions = [] 44 | for i in range(len(xtest)): 45 | ensemble.mcmc(learning_curves[i, :], N=N) 46 | prediction = ensemble.mcmc_sample_predict([final_epoch]) 47 | prediction = np.squeeze(prediction) * 100 48 | 49 | if np.isnan(prediction) or not np.isfinite(prediction): 50 | print("prediction is nan or not finite") 51 | prediction = default_guess + np.random.rand() 52 | predictions.append(prediction) 53 | 54 | predictions = np.squeeze(np.array(predictions)) 55 | 56 | return predictions 57 | 58 | def get_data_reqs(self): 59 | """ 60 | Returns a dictionary with info about whether the predictor needs 61 | extra info to train/query. 62 | """ 63 | reqs = { 64 | "requires_partial_lc": True, 65 | "metric": self.metric, 66 | "requires_hyperparameters": False, 67 | "hyperparams": None, 68 | "unlabeled": False, 69 | "unlabeled_factor": 0, 70 | } 71 | return reqs 72 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/weight_initializers.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | import torch.nn as nn 17 | 18 | 19 | def init_net(net, w_type, b_type): 20 | if w_type == "none": 21 | pass 22 | elif w_type == "xavier": 23 | net.apply(init_weights_vs) 24 | elif w_type == "kaiming": 25 | net.apply(init_weights_he) 26 | elif w_type == "zero": 27 | net.apply(init_weights_zero) 28 | else: 29 | raise NotImplementedError(f"init_type={w_type} is not supported.") 30 | 31 | if b_type == "none": 32 | pass 33 | elif b_type == "xavier": 34 | net.apply(init_bias_vs) 35 | elif b_type == "kaiming": 36 | net.apply(init_bias_he) 37 | elif b_type == "zero": 38 | net.apply(init_bias_zero) 39 | else: 40 | raise NotImplementedError(f"init_type={b_type} is not supported.") 41 | 42 | 43 | def init_weights_vs(m): 44 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 45 | nn.init.xavier_normal_(m.weight) 46 | 47 | 48 | def init_bias_vs(m): 49 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 50 | if m.bias is not None: 51 | nn.init.xavier_normal_(m.bias) 52 | 53 | 54 | def init_weights_he(m): 55 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 56 | nn.init.kaiming_normal_(m.weight) 57 | 58 | 59 | def init_bias_he(m): 60 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 61 | if m.bias is not None: 62 | nn.init.kaiming_normal_(m.bias) 63 | 64 | 65 | def init_weights_zero(m): 66 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 67 | m.weight.data.fill_(0.0) 68 | 69 | 70 | def init_bias_zero(m): 71 | if type(m) == nn.Linear or type(m) == nn.Conv2d: 72 | if m.bias is not None: 73 | m.bias.data.fill_(0.0) 74 | -------------------------------------------------------------------------------- /scripts/statistics/run.sh: -------------------------------------------------------------------------------- 1 | 
search_spaces=(nasbench101 nasbench201 nasbench201 nasbench201 \ 2 | darts nlp transbench101 transbench101 transbench101 transbench101 \ 3 | transbench101 transbench101 transbench101) 4 | 5 | datasets=(cifar10 cifar10 cifar100 ImageNet16-120 \ 6 | cifar10 penntreebank class_scene class_object jigsaw room_layout \ 7 | segmentsemantic normal autoencoder) 8 | 9 | run_acc_stats=(1 1 1 1 \ 10 | 1 1 1 1 1 1 \ 11 | 1 1 1) 12 | 13 | run_nbhd_sizes=(1 1 1 1 \ 14 | 1 1 1 1 1 1 \ 15 | 1 1 1) 16 | 17 | run_autocorrs=(1 1 1 1 \ 18 | 1 1 1 1 1 1 \ 19 | 1 1 1) 20 | 21 | start_seed=$1 22 | if [ -z "$start_seed" ] 23 | then 24 | start_seed=0 25 | fi 26 | 27 | # folders: 28 | base_file=NASLib/naslib 29 | s3_folder=stats 30 | out_dir=$s3_folder\_$start_seed 31 | 32 | # other variables: 33 | trials=3 34 | end_seed=$(($start_seed + $trials - 1)) 35 | save_to_s3=true 36 | 37 | max_set_size=430000 38 | max_nbhd_trials=500 39 | max_autocorr_trials=20 40 | walks=5000 41 | 42 | # create config files 43 | for i in $(seq 0 $((${#search_spaces[@]}-1)) ) 44 | do 45 | search_space=${search_spaces[$i]} 46 | dataset=${datasets[$i]} 47 | run_acc_stat=${run_acc_stats[$i]} 48 | run_nbhd_size=${run_nbhd_sizes[$i]} 49 | run_autocorr=${run_autocorrs[$i]} 50 | 51 | python $base_file/benchmarks/create_configs.py --search_space $search_space --dataset=$dataset \ 52 | --run_acc_stats $run_acc_stat --run_nbhd_size $run_nbhd_size --run_autocorr $run_autocorr \ 53 | --start_seed $start_seed --trials $trials --out_dir $out_dir --max_set_size $max_set_size \ 54 | --max_nbhd_trials $max_nbhd_trials --max_autocorr_trials $max_autocorr_trials --walks $walks \ 55 | --config_type statistics 56 | done 57 | 58 | # run experiments 59 | for t in $(seq $start_seed $end_seed) 60 | do 61 | for i in $(seq 0 $((${#search_spaces[@]}-1)) ) 62 | do 63 | search_space=${search_spaces[$i]} 64 | dataset=${datasets[$i]} 65 | config_file=$out_dir/$search_space/$dataset/configs/statistics/config\_$t.yaml 66 | echo ================running $search_space $dataset trial: $t ===================== 67 | python $base_file/benchmarks/statistics/runner.py --config-file $config_file 68 | done 69 | if [ "$save_to_s3" ] 70 | then 71 | # zip and save to s3 72 | echo zipping and saving to s3 73 | zip -r $out_dir.zip $out_dir 74 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 75 | fi 76 | done 77 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import sys 4 | import naslib as nl 5 | 6 | 7 | from naslib.defaults.trainer import Trainer 8 | from naslib.optimizers import ( 9 | DARTSOptimizer, 10 | GDASOptimizer, 11 | OneShotNASOptimizer, 12 | RandomNASOptimizer, 13 | RandomSearch, 14 | RegularizedEvolution, 15 | LocalSearch, 16 | Bananas, 17 | BasePredictor, 18 | GSparseOptimizer 19 | ) 20 | 21 | from naslib.search_spaces import NasBench201SearchSpace, DartsSearchSpace 22 | from naslib.utils import utils, setup_logger, get_dataset_api 23 | from naslib.search_spaces.core.query_metrics import Metric 24 | 25 | config = utils.get_config_from_args() 26 | utils.set_seed(config.seed) 27 | 28 | logger = setup_logger(config.save + "/log.log") 29 | logger.setLevel(logging.INFO) # default DEBUG is too verbose 30 | 31 | utils.log_args(config) 32 | 33 | supported_optimizers = { 34 | "darts": DARTSOptimizer(config), 35 | "gdas":
--------------------------------------------------------------------------------
/naslib/optimizers/oneshot/gsparsity/runner.py:
--------------------------------------------------------------------------------
import logging

from naslib.defaults.trainer import Trainer
from naslib.optimizers import (
    DARTSOptimizer,
    GDASOptimizer,
    OneShotNASOptimizer,
    RandomNASOptimizer,
    RandomSearch,
    RegularizedEvolution,
    LocalSearch,
    Bananas,
    BasePredictor,
    GSparseOptimizer,
)
from naslib.search_spaces import NasBench201SearchSpace, DartsSearchSpace
from naslib.utils import utils, setup_logger, get_dataset_api
from naslib.search_spaces.core.query_metrics import Metric

config = utils.get_config_from_args()
utils.set_seed(config.seed)

logger = setup_logger(config.save + "/log.log")
logger.setLevel(logging.INFO)  # default DEBUG is too verbose

utils.log_args(config)

supported_optimizers = {
    "darts": DARTSOptimizer(config),
    "gdas": GDASOptimizer(config),
    "oneshot": OneShotNASOptimizer(config),
    "rsws": RandomNASOptimizer(config),
    "re": RegularizedEvolution(config),
    "rs": RandomSearch(config),
    "ls": LocalSearch(config),
    "bananas": Bananas(config),
    "bp": BasePredictor(config),
    "gsparsity": GSparseOptimizer(config),
}

supported_search_spaces = {
    "nasbench201": NasBench201SearchSpace(),
    "darts": DartsSearchSpace(),
}

search_space = supported_search_spaces[config.search_space]
dataset_api = get_dataset_api(config.search_space, config.dataset)

optimizer = supported_optimizers[config.optimizer]
optimizer.adapt_search_space(search_space)

trainer = Trainer(optimizer, config, lightweight_output=True)
trainer.search()

# to resume from checkpoints instead:
# if not config.eval_only:
#     checkpoint = utils.get_last_checkpoint(config) if config.resume else ""
#     trainer.search(resume_from=checkpoint)
# checkpoint = utils.get_last_checkpoint(config, search_model=True) if config.resume else ""
# trainer.evaluate(resume_from=checkpoint, dataset_api=dataset_api)

# evaluate the model saved by search() above; pass search_model=<path> to
# evaluate a specific checkpoint instead (see the sketch after this file)
trainer.evaluate(dataset_api=dataset_api, metric=Metric.VAL_ACCURACY)
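If a specific checkpoint should be evaluated rather than the model just written by search(), a minimal sketch, assuming Trainer.evaluate accepts a filesystem path via its search_model parameter as the original call implied (the path below is a placeholder, not a real checkpoint):

trainer.evaluate(
    dataset_api=dataset_api,
    metric=Metric.VAL_ACCURACY,
    search_model="run/darts/cifar100/gsparsity/9/search/model_final.pth",  # placeholder
)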
--------------------------------------------------------------------------------
/naslib/predictors/utils/pruners/measures/synflow.py:
--------------------------------------------------------------------------------
# Copyright 2021 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import torch

from . import measure
from ..p_utils import get_layer_metric_array


@measure("synflow", bn=False, mode="param")
@measure("synflow_bn", bn=True, mode="param")
def compute_synflow_per_weight(net, inputs, targets, mode, split_data=1, loss_fn=None):

    device = inputs.device

    # convert params to their abs; keep the signs for converting back later
    @torch.no_grad()
    def linearize(net):
        signs = {}
        for name, param in net.state_dict().items():
            signs[name] = torch.sign(param)
            param.abs_()
        return signs

    # convert back to the original values
    @torch.no_grad()
    def nonlinearize(net, signs):
        for name, param in net.state_dict().items():
            if "weight_mask" not in name:
                param.mul_(signs[name])

    # keep signs of all params
    signs = linearize(net.double())

    # compute gradients with an all-ones input of batch size 1
    net.zero_grad()
    net.double()
    input_dim = list(inputs[0, :].shape)
    inputs = torch.ones([1] + input_dim).double().to(device)
    output = net.forward(inputs)
    torch.sum(output).backward()

    # select the gradients that we want to use for search/prune
    def synflow(layer):
        if layer.weight.grad is not None:
            return torch.abs(layer.weight * layer.weight.grad)
        else:
            return torch.zeros_like(layer.weight)

    grads_abs = get_layer_metric_array(net, synflow, mode)

    # re-apply the saved signs of all params
    nonlinearize(net, signs)

    return grads_abs
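To make the call contract concrete, a small usage sketch; the toy network and shapes are invented, mode="param" mirrors the decorator registration above, and targets/loss_fn are unused by this measure:

import torch
import torch.nn as nn

from naslib.predictors.utils.pruners.measures.synflow import compute_synflow_per_weight

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
inputs = torch.randn(2, 3, 32, 32)  # only the per-example shape is read from this

# per-layer |w * dL/dw| tensors; summing them yields a scalar synflow score
grads_abs = compute_synflow_per_weight(net, inputs, targets=None, mode="param")
score = sum(g.sum().item() for g in grads_abs)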
--------------------------------------------------------------------------------
/naslib/predictors/trees/lgb.py:
--------------------------------------------------------------------------------
# Author: Yang Liu @ Abacus.ai
# This is an implementation of a GBDT predictor for NAS from the paper:
# Luo, Renqian, et al. "Neural architecture search with gbdt." arXiv preprint arXiv:2007.04785 (2020).

import numpy as np
import lightgbm as lgb

from naslib.predictors.trees import BaseTree
from naslib.predictors.trees.ngb import loguniform


class LGBoost(BaseTree):
    @property
    def default_hyperparams(self):
        # default parameters used in Luo et al. 2020
        params = {
            "boosting_type": "gbdt",
            "objective": "regression",
            "min_data_in_leaf": 5,
            "num_leaves": 31,
            "learning_rate": 0.05,
            "feature_fraction": 0.9,
            "bagging_fraction": 0.8,
            "bagging_freq": 5,
            "verbose": -1,
        }
        return params

    def set_random_hyperparams(self):
        if self.hyperparams is None:
            # evaluate the default config first during HPO
            params = self.default_hyperparams.copy()
        else:
            # sample leaves uniformly, the learning rate log-uniformly
            params = {
                "boosting_type": "gbdt",
                "objective": "regression",
                "min_data_in_leaf": 5,
                "num_leaves": int(np.random.choice(90) + 10),
                "learning_rate": loguniform(0.001, 0.1),
                "feature_fraction": np.random.uniform(0.1, 1),
                "bagging_fraction": 0.8,
                "bagging_freq": 5,
                "verbose": -1,
            }
        self.hyperparams = params
        return params

    def get_dataset(self, encodings, labels=None):
        if labels is None:
            return encodings
        # labels are standardized with the training mean/std before fitting
        return lgb.Dataset(encodings, label=((labels - self.mean) / self.std))

    def train(self, train_data):
        hparams = {**self.hyperparams, "metric": {"l2"}}
        return lgb.train(hparams, train_data, num_boost_round=500)

    def predict(self, data):
        return self.model.predict(data, num_iteration=self.model.best_iteration)

    def fit(self, xtrain, ytrain, train_info=None, params=None, **kwargs):
        if self.hyperparams is None:
            self.hyperparams = self.default_hyperparams.copy()
        return super(LGBoost, self).fit(xtrain, ytrain, train_info, params, **kwargs)

--------------------------------------------------------------------------------
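Closing usage sketch (illustrative only; the arrays are invented, and the fit/query split lives in BaseTree, so the exact query signature is an assumption):

import numpy as np

from naslib.predictors.trees.lgb import LGBoost

# toy data: 50 architectures encoded as 20-d feature vectors, plus accuracies
xtrain = np.random.rand(50, 20)
ytrain = np.random.rand(50) * 100
xtest = np.random.rand(10, 20)

predictor = LGBoost()
predictor.fit(xtrain, ytrain)   # standardizes labels, then boosts 500 rounds
preds = predictor.query(xtest)  # query() is assumed to be the BaseTree entry point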