├── .codecov.yml ├── .coveragerc ├── .flake8 ├── .github ├── CODEOWNERS └── workflows │ ├── codecov.yml │ ├── codeql-analysis.yml │ ├── pre-commit.yml │ ├── scheduled-test.yml │ └── unit-tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── .buildinfo ├── .nojekyll ├── _sources │ ├── citing.rst.txt │ ├── contributing.rst.txt │ ├── example.rst.txt │ ├── index.rst.txt │ ├── license.rst.txt │ ├── manual.rst.txt │ └── search_spaces.ipynb.txt ├── _static │ ├── basic.css │ ├── css │ │ ├── badge_only.css │ │ ├── fonts │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ ├── lato-bold-italic.woff │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── lato-normal-italic.woff2 │ │ │ ├── lato-normal.woff │ │ │ └── lato-normal.woff2 │ │ └── theme.css │ ├── doctools.js │ ├── documentation_options.js │ ├── fonts │ │ ├── Inconsolata-Bold.ttf │ │ ├── Inconsolata-Regular.ttf │ │ ├── Inconsolata.ttf │ │ ├── Lato-Bold.ttf │ │ ├── Lato-Regular.ttf │ │ ├── Lato │ │ │ ├── lato-bold.eot │ │ │ ├── lato-bold.ttf │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-bolditalic.eot │ │ │ ├── lato-bolditalic.ttf │ │ │ ├── lato-bolditalic.woff │ │ │ ├── lato-bolditalic.woff2 │ │ │ ├── lato-italic.eot │ │ │ ├── lato-italic.ttf │ │ │ ├── lato-italic.woff │ │ │ ├── lato-italic.woff2 │ │ │ ├── lato-regular.eot │ │ │ ├── lato-regular.ttf │ │ │ ├── lato-regular.woff │ │ │ └── lato-regular.woff2 │ │ ├── RobotoSlab-Bold.ttf │ │ ├── RobotoSlab-Regular.ttf │ │ ├── RobotoSlab │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ └── roboto-slab-v7-regular.woff2 │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ ├── fontawesome-webfont.woff │ │ └── fontawesome-webfont.woff2 │ ├── jquery-3.5.1.js │ ├── jquery.js │ ├── js │ │ ├── badge_only.js │ │ ├── html5shiv-printshiv.min.js │ │ ├── html5shiv.min.js │ │ ├── modernizr.min.js │ │ └── theme.js │ ├── language_data.js │ ├── pygments.css │ ├── searchtools.js │ ├── underscore-1.13.1.js │ └── underscore.js ├── buildinfo ├── citing.html ├── contributing.html ├── example.html ├── genindex.html ├── index.html ├── license.html ├── manual.html ├── nojekyll ├── objects.inv ├── search.html ├── search_spaces.html ├── search_spaces.ipynb └── searchindex.js ├── examples ├── custom_data.py ├── demo.py ├── example_runner.ipynb ├── naslib_tutorial.ipynb ├── plot.ipynb ├── plot_bbo.ipynb ├── plot_predictors.ipynb ├── plot_save_arch_weights.py ├── predictors.md ├── run_darts.py └── search_spaces.ipynb ├── images ├── naslib-logo.png ├── naslib-overall.png └── predictors.png ├── naslib ├── __init__.py ├── __version__.py ├── configs │ └── example_bbo_config.yaml ├── data │ ├── class_object_selected.npy │ ├── class_scene_selected.npy │ ├── nb201_all.pickle │ ├── permutations_hamming_max_1000.npy │ └── taskonomydata_mini │ │ ├── download_tnb.sh │ │ └── generate_splits.py ├── defaults │ ├── __init__.py │ ├── additional_primitives.py │ ├── config_multi.yaml │ ├── 
darts_defaults.yaml │ ├── nb201_defaults.yaml │ ├── predictor_evaluator.py │ ├── statistics_evaluator.py │ ├── trainer.py │ └── trainer_multi.py ├── evaluators │ ├── __init__.py │ ├── full_evaluation.py │ ├── runner.py │ ├── zc_ensemble_evaluator.py │ └── zc_evaluator.py ├── optimizers │ ├── __init__.py │ ├── core │ │ ├── __init__.py │ │ └── metaclasses.py │ ├── discrete │ │ ├── __init__.py │ │ ├── bananas │ │ │ ├── __init__.py │ │ │ ├── acquisition_functions.py │ │ │ └── optimizer.py │ │ ├── bp │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ │ ├── ls │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ │ ├── npenas │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ │ ├── re │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ │ └── rs │ │ │ ├── __init__.py │ │ │ └── optimizer.py │ └── oneshot │ │ ├── __init__.py │ │ ├── configurable │ │ ├── components.py │ │ └── optimizer.py │ │ ├── darts │ │ ├── __init__.py │ │ └── optimizer.py │ │ ├── drnas │ │ ├── __init__.py │ │ ├── optimizer.py │ │ └── optimizer_progressive_growing.py │ │ ├── gdas │ │ ├── __init__.py │ │ └── optimizer.py │ │ ├── gsparsity │ │ ├── ProxSGD_for_groups.py │ │ ├── __init__.py │ │ ├── config.yaml │ │ ├── darts_config.yaml │ │ ├── darts_gsparse.sh │ │ ├── gsparse.sh │ │ ├── optimizer.py │ │ └── runner.py │ │ ├── oneshot_train │ │ ├── __init__.py │ │ └── optimizer.py │ │ └── rs_ws │ │ ├── __init__.py │ │ └── optimizer.py ├── predictors │ ├── __init__.py │ ├── bnn │ │ ├── __init__.py │ │ ├── bayesian_linear_reg.py │ │ ├── bnn_base.py │ │ ├── bohamiann.py │ │ └── dngo.py │ ├── bonas.py │ ├── early_stopping.py │ ├── ensemble.py │ ├── gcn.py │ ├── gp │ │ ├── __init__.py │ │ ├── deep_gp.py │ │ ├── gp.py │ │ ├── gp_base.py │ │ ├── gpwl.py │ │ ├── gpwl_utils │ │ │ ├── __init__.py │ │ │ ├── convert.py │ │ │ ├── vertex_histogram.py │ │ │ └── wl_kernel.py │ │ ├── sparse_gp.py │ │ └── var_sparse_gp.py │ ├── lce │ │ ├── __init__.py │ │ ├── lce.py │ │ ├── parametric_ensemble.py │ │ └── parametric_model.py │ ├── lce_m │ │ ├── __init__.py │ │ ├── curvefunctions.py │ │ ├── curvemodels.py │ │ ├── lce_m.py │ │ └── learning_curves.py │ ├── lcsvr.py │ ├── mlp.py │ ├── omni_ngb.py │ ├── omni_seminas.py │ ├── oneshot │ │ ├── __init__.py │ │ └── oneshot.py │ ├── predictor.py │ ├── seminas.py │ ├── soloss.py │ ├── trees │ │ ├── __init__.py │ │ ├── base_tree_class.py │ │ ├── lgb.py │ │ ├── ngb.py │ │ ├── random_forest.py │ │ └── xgb.py │ ├── utils │ │ ├── __init__.py │ │ ├── bin_thresholds.py │ │ ├── build_nets │ │ │ ├── CifarDenseNet.py │ │ │ ├── CifarResNet.py │ │ │ ├── CifarWideResNet.py │ │ │ ├── ImageNet_MobileNetV2.py │ │ │ ├── ImageNet_ResNet.py │ │ │ ├── SharedUtils.py │ │ │ ├── __init__.py │ │ │ ├── build_darts_net.py │ │ │ ├── cell_infers │ │ │ │ ├── __init__.py │ │ │ │ ├── cells.py │ │ │ │ ├── nasnet_cifar.py │ │ │ │ └── tiny_network.py │ │ │ ├── cell_operations.py │ │ │ ├── cell_searchs │ │ │ │ ├── __init__.py │ │ │ │ ├── _test_module.py │ │ │ │ ├── genotypes.py │ │ │ │ ├── search_cells.py │ │ │ │ ├── search_model_darts.py │ │ │ │ ├── search_model_darts_nasnet.py │ │ │ │ ├── search_model_enas.py │ │ │ │ ├── search_model_enas_utils.py │ │ │ │ ├── search_model_gdas.py │ │ │ │ ├── search_model_gdas_nasnet.py │ │ │ │ ├── search_model_random.py │ │ │ │ ├── search_model_setn.py │ │ │ │ └── search_model_setn_nasnet.py │ │ │ ├── clone_weights.py │ │ │ ├── configure_utils.py │ │ │ ├── get_dataset_with_transform.py │ │ │ ├── initialization.py │ │ │ ├── shape_infers │ │ │ │ ├── InferCifarResNet.py │ │ │ │ ├── InferCifarResNet_depth.py │ │ │ │ ├── 
InferCifarResNet_width.py │ │ │ │ ├── InferImagenetResNet.py │ │ │ │ ├── InferMobileNetV2.py │ │ │ │ ├── InferTinyCellNet.py │ │ │ │ ├── __init__.py │ │ │ │ └── shared_utils.py │ │ │ └── shape_searchs │ │ │ │ ├── SearchCifarResNet.py │ │ │ │ ├── SearchCifarResNet_depth.py │ │ │ │ ├── SearchCifarResNet_width.py │ │ │ │ ├── SearchImagenetResNet.py │ │ │ │ ├── SearchSimResNet_width.py │ │ │ │ ├── SoftSelect.py │ │ │ │ ├── __init__.py │ │ │ │ └── test.py │ │ ├── encodings.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── build_darts_net.py │ │ │ ├── nasbench1.py │ │ │ ├── nasbench1_ops.py │ │ │ ├── nasbench1_spec.py │ │ │ ├── nasbench2.py │ │ │ └── nasbench2_ops.py │ │ └── pruners │ │ │ ├── __init__.py │ │ │ ├── measures │ │ │ ├── __init__.py │ │ │ ├── epe_nas.py │ │ │ ├── fisher.py │ │ │ ├── grad_norm.py │ │ │ ├── grasp.py │ │ │ ├── jacov.py │ │ │ ├── l2_norm.py │ │ │ ├── model_stats.py │ │ │ ├── nwot.py │ │ │ ├── plain.py │ │ │ ├── snip.py │ │ │ ├── synflow.py │ │ │ └── zen.py │ │ │ ├── p_utils.py │ │ │ ├── predictive.py │ │ │ └── weight_initializers.py │ └── zerocost.py ├── runners │ ├── bbo │ │ ├── discrete_config.yaml │ │ └── runner.py │ ├── nas │ │ ├── discrete_config.yaml │ │ └── runner.py │ ├── nas_predictors │ │ ├── discrete_config.yaml │ │ ├── nas_predictor_config.yaml │ │ ├── oneshot_runner.py │ │ └── runner.py │ ├── predictors │ │ ├── predictor_config.yaml │ │ └── runner.py │ ├── statistics │ │ ├── runner.py │ │ └── statistics_config.yaml │ └── zc │ │ ├── bbo │ │ └── xgb_runner.py │ │ ├── benchmarks │ │ ├── runner.py │ │ └── sampler.py │ │ ├── runner.py │ │ └── zc_config.yaml ├── search_spaces │ ├── __init__.py │ ├── core │ │ ├── __init__.py │ │ ├── graph.py │ │ ├── primitives.py │ │ └── query_metrics.py │ ├── hierarchical │ │ ├── __init__.py │ │ ├── graph.py │ │ └── primitives.py │ ├── nasbench101 │ │ ├── __init__.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ ├── graph.py │ │ └── primitives.py │ ├── nasbench201 │ │ ├── __init__.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ ├── graph.py │ │ └── primitives.py │ ├── nasbench301 │ │ ├── __init__.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ ├── graph.py │ │ └── primitives.py │ ├── nasbenchasr │ │ ├── __init__.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ ├── graph.py │ │ └── primitives.py │ ├── nasbenchnlp │ │ ├── __init__.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ └── graph.py │ ├── natsbenchsize │ │ ├── __init__.py │ │ └── graph.py │ ├── simple_cell │ │ ├── __init__.py │ │ └── graph.py │ └── transbench101 │ │ ├── __init__.py │ │ ├── api.py │ │ ├── conversions.py │ │ ├── encodings.py │ │ ├── graph.py │ │ ├── loss.py │ │ ├── primitives.py │ │ └── tnb101 │ │ ├── README.MD │ │ ├── __init__.py │ │ ├── model_builder.py │ │ └── models │ │ ├── __init__.py │ │ ├── decoder.py │ │ ├── discriminator.py │ │ ├── encoder.py │ │ ├── feedforward.py │ │ ├── gan.py │ │ ├── net_infer │ │ ├── __init__.py │ │ ├── cell_micro.py │ │ └── net_macro.py │ │ ├── net_ops │ │ ├── __init__.py │ │ ├── cell_ops.py │ │ └── norm.py │ │ ├── segmentation.py │ │ └── siamese.py └── utils │ ├── DownsampledImageNet.py │ ├── __init__.py │ ├── asr.py │ ├── custom_dataset.py │ ├── darcyflow_dataset.py │ ├── dataset.py │ ├── encodings.py │ ├── get_dataset_api.py │ ├── load_ops.py │ ├── log.py │ ├── nb101_api.py │ ├── ninapro_dataset.py │ ├── pytorch_helper.py │ ├── taskonomy_dataset.py │ ├── vis │ └── __init__.py │ └── zerocost.py ├── requirements.txt ├── scripts ├── bash_scripts │ └── download_benchmarks.sh ├── bbo │ ├── make_configs_asr.sh │ ├── 
make_configs_darts.sh │ ├── make_configs_mr.sh │ ├── make_configs_nb101.sh │ ├── make_configs_nb201.sh │ ├── make_configs_nlp.sh │ ├── make_configs_transnb101_macro.sh │ ├── make_configs_transnb101_micro.sh │ ├── scheduler.sh │ ├── scheduler_bosch.sh │ ├── submit_bosch_folder.sh │ ├── submit_boschgpu_folder.sh │ └── submit_folder.sh ├── create_configs.py ├── darts │ └── gsparse.sh ├── nas │ ├── run_nb111.sh │ ├── run_nb201.sh │ ├── run_nb201_cifar100.sh │ ├── run_nb201_imagenet16-200.sh │ ├── run_nb211.sh │ ├── run_nb311.sh │ └── run_nbnlp.sh ├── nas_predictors │ ├── oneshot_eval.sh │ ├── run_darts_bo.sh │ ├── run_darts_npenas.sh │ ├── run_im_bo_arber.sh │ ├── run_nb101_bo.sh │ ├── run_nb101_npenas.sh │ ├── run_nb201_bo.sh │ ├── run_nb201_bo_2.sh │ ├── run_nb201_npenas.sh │ ├── run_nb201_npenas_2.sh │ ├── slurm_job-imgnet.sh │ ├── slurm_job-nb101.sh │ ├── slurm_job-nb201-c10.sh │ ├── slurm_job-nb201-c100.sh │ ├── slurm_job-nb201-imagenet.sh │ ├── slurm_job-nb301.sh │ ├── submit-all.sh │ ├── submit-oneshot.sh │ └── submit.sh ├── nasbench201 │ └── gsparse.sh ├── predictors │ ├── run_darts.sh │ ├── run_hpo.sh │ ├── run_hpo_test.sh │ ├── run_nb101.sh │ ├── run_nb201.sh │ ├── run_nb201_2.sh │ ├── run_nlp.sh │ ├── run_tnb_predictors.sh │ └── test.sh ├── prestore_nasbench360.py ├── statistics │ └── run.sh └── zc │ ├── bash_scripts │ ├── benchmarks │ │ ├── create_configs.sh │ │ ├── create_configs_all.sh │ │ ├── create_configs_nb101.sh │ │ ├── create_configs_nb201.sh │ │ ├── create_configs_nb301.sh │ │ └── create_configs_tnb101.sh │ ├── correlation │ │ ├── create_configs.sh │ │ ├── create_configs_all.sh │ │ ├── create_configs_nb101.sh │ │ ├── create_configs_nb201.sh │ │ ├── create_configs_nb301.sh │ │ └── create_configs_tnb101.sh │ ├── download_data.sh │ ├── download_nbs_zero.sh │ ├── run_nb201.sh │ ├── run_nb301.sh │ ├── run_tnb101.sh │ ├── xgb_correlation │ │ ├── create_configs.sh │ │ ├── create_configs_all.sh │ │ ├── create_configs_nb101.sh │ │ ├── create_configs_nb201.sh │ │ ├── create_configs_nb301.sh │ │ └── create_configs_tnb101.sh │ └── zc_ensemble │ │ ├── create_configs.sh │ │ ├── create_configs_all.sh │ │ ├── create_configs_nb101.sh │ │ ├── create_configs_nb201.sh │ │ ├── create_configs_nb301.sh │ │ └── create_configs_tnb101.sh │ ├── benchmarks │ ├── run.sh │ ├── run_all.sh │ ├── run_nb101.sh │ ├── run_nb201.sh │ ├── run_nb301.sh │ └── run_tnb101.sh │ ├── correlation │ ├── run.sh │ ├── run_all.sh │ ├── run_nb101.sh │ ├── run_nb201.sh │ ├── run_nb301.sh │ ├── run_one.sh │ └── run_tnb101.sh │ ├── create_benchmarks.py │ ├── create_configs_benchmarks.py │ ├── create_configs_correlation.py │ ├── create_configs_xgb_correlation.py │ ├── create_configs_zc_ensembles.py │ ├── xgb_correlation │ ├── run.sh │ ├── run_all.sh │ ├── run_nb101.sh │ ├── run_nb201.sh │ ├── run_nb301.sh │ └── run_tnb101.sh │ └── zc_ensembles │ ├── run.sh │ ├── run_all.sh │ ├── run_nb101.sh │ ├── run_nb201.sh │ ├── run_nb301.sh │ └── run_tnb101.sh ├── setup.cfg ├── setup.py ├── test_benchmark_apis.py ├── tests ├── __init__.py ├── assets │ ├── config.yaml │ ├── nb101_dummy.pkl │ ├── nb201_test_set_info.npy │ ├── nb201_test_set_times.npy │ ├── nb201_test_set_x.npy │ ├── nb201_test_set_y.npy │ └── test_predictor.yaml ├── test_darts_search_space.py ├── test_hierarchical_search_space.py ├── test_nb101_search_space.py ├── test_nb201_search_space.py ├── test_nb301_search_space.py ├── test_optimizer_factory.py ├── test_predictors.py ├── test_simple_cell_search_space.py └── test_utils.py └── tutorial ├── NASLib Tutorial.ipynb ├── 
NASLib_tutorial_merged.ipynb ├── colab_test.ipynb ├── optimizer.py └── zc_intro.py

/.codecov.yml:
--------------------------------------------------------------------------------
# Allow coverage to decrease by 0.05%.
coverage:
  range: 10..95
  round: nearest
  precision: 2
  status:
    project:
      default:
        threshold: 0.05%

# Don't post a comment on pull requests.
comment: false

codecov:
  branch: Develop
  require_ci_to_pass: false

--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
[run]
source = naslib
omit =
    naslib/analysis/*
    ../venvs/*

--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
[flake8]
ignore = E203, E266, E501, W503, F403, F401
max-line-length = 79
max-complexity = 18
select = B,C,E,F,W,T4,B9

--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------

# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence, they will be requested for
# review when someone opens a pull request.

* @arberzela @yashsmehta @crwhite14

# You can have owners for particular types of files (e.g. .py), for specific
# subdirectories, or for specific branches.

--------------------------------------------------------------------------------
/.github/workflows/codecov.yml:
--------------------------------------------------------------------------------
name: CodeCov

on:
  push:
    branches: [Develop, master]
  pull_request:
    branches: [Develop, master]

jobs:
  run:
    runs-on: ubuntu-latest
    env:
      OS: ubuntu-latest
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2

      - name: Setup Python 3.8
        uses: actions/setup-python@master
        with:
          python-version: 3.8

      - name: 'generate report'
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install -e .[test]
          pip install coverage
          cd tests/
          coverage run -m unittest

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          flags: unittests
          fail_ci_if_error: true

--------------------------------------------------------------------------------
/.github/workflows/pre-commit.yml:
--------------------------------------------------------------------------------
name: pre-commit

on:
  push:
    branches: [Develop, master]
  pull_request:
    branches: [Develop, master]

jobs:
  run-all-files:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python 3.8
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install pre-commit
        run: |
          pip install pre-commit
          pre-commit install
      - name: Run pre-commit
        run: |
          pre-commit run --all-files

--------------------------------------------------------------------------------
/.github/workflows/scheduled-test.yml:
--------------------------------------------------------------------------------
name: Scheduled Tests

on:
  schedule:
    # Every Monday at 7AM UTC
    - cron: '0 07 * * 1'


jobs:
  ubuntu:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.8]
      # fail-fast and max-parallel are strategy-level keys, not matrix entries
      fail-fast: false
      max-parallel: 2

    steps:
      - uses: actions/checkout@v2
        with:
          ref: Develop
      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install naslib
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install -e .[test]
      - name: Run tests
        run: |
          cd tests/
          python -m unittest discover -v

--------------------------------------------------------------------------------
/.github/workflows/unit-tests.yml:
--------------------------------------------------------------------------------
name: Unit Tests

on:
  push:
    branches:
      - master
      - Develop


jobs:
  run-all-files:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Setup Python 3.8
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install naslib
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install -e .[test]

      - name: Run tests
        run: |
          cd tests/
          python -m unittest discover -v

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
repos:
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.761
    hooks:
      - id: mypy
        args: [--show-error-codes,
               --warn-redundant-casts,
               --warn-return-any,
               --warn-unreachable,
               ]
        files: naslib/.*
        # pre-commit expects `exclude` to be a single regex, not a YAML list
        exclude: naslib/(examples|docs)/.*

  - repo: https://gitlab.com/pycqa/flake8
    rev: 3.8.3
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-print==3.1.4
          - flake8-import-order
        name: flake8 naslib
        files: naslib/.*
        exclude: naslib/(examples|docs|predictors)/.*
      - id: flake8
        additional_dependencies:
          - flake8-print==3.1.4
          - flake8-import-order
        name: flake8 test
        files: tests/.*
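To try these hooks locally before pushing, the same commands that the pre-commit workflow above executes can be run from the repository root:

    pip install pre-commit
    pre-commit install          # optional: run the hooks automatically on each commit
    pre-commit run --all-files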
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include naslib/defaults/*.yaml

--------------------------------------------------------------------------------
/docs/.buildinfo:
--------------------------------------------------------------------------------
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 5d25f23202db3f00d71b7eeba9ec715c
tags: 645f666f9bcd5a90fca523b33c5a78b7

--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/_sources/citing.rst.txt:
--------------------------------------------------------------------------------
Citing NASLib
=============
If you use this code in your own work, please cite :footcite:t:`naslib-2020` and :footcite:t:`white2021powerful`.

.. footbibliography::

--------------------------------------------------------------------------------
/docs/_sources/contributing.rst.txt:
--------------------------------------------------------------------------------
Contributions
=============

We appreciate all contributions to NASLib, from bug reports and documentation to new features. If you want to contribute to the code, you can pick an issue from the issue tracker which is marked with "Needs contributor".

.. note::
    To avoid spending time on duplicate work or on features that are unlikely to get merged, it is highly advised that you contact the developers by opening a GitHub issue before starting to work.


When developing new features, please create a new branch from the refactor_development branch. When submitting a pull request, make sure that all tests are still passing.

--------------------------------------------------------------------------------
/docs/_sources/example.rst.txt:
--------------------------------------------------------------------------------
Example
=======


--------------------------------------------------------------------------------
/docs/_sources/index.rst.txt:
--------------------------------------------------------------------------------
.. Sphinx documentation NASLib documentation master file, created by
   sphinx-quickstart on Mon Jul 5 20:39:23 2021.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

NASLib Documentation
=======================================================
NASLib is a Neural Architecture Search (NAS) library. Its purpose is to facilitate NAS research for the community by providing interfaces to several state-of-the-art NAS search spaces.

.. warning::
    This library is under construction and there is no official release yet. Feel free to play around and have a look, but be aware that the APIs will change until we have a first release.

NASLib has been used to run an extensive comparison of 31 performance predictors. The results were published in the paper *How Powerful are Performance Predictors in Neural Architecture Search?*
For more details take a look at its separate README.

.. image:: naslib-overview.png
..
    :width: 200px
    :height: 100px
    :scale: 50 %
    :alt: alternate text
    :align: right

Usage
-----

::

    search_space = SimpleCellSearchSpace()

    optimizer = DARTSOptimizer(config)
    optimizer.adapt_search_space(search_space)

    trainer = Trainer(optimizer, config)
    trainer.search()    # Search for an architecture
    trainer.evaluate()  # Evaluate the best architecture

.. toctree::
   :maxdepth: 2
   :caption: Online Documentation Contents:

   manual
   example
   license
   citing
   contributing

..
    Indices and tables
    ==================

    * :ref:`genindex`
    * :ref:`modindex`
    * :ref:`search`
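The Usage snippet in index.rst above elides its imports. A self-contained version, mirroring examples/run_darts.py further down in this listing, might look as follows; note that run_darts.py constructs the optimizer as DARTSOptimizer(**config.search) and also passes config.dataset to adapt_search_space, whereas the abbreviated snippet uses DARTSOptimizer(config), so the exact signature depends on the NASLib version in use:

    import logging

    from naslib.defaults.trainer import Trainer
    from naslib.optimizers import DARTSOptimizer
    from naslib.search_spaces import SimpleCellSearchSpace
    from naslib.utils import set_seed, setup_logger, get_config_from_args

    config = get_config_from_args()  # use --help to see the options
    set_seed(config.seed)
    logger = setup_logger(config.save + "/log.log")
    logger.setLevel(logging.INFO)

    search_space = SimpleCellSearchSpace()

    optimizer = DARTSOptimizer(**config.search)
    optimizer.adapt_search_space(search_space, config.dataset)

    trainer = Trainer(optimizer, config)
    trainer.search()    # Search for an architecture
    trainer.evaluate()  # Evaluate the best architecture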
--------------------------------------------------------------------------------
/docs/_sources/license.rst.txt:
--------------------------------------------------------------------------------
License
=======
NASLib is licensed the same way as scikit-learn, namely under the 3-clause BSD license.

--------------------------------------------------------------------------------
/docs/_sources/manual.rst.txt:
--------------------------------------------------------------------------------
Manual
======

Requirements
------------
NASLib has the following requirements:

* A Linux operating system (for example Ubuntu) or Mac OS X.
* Python (>=3.7).
* PyTorch.

Setting up a virtual environment
--------------------------------
We recommend setting up a virtual environment:

.. code-block:: console

    python3 -m venv naslib
    source naslib/bin/activate

.. note::
    Make sure you use the latest version of pip:

.. code-block:: console

    pip install --upgrade pip setuptools wheel
    pip install cython

Setting up NASLib
-----------------
Clone and install.
If you plan to modify NASLib, consider adding the -e option to the pip install:

.. code-block:: console

    git clone ...
    cd naslib
    pip install .

To validate the installation, you can run the tests:

.. code-block:: console

    cd tests
    coverage run -m unittest discover

The test coverage can be seen with ``coverage report``.
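Beyond the unit tests above, a minimal import smoke test along the lines of the Usage snippet in index.rst can confirm the package is importable; SimpleCellSearchSpace is exported from naslib.search_spaces, as used in examples/run_darts.py below:

    # instantiate the smallest search space as an import/sanity check
    from naslib.search_spaces import SimpleCellSearchSpace

    graph = SimpleCellSearchSpace()
    print(type(graph).__name__, "instantiated successfully")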
--------------------------------------------------------------------------------
/docs/_static/css/fonts/Roboto-Slab-Bold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/Roboto-Slab-Bold.woff

--------------------------------------------------------------------------------
/docs/_static/css/fonts/Roboto-Slab-Bold.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/Roboto-Slab-Bold.woff2

--------------------------------------------------------------------------------
/docs/_static/css/fonts/Roboto-Slab-Regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/Roboto-Slab-Regular.woff

--------------------------------------------------------------------------------
/docs/_static/css/fonts/Roboto-Slab-Regular.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/Roboto-Slab-Regular.woff2

--------------------------------------------------------------------------------
/docs/_static/css/fonts/fontawesome-webfont.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/fontawesome-webfont.eot

--------------------------------------------------------------------------------
/docs/_static/css/fonts/fontawesome-webfont.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/fontawesome-webfont.ttf

--------------------------------------------------------------------------------
/docs/_static/css/fonts/fontawesome-webfont.woff:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: 
document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0.0.1', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false 12 | }; -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-regular.woff 
-------------------------------------------------------------------------------- /docs/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/buildinfo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/buildinfo -------------------------------------------------------------------------------- /docs/nojekyll: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/nojekyll

--------------------------------------------------------------------------------
/docs/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/docs/objects.inv

--------------------------------------------------------------------------------
/examples/custom_data.py:
--------------------------------------------------------------------------------
import torchvision.datasets as dset

from naslib.utils.custom_dataset import CustomDataset
from naslib.utils.dataset import _data_transforms_cifar10
from naslib.utils import get_config_from_args


if __name__ == '__main__':
    class MyCustomDataset(CustomDataset):
        def __init__(self, config, mode="train"):
            super().__init__(config, mode)

        def get_transforms(self, config):
            train_transform, valid_transform = _data_transforms_cifar10(config)
            return train_transform, valid_transform

        def get_data(self, data, train_transform, valid_transform):
            train_data = dset.CIFAR10(
                root=data, train=True, download=True, transform=train_transform
            )
            test_data = dset.CIFAR10(
                root=data, train=False, download=True, transform=valid_transform
            )

            return train_data, test_data

    config = get_config_from_args()
    dataset = MyCustomDataset(config)
    train_queue, valid_queue, test_queue, train_transform, valid_transform = dataset.get_loaders()

--------------------------------------------------------------------------------
/examples/plot_save_arch_weights.py:
--------------------------------------------------------------------------------
import os
import logging

from naslib.defaults.trainer import Trainer
from naslib.optimizers import DARTSOptimizer, GDASOptimizer, DrNASOptimizer
from naslib.search_spaces import NasBench101SearchSpace, NasBench201SearchSpace, NasBench301SearchSpace
from naslib.utils import set_seed, setup_logger, get_config_from_args, create_exp_dir
from naslib.utils.vis import plot_architectural_weights

config = get_config_from_args()  # use --help to see the options
config.search.epochs = 50
config.save_arch_weights = True
config.plot_arch_weights = True
config.optimizer = 'gdas'
config.search_space = 'nasbench301'
config.save = "{}/{}/{}/{}/{}".format(
    config.out_dir, config.search_space, config.dataset, config.optimizer, config.seed
)
create_exp_dir(config.save)
create_exp_dir(config.save + "/search")  # required for the checkpoints
create_exp_dir(config.save + "/eval")

optimizers = {
    'gdas': GDASOptimizer(config),
    'darts': DARTSOptimizer(config),
    'drnas': DrNASOptimizer(config),
}

search_spaces = {
    'nasbench101': NasBench101SearchSpace(),
    'nasbench201': NasBench201SearchSpace(),
    'nasbench301': NasBench301SearchSpace(),
}

set_seed(config.seed)

logger = setup_logger(config.save + "/log.log")
logger.setLevel(logging.INFO)  # default DEBUG is very verbose

search_space = search_spaces[config.search_space]

optimizer = optimizers[config.optimizer]
optimizer.adapt_search_space(search_space)

trainer = Trainer(optimizer, config)
trainer.search()

plot_architectural_weights(config, optimizer)

--------------------------------------------------------------------------------
/examples/run_darts.py:
--------------------------------------------------------------------------------
import logging

from naslib.defaults.trainer import Trainer
from naslib.optimizers import DARTSOptimizer, GDASOptimizer, RandomSearch
from naslib.search_spaces import NasBench301SearchSpace, SimpleCellSearchSpace
from naslib.utils import set_seed, setup_logger, get_config_from_args

config = get_config_from_args()  # use --help to see the options
set_seed(config.seed)

logger = setup_logger(config.save + "/log.log")
logger.setLevel(logging.INFO)  # default DEBUG is very verbose

search_space = NasBench301SearchSpace()  # use SimpleCellSearchSpace() for a less heavy search

optimizer = DARTSOptimizer(**config.search)
optimizer.adapt_search_space(search_space, config.dataset)

trainer = Trainer(optimizer, config)
trainer.search()    # Search for an architecture
trainer.evaluate()  # Evaluate the best architecture

--------------------------------------------------------------------------------
/images/naslib-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/images/naslib-logo.png

--------------------------------------------------------------------------------
/images/naslib-overall.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/images/naslib-overall.png

--------------------------------------------------------------------------------
/images/predictors.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/images/predictors.png

--------------------------------------------------------------------------------
/naslib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/__init__.py

--------------------------------------------------------------------------------
/naslib/__version__.py:
--------------------------------------------------------------------------------
"""Version information."""

# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.1.0"
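The comment in __version__.py above suggests the file is read textually at packaging time rather than imported. A minimal sketch of how such a pinned-format version line is typically parsed (hypothetical reader code, not NASLib's own setup.py):

    import pathlib
    import re

    # grab the version without importing the package (assumes the pinned format above)
    text = pathlib.Path("naslib/__version__.py").read_text()
    version = re.search(r'^__version__ = "(.+)"$', text, re.MULTILINE).group(1)
    print(version)  # -> 0.1.0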
--------------------------------------------------------------------------------
/naslib/configs/example_bbo_config.yaml:
--------------------------------------------------------------------------------
config_id: 0
dataset: TIMIT
optimizer: rs
out_dir: run_cpu
search:
  acq_fn_optimization: random_sampling
  acq_fn_type: its
  checkpoint_freq: 5000
  debug_predictor: false
  encoding_type: path
  epochs: 200
  fidelity: -1
  k: 10
  max_mutations: 1
  num_arches_to_mutate: 5
  num_candidates: 200
  num_ensemble: 3
  num_init: 10
  population_size: 50
  predictor: var_sparse_gp
  predictor_type: bananas
  sample_size: 10
search_space: asr
seed: 0

--------------------------------------------------------------------------------
/naslib/data/class_object_selected.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/data/class_object_selected.npy

--------------------------------------------------------------------------------
/naslib/data/class_scene_selected.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/data/class_scene_selected.npy

--------------------------------------------------------------------------------
/naslib/data/nb201_all.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/data/nb201_all.pickle

--------------------------------------------------------------------------------
/naslib/data/permutations_hamming_max_1000.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/data/permutations_hamming_max_1000.npy

--------------------------------------------------------------------------------
/naslib/data/taskonomydata_mini/generate_splits.py:
--------------------------------------------------------------------------------
import os
import json
import argparse


def main(vals_buildings, test_buildings):
    all_tasks = []
    dirs = [f.path for f in os.scandir(os.path.dirname(os.path.abspath(__file__))) if f.is_dir()]
    for d in dirs:
        taskname = os.path.basename(d)
        templates = [f"{taskname}/{{domain}}/" + os.path.basename(f.path).replace("_rgb.", "_{domain}.")
                     for f in os.scandir(os.path.join(d, "rgb")) if f.is_file()]
        templates = sorted(templates)
        with open(d + ".json", "w") as f:
            json.dump(templates, f)

        all_tasks.append(taskname)

    train_tasks = []
    val_tasks = []
    test_tasks = []
    for task in all_tasks:
        if task in test_buildings:
            test_tasks.append(task)
        elif task in vals_buildings:
            val_tasks.append(task)
        else:
            train_tasks.append(task)

    foldername = os.path.dirname(d)
    for s, f in zip([train_tasks, val_tasks, test_tasks], ["train_split.json", "val_split.json", "test_split.json"]):
        with open(os.path.join(foldername, f), "w") as file:
            json.dump(s, file)


if __name__ == '__main__':
    parser = argparse.ArgumentParser("Taskonomy splits generator")
    parser.add_argument("--val", nargs="*", type=str, default=[])
    parser.add_argument("--test", nargs="+", type=str, default=["uvalda", "merom", "stockman"])
    args = parser.parse_args()

    main(args.val, args.test)
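The script above is meant to be run from inside naslib/data/taskonomydata_mini/ once the building folders exist (see download_tnb.sh in the tree). A usage sketch, where the --val building name is hypothetical and the --test names are the script's own defaults:

    python generate_splits.py --val hanson --test uvalda merom stockman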
--------------------------------------------------------------------------------
/naslib/defaults/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/defaults/__init__.py

--------------------------------------------------------------------------------
/naslib/defaults/additional_primitives.py:
--------------------------------------------------------------------------------
import torch

from naslib.search_spaces.core.primitives import AbstractPrimitive, Identity


class DropPathWrapper(AbstractPrimitive):
    """
    A wrapper for the drop path training regularization.
    """

    def __init__(self, op):
        super().__init__(locals())
        self.op = op
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, x, edge_data):
        x = self.op(x, edge_data)
        if (
            edge_data.drop_path_prob > 0.0
            and not isinstance(self.op, Identity)
            and self.training
        ):
            keep_prob = 1.0 - edge_data.drop_path_prob
            mask = torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
            mask = mask.to(self.device)
            x.div_(keep_prob)
            x.mul_(mask)
        return x

    def get_embedded_ops(self):
        return self.op
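A minimal usage sketch of the wrapper above. It assumes AbstractPrimitive behaves like a torch.nn.Module (so .train() and __call__ work) and stands in a plain namespace for NASLib's edge data; the Scale op and the namespace are illustrative, not part of NASLib:

    import types

    import torch

    from naslib.defaults.additional_primitives import DropPathWrapper
    from naslib.search_spaces.core.primitives import AbstractPrimitive


    class Scale(AbstractPrimitive):
        """Toy primitive that doubles its input (illustrative only)."""

        def __init__(self):
            super().__init__(locals())

        def forward(self, x, edge_data):
            return 2 * x

        def get_embedded_ops(self):
            return None


    op = DropPathWrapper(Scale())
    op.train()  # drop path only fires in training mode (see self.training above)
    edge_data = types.SimpleNamespace(drop_path_prob=0.2)  # stand-in for an EdgeData instance
    # note: the wrapper moves its mask to CUDA when available, so on a GPU machine
    # the input should be placed on the same device first
    x = torch.randn(8, 3, 4, 4)
    out = op(x, edge_data)  # ~20% of the samples are zeroed, the rest rescaled by 1/0.8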
--------------------------------------------------------------------------------
/naslib/defaults/config_multi.yaml:
--------------------------------------------------------------------------------
dataset: cifar10
seed: 10
num_classes: 10

search:
  checkpoint_freq: 10
  batch_size: 256
  learning_rate: 0.025
  learning_rate_min: 0.001
  momentum: 0.9
  weight_decay: 0.0003
  epochs: 1
  warm_start_epochs: 0
  grad_clip: 5
  train_portion: 0.5
  data_size: 25000

  cutout: False
  cutout_length: 16
  cutout_prob: 1.0
  drop_path_prob: 0.0

  unrolled: False
  arch_learning_rate: 0.0003
  arch_weight_decay: 0.001
  output_weights: True

  # GDAS
  tau_max: 10
  tau_min: 0.1

  # RE
  sample_size: 10
  population_size: 100

evaluation:
  checkpoint_freq: 1
  batch_size: 96
  learning_rate: 0.025
  learning_rate_min: 0.00
  momentum: 0.9
  weight_decay: 0.0003
  epochs: 5
  warm_start_epochs: 0
  grad_clip: 5
  train_portion: 1
  data_size: 50000

  cutout: True
  cutout_length: 16
  cutout_prob: 1.0
  drop_path_prob: 0.2
  auxiliary_weight: 0.4

--------------------------------------------------------------------------------
/naslib/defaults/darts_defaults.yaml:
--------------------------------------------------------------------------------
dataset: cifar10
seed: 99
search_space: nasbench201
out_dir: run
optimizer: darts

search:
  checkpoint_freq: 5
  batch_size: 64
  learning_rate: 0.025
  learning_rate_min: 0.001
  momentum: 0.9
  weight_decay: 0.0003
  epochs: 50
  warm_start_epochs: 0
  grad_clip: 5
  train_portion: 0.5
  data_size: 25000

  cutout: False
  cutout_length: 16
  cutout_prob: 1.0
  drop_path_prob: 0.0

  unrolled: False
  arch_learning_rate: 0.0003
  arch_weight_decay: 0.001
  output_weights: True

  fidelity: 200

  # GDAS
  tau_max: 10
  tau_min: 0.1

  # RE
  sample_size: 10
  population_size: 100

  # LS
  num_init: 10

  # GSparsity -> Uncomment the lines below for GSparsity
  #seed: 50
  #grad_clip: 0
  #threshold: 0.000001
  #weight_decay: 120
  #learning_rate: 0.01
  #momentum: 0.8
  #normalization: div
  #normalization_exponent: 0.5
  #batch_size: 256
  #learning_rate_min: 0.0001
  #epochs: 100
  #warm_start_epochs: 0
  #train_portion: 0.9
  #data_size: 25000


  # BANANAS
  k: 10
  num_ensemble: 3
  acq_fn_type: its
  acq_fn_optimization: mutation
  encoding_type: path
  num_arches_to_mutate: 2
  max_mutations: 1
  num_candidates: 100

  # BasePredictor
  predictor_type: var_sparse_gp
  debug_predictor: False

evaluation:
  checkpoint_freq: 30
  batch_size: 96
  learning_rate: 0.025
  learning_rate_min: 0.00
  momentum: 0.9
  weight_decay: 0.0003
  epochs: 600
  warm_start_epochs: 0
  grad_clip: 5
  train_portion: 1.
  data_size: 50000

  cutout: True
  cutout_length: 16
  cutout_prob: 1.0
  drop_path_prob: 0.2
  auxiliary_weight: 0.4
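These defaults surface in code as nested attributes. A short sketch of the pattern used throughout the examples in this repo (get_config_from_args and set_seed are NASLib's own helpers, as in examples/run_darts.py; the overridden values here are arbitrary):

    from naslib.utils import get_config_from_args, set_seed

    config = get_config_from_args()  # loads the defaults plus any CLI overrides; use --help to see the options
    config.search.epochs = 10        # attributes mirror the YAML structure above
    config.search.batch_size = 32
    set_seed(config.seed)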
75 | checkpoint_freq: 30 76 | batch_size: 96 77 | learning_rate: 0.025 78 | learning_rate_min: 0.00 79 | momentum: 0.9 80 | weight_decay: 0.0003 81 | epochs: 600 82 | warm_start_epochs: 0 83 | grad_clip: 5 84 | train_portion: 1. 85 | data_size: 50000 86 | 87 | cutout: True 88 | cutout_length: 16 89 | cutout_prob: 1.0 90 | drop_path_prob: 0.2 91 | auxiliary_weight: 0.4 92 | -------------------------------------------------------------------------------- /naslib/defaults/nb201_defaults.yaml: -------------------------------------------------------------------------------- 1 | seed: 99 2 | optimizer: re 3 | dataset: cifar10 4 | out_dir: run 5 | 6 | search: 7 | checkpoint_freq: 5 8 | epochs: 150 9 | 10 | # GDAS 11 | tau_max: 10 12 | tau_min: 0.1 13 | 14 | # RE 15 | sample_size: 10 16 | population_size: 30 17 | 18 | # LS 19 | num_init: 10 20 | 21 | # BANANAS 22 | k: 10 23 | num_ensemble: 3 24 | acq_fn_type: its 25 | acq_fn_optimization: mutation 26 | encoding_type: path 27 | num_arches_to_mutate: 2 28 | max_mutations: 1 29 | num_candidates: 100 30 | 31 | # GSparsity 32 | seed: 50 33 | grad_clip: 0 34 | threshold: 0.000001 35 | weight_decay: 60 36 | learning_rate: 0.001 37 | momentum: 0.8 38 | normalization: div 39 | normalization_exponent: 0.5 40 | batch_size: 64 41 | learning_rate_min: 0.0001 42 | epochs: 100 43 | warm_start_epochs: 0 44 | train_portion: 1.0 45 | data_size: 25000 46 | 47 | -------------------------------------------------------------------------------- /naslib/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | from .zc_evaluator import ZeroCostPredictorEvaluator 2 | from .full_evaluation import full_evaluate_predictor -------------------------------------------------------------------------------- /naslib/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .oneshot.darts.optimizer import DARTSOptimizer 2 | from .oneshot.gsparsity.optimizer import GSparseOptimizer 3 | from .oneshot.oneshot_train.optimizer import OneShotNASOptimizer 4 | from .oneshot.rs_ws.optimizer import RandomNASOptimizer 5 | from .oneshot.gdas.optimizer import GDASOptimizer 6 | from .oneshot.drnas.optimizer import DrNASOptimizer 7 | from .discrete.rs.optimizer import RandomSearch 8 | from .discrete.re.optimizer import RegularizedEvolution 9 | from .discrete.ls.optimizer import LocalSearch 10 | from .discrete.bananas.optimizer import Bananas 11 | from .discrete.bp.optimizer import BasePredictor 12 | from .discrete.npenas.optimizer import Npenas 13 | -------------------------------------------------------------------------------- /naslib/optimizers/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/core/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/bananas/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/bananas/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/bp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/bp/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/ls/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/ls/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/npenas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/npenas/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/re/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/re/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/discrete/rs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/discrete/rs/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/darts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/darts/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/drnas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/drnas/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gdas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/gdas/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/gsparsity/__init__.py 
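The optimizers exported from naslib/optimizers/__init__.py above are all driven the same way: pair one with a search space and a parsed YAML config. The sketch below is a minimal, hedged example of that flow; it assumes NASLib's Trainer lives in naslib.defaults.trainer and that DARTSOptimizer takes the config object directly, both of which can differ between NASLib versions.

    from naslib import utils
    from naslib.defaults.trainer import Trainer        # assumed module path
    from naslib.optimizers import DARTSOptimizer
    from naslib.search_spaces import NasBench201SearchSpace

    # Parse a config such as naslib/defaults/darts_defaults.yaml from the CLI.
    config = utils.get_config_from_args()
    utils.set_seed(config.seed)

    optimizer = DARTSOptimizer(config)                 # constructor signature assumed
    optimizer.adapt_search_space(NasBench201SearchSpace())

    trainer = Trainer(optimizer, config)
    trainer.search()    # uses the settings under config.search
    trainer.evaluate()  # uses the settings under config.evaluation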
-------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/config.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar100 2 | seed: 1 3 | search_space: nasbench201 4 | out_dir: /work/dlclarge2/agnihotr-ml/NASLib/naslib/optimizers/oneshot/gsparsity/run 5 | optimizer: gsparsity 6 | 7 | search: 8 | 9 | 10 | #GSparsity 11 | grad_clip: 0 12 | threshold: 0.000001 13 | weight_decay: 120 14 | learning_rate: 0.01 15 | momentum: 0.8 16 | normalization: div 17 | normalization_exponent: 0.5 18 | batch_size: 128 19 | learning_rate_min: 0.0001 20 | epochs: 100 21 | warm_start_epochs: 0 22 | train_portion: 0.95 23 | data_size: 50000 24 | 25 | evaluation: 26 | checkpoint_freq: 30 27 | batch_size: 96 28 | learning_rate: 0.025 29 | learning_rate_min: 0.00 30 | momentum: 0.9 31 | weight_decay: 0.0003 32 | epochs: 600 33 | warm_start_epochs: 0 34 | grad_clip: 5 35 | train_portion: 1. 36 | data_size: 50000 37 | 38 | cutout: True 39 | cutout_length: 16 40 | cutout_prob: 1.0 41 | drop_path_prob: 0.2 42 | auxiliary_weight: 0.4 43 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/darts_config.yaml: -------------------------------------------------------------------------------- 1 | dataset: cifar100 2 | seed: 20 3 | search_space: darts 4 | out_dir: /work/dlclarge2/agnihotr-ml/NASLib/naslib/optimizers/oneshot/gsparsity/run 5 | optimizer: gsparsity 6 | 7 | search: 8 | 9 | 10 | #GSparsity 11 | grad_clip: 0 12 | threshold: 0.000001 13 | weight_decay: 60 14 | learning_rate: 0.01 15 | momentum: 0.8 16 | normalization: div 17 | normalization_exponent: 0.5 18 | batch_size: 32 19 | learning_rate_min: 0.0001 20 | epochs: 50 21 | warm_start_epochs: 0 22 | train_portion: 0.9 23 | data_size: 45000 24 | 25 | evaluation: 26 | checkpoint_freq: 30 27 | batch_size: 96 28 | learning_rate: 0.025 29 | learning_rate_min: 0.00 30 | momentum: 0.9 31 | weight_decay: 0.0001 32 | epochs: 600 33 | warm_start_epochs: 0 34 | grad_clip: 5 35 | train_portion: 1. 
36 | data_size: 50000 37 | 38 | cutout: True 39 | cutout_length: 16 40 | cutout_prob: 1.0 41 | drop_path_prob: 0.2 42 | auxiliary_weight: 0.4 43 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/darts_gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | python runner.py --config-file darts_config.yaml 9 | 10 | end=`date +%s` 11 | runtime=$((end-start)) 12 | 13 | echo Runtime: $runtime 14 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/gsparsity/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | python runner.py --config-file config.yaml 9 | 10 | end=`date +%s` 11 | runtime=$((end-start)) 12 | 13 | echo Runtime: $runtime 14 | -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/oneshot_train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/oneshot_train/__init__.py -------------------------------------------------------------------------------- /naslib/optimizers/oneshot/rs_ws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/optimizers/oneshot/rs_ws/__init__.py -------------------------------------------------------------------------------- /naslib/predictors/__init__.py: -------------------------------------------------------------------------------- 1 | from .predictor import Predictor 2 | from .bonas import BonasPredictor 3 | from .bnn import BayesianLinearRegression, BOHAMIANN, DNGOPredictor 4 | from .early_stopping import EarlyStopping 5 | from .ensemble import Ensemble 6 | from .gcn import GCNPredictor 7 | from .gp import GPPredictor, SparseGPPredictor, VarSparseGPPredictor, GPWLPredictor 8 | from .lce import LCEPredictor 9 | from .lce_m import LCEMPredictor 10 | from .lcsvr import SVR_Estimator 11 | from .mlp import MLPPredictor 12 | from .oneshot import OneShotPredictor 13 | from .seminas import SemiNASPredictor 14 | from .soloss import SoLosspredictor 15 | from .trees import LGBoost, NGBoost, RandomForestPredictor, XGBoost 16 | from .zerocost import ZeroCost 17 | from .omni_ngb import OmniNGBPredictor 18 | from .omni_seminas import OmniSemiNASPredictor 19 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/__init__.py: -------------------------------------------------------------------------------- 1 | from .dngo import DNGOPredictor 2 | from .bohamiann import BOHAMIANN 3 | from .bayesian_linear_reg import BayesianLinearRegression 4 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/bayesian_linear_reg.py: 
-------------------------------------------------------------------------------- 1 | # This is an implementation of Bayesian Linear Regression 2 | 3 | from pybnn.bayesian_linear_regression import BayesianLinearRegression as BLR 4 | from pybnn.bayesian_linear_regression import linear_basis_func, quadratic_basis_func 5 | 6 | from naslib.predictors.bnn.bnn_base import BNN 7 | 8 | 9 | class BayesianLinearRegression(BNN): 10 | def get_model(self, **kwargs): 11 | predictor = BLR( 12 | alpha=1.0, 13 | beta=100, 14 | basis_func=linear_basis_func, 15 | prior=None, 16 | do_mcmc=False, # turn this off for better sample efficiency 17 | n_hypers=20, 18 | chain_length=100, 19 | burnin_steps=100, 20 | ) 21 | return predictor 22 | 23 | def train_model(self, xtrain, ytrain): 24 | self.model.train(xtrain, ytrain, do_optimize=True) 25 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/bohamiann.py: -------------------------------------------------------------------------------- 1 | # This is an implementation of the BOHAMIANN predictor from the paper: 2 | # Springenberg et al., 2016. Bayesian Optimization with Robust Bayesian Neural 3 | # Networks 4 | 5 | import torch.nn as nn 6 | from pybnn.bohamiann import Bohamiann, nll, get_default_network 7 | 8 | from naslib.predictors.bnn.bnn_base import BNN 9 | 10 | 11 | class BOHAMIANN(BNN): 12 | def get_model(self, **kwargs): 13 | predictor = Bohamiann( 14 | get_network=get_default_network, 15 | sampling_method="adaptive_sghmc", 16 | use_double_precision=True, 17 | metrics=(nn.MSELoss,), 18 | likelihood_function=nll, 19 | print_every_n_steps=10, 20 | normalize_input=False, 21 | normalize_output=True, 22 | ) 23 | return predictor 24 | 25 | def train_model(self, xtrain, ytrain): 26 | self.model.train( 27 | xtrain, 28 | ytrain, 29 | num_steps=self.num_steps, 30 | num_burn_in_steps=10, 31 | keep_every=5, 32 | lr=1e-2, 33 | verbose=True, 34 | ) 35 | -------------------------------------------------------------------------------- /naslib/predictors/bnn/dngo.py: -------------------------------------------------------------------------------- 1 | # This is an implementation of the DNGO predictor from the paper: 2 | # Snoek et al., 2015. 
Scalable Bayesian Optimization using DNNs 3 | 4 | from pybnn.dngo import DNGO 5 | 6 | from naslib.predictors.bnn.bnn_base import BNN 7 | 8 | 9 | class DNGOPredictor(BNN): 10 | def get_model(self, **kwargs): 11 | predictor = DNGO( 12 | batch_size=10, 13 | num_epochs=500, 14 | learning_rate=0.01, 15 | adapt_epoch=5000, 16 | n_units_1=50, 17 | n_units_2=50, 18 | n_units_3=50, 19 | alpha=1.0, 20 | beta=1000, 21 | prior=None, 22 | do_mcmc=True, # turn this off for better sample efficiency 23 | n_hypers=20, 24 | chain_length=2000, 25 | burnin_steps=2000, 26 | normalize_input=False, 27 | normalize_output=True, 28 | ) 29 | return predictor 30 | 31 | def train_model(self, xtrain, ytrain): 32 | try: 33 | self.model.train(xtrain, ytrain, do_optimize=True) 34 | except ValueError: 35 | self.model.train(xtrain, ytrain, do_optimize=False) 36 | -------------------------------------------------------------------------------- /naslib/predictors/early_stopping.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from naslib.predictors.predictor import Predictor 4 | from naslib.search_spaces.core.query_metrics import Metric 5 | 6 | 7 | class EarlyStopping(Predictor): 8 | def __init__(self, metric): 9 | 10 | self.metric = metric 11 | 12 | def query(self, xtest, info): 13 | """ 14 | info: a list of dictionaries which include the learning curve of the 15 | corresponding architecture. 16 | Return the final value on the learning curve 17 | """ 18 | if self.metric in [Metric.VAL_LOSS, Metric.TRAIN_LOSS]: 19 | # invert to get accurate rank correlation 20 | return np.array([-inf["lc"][-1] for inf in info]) 21 | else: 22 | return np.array([inf["lc"][-1] for inf in info]) 23 | 24 | def get_metric(self): 25 | return self.metric 26 | 27 | def get_data_reqs(self): 28 | """ 29 | Returns a dictionary with info about whether the predictor needs 30 | extra info to train/query. 
31 | """ 32 | reqs = { 33 | "requires_partial_lc": True, 34 | "metric": self.metric, 35 | "requires_hyperparameters": False, 36 | "hyperparams": None, 37 | "unlabeled": False, 38 | "unlabeled_factor": 0, 39 | } 40 | return reqs 41 | -------------------------------------------------------------------------------- /naslib/predictors/gp/__init__.py: -------------------------------------------------------------------------------- 1 | from .gp_base import BaseGPModel 2 | from .gp import GPPredictor 3 | from .sparse_gp import SparseGPPredictor 4 | from .var_sparse_gp import VarSparseGPPredictor 5 | from .gpwl import GPWLPredictor 6 | -------------------------------------------------------------------------------- /naslib/predictors/gp/gpwl_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/predictors/gp/gpwl_utils/__init__.py -------------------------------------------------------------------------------- /naslib/predictors/gp/sparse_gp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pyro 3 | import pyro.contrib.gp as gp 4 | import pyro.distributions as dist 5 | import numpy as np 6 | 7 | from naslib.predictors.gp import GPPredictor 8 | 9 | 10 | class SparseGPPredictor(GPPredictor): 11 | def get_model(self, train_data, **kwargs): 12 | X_train, y_train = train_data 13 | # initialize the kernel and model 14 | pyro.clear_param_store() 15 | kernel = self.kernel(input_dim=X_train.shape[1]) 16 | Xu = torch.arange(10.0) / 2.0 17 | Xu.unsqueeze_(-1) 18 | Xu = Xu.expand(10, X_train.shape[1]).double() 19 | gpr = gp.models.SparseGPRegression( 20 | X_train, y_train, kernel, Xu=Xu, jitter=1.0e-5 21 | ) 22 | return gpr 23 | -------------------------------------------------------------------------------- /naslib/predictors/gp/var_sparse_gp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pyro 3 | import pyro.contrib.gp as gp 4 | 5 | from naslib.predictors.gp import GPPredictor 6 | 7 | 8 | class VarSparseGPPredictor(GPPredictor): 9 | def get_model(self, train_data, **kwargs): 10 | X_train, y_train = train_data 11 | # initialize the kernel and model 12 | pyro.clear_param_store() 13 | kernel = self.kernel(input_dim=X_train.shape[1]) 14 | Xu = torch.arange(10.0) / 2.0 15 | Xu.unsqueeze_(-1) 16 | Xu = Xu.expand(10, X_train.shape[1]).double() 17 | likelihood = gp.likelihoods.Gaussian() 18 | gpr = gp.models.VariationalSparseGP( 19 | X_train, y_train, kernel, Xu=Xu, likelihood=likelihood, whiten=True 20 | ) 21 | return gpr 22 | -------------------------------------------------------------------------------- /naslib/predictors/lce/__init__.py: -------------------------------------------------------------------------------- 1 | from .lce import LCEPredictor 2 | -------------------------------------------------------------------------------- /naslib/predictors/lce_m/__init__.py: -------------------------------------------------------------------------------- 1 | from .lce_m import LCEMPredictor 2 | -------------------------------------------------------------------------------- /naslib/predictors/oneshot/__init__.py: -------------------------------------------------------------------------------- 1 | from .oneshot import OneShotPredictor 2 | -------------------------------------------------------------------------------- 
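Both sparse GP predictors above construct their inducing inputs Xu identically: ten evenly spaced points, expanded across the input dimension. A standalone sketch of just that construction (d is an illustrative dimension, not a value taken from the repo):

    import torch

    d = 4                            # illustrative input dimension (assumption)
    Xu = torch.arange(10.0) / 2.0    # tensor([0.0, 0.5, 1.0, ..., 4.5])
    Xu.unsqueeze_(-1)                # in-place reshape to (10, 1)
    Xu = Xu.expand(10, d).double()   # 10 inducing points, constant across all d dims
    assert Xu.shape == (10, d)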
/naslib/predictors/trees/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_tree_class import BaseTree 2 | from .lgb import LGBoost 3 | from .ngb import NGBoost 4 | from .xgb import XGBoost 5 | from .random_forest import RandomForestPredictor 6 | -------------------------------------------------------------------------------- /naslib/predictors/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/predictors/utils/__init__.py -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/SharedUtils.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | import torch 5 | import torch.nn as nn 6 | 7 | 8 | def additive_func(A, B): 9 | assert A.dim() == B.dim() and A.size(0) == B.size(0), "{:} vs {:}".format( 10 | A.size(), B.size() 11 | ) 12 | C = min(A.size(1), B.size(1)) 13 | if A.size(1) == B.size(1): 14 | return A + B 15 | elif A.size(1) < B.size(1): 16 | out = B.clone() 17 | out[:, :C] += A 18 | return out 19 | else: 20 | out = A.clone() 21 | out[:, :C] += B 22 | return out 23 | 24 | 25 | def change_key(key, value): 26 | def func(m): 27 | if hasattr(m, key): 28 | setattr(m, key, value) 29 | 30 | return func 31 | 32 | 33 | def parse_channel_info(xstring): 34 | blocks = xstring.split(" ") 35 | blocks = [x.split("-") for x in blocks] 36 | blocks = [[int(_) for _ in x] for x in blocks] 37 | return blocks 38 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from .tiny_network import TinyNetwork 5 | from .nasnet_cifar import NASNetonCIFAR 6 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | # The macro structure is defined in NAS-Bench-201 5 | from .search_model_darts import TinyNetworkDarts 6 | from .search_model_gdas import TinyNetworkGDAS 7 | from .search_model_setn import TinyNetworkSETN 8 | from .search_model_enas import TinyNetworkENAS 9 | from .search_model_random import TinyNetworkRANDOM 10 | from .genotypes import Structure as CellStructure, architectures as CellArchitectures 11 | 12 | # NASNet-based macro structure 13 | from .search_model_gdas_nasnet import NASNetworkGDAS 14 | from .search_model_darts_nasnet import NASNetworkDARTS 15 | 16 | 17 | nas201_super_nets = { 18 | "DARTS-V1": TinyNetworkDarts, 19 | "DARTS-V2": TinyNetworkDarts, 20 | "GDAS": TinyNetworkGDAS, 21 | "SETN": TinyNetworkSETN, 22 | "ENAS": TinyNetworkENAS, 23 | "RANDOM": TinyNetworkRANDOM, 24 | } 25 | 26 | nasnet_super_nets = {"GDAS": NASNetworkGDAS, "DARTS": NASNetworkDARTS} 27 
| -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/cell_searchs/_test_module.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | import torch 5 | from search_model_enas_utils import Controller 6 | 7 | 8 | def main(): 9 | controller = Controller(6, 4) 10 | predictions = controller() 11 | 12 | 13 | if __name__ == "__main__": 14 | main() 15 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/initialization.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | def initialize_resnet(m): 6 | if isinstance(m, nn.Conv2d): 7 | nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") 8 | if m.bias is not None: 9 | nn.init.constant_(m.bias, 0) 10 | elif isinstance(m, nn.BatchNorm2d): 11 | nn.init.constant_(m.weight, 1) 12 | if m.bias is not None: 13 | nn.init.constant_(m.bias, 0) 14 | elif isinstance(m, nn.Linear): 15 | nn.init.normal_(m.weight, 0, 0.01) 16 | nn.init.constant_(m.bias, 0) 17 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_infers/__init__.py: -------------------------------------------------------------------------------- 1 | ##################################################### 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # 3 | ##################################################### 4 | from .InferCifarResNet_width import InferWidthCifarResNet 5 | from .InferImagenetResNet import InferImagenetResNet 6 | from .InferCifarResNet_depth import InferDepthCifarResNet 7 | from .InferCifarResNet import InferCifarResNet 8 | from .InferMobileNetV2 import InferMobileNetV2 9 | from .InferTinyCellNet import DynamicShapeTinyNet 10 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_infers/shared_utils.py: -------------------------------------------------------------------------------- 1 | def parse_channel_info(xstring): 2 | blocks = xstring.split(" ") 3 | blocks = [x.split("-") for x in blocks] 4 | blocks = [[int(_) for _ in x] for x in blocks] 5 | return blocks 6 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_searchs/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | ################################################## 4 | from .SearchCifarResNet_width import SearchWidthCifarResNet 5 | from .SearchCifarResNet_depth import SearchDepthCifarResNet 6 | from .SearchCifarResNet import SearchShapeCifarResNet 7 | from .SearchSimResNet_width import SearchWidthSimResNet 8 | from .SearchImagenetResNet import SearchShapeImagenetResNet 9 | -------------------------------------------------------------------------------- /naslib/predictors/utils/build_nets/shape_searchs/test.py: -------------------------------------------------------------------------------- 1 | ################################################## 2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # 3 | 
################################################## 4 | import torch 5 | import torch.nn as nn 6 | from SoftSelect import ChannelWiseInter 7 | 8 | 9 | if __name__ == "__main__": 10 | 11 | tensors = torch.rand((16, 128, 7, 7)) 12 | 13 | for oc in range(200, 210): 14 | out_v1 = ChannelWiseInter(tensors, oc, "v1") 15 | out_v2 = ChannelWiseInter(tensors, oc, "v2") 16 | assert (out_v1 == out_v2).all().item() == 1 17 | for oc in range(48, 160): 18 | out_v1 = ChannelWiseInter(tensors, oc, "v1") 19 | out_v2 = ChannelWiseInter(tensors, oc, "v2") 20 | assert (out_v1 == out_v2).all().item() == 1 21 | -------------------------------------------------------------------------------- /naslib/predictors/utils/encodings.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from naslib.utils.encodings import EncodingType 4 | from naslib.search_spaces.nasbench101.encodings import encode_101_spec 5 | from naslib.search_spaces.nasbench201.encodings import encode_adjacency_one_hot_op_indices 6 | from naslib.search_spaces.nasbench301.encodings import encode_darts_compact 7 | from naslib.search_spaces.transbench101.encodings import encode_adjacency_one_hot_transbench_micro_op_indices, \ 8 | encode_adjacency_one_hot_transbench_macro_op_indices 9 | from naslib.search_spaces.nasbench101.conversions import convert_tuple_to_spec 10 | 11 | """ 12 | Currently we need search space specific methods. 13 | The plan is to unify encodings across all search spaces. 14 | nasbench201 and darts are implemented so far. 15 | TODO: clean up this file. 16 | """ 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | def encode_spec(spec, encoding_type=EncodingType.ADJACENCY_ONE_HOT, ss_type=None): 22 | if ss_type == 'nasbench101': 23 | if isinstance(spec, tuple): 24 | spec = convert_tuple_to_spec(spec) 25 | return encode_101_spec(spec, encoding_type=encoding_type) 26 | elif ss_type == 'nasbench201' and encoding_type == EncodingType.ADJACENCY_ONE_HOT: 27 | return encode_adjacency_one_hot_op_indices(spec) 28 | elif ss_type == 'nasbench301': 29 | return encode_darts_compact(spec, encoding_type=encoding_type) 30 | elif ss_type == 'transbench101_micro' and encoding_type == EncodingType.ADJACENCY_ONE_HOT: 31 | return encode_adjacency_one_hot_transbench_micro_op_indices(spec) 32 | elif ss_type == 'transbench101_macro' and encoding_type == EncodingType.ADJACENCY_ONE_HOT: 33 | return encode_adjacency_one_hot_transbench_macro_op_indices(spec) 34 | else: 35 | raise NotImplementedError(f'No implementation found for encoding search space {ss_type} with {encoding_type}') 36 | -------------------------------------------------------------------------------- /naslib/predictors/utils/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | 
14 | # ============================================================================= 15 | 16 | from os.path import dirname, basename, isfile, join 17 | import glob 18 | 19 | modules = glob.glob(join(dirname(__file__), "*.py")) 20 | __all__ = [ 21 | basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") 22 | ] 23 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | from os.path import dirname, basename, isfile, join 17 | import glob 18 | 19 | modules = glob.glob(join(dirname(__file__), "*.py")) 20 | __all__ = [ 21 | basename(f)[:-3] for f in modules if isfile(f) and not f.endswith("__init__.py") 22 | ] 23 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/grad_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | import torch 17 | import torch.nn.functional as F 18 | 19 | import copy 20 | 21 | from . import measure 22 | from ..p_utils import get_layer_metric_array 23 | 24 | 25 | @measure("grad_norm", bn=True) 26 | def get_grad_norm_arr(net, inputs, targets, loss_fn, split_data=1, skip_grad=False): 27 | net.zero_grad() 28 | N = inputs.shape[0] 29 | for sp in range(split_data): 30 | st = sp * N // split_data 31 | en = (sp + 1) * N // split_data 32 | 33 | outputs = net.forward(inputs[st:en]) 34 | loss = loss_fn(outputs, targets[st:en]) 35 | loss.backward() 36 | 37 | grad_norm_arr = get_layer_metric_array( 38 | net, 39 | lambda l: l.weight.grad.norm() 40 | if l.weight.grad is not None 41 | else torch.zeros_like(l.weight), 42 | mode="param", 43 | ) 44 | 45 | return grad_norm_arr 46 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/l2_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | from . import measure 17 | from ..p_utils import get_layer_metric_array 18 | 19 | 20 | @measure("l2_norm", copy_net=False, mode="param") 21 | def get_l2_norm_array(net, inputs, targets, mode, split_data=1, **kwargs): 22 | return get_layer_metric_array(net, lambda l: l.weight.norm(), mode=mode) 23 | -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/model_stats.py: -------------------------------------------------------------------------------- 1 | import tensorwatch as tw 2 | 3 | 4 | def get_model_stats(model, 5 | input_tensor_shape, clone_model=True)->tw.ModelStats: 6 | # model stats is doing some hooks so do it last 7 | model_stats = tw.ModelStats(model, input_tensor_shape, 8 | clone_model=clone_model) 9 | return model_stats -------------------------------------------------------------------------------- /naslib/predictors/utils/pruners/measures/plain.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Samsung Electronics Co., Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================= 15 | 16 | import torch 17 | import torch.nn.functional as F 18 | 19 | from . 
import measure 20 | from ..p_utils import get_layer_metric_array 21 | 22 | 23 | @measure("plain", bn=True, mode="param") 24 | def compute_plain_per_weight(net, inputs, targets, mode, loss_fn, split_data=1): 25 | 26 | net.zero_grad() 27 | N = inputs.shape[0] 28 | for sp in range(split_data): 29 | st = sp * N // split_data 30 | en = (sp + 1) * N // split_data 31 | 32 | outputs = net.forward(inputs[st:en]) 33 | loss = loss_fn(outputs, targets[st:en]) 34 | loss.backward() 35 | 36 | # select the gradients that we want to use for search/prune 37 | def plain(layer): 38 | if layer.weight.grad is not None: 39 | return layer.weight.grad * layer.weight 40 | else: 41 | return torch.zeros_like(layer.weight) 42 | 43 | grads_abs = get_layer_metric_array(net, plain, mode) 44 | return grads_abs 45 | -------------------------------------------------------------------------------- /naslib/runners/bbo/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | # random seed 2 | seed: 0 3 | 4 | # re, bananas, npenas, ls, rs 5 | optimizer: re 6 | 7 | # nasbench101, nasbench201, darts, nlp, transbench101, asr 8 | search_space: nasbench201 9 | 10 | # cifar10, cifar100, or ImageNet16-120 (only important for nasbench201) 11 | dataset: cifar10 12 | 13 | # output results to this directory 14 | out_dir: run_boschcpu 15 | 16 | # config id for the experiment 17 | config_id: 0 18 | 19 | # parameters for the optimizers 20 | search: 21 | # for bohb 22 | budgets: 50000000 23 | checkpoint_freq: 1000 24 | fidelity: 108 25 | 26 | # for all optimizers 27 | epochs: 10 28 | 29 | # for bananas and npenas, choose one predictor 30 | # out of the 16 model-based predictors 31 | predictor_type: var_sparse_gp 32 | 33 | # number of initial architectures 34 | num_init: 10 35 | 36 | # BANANAS 37 | k: 10 38 | num_ensemble: 3 39 | acq_fn_type: its 40 | acq_fn_optimization: mutation 41 | encoding_type: adjacency_one_hot 42 | num_arches_to_mutate: 1 43 | max_mutations: 1 44 | num_candidates: 50 45 | 46 | # jacov data loader 47 | batch_size: 256 48 | data_size: 25000 49 | cutout: False 50 | cutout_length: 16 51 | cutout_prob: 1.0 52 | train_portion: 0.7 53 | 54 | # other params 55 | debug_predictor: False 56 | sample_size: 10 57 | population_size: 30 58 | 59 | # copied directly from darts_defaults 60 | evaluation: 61 | checkpoint_freq: 30 62 | batch_size: 96 63 | learning_rate: 0.025 64 | learning_rate_min: 0.00 65 | momentum: 0.9 66 | weight_decay: 0.0003 67 | epochs: 600 68 | warm_start_epochs: 0 69 | grad_clip: 5 70 | train_portion: 1. 
71 | data_size: 50000 72 | 73 | cutout: True 74 | cutout_length: 16 75 | cutout_prob: 1.0 76 | drop_path_prob: 0.2 77 | auxiliary_weight: 0.4 78 | -------------------------------------------------------------------------------- /naslib/runners/nas/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | # random seed 2 | seed: 0 3 | 4 | # re, bananas, npenas, ls, rs 5 | optimizer: re 6 | 7 | # nasbench101, nasbench201, darts, nlp, 8 | # transbench101_micro, transbench101_macro, asr 9 | search_space: nasbench201 10 | 11 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 12 | # transbench101 datasets: class_scene, class_object, 13 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 14 | dataset: cifar10 15 | 16 | # output results to this directory 17 | out_dir: run 18 | 19 | # parameters for the optimizers 20 | search: 21 | # for bohb 22 | budgets: 50000000 23 | checkpoint_freq: 1000 24 | fidelity: 108 25 | 26 | # for all optimizers 27 | epochs: 100 28 | 29 | # for bananas and npenas, choose one predictor 30 | # out of the 16 model-based predictors 31 | predictor_type: var_sparse_gp 32 | 33 | # number of initial architectures 34 | num_init: 10 35 | 36 | # BANANAS 37 | k: 10 38 | num_ensemble: 3 39 | acq_fn_type: its 40 | acq_fn_optimization: mutation 41 | encoding_type: adjacency_one_hot 42 | num_arches_to_mutate: 5 43 | max_mutations: 1 44 | num_candidates: 200 45 | 46 | # jacov data loader 47 | batch_size: 256 48 | data_size: 25000 49 | cutout: False 50 | cutout_length: 16 51 | cutout_prob: 1.0 52 | train_portion: 0.7 53 | 54 | # other params 55 | debug_predictor: False 56 | sample_size: 10 57 | population_size: 30 58 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/discrete_config.yaml: -------------------------------------------------------------------------------- 1 | seed: 0 2 | optimizer: bananas 3 | search_space: nasbench201 4 | dataset: cifar10 5 | out_dir: run 6 | 7 | search: 8 | checkpoint_freq: 1000 9 | epochs: 100 10 | fidelity: -1 11 | 12 | predictor_type: var_sparse_gp 13 | num_init: 10 14 | k: 10 15 | 16 | # BANANAS 17 | num_ensemble: 3 18 | acq_fn_type: its 19 | acq_fn_optimization: random_sampling 20 | encoding_type: adjacency_one_hot 21 | num_arches_to_mutate: 2 22 | max_mutations: 1 23 | num_candidates: 20 24 | 25 | # jacov data loader 26 | batch_size: 256 27 | data_size: 25000 28 | cutout: False 29 | cutout_length: 16 30 | cutout_prob: 1.0 31 | train_portion: 0.7 32 | 33 | # other params 34 | debug_predictor: False 35 | sample_size: 10 36 | population_size: 30 37 | -------------------------------------------------------------------------------- /naslib/runners/nas_predictors/nas_predictor_config.yaml: -------------------------------------------------------------------------------- 1 | seed: 0 2 | optimizer: oneshot 3 | search_space: darts 4 | dataset: cifar10 5 | out_dir: run 6 | 7 | experiment_type: single 8 | predictor: oneshot 9 | test_size: 200 10 | train_size_list: [8, 12] 11 | train_size_single: 2 12 | fidelity_list: [5] 13 | fidelity_single: 5 14 | 15 | search: 16 | checkpoint_freq: 1000 17 | epochs: 50 18 | fidelity: -1 19 | 20 | # GDAS 21 | tau_max: 10 22 | tau_min: 0.1 23 | 24 | # RE 25 | sample_size: 10 26 | population_size: 30 27 | 28 | # LS 29 | num_init: 10 30 | 31 | # BANANAS 32 | k: 10 33 | num_ensemble: 3 34 | acq_fn_type: its 35 | acq_fn_optimization: mutation 36 | encoding_type: path 37 | num_arches_to_mutate: 2 38 | 
max_mutations: 1 39 | num_candidates: 100 40 | 41 | # BP 42 | predictor_type: oneshot 43 | debug_predictor: False 44 | 45 | 46 | # additional params 47 | batch_size: 64 48 | learning_rate: 0.025 49 | learning_rate_min: 0.001 50 | momentum: 0.9 51 | weight_decay: 0.0003 52 | warm_start_epochs: 0 53 | grad_clip: 5 54 | train_portion: 0.9 55 | data_size: 25000 56 | 57 | cutout: False 58 | cutout_length: 16 59 | cutout_prob: 1.0 60 | drop_path_prob: 0.0 61 | 62 | unrolled: False 63 | arch_learning_rate: 0.0003 64 | arch_weight_decay: 0.001 65 | output_weights: True 66 | 67 | 68 | evaluation: 69 | checkpoint_freq: 5000 70 | batch_size: 96 71 | learning_rate: 0.025 72 | learning_rate_min: 0.00 73 | momentum: 0.9 74 | weight_decay: 0.0003 75 | epochs: 600 76 | warm_start_epochs: 0 77 | grad_clip: 5 78 | train_portion: 1. 79 | data_size: 50000 80 | 81 | cutout: True 82 | cutout_length: 16 83 | cutout_prob: 1.0 84 | drop_path_prob: 0.2 85 | auxiliary_weight: 0.4 86 | 87 | -------------------------------------------------------------------------------- /naslib/runners/predictors/predictor_config.yaml: -------------------------------------------------------------------------------- 1 | # The experiment type can be single, vary_train_size, vary_fidelity, or vary_both 2 | # single will use train_size_single and fidelity_single 3 | # vary_train_size will use train_size_list and fidelity_single 4 | # vary_fidelity will use train_size_single and fidelity_list 5 | # vary_both will use train_size_list and fidelity_list 6 | experiment_type: single 7 | 8 | # nasbench101, nasbench201, darts, nlp, transbench101_micro 9 | search_space: nasbench201 10 | 11 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 12 | # transbench101 datasets: class_scene, class_object, 13 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 14 | dataset: cifar10 15 | 16 | # one of the 31 predictors in benchmarks/predictors/runner.py 17 | predictor: synflow 18 | 19 | # 0: mutation-based, or 1: uniformly random, train/test sets 20 | uniform_random: 1 21 | 22 | # test set size 23 | test_size: 2 24 | 25 | # size of the training set (used by model-based predictors) 26 | train_size_single: 3 27 | train_size_list: [5, 8, 14, 24, 42, 71, 121, 205] 28 | 29 | # num. epochs to train the test set arches (used by learning curve methods) 30 | fidelity_single: 5 31 | fidelity_list: [1, 2, 3, 5, 7, 9, 13, 19, 26, 37, 52, 73] 32 | 33 | # output results to this directory 34 | out_dir: run 35 | 36 | # maximum number of seconds to run cross-validation (for model-based predictors) 37 | max_hpo_time: 0 38 | 39 | # load the hyperparams from the specified file.
40 | # otherwise, set to None or False 41 | hparams_from_file: predictor_hpo_configs/hpo_config_1.json 42 | 43 | # random seed 44 | seed: 1000 45 | 46 | # these are used by the zero-cost methods 47 | search: 48 | batch_size: 256 49 | data_size: 25000 50 | cutout: False 51 | cutout_length: 16 52 | cutout_prob: 1.0 53 | train_portion: 0.7 54 | -------------------------------------------------------------------------------- /naslib/runners/statistics/runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from naslib.defaults.statistics_evaluator import StatisticsEvaluator 4 | 5 | from naslib.search_spaces import ( 6 | NasBench101SearchSpace, 7 | NasBench201SearchSpace, 8 | NasBench301SearchSpace, 9 | NasBenchNLPSearchSpace, 10 | TransBench101SearchSpaceMicro, 11 | TransBench101SearchSpaceMacro 12 | ) 13 | from naslib import utils 14 | from naslib.utils import setup_logger, get_dataset_api 15 | 16 | 17 | config = utils.get_config_from_args(config_type="statistics") 18 | utils.set_seed(config.seed) 19 | logger = setup_logger(config.save + "/log.log") 20 | logger.setLevel(logging.INFO) 21 | utils.log_args(config) 22 | 23 | supported_search_spaces = { 24 | "nasbench101": NasBench101SearchSpace(), 25 | "nasbench201": NasBench201SearchSpace(), 26 | "nasbench301": NasBench301SearchSpace(), 27 | "nlp": NasBenchNLPSearchSpace(), 28 | "transbench101_micro": TransBench101SearchSpaceMicro(config.dataset), 29 | "transbench101_macro": TransBench101SearchSpaceMacro(), 30 | } 31 | 32 | """ 33 | If the API did not evaluate *all* architectures in the search space, 34 | set load_labeled=True 35 | """ 36 | load_labeled = True if config.search_space in ["nasbench301", "nlp"] else False 37 | dataset_api = get_dataset_api(config.search_space, config.dataset) 38 | 39 | # initialize the search space 40 | search_space = supported_search_spaces[config.search_space] 41 | 42 | # initialize the StatisticsEvaluator class 43 | statistics_evaluator = StatisticsEvaluator(config=config) 44 | statistics_evaluator.adapt_search_space( 45 | search_space, load_labeled=load_labeled, dataset_api=dataset_api 46 | ) 47 | 48 | # evaluate the statistics 49 | statistics_evaluator.evaluate() 50 | -------------------------------------------------------------------------------- /naslib/runners/statistics/statistics_config.yaml: -------------------------------------------------------------------------------- 1 | 2 | # nasbench101, nasbench201, darts, nlp, transbench101 3 | search_space: nasbench201 4 | 5 | # nasbench201 datasets: cifar10, cifar100, ImageNet16-120 6 | # transbench101 datasets: class_scene, class_object, 7 | # jigsaw, room_layout, segmentsemantic, normal, autoencoder 8 | dataset: cifar10 9 | 10 | # output results to this directory 11 | out_dir: run 12 | # random seed (only important for autocorrelation) 13 | seed: 1000 14 | 15 | # stats that can be computed by iterating through 16 | # all architectures in the search space 17 | run_acc_stats: 1 18 | max_set_size: 1000 19 | 20 | # compute the average nbhd size 21 | run_nbhd_size: 1 22 | max_nbhd_trials: 500 23 | 24 | # autocorrelation parameters 25 | run_autocorr: 1 26 | max_autocorr_trials: 10 27 | autocorr_size: 36 28 | walks: 500 29 | -------------------------------------------------------------------------------- /naslib/runners/zc/bbo/xgb_runner.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from naslib.evaluators.zc_ensemble_evaluator import 
ZCEnsembleEvaluator 3 | from naslib.predictors.ensemble import Ensemble 4 | from naslib.search_spaces import get_search_space 5 | from naslib.utils.get_dataset_api import get_dataset_api, get_zc_benchmark_api 6 | from naslib.utils.log import setup_logger 7 | from naslib import utils 8 | 9 | config = utils.get_config_from_args(config_type="zc") 10 | 11 | logger = setup_logger(config.save + "/log.log") 12 | logger.setLevel(logging.INFO) 13 | 14 | utils.log_args(config) 15 | 16 | search_space = get_search_space(config.search_space, config.dataset) 17 | # dataset_api = None #get_dataset_api(config.search_space, config.dataset) 18 | dataset_api = get_dataset_api(config.search_space, config.dataset) 19 | zc_api = get_zc_benchmark_api(config.search_space, config.dataset) 20 | search_space.instantiate_model = False 21 | search_space.sample_without_replacement = True 22 | search_space.labeled_archs = [eval(arch) for arch in zc_api.keys()] 23 | 24 | utils.set_seed(config.seed) 25 | 26 | evaluator = ZCEnsembleEvaluator( 27 | n_train=config.train_size, 28 | n_test=config.test_size, 29 | zc_names=config.zc_names, 30 | zc_api=zc_api 31 | ) 32 | 33 | evaluator.adapt_search_space(search_space, config.dataset, dataset_api, config) 34 | 35 | train_loader, _, _, _, _ = utils.get_train_val_loaders(config) 36 | 37 | ensemble = Ensemble(num_ensemble=1, 38 | ss_type=search_space.get_type(), 39 | predictor_type='xgb', 40 | zc=config.zc_ensemble, 41 | zc_only=config.zc_only, 42 | config=config) 43 | 44 | evaluator.evaluate(ensemble, train_loader) 45 | 46 | logger.info('Done.') 47 | -------------------------------------------------------------------------------- /naslib/runners/zc/runner.py: -------------------------------------------------------------------------------- 1 | """ Evaluates a ZeroCost predictor for a search space and dataset/task""" 2 | import logging 3 | import json 4 | from fvcore.common.config import CfgNode 5 | from naslib.evaluators.zc_evaluator import ZeroCostPredictorEvaluator 6 | from naslib.predictors import ZeroCost 7 | from naslib.search_spaces import get_search_space 8 | from naslib import utils 9 | from naslib.utils import setup_logger, get_dataset_api, get_zc_benchmark_api 10 | 11 | # Get the configs from naslib/configs/predictor_config.yaml and the command line arguments 12 | # The configs include the zero-cost method to use, the search space and dataset/task to use, 13 | # amongst others. 
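# A typical invocation mirrors the gsparsity shell scripts earlier in this
# repo, e.g. (illustrative): python runner.py --config-file zc_config.yaml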
14 | 15 | config = utils.get_config_from_args(config_type="zc") 16 | 17 | utils.set_seed(config.seed) 18 | logger = setup_logger(config.save + "/log.log") 19 | logger.setLevel(logging.INFO) 20 | utils.log_args(config) 21 | 22 | # Get the benchmark API for this search space and dataset 23 | # dataset_api = None 24 | dataset_api = get_dataset_api(config.search_space, config.dataset) 25 | zc_api = get_zc_benchmark_api(config.search_space, config.dataset) 26 | 27 | # Initialize the search space and predictor 28 | # Method type can be "fisher", "grasp", "grad_norm", "jacov", "snip", "synflow", "flops", "params", "nwot", "zen", "plain", "l2_norm" or "epe_nas" 29 | predictor = ZeroCost(method_type=config.predictor) 30 | search_space = get_search_space(name=config.search_space, dataset=config.dataset) 31 | 32 | search_space.labeled_archs = [eval(arch) for arch in zc_api.keys()] 33 | 34 | # Initialize the ZeroCostPredictorEvaluator class 35 | predictor_evaluator = ZeroCostPredictorEvaluator(predictor, config=config, zc_api=zc_api, use_zc_api=True) 36 | predictor_evaluator.adapt_search_space(search_space, dataset_api=dataset_api, load_labeled=True) 37 | 38 | # Evaluate the predictor 39 | predictor_evaluator.evaluate(zc_api) 40 | 41 | logger.info('Correlation experiment complete.') 42 | -------------------------------------------------------------------------------- /naslib/runners/zc/zc_config.yaml: -------------------------------------------------------------------------------- 1 | batch_size: 32 2 | config_type: zc_example 3 | cutout: false 4 | cutout_length: 16 5 | cutout_prob: 1.0 6 | dataset: cifar10 7 | out_dir: run 8 | predictor: fisher 9 | search_space: nasbench201 #nasbench101 #nasbench301 10 | test_size: 200 11 | train_size: 400 12 | optimizer: npenas 13 | train_portion: 0.7 14 | seed: 0 15 | 16 | search: 17 | # for bohb 18 | seed: 0 19 | budgets: 50000000 20 | checkpoint_freq: 1000 21 | fidelity: 108 22 | 23 | # for all optimizers 24 | epochs: 10 25 | 26 | # for bananas and npenas, choose one predictor 27 | # out of the 16 model-based predictors 28 | predictor_type: var_sparse_gp 29 | 30 | # number of initial architectures 31 | num_init: 10 32 | 33 | # NPENAS 34 | k: 10 35 | num_ensemble: 3 36 | acq_fn_type: its 37 | acq_fn_optimization: mutation 38 | encoding_type: adjacency_one_hot 39 | num_arches_to_mutate: 1 40 | max_mutations: 1 41 | num_candidates: 50 42 | 43 | # jacov data loader 44 | batch_size: 256 45 | data_size: 25000 46 | cutout: False 47 | cutout_length: 16 48 | cutout_prob: 1.0 49 | train_portion: 0.7 50 | 51 | # other params 52 | debug_predictor: False 53 | sample_size: 10 54 | population_size: 30 55 | 56 | # zc parameters 57 | use_zc_api: False 58 | zc_ensemble: true 59 | zc_names: 60 | - params 61 | - flops 62 | - jacov 63 | - plain 64 | - grasp 65 | - snip 66 | - fisher 67 | - grad_norm 68 | - epe_nas 69 | - synflow 70 | - l2_norm 71 | zc_only: true -------------------------------------------------------------------------------- /naslib/search_spaces/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .graph import Graph 2 | from .query_metrics import Metric -------------------------------------------------------------------------------- /naslib/search_spaces/core/query_metrics.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, auto 2 | 3 | 4 | class Metric(Enum): 5 | RAW = auto() 6 | ALL = auto() 7 | 8 | TRAIN_ACCURACY = auto() 9 | VAL_ACCURACY = auto() 
10 | TEST_ACCURACY = auto() 11 | 12 | TRAIN_LOSS = auto() 13 | VAL_LOSS = auto() 14 | TEST_LOSS = auto() 15 | 16 | TRAIN_TIME = auto() 17 | VAL_TIME = auto() 18 | TEST_TIME = auto() 19 | 20 | FLOPS = auto() 21 | LATENCY = auto() 22 | PARAMETERS = auto() 23 | EPOCH = auto() 24 | HP = auto() 25 | -------------------------------------------------------------------------------- /naslib/search_spaces/hierarchical/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/hierarchical/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/hierarchical/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive 4 | 5 | 6 | class ConvBNReLU(AbstractPrimitive): 7 | def __init__(self, C_in, C_out, kernel_size, stride=1, affine=False): 8 | super().__init__(locals()) 9 | pad = 0 if stride == 1 and kernel_size == 1 else 1 10 | self.op = nn.Sequential( 11 | nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=pad, bias=False), 12 | nn.BatchNorm2d(C_out, affine=affine), 13 | nn.ReLU(inplace=False), 14 | ) 15 | 16 | def forward(self, x, edge_data): 17 | return self.op(x) 18 | 19 | def get_embedded_ops(self): 20 | return None 21 | 22 | 23 | class DepthwiseConv(AbstractPrimitive): 24 | """ 25 | Depthwise convolution 26 | """ 27 | 28 | def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): 29 | super().__init__(locals()) 30 | self.op = nn.Sequential( 31 | nn.Conv2d( 32 | C_in, 33 | C_in, 34 | kernel_size=kernel_size, 35 | stride=stride, 36 | padding=padding, 37 | groups=C_in, 38 | bias=False, 39 | ), 40 | nn.BatchNorm2d(C_in, affine=affine), 41 | nn.ReLU(inplace=False), 42 | ) 43 | 44 | def forward(self, x, edge_data): 45 | return self.op(x) 46 | 47 | def get_embedded_ops(self): 48 | return None 49 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench101/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/nasbench101/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench101/primitives.py: -------------------------------------------------------------------------------- 1 | from naslib.search_spaces.core.primitives import AbstractPrimitive 2 | 3 | class ModelWrapper(AbstractPrimitive): 4 | def __init__(self, model): 5 | super().__init__(locals()) 6 | self.model = model 7 | 8 | def get_embedded_ops(self): 9 | return None 10 | 11 | def forward(self, x, edge_data): 12 | return self.model(x) 13 | 14 | forward_beforeGP = forward 15 | 16 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench201/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/nasbench201/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench201/primitives.py: -------------------------------------------------------------------------------- 1 | import 
torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive, ReLUConvBN 4 | 5 | 6 | """ 7 | Code below is from NAS-Bench-201 and slightly adapted 8 | @inproceedings{dong2020nasbench201, 9 | title = {NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search}, 10 | author = {Dong, Xuanyi and Yang, Yi}, 11 | booktitle = {International Conference on Learning Representations (ICLR)}, 12 | url = {https://openreview.net/forum?id=HJxyZkBKDr}, 13 | year = {2020} 14 | } 15 | """ 16 | 17 | 18 | class ResNetBasicblock(AbstractPrimitive): 19 | def __init__(self, C_in, C_out, stride, affine=True): 20 | super().__init__(locals()) 21 | assert stride == 1 or stride == 2, "invalid stride {:}".format(stride) 22 | self.conv_a = ReLUConvBN(C_in, C_out, 3, stride) 23 | self.conv_b = ReLUConvBN(C_out, C_out, 3) 24 | if stride == 2: 25 | self.downsample = nn.Sequential( 26 | nn.AvgPool2d(kernel_size=2, stride=2, padding=0), 27 | nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False), 28 | ) 29 | else: 30 | self.downsample = None 31 | 32 | def forward(self, x, edge_data): 33 | basicblock = self.conv_a(x, None) 34 | basicblock = self.conv_b(basicblock, None) 35 | residual = self.downsample(x) if self.downsample is not None else x 36 | return residual + basicblock 37 | 38 | def get_embedded_ops(self): 39 | return None 40 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench301/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/nasbench301/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/nasbench301/primitives.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from ..core.primitives import AbstractPrimitive 5 | 6 | 7 | class FactorizedReduce(AbstractPrimitive): 8 | """ 9 | Factorized reduce as used in ResNet to add some sort 10 | of Identity connection even though the resolution does not 11 | match.
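Concretely, two stride-2 1x1 convolutions, one applied to the input and one to the input shifted by a pixel, each produce C_out // 2 channels that are concatenated and batch-normalized.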
12 | 13 | If the resolution matches it resolves to identity 14 | """ 15 | 16 | def __init__(self, C_in, C_out, stride=1, affine=True, **kwargs): 17 | super().__init__(locals()) 18 | 19 | if stride == 1 and C_in == C_out: 20 | self.is_identity = True 21 | else: 22 | self.is_identity = False 23 | assert C_out % 2 == 0 24 | self.relu = nn.ReLU(inplace=False) 25 | self.conv_1 = nn.Conv2d( 26 | C_in, C_out // 2, 1, stride=2, padding=0, bias=False 27 | ) 28 | self.conv_2 = nn.Conv2d( 29 | C_in, C_out // 2, 1, stride=2, padding=0, bias=False 30 | ) 31 | self.bn = nn.BatchNorm2d(C_out, affine=affine) 32 | 33 | def forward(self, x, edge_data): 34 | if self.is_identity: 35 | return x 36 | else: 37 | x = self.relu(x) 38 | out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1) 39 | out = self.bn(out) 40 | return out 41 | 42 | def get_embedded_ops(self): 43 | return None 44 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchasr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/nasbenchasr/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchasr/conversions.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Sequence 2 | 3 | 4 | # utils to work with nested collections 5 | def recursive_iter(seq): 6 | """ Iterate over elements in seq recursively (returns only non-sequences) 7 | """ 8 | if isinstance(seq, Sequence): 9 | for e in seq: 10 | for v in recursive_iter(e): 11 | yield v 12 | else: 13 | yield seq 14 | 15 | 16 | def flatten(seq): 17 | """ Flatten all nested sequences, returned type is type of ``seq`` 18 | """ 19 | return list(recursive_iter(seq)) 20 | 21 | 22 | def copy_structure(data, shape): 23 | """ Put data from ``data`` into nested containers like in ``shape``. 24 | This can be seen as "unflatten" operation, i.e.: 25 | seq == copy_structure(flatten(seq), seq) 26 | """ 27 | d_it = recursive_iter(data) 28 | 29 | def copy_level(s): 30 | if isinstance(s, Sequence): 31 | return type(s)(copy_level(ss) for ss in s) 32 | else: 33 | return next(d_it) 34 | return copy_level(shape) 35 | 36 | 37 | def make_compact_immutable(compact): 38 | return tuple([tuple(c) for c in compact]) 39 | 40 | 41 | def make_compact_mutable(compact): 42 | return [list(c) for c in compact] -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchasr/encodings.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import logging 3 | 4 | from naslib.search_spaces.nasbenchasr.conversions import flatten 5 | from naslib.utils.encodings import EncodingType 6 | 7 | """ 8 | These are the encoding methods for nas-bench-asr. 9 | The plan is to unify encodings across all search spaces. 10 | Note: this has not been thoroughly tested yet. 
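For example, with the six candidate operations defined below, encode_adjacency_one_hot
maps each entry of the flattened compact representation to a six-way one-hot vector and
concatenates the results into a single flat encoding.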
11 | """ 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | one_hot_ops = [ 17 | [1, 0, 0, 0, 0, 0], 18 | [0, 1, 0, 0, 0, 0], 19 | [0, 0, 1, 0, 0, 0], 20 | [0, 0, 0, 1, 0, 0], 21 | [0, 0, 0, 0, 1, 0], 22 | [0, 0, 0, 0, 0, 1], 23 | ] 24 | 25 | 26 | def encode_compact(compact): 27 | return flatten(compact) 28 | 29 | 30 | def encode_adjacency_one_hot(compact): 31 | one_hot = [] 32 | for e in flatten(compact): 33 | one_hot = [*one_hot, *one_hot_ops[e]] 34 | return one_hot 35 | 36 | 37 | def encode_seminas_nasbenchasr(compact): 38 | # note: the adjacency matrix is fixed for ASR, 39 | # so the identity matrix can be passed in 40 | dic = { 41 | "num_vertices": 9, 42 | "adjacency": np.identity(9, dtype=np.float32), 43 | "operations": flatten(compact), 44 | "mask": np.array([i < 9 for i in range(9)], dtype=np.float32), 45 | "val_acc": 0.0, 46 | } 47 | return dic 48 | 49 | 50 | def encode_asr(arch, encoding_type=EncodingType.ADJACENCY_ONE_HOT): 51 | 52 | compact = arch.get_compact() 53 | 54 | if encoding_type == EncodingType.ADJACENCY_ONE_HOT: 55 | return encode_adjacency_one_hot(compact) 56 | 57 | elif encoding_type == EncodingType.COMPACT: 58 | return encode_compact(compact) 59 | 60 | elif encoding_type == EncodingType.SEMINAS: 61 | return encode_seminas_nasbenchasr(compact) 62 | 63 | else: 64 | logger.info(f"{encoding_type} is not yet implemented as an encoding type for asr") 65 | raise NotImplementedError() 66 | -------------------------------------------------------------------------------- /naslib/search_spaces/nasbenchnlp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/nasbenchnlp/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/natsbenchsize/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/natsbenchsize/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/simple_cell/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/simple_cell/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/transbench101/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import functional as F 3 | from torch.nn.modules.loss import _WeightedLoss 4 | 5 | class SoftmaxCrossEntropyWithLogits(_WeightedLoss): 6 | def __init__(self, weight=None): 7 | super(SoftmaxCrossEntropyWithLogits, self).__init__(weight=None) 8 | self.weight = weight 9 | 10 | def forward(self, input, target): 11 | logits_scaled = torch.log(F.softmax(input, dim=-1) + 0.00001) 12 | 13 | if self.weight is not None: 14 | loss = -((target * logits_scaled) * self.weight).sum(dim=-1) 15 | else: 
16 | loss = -(target * logits_scaled).sum(dim=-1) 17 | return loss.mean() 18 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/primitives.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from ..core.primitives import AbstractPrimitive, ReLUConvBN 4 | 5 | 6 | """ 7 | Code below is from NAS-Bench-201 and slightly adapted 8 | @inproceedings{dong2020nasbench201, 9 | title = {NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search}, 10 | author = {Dong, Xuanyi and Yang, Yi}, 11 | booktitle = {International Conference on Learning Representations (ICLR)}, 12 | url = {https://openreview.net/forum?id=HJxyZkBKDr}, 13 | year = {2020} 14 | } 15 | """ 16 | 17 | 18 | class ResNetBasicblock(AbstractPrimitive): 19 | 20 | def __init__(self, C_in, C_out, stride, affine=True): 21 | super().__init__(locals()) 22 | assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride) 23 | self.conv_a = ReLUConvBN(C_in, C_out, 3, stride) 24 | self.conv_b = ReLUConvBN(C_out, C_out, 3) 25 | if stride == 2: 26 | self.downsample = nn.Sequential( 27 | nn.AvgPool2d(kernel_size=2, stride=2, padding=0), 28 | nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False)) 29 | else: 30 | self.downsample = None 31 | 32 | 33 | def forward(self, x, edge_data): 34 | basicblock = self.conv_a(x, None) 35 | basicblock = self.conv_b(basicblock, None) 36 | residual = self.downsample(x) if self.downsample is not None else x 37 | return residual + basicblock 38 | 39 | 40 | def get_embedded_ops(self): 41 | return None 42 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/README.MD: -------------------------------------------------------------------------------- 1 | ## Acknowledgments 2 | 3 | We would like to thank the authors of the paper **'TransNAS-Bench-101: Improving Transferability and Generalizability of Cross-Task Neural Architecture Search'** 4 | for the [TransNAS-Bench-101](https://github.com/yawen-d/TransNASBench) code. 5 | 6 | 7 | - Duan, Y., Chen, X., Xu, H., Chen, Z., Liang, X., Zhang, T., and Li, Z. (2021) '**Transnas-bench- 8 | 101: Improving transferability and generalizability of cross-task neural architecture search**' 9 | _In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition_, pages 10 | 5251–5260.
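As an aside on the SoftmaxCrossEntropyWithLogits module in loss.py above: unlike torch.nn.CrossEntropyLoss, it expects distribution-valued (soft) targets rather than class indices. A minimal sketch of how it might be exercised; the tensor shapes are illustrative assumptions, not values fixed by the benchmark:

import torch

from naslib.search_spaces.transbench101.loss import SoftmaxCrossEntropyWithLogits

criterion = SoftmaxCrossEntropyWithLogits()
logits = torch.randn(4, 10)                          # raw model outputs for a batch of 4
targets = torch.softmax(torch.randn(4, 10), dim=-1)  # soft targets: each row sums to 1
loss = criterion(logits, targets)                    # scalar: mean over the batch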
11 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/transbench101/tnb101/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/transbench101/tnb101/models/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/discriminator.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .net_ops.cell_ops import ConvLayer 4 | 5 | 6 | class Discriminator(nn.Module): 7 | def __init__(self, norm="spectral"): 8 | """ 9 | Discriminator component for Pix2Pix tasks 10 | :param norm: ["batch": BN, "spectral": spectral norm for GAN] 11 | """ 12 | super(Discriminator, self).__init__() 13 | if norm == "batch": 14 | norm = nn.BatchNorm2d 15 | elif norm == "spectral": 16 | norm = nn.utils.spectral_norm 17 | else: 18 | raise ValueError(f"{norm} is invalid!") 19 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 20 | 21 | # input: [batch x 6 x 256 x 256] 22 | self.conv1 = ConvLayer(6, 64, 5, 4, 2, nn.LeakyReLU(0.2), norm) 23 | self.conv2 = ConvLayer(64, 128, 5, 4, 2, nn.LeakyReLU(0.2), norm) 24 | self.conv3 = ConvLayer(128, 256, 5, 4, 2, nn.LeakyReLU(0.2), norm) 25 | self.conv4 = ConvLayer(256, 256, 3, 1, 1, nn.LeakyReLU(0.2), norm) 26 | self.conv5 = ConvLayer(256, 512, 3, 1, 1, nn.LeakyReLU(0.2), norm) 27 | self.conv6 = ConvLayer(512, 512, 3, 1, 1, nn.LeakyReLU(0.2), norm) 28 | self.conv7 = ConvLayer(512, 1, 3, 1, 1, None, None) 29 | 30 | def forward(self, x): 31 | x = self.conv1(x) 32 | x = self.conv2(x) 33 | x = self.conv3(x) 34 | x = self.conv4(x) 35 | x = self.conv5(x) 36 | x = self.conv6(x) 37 | x = self.conv7(x) 38 | x = self.avgpool(x) 39 | return x.flatten() 40 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/encoder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torchvision.models as models 3 | 4 | from .net_infer.net_macro import MacroNet 5 | 6 | 7 | class FFEncoder(nn.Module): 8 | """Encoder class for the definition of backbone including resnet50 and MacroNet()""" 9 | def __init__(self, encoder_str, task_name=None): 10 | super(FFEncoder, self).__init__() 11 | self.encoder_str = encoder_str 12 | 13 | # Initialize network 14 | if self.encoder_str == 'resnet50': 15 | self.network = models.resnet50() # resnet50: Bottleneck, [3,4,6,3] 16 | # Adjust according to task 17 | if task_name in ['autoencoder', 'normal', 'inpainting', 'segmentsemantic']: 18 | self.network.inplanes = 1024 19 | self.network.layer4 = self.network._make_layer( 20 | models.resnet.Bottleneck, 512, 3, stride=1, dilate=False) 21 | self.network = nn.Sequential( 22 | *list(self.network.children())[:-2], 23 | ) 24 | else: 25 | self.network = nn.Sequential(*list(self.network.children())[:-2]) 26 | # elif self.encoder_str == '64-41414-super_0123': 27 | # self.network = 
SuperNetDartsV1(encoder_str, structure='backbone') 28 | else: 29 | self.network = MacroNet(encoder_str, structure='backbone') 30 | 31 | def forward(self, x): 32 | x = self.network(x) 33 | return x 34 | 35 | 36 | if __name__ == "__main__": 37 | from torchsummary import summary  # summary() is provided by the external torchsummary package 38 | # net = FFEncoder("64-41414-3_33_333", 'segmentsemantic').cuda() 39 | net = FFEncoder("resnet50", 'autoencoder').cuda() 40 | # x = torch.randn([2, 3, 256, 256]) 41 | # print(net(x).shape) 42 | # print(net) 43 | summary(net, (3, 256, 256)) 44 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/feedforward.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class FeedForwardNet(nn.Module): 5 | """FeedForwardNet class used by classification and regression tasks""" 6 | 7 | def __init__(self, encoder, decoder): 8 | super(FeedForwardNet, self).__init__() 9 | self.encoder = encoder 10 | self.decoder = decoder 11 | 12 | def forward(self, x): 13 | return self.decoder(self.encoder(x)) -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/gan.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class GAN(nn.Module): 5 | """GAN model used for Pix2Pix tasks 6 | Adapted from https://github.com/phillipi/pix2pix 7 | """ 8 | def __init__(self, encoder, decoder, discriminator): 9 | super(GAN, self).__init__() 10 | self.encoder = encoder 11 | self.decoder = decoder 12 | self.discriminator = discriminator 13 | 14 | def forward(self, x): 15 | return self.decoder(self.encoder(x)) 16 | 17 | @staticmethod 18 | def set_requires_grad(nets, requires_grad=False): 19 | """Set requires_grad=False for all the networks to avoid unnecessary computations 20 | Parameters: 21 | nets (network list) -- a list of networks 22 | requires_grad (bool) -- whether the networks require gradients or not 23 | """ 24 | if not isinstance(nets, list): 25 | nets = [nets] 26 | for net in nets: 27 | if net is not None: 28 | for param in net.parameters(): 29 | param.requires_grad = requires_grad 30 | 31 | @staticmethod 32 | def denormalize(imgs, mean, std): 33 | for i, (m, s) in enumerate(zip(mean, std)): 34 | imgs[:, i, :, :] = imgs[:, i, :, :] * s + m 35 | return imgs 36 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/net_infer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/transbench101/tnb101/models/net_infer/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/net_ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/naslib/search_spaces/transbench101/tnb101/models/net_ops/__init__.py -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/segmentation.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | import torch.distributed as dist 3 | from torch.nn.parallel import DistributedDataParallel as DDP 4 | 5 | 6 | class
Segmentation(nn.Module): 7 | """Segmentation used by the segmentsemantic task""" 8 | def __init__(self, encoder, decoder): 9 | super(Segmentation, self).__init__() 10 | self.encoder = encoder 11 | self.decoder = decoder 12 | 13 | def forward(self, x): 14 | return self.decoder(self.encoder(x)) 15 | 16 | def to_device(self, device_list, rank=None, ddp=False): 17 | self.device_list = device_list 18 | if len(self.device_list) > 1: 19 | if ddp: 20 | self.encoder = nn.SyncBatchNorm.convert_sync_batchnorm(self.encoder) 21 | self.decoder = nn.SyncBatchNorm.convert_sync_batchnorm(self.decoder) 22 | self.encoder = DDP(self.encoder.to(rank), device_ids=[rank], find_unused_parameters=True) 23 | self.decoder = DDP(self.decoder.to(rank), device_ids=[rank], find_unused_parameters=True) 24 | self.rank = rank 25 | else: 26 | self.encoder = nn.DataParallel(self.encoder).to(self.device_list[0]) 27 | self.decoder = nn.DataParallel(self.decoder).to(self.device_list[0]) 28 | self.rank = rank 29 | else: 30 | self.rank = self.device_list[0] 31 | self.to(self.rank) 32 | -------------------------------------------------------------------------------- /naslib/search_spaces/transbench101/tnb101/models/siamese.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class SiameseNet(nn.Module): 6 | """SiameseNet used in the Jigsaw task""" 7 | def __init__(self, encoder, decoder): 8 | super(SiameseNet, self).__init__() 9 | self.encoder = encoder 10 | self.decoder = decoder 11 | 12 | def forward(self, x): 13 | if len(x.shape) == 4: 14 | assert x.shape == (1, 3, 720, 1080) 15 | x = image2tiles4testing(x) 16 | imgtile_num = x.shape[1] 17 | encoder_output = [] 18 | for index in range(imgtile_num): 19 | input_i = x[:, index, :, :, :] 20 | ith_encoder_output = self.encoder(input_i) 21 | encoder_output.append(ith_encoder_output) 22 | concat_output = torch.cat(encoder_output, axis=1) 23 | final_output = self.decoder(concat_output) 24 | return final_output 25 | 26 | 27 | def image2tiles4testing(img, num_pieces=9): 28 | """ 29 | Generate the 9 pieces input for Jigsaw task.
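Note: the final view call reinterprets the flat memory layout rather than cropping
a 3x3 spatial grid, so it assumes the nine tiles were already arranged contiguously
when the test tensor was assembled.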
30 | 31 | Parameters: 32 | ----------- 33 | img (tensor): Image to be cropped (1, 3, 720, 1080) 34 | 35 | Return: 36 | ----------- 37 | img_tiles: tensor (1, 9, 3, 240, 360) 38 | """ 39 | 40 | if num_pieces != 9: 41 | raise ValueError(f'Target permutation of Jigsaw is supposed to have length 9, getting {num_pieces} here') 42 | 43 | Ba, Ch, He, Wi = img.shape # (1, 3, 720, 1080) 44 | 45 | unitH = int(He / 3) # 240 46 | unitW = int(Wi / 3) # 360 47 | 48 | return img.view(Ba, 9, Ch, unitH, unitW) 49 | -------------------------------------------------------------------------------- /naslib/utils/encodings.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class EncodingType(Enum): 5 | ADJACENCY_ONE_HOT = "adjacency_one_hot" 6 | ADJACENCY_MIX = 'adjacency_mix' 7 | PATH = "path" 8 | GCN = "gcn" 9 | BONAS = "bonas" 10 | SEMINAS = "seminas" 11 | COMPACT = 'compact' 12 | -------------------------------------------------------------------------------- /naslib/utils/ninapro_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import torch.utils.data 4 | import torchvision.transforms 5 | 6 | 7 | # adapted from https://github.com/rtu715/NAS-Bench-360/blob/0d1af0ce37b5f656d6491beee724488c3fccf871/perceiver-io/perceiver/data/nb360/ninapro.py#L64 8 | class NinaPro(torch.utils.data.Dataset): 9 | def __init__(self, root, split="train", transform=None): 10 | self.root = root 11 | self.split = split 12 | self.transform = transform 13 | self.x = np.load(os.path.join(root, f"ninapro_{split}.npy")).astype(np.float32) 14 | self.x = self.x[:, np.newaxis, :, :].transpose(0, 2, 3, 1) 15 | self.y = np.load(os.path.join(root, f"label_{split}.npy")).astype(int) 16 | 17 | def __len__(self): 18 | return len(self.y) 19 | 20 | def __getitem__(self, idx): 21 | if torch.is_tensor(idx): 22 | idx = idx.tolist() 23 | 24 | x = self.x[idx, :] 25 | y = self.y[idx] 26 | 27 | if self.transform: 28 | x = self.transform(x) 29 | return x, y 30 | 31 | 32 | def ninapro_transform(args, channels_last: bool = True): 33 | transform_list = [] 34 | 35 | def channels_to_last(img: torch.Tensor): 36 | return img.permute(1, 2, 0).contiguous() 37 | 38 | transform_list.append(torchvision.transforms.ToTensor()) 39 | 40 | if channels_last: 41 | transform_list.append(channels_to_last) 42 | 43 | return torchvision.transforms.Compose(transform_list), torchvision.transforms.Compose(transform_list) 44 | 45 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ConfigSpace>=0.4.17 2 | pyyaml==5.4.1 3 | networkx==2.6.3 4 | numpy==1.21.5; python_version <= '3.7' 5 | numpy>=1.22.0; python_version > '3.7' 6 | torch>=1.9.0 7 | torchvision>=0.10.0 8 | fvcore==0.1.5.post20210630 9 | pytest==6.2.4 10 | lightgbm==3.2.1 11 | ngboost==0.3.11 12 | xgboost==1.4.2 13 | emcee==3.1.0 14 | pybnn==0.0.5 15 | grakel==0.1.8 16 | pyro-ppl==1.6.0 17 | scipy==1.4.1 18 | 19 | # additional from setup.py prev 20 | tqdm==4.61.1 21 | scikit-learn==1.0.2 22 | scikit-image==0.19.2 23 | pytorch-msssim==0.2.1 24 | tensorwatch==0.9.1 25 | 26 | # from zerocost 27 | transforms3d 28 | gdown 29 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_asr.sh: -------------------------------------------------------------------------------- 1 | #### NOTE: this
script has to be run while being in the parent 'scripts' dir 2 | 3 | export OMP_NUM_THREADS=2 4 | # optimizers=(rs) 5 | optimizers=(rs re ls npenas bananas) 6 | 7 | start_seed=$1 8 | if [ -z "$start_seed" ] 9 | then 10 | start_seed=0 11 | fi 12 | 13 | if [[ $optimizers == bananas* ]] 14 | then 15 | acq_fn_optimization=mutation 16 | else 17 | acq_fn_optimization=random_sampling 18 | fi 19 | 20 | # folders: 21 | out_dir=run_cpu 22 | 23 | # bbo-bs or predictor-bs 24 | config_type=bbo-bs 25 | 26 | # search space / data: 27 | search_space=asr 28 | 29 | dataset=(TIMIT) 30 | 31 | # epoch number to get the values 32 | fidelity=-1 33 | epochs=200 34 | predictor=var_sparse_gp 35 | 36 | # trials / seeds: 37 | trials=10 38 | end_seed=$(($start_seed + $trials - 1)) 39 | 40 | # create config files 41 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 42 | do 43 | dataset=${dataset[$i]} 44 | echo $dataset 45 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 46 | do 47 | optimizer=${optimizers[$i]} 48 | python create_configs.py \ 49 | --start_seed $start_seed --trials $trials \ 50 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 51 | --search_space $search_space --optimizer $optimizer \ 52 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 53 | --fidelity $fidelity --epochs $epochs 54 | done 55 | done 56 | 57 | 58 | echo 'configs are ready, check config folder ...' -------------------------------------------------------------------------------- /scripts/bbo/make_configs_darts.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=darts 27 | 28 | dataset=(cifar10) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | 36 | 37 | # trials / seeds: 38 | trials=10 39 | end_seed=$(($start_seed + $trials - 1)) 40 | 41 | # create config files 42 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 43 | do 44 | dataset=${dataset[$i]} 45 | echo $dataset 46 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 47 | do 48 | optimizer=${optimizers[$i]} 49 | python create_configs.py \ 50 | --start_seed $start_seed --trials $trials \ 51 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 52 | --search_space $search_space --optimizer $optimizer \ 53 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 54 | --fidelity $fidelity --epochs $epochs 55 | done 56 | done 57 | 58 | 59 | echo 'configs are ready, check config folder ...'
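# A hypothetical end-to-end invocation (run from the parent 'scripts' dir, as the
# ASR variant above notes):
#   bash bbo/make_configs_darts.sh 0
# This writes one YAML per (optimizer, seed) pair under $out_dir; the scheduler and
# submit scripts later in this directory then walk those config folders.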
60 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_mr.sh: -------------------------------------------------------------------------------- 1 | 2 | export OMP_NUM_THREADS=2 3 | # optimizers=(rs) 4 | optimizers=(rs re ls npenas bananas) 5 | 6 | start_seed=$1 7 | if [ -z "$start_seed" ] 8 | then 9 | start_seed=0 10 | fi 11 | 12 | if [[ $optimizers == bananas* ]] 13 | then 14 | acq_fn_optimization=mutation 15 | else 16 | acq_fn_optimization=random_sampling 17 | fi 18 | 19 | # folders: 20 | base_file=naslib 21 | out_dir=run_cpu 22 | 23 | # bbo-bs or predictor-bs 24 | config_type=bbo-bs 25 | 26 | # search space / data: 27 | search_space=mr 28 | 29 | dataset=(seg video 3ddet cls) 30 | 31 | # epoch number to get the values 32 | fidelity=-1 33 | epochs=200 34 | predictor=var_sparse_gp 35 | 36 | # trials / seeds: 37 | trials=10 38 | end_seed=$(($start_seed + $trials - 1)) 39 | 40 | # create config files 41 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 42 | do 43 | dataset=${dataset[$i]} 44 | echo $dataset 45 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 46 | do 47 | optimizer=${optimizers[$i]} 48 | python create_configs.py \ 49 | --start_seed $start_seed --trials $trials \ 50 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 51 | --search_space $search_space --optimizer $optimizer \ 52 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 53 | --fidelity $fidelity --epochs $epochs 54 | done 55 | done 56 | 57 | 58 | echo 'configs are ready, check config folder ...' -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nasbench101 27 | dataset=cifar10 28 | 29 | fidelity=-1 30 | epochs=200 31 | predictor=var_sparse_gp 32 | 33 | # trials / seeds: 34 | trials=10 35 | end_seed=$(($start_seed + $trials - 1)) 36 | 37 | # create config files 38 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 39 | do 40 | dataset=${dataset[$i]} 41 | echo $dataset 42 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 43 | do 44 | optimizer=${optimizers[$i]} 45 | python create_configs.py \ 46 | --start_seed $start_seed --trials $trials \ 47 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 48 | --search_space $search_space --optimizer $optimizer \ 49 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 50 | --fidelity $fidelity --epochs $epochs 51 | done 52 | done 53 | 54 | 55 | echo 'configs are ready, check config folder ...' 
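# Note: unlike the scripts above, dataset here is a plain string rather than an
# array, but bash treats a scalar as a one-element array, so the ${#dataset[@]}
# loop still runs exactly once.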
56 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nasbench201 27 | 28 | dataset=(cifar10 cifar100 ImageNet16-120) 29 | 30 | fidelity=-1 31 | epochs=200 32 | predictor=var_sparse_gp 33 | 34 | # trials / seeds: 35 | trials=10 36 | end_seed=$(($start_seed + $trials - 1)) 37 | 38 | # create config files 39 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 40 | do 41 | dataset=${dataset[$i]} 42 | echo $dataset 43 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 44 | do 45 | optimizer=${optimizers[$i]} 46 | python create_configs.py \ 47 | --start_seed $start_seed --trials $trials \ 48 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 49 | --search_space $search_space --optimizer $optimizer \ 50 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 51 | --fidelity $fidelity --epochs $epochs 52 | done 53 | done 54 | 55 | 56 | echo 'configs are ready, check config folder ...' 57 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_nlp.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=nlp 27 | 28 | dataset=(LM-task) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 
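# Caveat shared by all of these config scripts: [[ $optimizers == bananas* ]] at the
# top expands only the first array element (rs here), so acq_fn_optimization always
# resolves to random_sampling; set mutation explicitly when running bananas alone.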
58 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_transnb101_macro.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=transbench101_macro 27 | 28 | dataset=(class_scene class_object jigsaw room_layout segmentsemantic normal autoencoder) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 58 | -------------------------------------------------------------------------------- /scripts/bbo/make_configs_transnb101_micro.sh: -------------------------------------------------------------------------------- 1 | export OMP_NUM_THREADS=2 2 | # optimizers=(rs) 3 | optimizers=(rs re ls npenas bananas) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | if [[ $optimizers == bananas* ]] 12 | then 13 | acq_fn_optimization=mutation 14 | else 15 | acq_fn_optimization=random_sampling 16 | fi 17 | 18 | # folders: 19 | base_file=naslib 20 | out_dir=run_cpu 21 | 22 | # bbo-bs or predictor-bs 23 | config_type=bbo-bs 24 | 25 | # search space / data: 26 | search_space=transbench101_micro 27 | 28 | dataset=(class_scene class_object jigsaw room_layout segmentsemantic normal autoencoder) 29 | 30 | # epoch number to get the values 31 | fidelity=-1 32 | epochs=200 33 | predictor=var_sparse_gp 34 | 35 | # trials / seeds: 36 | trials=10 37 | end_seed=$(($start_seed + $trials - 1)) 38 | 39 | # create config files 40 | for i in $(seq 0 $((${#dataset[@]}-1)) ) 41 | do 42 | dataset=${dataset[$i]} 43 | echo $dataset 44 | for i in $(seq 0 $((${#optimizers[@]}-1)) ) 45 | do 46 | optimizer=${optimizers[$i]} 47 | python create_configs.py \ 48 | --start_seed $start_seed --trials $trials \ 49 | --out_dir $out_dir --dataset=$dataset --config_type $config_type \ 50 | --search_space $search_space --optimizer $optimizer \ 51 | --acq_fn_optimization $acq_fn_optimization --predictor $predictor \ 52 | --fidelity $fidelity --epochs $epochs 53 | done 54 | done 55 | 56 | 57 | echo 'configs are ready, check config folder ...' 
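# Once the configs exist, scheduler.sh (next file) takes the search space as its
# single argument and submits every per-dataset/optimizer config folder via sbatch,
# e.g. (hypothetical invocation): bash scripts/bbo/scheduler.sh transbench101_micro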
58 | -------------------------------------------------------------------------------- /scripts/bbo/scheduler.sh: -------------------------------------------------------------------------------- 1 | search_space=$1 2 | 3 | user="robertsj" 4 | dataset_dir="/home/$user/NASLib/naslib/configs/bbo/configs_cpu/$search_space/*" 5 | 6 | for optimizer_dir in $dataset_dir/* 7 | do 8 | for config_dir in $optimizer_dir/* 9 | do 10 | echo starting to run ${config_dir} across 10 seeds ... 11 | sbatch ./scripts/bbo/submit_folder.sh $config_dir # for srun node testing 12 | 13 | done 14 | done 15 | 16 | # for running default config files separately 17 | # for optimizer_dir in $dataset_dir/* 18 | # do 19 | # echo starting to run $optimizer_dir/config_0 across 10 seeds ... 20 | # sbatch submit_folder.sh $optimizer_dir/config_0 21 | 22 | # done 23 | -------------------------------------------------------------------------------- /scripts/bbo/scheduler_bosch.sh: -------------------------------------------------------------------------------- 1 | search_space=$1 2 | bosch_partition=$2 3 | dataset_dir="/home/mehtay/research/NASLib/naslib/configs/bbo/configs_cpu/$search_space/*" 4 | 5 | for optimizer_dir in $dataset_dir/* 6 | do 7 | # echo $config_dir 8 | for config_dir in $optimizer_dir/* 9 | do 10 | echo starting to run ${config_dir} across 10 seeds ... 11 | if [ $bosch_partition == 'gpu' ] 12 | then 13 | sbatch --bosch submit_boschgpu_folder.sh $config_dir 14 | fi 15 | 16 | if [ $bosch_partition == 'cpu' ] 17 | then 18 | sbatch --bosch submit_bosch_folder.sh $config_dir 19 | fi 20 | done 21 | done -------------------------------------------------------------------------------- /scripts/bbo/submit_bosch_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_cpu-cascadelake #bosch_gpu-rtx2080 #bosch_cpu-cascadelake # partition (queue) 3 | #SBATCH -t 0-07:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 7 | #SBATCH --mem=7G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | # python -u runner.py --config-file $1 16 | 17 | for config_file_seed in $1/* 18 | do 19 | echo submitted ${config_file_seed} 20 | python -u runner.py --config-file $config_file_seed 21 | done 22 | 23 | # echo $COMMAND; 24 | # eval $COMMAND; 25 | 26 | echo "DONE"; 27 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /scripts/bbo/submit_boschgpu_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080 #bosch_cpu-cascadelake # partition (queue) 3 | #SBATCH -t 0-07:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 
7 | #SBATCH --mem=10G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | # python -u runner.py --config-file $1 16 | 17 | for config_file_seed in $1/* 18 | do 19 | echo submitted ${config_file_seed} 20 | python -u runner.py --config-file $config_file_seed 21 | done 22 | 23 | # echo $COMMAND; 24 | # eval $COMMAND; 25 | 26 | echo "DONE"; 27 | echo "Finished at $(date)"; -------------------------------------------------------------------------------- /scripts/bbo/submit_folder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p testdlc_gpu-rtx2080 #alldlc_gpu-rtx2080 #bosch_gpu-rtx2080 #bosch_cpu-cascadelake #bosch_gpu-rtx2080 #mldlc_gpu-rtx2080 #alldlc_gpu-rtx2080 #gpu_tesla-P100 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -t 0-02:00 # time (D-HH:MM) 4 | #SBATCH -o slurmlog/%A.%N.out # STDOUT (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e slurmlog/%A.%N.err # STDERR (the folder log has to exist) %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -J bbo-exps # sets the job name. 7 | #SBATCH --mem=10G 8 | 9 | # Print some information about the job to STDOUT 10 | 11 | echo "Workingdir: $PWD"; 12 | echo "Started at $(date)"; 13 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 14 | 15 | user="robertsj" 16 | runner_dir="/home/$user/NASLib/naslib/runners/bbo" 17 | 18 | # source $CONDA_SOURCE 19 | # conda activate $CONDA_ENV 20 | 21 | # python -u $runner_dir/runner.py --config-file $1 22 | 23 | for config_file_seed in $1/* 24 | do 25 | echo submitted ${config_file_seed} 26 | python -u $runner_dir/runner.py --config-file $config_file_seed 27 | # python -u -m debugpy --listen 0.0.0.0:$PORT --wait-for-client $runner_dir/runner.py --config-file $config_file_seed 28 | done 29 | 30 | # echo $COMMAND; 31 | # eval $COMMAND; 32 | 33 | # conda deactivate 34 | echo "DONE"; 35 | echo "Finished at $(date)"; 36 | -------------------------------------------------------------------------------- /scripts/darts/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | conda activate mlenv 9 | python runner.py 10 | 11 | end=`date +%s` 12 | runtime=$((end-start)) 13 | 14 | echo Runtime: $runtime 15 | -------------------------------------------------------------------------------- /scripts/nas_predictors/oneshot_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_oneshot_eval/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_oneshot_eval/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D .
7 | #SBATCH -a 1 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python oneshot_runner.py --config-file nas_predictor_config.yaml \ 18 | --model-path "run_epochs_size/${3}_${4}/cifar10/nas_predictors/${1}/${2}/$SLURM_ARRAY_TASK_ID/search/model_final.pth" \ 19 | search_space $1 optimizer $2 search.predictor_type $2 \ 20 | seed $SLURM_ARRAY_TASK_ID search.seed $SLURM_ARRAY_TASK_ID \ 21 | search.train_portion 0.$3 search.epochs $4 \ 22 | out_dir run_epochs_size-eval/$3\_$4 23 | 24 | 25 | end=`date +%s` 26 | runtime=$((end-start)) 27 | 28 | echo Runtime: $runtime 29 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_darts_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo301_feb9_0 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=darts 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_darts_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=np301 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=darts 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 
0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done -------------------------------------------------------------------------------- /scripts/nas_predictors/run_im_bo_arber.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(ngb_hp omni nao seminas bananas feedforward gbdt gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo201_im_feb4_2 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=ImageNet16-120 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb101_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(bananas mlp lgb gcn xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=bo101 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench101 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | 
end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb101_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(bananas mlp lgb gcn xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=naslib 13 | s3_folder=np101 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench101 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_bo.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=bo201_feb22 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | 
dataset=ImageNet16-120 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | #for t in $(seq $start_seed $end_seed) 38 | #do 39 | #for predictor in ${predictors[@]} 40 | #do 41 | #config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | #echo ================running $predictor trial: $t ===================== 43 | #python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | #done 45 | #if [ "save_to_s3" ] 46 | #then 47 | ## zip and save to s3 48 | #echo zipping and saving to s3 49 | #zip -r $out_dir.zip $out_dir 50 | #python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | #fi 52 | #done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_bo_2.sh: -------------------------------------------------------------------------------- 1 | optimizer=bananas 2 | predictors=(bananas feedforward gbdt gcn bonas xgb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | base_file=NASLib/naslib 13 | s3_folder=bo201_feb22 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_npenas.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_seminas bananas mlp lgb gcn bonas xgb ngb rf dngo \ 3 | bohamiann bayes_lin_reg gp seminas sparse_gp var_sparse_gp nao) 4 | 5 | start_seed=$1 6 | if [ -z "$start_seed" ] 7 | then 8 | start_seed=0 9 | fi 10 | 11 | # folders: 12 | 
base_file=NASLib/naslib 13 | s3_folder=np201 14 | out_dir=$s3_folder\_$start_seed 15 | 16 | # search space / data: 17 | search_space=nasbench201 18 | dataset=cifar10 19 | search_epochs=500 20 | 21 | # trials / seeds: 22 | trials=100 23 | end_seed=$(($start_seed + $trials - 1)) 24 | save_to_s3=true 25 | 26 | # create config files 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 31 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 32 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 33 | --search_space $search_space --optimizer $optimizer 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for predictor in ${predictors[@]} 40 | do 41 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 42 | echo ================running $predictor trial: $t ===================== 43 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 44 | done 45 | if [ "$save_to_s3" ] 46 | then 47 | # zip and save to s3 48 | echo zipping and saving to s3 49 | zip -r $out_dir.zip $out_dir 50 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 51 | fi 52 | done 53 | -------------------------------------------------------------------------------- /scripts/nas_predictors/run_nb201_npenas_2.sh: -------------------------------------------------------------------------------- 1 | optimizer=npenas 2 | predictors=(omni_xgb) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=bo201_feb21 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | search_epochs=500 19 | 20 | # trials / seeds: 21 | trials=100 22 | end_seed=$(($start_seed + $trials - 1)) 23 | save_to_s3=true 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | python $base_file/benchmarks/create_configs.py --predictor $predictor \ 30 | --epochs $search_epochs --start_seed $start_seed --trials $trials \ 31 | --out_dir $out_dir --dataset=$dataset --config_type nas_predictor \ 32 | --search_space $search_space --optimizer $optimizer 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/nas_predictors/config\_$optimizer\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/nas_predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-imgnet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_gpu-rtx2080,alldlc_gpu-rtx2080,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-imagenet/%x.%A-%a.%N.out # STDOUT %A will
be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-imagenet/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_im_feb4_2_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-101/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-101/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo101_feb03_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | 21 | end=`date +%s` 22 | runtime=$((end-start)) 23 | 24 | echo Runtime: $runtime 25 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-c10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p ml_gpu-teslaP100 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-c10/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-c10/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 
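# Note: the sibling slurm scripts in this folder use the full array range 0-99; the narrower 17-23 below presumably reruns only that subset of seed indices.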
7 | #SBATCH -a 17-23 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_feb22_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | #python runner.py --config-file bo201_c100_feb01_0/cifar100/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 19 | #python runner.py --config-file bo201_imagenet_feb01_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 20 | 21 | 22 | end=`date +%s` 23 | runtime=$((end-start)) 24 | 25 | echo Runtime: $runtime 26 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-c100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-c100/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-c100/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_c100_feb03_0/cifar100/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb201-imagenet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-201-imagenet/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-201-imagenet/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 
7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo201_feb22_0/ImageNet16-120/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /scripts/nas_predictors/slurm_job-nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p alldlc_gpu-rtx2080 #ml_gpu-rtx2080 #bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH --gres=gpu:1 # reserves one GPU 4 | #SBATCH -o logs_bo-301/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -e logs_bo-301/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 6 | #SBATCH -D . 7 | #SBATCH -a 0-99 # array size 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | start=`date +%s` 14 | 15 | # Activate virtual env so that run_experiment can load the correct packages 16 | source activate python37 17 | python runner.py --config-file bo301_feb9_0_0/cifar10/configs/nas_predictors/config_bananas_${1}_${SLURM_ARRAY_TASK_ID}.yaml 18 | 19 | 20 | end=`date +%s` 21 | runtime=$((end-start)) 22 | 23 | echo Runtime: $runtime 24 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #nb101 4 | #predictors101=(bananas mlp lgb gcn xgb ngb rf dngo \ 5 | #bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 6 | 7 | #nb201 8 | predictors201=(bananas feedforward gbdt gcn bonas xgb ngb rf dngo \ 9 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 10 | 11 | #nb301 12 | #predictors301=(bananas mlp lgb bonas xgb ngb rf dngo \ 13 | # bohamiann bayes_lin_reg gp sparse_gp var_sparse_gp nao) 14 | 15 | #for predictor in ${predictors101[@]} 16 | #do 17 | #sbatch -J 101-${predictor} slurm_job-nb101.sh $predictor 18 | #done 19 | 20 | #for predictor in ${predictors201[@]} 21 | #do 22 | #sbatch -J 201-${predictor} slurm_job-nb201-c10.sh $predictor 23 | #sbatch -J c100-201-${predictor} slurm_job-nb201-c100.sh $predictor 24 | #sbatch -J imnet-201-${predictor} slurm_job-nb201-imagenet.sh $predictor 25 | #sbatch -J imnet-201-${predictor} slurm_job-imgnet.sh $predictor 26 | #done 27 | 28 | for predictor in ${predictors201[@]} 29 | do 30 | sbatch -J c10-${predictor} slurm_job-nb201-c10.sh $predictor 31 | #sbatch -J im-${predictor} slurm_job-nb201-imagenet.sh $predictor 32 | done 33 | 34 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit-oneshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | optimizers="oneshot rsws" 4 | space="darts nasbench201" 5 | portion="25 5 75 9" 6 | epochs="25 50 100 150" 7 | 8 | for s in $space 9 | do 10 
| for o in $optimizers 11 | do 12 | for p in $portion 13 | do 14 | for e in $epochs 15 | do 16 | sbatch -J ${s}\_${o}\_$p\_$e oneshot_eval.sh $s $o $p $e 17 | echo $s $o 0.$p\_$e 18 | done 19 | done 20 | done 21 | done 22 | #for s in $space 23 | #do 24 | #for o in $optimizers 25 | #do 26 | #for p in $portion 27 | #do 28 | #for e in $epochs 29 | #do 30 | #scancel -n ${s}\_${o}\_$p\_$e 31 | #done 32 | #done 33 | #done 34 | #done 35 | -------------------------------------------------------------------------------- /scripts/nas_predictors/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | predictors=(bananas mlp lgb gcn bonas xgb ngb rf dngo \ 4 | bohamiann bayes_lin_reg seminas nao gp sparse_gp var_sparse_gp) 5 | 6 | for predictor in ${predictors[@]} 7 | do 8 | sbatch -J ${predictor} slurm_job.sh $predictor 9 | done 10 | 11 | -------------------------------------------------------------------------------- /scripts/nasbench201/gsparse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Started at $(date)"; 4 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 5 | 6 | start=`date +%s` 7 | 8 | conda activate mlenv 9 | python runner.py 10 | 11 | end=`date +%s` 12 | runtime=$((end-start)) 13 | 14 | echo Runtime: $runtime 15 | -------------------------------------------------------------------------------- /scripts/predictors/run_nb201_2.sh: -------------------------------------------------------------------------------- 1 | predictors=(xgb_hpo xgb feedforward_hpo feedforward) 2 | experiment_types=(vary_train_size vary_train_size vary_train_size vary_train_size) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=p201_c10_feb21 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | 19 | # other variables: 20 | trials=100 21 | end_seed=$(($start_seed + $trials - 1)) 22 | save_to_s3=true 23 | test_size=200 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | experiment_type=${experiment_types[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 31 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 32 | --dataset=$dataset --config_type predictor --search_space $search_space 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- /scripts/predictors/run_tnb_predictors.sh: -------------------------------------------------------------------------------- 1 | #predictors=(jacov2 snip synflow fisher grad_norm 
grasp) 2 | #experiment_types=(single single single single single single) 3 | 4 | predictors=(jacov2 snip synflow fisher grad_norm) 5 | experiment_types=(single single single single single) 6 | 7 | start_seed=$1 8 | if [ -z "$start_seed" ] 9 | then 10 | start_seed=0 11 | fi 12 | 13 | # folders: 14 | base_file=NASLib/naslib 15 | s3_folder=class_scene_zc_dec10_2021 16 | out_dir=$s3_folder\_$start_seed 17 | 18 | # search space / data: 19 | search_space=transbench101_micro 20 | dataset=class_scene 21 | 22 | # other variables: 23 | trials=100 24 | end_seed=$(($start_seed + $trials - 1)) 25 | save_to_s3=true 26 | test_size=100 27 | 28 | # create config files 29 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 30 | do 31 | predictor=${predictors[$i]} 32 | experiment_type=${experiment_types[$i]} 33 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 34 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 35 | --dataset=$dataset --config_type predictor --search_space $search_space 36 | done 37 | 38 | # run experiments 39 | for t in $(seq $start_seed $end_seed) 40 | do 41 | for predictor in ${predictors[@]} 42 | do 43 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 44 | echo ================running $predictor trial: $t ===================== 45 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 46 | done 47 | if [ "$save_to_s3" ] 48 | then 49 | # zip and save to s3 50 | echo zipping and saving to s3 51 | zip -r $out_dir.zip $out_dir 52 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 53 | fi 54 | done 55 | -------------------------------------------------------------------------------- /scripts/predictors/test.sh: -------------------------------------------------------------------------------- 1 | predictors=(rf) 2 | experiment_types=(single) 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=NASLib/naslib 12 | s3_folder=test 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=nasbench201 17 | dataset=cifar10 18 | 19 | # other variables: 20 | trials=1 21 | end_seed=$(($start_seed + $trials - 1)) 22 | save_to_s3=true 23 | test_size=10 24 | 25 | # create config files 26 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 27 | do 28 | predictor=${predictors[$i]} 29 | experiment_type=${experiment_types[$i]} 30 | python $base_file/benchmarks/create_configs.py --predictor $predictor --experiment_type $experiment_type \ 31 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 32 | --dataset=$dataset --config_type predictor --search_space $search_space 33 | done 34 | 35 | # run experiments 36 | for t in $(seq $start_seed $end_seed) 37 | do 38 | for predictor in ${predictors[@]} 39 | do 40 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 41 | echo ================running $predictor trial: $t ===================== 42 | python $base_file/benchmarks/predictors/runner.py --config-file $config_file 43 | done 44 | if [ "$save_to_s3" ] 45 | then 46 | # zip and save to s3 47 | echo zipping and saving to s3 48 | zip -r $out_dir.zip $out_dir 49 | python $base_file/benchmarks/upload_to_s3.py --out_dir $out_dir --s3_folder $s3_folder 50 | fi 51 | done 52 | -------------------------------------------------------------------------------- 
/scripts/zc/bash_scripts/benchmarks/create_configs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # search space and datasets: 4 | search_space=$1 5 | dataset=$2 6 | start_seed=$3 7 | if [ -z "$start_seed" ] 8 | then 9 | start_seed=0 10 | fi 11 | 12 | # folders: 13 | config_root='naslib/configs' 14 | out_dir=run 15 | 16 | # predictors 17 | predictors=(fisher grad_norm grasp jacov snip synflow epe_nas flops params plain l2_norm nwot zen) 18 | 19 | # other variables: 20 | trials=10 21 | end_seed=$(($start_seed + $trials - 1)) 22 | train_size=100 23 | 24 | # create config files 25 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 26 | do 27 | predictor=${predictors[$i]} 28 | python scripts/zc/create_configs_benchmarks.py --predictor $predictor \ 29 | --start_seed $start_seed --out_dir $out_dir --dataset=$dataset \ 30 | --experiment benchmarks --search_space $search_space --config_root=$config_root 31 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/benchmarks/create_configs_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./scripts/zc/bash_scripts/benchmarks/create_configs_nb101.sh 4 | ./scripts/zc/bash_scripts/benchmarks/create_configs_nb201.sh 5 | ./scripts/zc/bash_scripts/benchmarks/create_configs_nb301.sh 6 | ./scripts/zc/bash_scripts/benchmarks/create_configs_tnb101.sh 7 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/benchmarks/create_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench101 4 | datasets=(cifar10) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/benchmarks/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/benchmarks/create_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench201 4 | datasets=(cifar10 cifar100 ImageNet16-120) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/benchmarks/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/benchmarks/create_configs_nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench301 4 | datasets=(cifar10) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/benchmarks/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/benchmarks/create_configs_tnb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspaces=(transbench101_micro transbench101_macro) 4 | datasets=(autoencoder class_object class_scene normal jigsaw room_layout segmentsemantic) 5 | 6 | for searchspace in "${searchspaces[@]}" 7 | do 8 | for dataset in "${datasets[@]}" 9 | do 10 | scripts/zc/bash_scripts/benchmarks/create_configs.sh $searchspace $dataset 9000 11 | done 12 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # search space and datasets: 4 | search_space=$1 5 | dataset=$2 6 | start_seed=$3 7 | if [ -z "$start_seed" ] 8 | then 9 | start_seed=0 10 | fi 11 | 12 | # folders: 13 | config_root='naslib/configs' 14 | out_dir=run 15 | 16 | # predictors 17 | predictors=(fisher grad_norm grasp jacov snip synflow epe_nas flops params plain l2_norm nwot zen) 18 | 19 | # other variables: 20 | trials=10 21 | end_seed=$(($start_seed + $trials - 1)) 22 | test_size=1000 23 | 24 | # create config files 25 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 26 | do 27 | predictor=${predictors[$i]} 28 | python scripts/zc/create_configs_correlation.py --predictor $predictor \ 29 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 30 | --dataset=$dataset --experiment correlation --search_space $search_space \ 31 | --config_root=$config_root 32 | done 33 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./scripts/zc/bash_scripts/correlation/create_configs_nb101.sh 4 | ./scripts/zc/bash_scripts/correlation/create_configs_nb201.sh 5 | ./scripts/zc/bash_scripts/correlation/create_configs_nb301.sh 6 | ./scripts/zc/bash_scripts/correlation/create_configs_tnb101.sh 7 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench101 4 | datasets=(cifar10) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/correlation/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench201 4 | datasets=(cifar10 cifar100 ImageNet16-120 svhn scifar100 ninapro) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/correlation/create_configs.sh $searchspace $dataset 9000 9 | done 10 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs_nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench301 4 | datasets=(cifar10 svhn scifar100 ninapro) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/correlation/create_configs.sh $searchspace $dataset 9000 9 | done 10 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/correlation/create_configs_tnb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspaces=(transbench101_micro transbench101_macro) 4 | datasets=(autoencoder class_object class_scene normal jigsaw room_layout segmentsemantic svhn scifar100 ninapro) 5 | 6 | for searchspace in "${searchspaces[@]}" 7 | do 8 | for dataset in "${datasets[@]}" 9 | do 10 | scripts/zc/bash_scripts/correlation/create_configs.sh $searchspace $dataset 9000 11 | done 12 | done 13 | 
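# Usage sketch (hypothetical invocation, not part of the repository): each wrapper above delegates to correlation/create_configs.sh with the positional arguments <search_space> <dataset> <start_seed>, so a single combination can also be generated directly, e.g.:
#   bash scripts/zc/bash_scripts/correlation/create_configs.sh nasbench201 cifar10 9000
# which should emit one config YAML per (zero-cost proxy, seed) pair via scripts/zc/create_configs_correlation.py.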
-------------------------------------------------------------------------------- /scripts/zc/bash_scripts/download_nbs_zero.sh: -------------------------------------------------------------------------------- 1 | cd naslib/data 2 | search_space="$1" 3 | echo search_space = $search_space 4 | if [ "$search_space" = "tnb101" ] || [ "$search_space" = "all" ] 5 | then 6 | filepath1=zc_transbench101_macro.json 7 | filepath2=zc_transbench101_micro.json 8 | fileid1=1nMaa3LjlP1d_umgudX7abKSdDMeoBNb5 9 | fileid2=1i8N2n7yflN33xAuQVzlTYM4E7W1Cwn1S 10 | if [ -f $filepath1 ] && [ -f $filepath2 ] 11 | then 12 | echo "tnb101 files exist" 13 | else 14 | echo "tnb101 files do not exist; downloading" 15 | gdown $fileid1 16 | gdown $fileid2 17 | fi 18 | fi 19 | if [ "$search_space" = "nb301" ] || [ "$search_space" = "all" ] 20 | then 21 | filepath=zc_nasbench301.json 22 | if [ -f $filepath ] 23 | then 24 | echo "nb301 file exists" 25 | else 26 | gdown 11kIyLr7LwFB_fXGDk3ic5yjiYMY5JLXG 27 | fi 28 | fi 29 | if [ "$search_space" = "nb201" ] || [ "$search_space" = "all" ] 30 | then 31 | filepath=zc_nasbench201.json 32 | if [ -f $filepath ] 33 | then 34 | echo "nb201 file exists" 35 | else 36 | gdown 1k2EUtVJ4JqoJCnuyJEVgZs6vAmbg6XVB 37 | fi 38 | fi 39 | if [ "$search_space" = "nb101" ] || [ "$search_space" = "all" ] 40 | then 41 | filepath=zc_nasbench101.json 42 | if [ -f $filepath ] 43 | then 44 | echo "nb101 file exists" 45 | else 46 | gdown 1uT3tuIDMVaB4U1N8l9imEYHWPOLls3FD 47 | fi 48 | fi 49 | cd .. 50 | cd .. 51 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/run_nb201.sh: -------------------------------------------------------------------------------- 1 | predictors=(fisher grad_norm grasp jacov snip synflow params flops) 2 | 3 | start_seed=$1 4 | if [ -z "$start_seed" ] 5 | then 6 | start_seed=0 7 | fi 8 | 9 | # folders: 10 | base_file=scripts 11 | s3_folder=p201_c10 12 | out_dir=$s3_folder\_$start_seed 13 | 14 | # search space / datasets: 15 | search_space=nasbench201 16 | datasets=(cifar10 cifar100 ImageNet16-120) 17 | 18 | # other variables: 19 | trials=1
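# Note: trials=1 and the small test_size below appear to be smoke-test settings; the predictor experiment scripts earlier in scripts/ use trials=100 for full runs.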
20 | end_seed=$(($start_seed + $trials - 1)) 21 | test_size=5 22 | 23 | # create config files 24 | for predictor in ${predictors[@]} 25 | do 26 | python $base_file/create_configs.py --predictor $predictor \ 27 | --test_size $test_size --start_seed $start_seed --trials $trials --out_dir $out_dir \ 28 | --dataset=$dataset --search_space $search_space 29 | done 30 | 31 | # run experiments 32 | for t in $(seq $start_seed $end_seed) 33 | do 34 | for predictor in ${predictors[@]} 35 | do 36 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 37 | echo ================running $predictor trial: $t for cifar10 ===================== 38 | python naslib/runners/runner.py --config-file $config_file 39 | done 40 | done 41 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/run_tnb101.sh: -------------------------------------------------------------------------------- 1 | predictors=(fisher grad_norm grasp jacov snip synflow params flops) 2 | 3 | 4 | start_seed=$1 5 | if [ -z "$start_seed" ] 6 | then 7 | start_seed=0 8 | fi 9 | 10 | # folders: 11 | base_file=scripts 12 | s3_folder=class_scene_zc_dec10_2021 13 | out_dir=$s3_folder\_$start_seed 14 | 15 | # search space / data: 16 | search_space=transbench101_micro 17 | datasets=(jigsaw class_object class_scene) 18 | 19 | # other variables: 20 | trials=1 21 | end_seed=$(($start_seed + $trials - 1)) 22 | test_size=5 23 | 24 | # create config files 25 | for dataset in ${datasets[@]} 26 | do 27 | for i in $(seq 0 $((${#predictors[@]}-1)) ) 28 | do 29 | predictor=${predictors[$i]} 30 | python $base_file/create_configs.py --predictor $predictor --test_size $test_size \ 31 | --start_seed $start_seed --trials $trials --out_dir $out_dir --dataset=$dataset \ 32 | --search_space $search_space 33 | done 34 | done 35 | 36 | # run experiments 37 | for t in $(seq $start_seed $end_seed) 38 | do 39 | for dataset in ${datasets[@]} 40 | do 41 | for predictor in ${predictors[@]} 42 | do 43 | config_file=$out_dir/$dataset/configs/predictors/config\_$predictor\_$t.yaml 44 | echo ================running $predictor trial: $t for $dataset ===================== 45 | python naslib/runners/runner.py --config-file $config_file 46 | done 47 | done 48 | done 49 | 50 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/xgb_correlation/create_configs_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | ./scripts/zc/bash_scripts/xgb_correlation/create_configs_nb101.sh $experiment 6 | ./scripts/zc/bash_scripts/xgb_correlation/create_configs_nb201.sh $experiment 7 | ./scripts/zc/bash_scripts/xgb_correlation/create_configs_nb301.sh $experiment 8 | ./scripts/zc/bash_scripts/xgb_correlation/create_configs_tnb101.sh $experiment 9 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/xgb_correlation/create_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | if [ -z "$experiment" ] 6 | then 7 | echo Experiment argument not provided 8 | exit 1 9 | fi 10 | 11 | searchspace=nasbench101 12 | datasets=(cifar10) 13 | 14 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13) 15 | proxies=( 16 | "zen" 17 | "zen epe_nas" 18 | "zen epe_nas jacov" 19 | "zen epe_nas jacov synflow" 20 | "zen epe_nas jacov synflow plain" 21 | "zen epe_nas jacov synflow plain nwot" 22 | 
"zen epe_nas jacov synflow plain nwot grad_norm" 23 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm" 24 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm grasp" 25 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm grasp fisher" 26 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm grasp fisher snip" 27 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm grasp fisher snip params" 28 | "zen epe_nas jacov synflow plain nwot grad_norm l2_norm grasp fisher snip params flops" 29 | ) 30 | 31 | for i in "${!proxies[@]}" 32 | do 33 | echo "${proxies[$i]}" 34 | for dataset in "${datasets[@]}" 35 | do 36 | scripts/zc/bash_scripts/xgb_correlation/create_configs.sh $experiment $searchspace $dataset 9000 "${ks[$i]}" "${proxies[$i]}" 37 | done 38 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/xgb_correlation/create_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | if [ -z "$experiment" ] 6 | then 7 | echo Experiment argument not provided 8 | exit 1 9 | fi 10 | 11 | searchspace=nasbench201 12 | datasets=(cifar100) #cifar10 cifar100 ImageNet16-120) 13 | 14 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13) 15 | proxies=( 16 | "synflow" 17 | "synflow plain" 18 | "synflow plain l2_norm" 19 | "synflow plain l2_norm flops" 20 | "synflow plain l2_norm flops snip" 21 | "synflow plain l2_norm flops snip grad_norm" 22 | "synflow plain l2_norm flops snip grad_norm nwot" 23 | "synflow plain l2_norm flops snip grad_norm nwot zen" 24 | "synflow plain l2_norm flops snip grad_norm nwot zen fisher" 25 | "synflow plain l2_norm flops snip grad_norm nwot zen fisher jacov" 26 | "synflow plain l2_norm flops snip grad_norm nwot zen fisher jacov epe_nas" 27 | "synflow plain l2_norm flops snip grad_norm nwot zen fisher jacov epe_nas params" 28 | "synflow plain l2_norm flops snip grad_norm nwot zen fisher jacov epe_nas params grasp" 29 | ) 30 | 31 | for i in "${!proxies[@]}" 32 | do 33 | echo "${proxies[$i]}" 34 | for dataset in "${datasets[@]}" 35 | do 36 | scripts/zc/bash_scripts/xgb_correlation/create_configs.sh $experiment $searchspace $dataset 9000 "${ks[$i]}" "${proxies[$i]}" 37 | done 38 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/xgb_correlation/create_configs_nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | if [ -z "$experiment" ] 6 | then 7 | echo Experiment argument not provided 8 | exit 1 9 | fi 10 | 11 | searchspace=nasbench301 12 | datasets=(cifar10) 13 | 14 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13) 15 | proxies=( 16 | "nwot" 17 | "nwot epe_nas" 18 | "nwot epe_nas synflow" 19 | "nwot epe_nas synflow plain" 20 | "nwot epe_nas synflow plain grad_norm" 21 | "nwot epe_nas synflow plain grad_norm zen" 22 | "nwot epe_nas synflow plain grad_norm zen l2_norm" 23 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops" 24 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops snip" 25 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops snip fisher" 26 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops snip fisher params" 27 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops snip fisher params jacov" 28 | "nwot epe_nas synflow plain grad_norm zen l2_norm flops snip fisher params jacov grasp" 29 | ) 30 | 31 | for i in "${!proxies[@]}" 32 | do 33 | echo "${proxies[$i]}" 
34 | for dataset in "${datasets[@]}" 35 | do 36 | scripts/zc/bash_scripts/xgb_correlation/create_configs.sh $experiment $searchspace $dataset 9000 "${ks[$i]}" "${proxies[$i]}" 37 | done 38 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/xgb_correlation/create_configs_tnb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | if [ -z "$experiment" ] 6 | then 7 | echo Experiment argument not provided 8 | exit 1 9 | fi 10 | 11 | searchspaces=(transbench101_macro) # transbench101_micro) 12 | datasets=(autoencoder) # class_object class_scene normal jigsaw room_layout segmentsemantic) 13 | 14 | ks=(1 2 3 4 5 6 7 8 9 10 11) 15 | proxies=( 16 | "flops" 17 | "flops l2_norm" 18 | "flops l2_norm plain" 19 | "flops l2_norm plain jacov" 20 | "flops l2_norm plain jacov grad_norm" 21 | "flops l2_norm plain jacov grad_norm nwot" 22 | "flops l2_norm plain jacov grad_norm nwot params" 23 | "flops l2_norm plain jacov grad_norm nwot params fisher" 24 | "flops l2_norm plain jacov grad_norm nwot params fisher zen" 25 | "flops l2_norm plain jacov grad_norm nwot params fisher zen snip" 26 | "flops l2_norm plain jacov grad_norm nwot params fisher zen snip grasp" 27 | ) 28 | 29 | for i in "${!proxies[@]}" 30 | do 31 | echo "${proxies[$i]}" 32 | for searchspace in "${searchspaces[@]}" 33 | do 34 | for dataset in "${datasets[@]}" 35 | do 36 | scripts/zc/bash_scripts/xgb_correlation/create_configs.sh $experiment $searchspace $dataset 9000 "${ks[$i]}" "${proxies[$i]}" 37 | done 38 | done 39 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # search space and datasets: 4 | search_space=$1 5 | dataset=$2 6 | start_seed=$3 7 | if [ -z "$start_seed" ] 8 | then 9 | start_seed=0 10 | fi 11 | 12 | 13 | out_dir=run 14 | trials=500 15 | end_seed=$(($start_seed + $trials - 1)) 16 | epochs=500 17 | config_root='naslib/configs' 18 | 19 | zc_only=(True False) 20 | use_zc_api=(True False) 21 | optimizers=(bananas npenas) 22 | 23 | if [[ "$search_space" == "transbench101_micro" || "$search_space" == "transbench101_macro" ]]; then 24 | zc_names="flops params snip jacov grad_norm plain fisher grasp l2_norm nwot zen" 25 | else 26 | zc_names="flops params snip jacov grad_norm plain fisher grasp l2_norm nwot zen epe_nas synflow" 27 | fi 28 | 29 | for optimizer in "${optimizers[@]}" 30 | do 31 | for zc_only_bool in "${zc_only[@]}" 32 | do 33 | for use_zc_api_bool in "${use_zc_api[@]}" 34 | do 35 | python scripts/zc/create_configs_zc_ensembles.py --start_seed $start_seed --trials $trials --out_dir $out_dir \ 36 | --dataset=$dataset --search_space $search_space --config_root=$config_root --zc_names $zc_names --epochs $epochs \ 37 | --use_zc_api $use_zc_api_bool --zc_only $zc_only_bool --optimizer $optimizer 38 | done 39 | done 40 | done 41 | -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./scripts/zc/bash_scripts/zc_ensemble/create_configs_nb101.sh 4 | ./scripts/zc/bash_scripts/zc_ensemble/create_configs_nb201.sh 5 | ./scripts/zc/bash_scripts/zc_ensemble/create_configs_nb301.sh 6 | 
./scripts/zc/bash_scripts/zc_ensemble/create_configs_tnb101.sh -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench101 4 | datasets=(cifar10) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/zc_ensemble/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench201 4 | datasets=(cifar10 cifar100 ImageNet16-120) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/zc_ensemble/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs_nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspace=nasbench301 4 | datasets=(cifar10) 5 | 6 | for dataset in "${datasets[@]}" 7 | do 8 | scripts/zc/bash_scripts/zc_ensemble/create_configs.sh $searchspace $dataset 9000 9 | done -------------------------------------------------------------------------------- /scripts/zc/bash_scripts/zc_ensemble/create_configs_tnb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | searchspaces=(transbench101_micro transbench101_macro) 4 | datasets=(autoencoder class_object class_scene normal jigsaw room_layout segmentsemantic) 5 | 6 | for searchspace in "${searchspaces[@]}" 7 | do 8 | for dataset in "${datasets[@]}" 9 | do 10 | scripts/zc/bash_scripts/zc_ensemble/create_configs.sh $searchspace $dataset 9000 11 | done 12 | done -------------------------------------------------------------------------------- /scripts/zc/benchmarks/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_cpu-cascadelake #,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -o logs/%x.memMEM_FOR_JOB.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 4 | #SBATCH -e logs/%x.memMEM_FOR_JOB.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -a JOB_ARRAY_RANGE # array size 6 | #SBATCH --mem=MEM_FOR_JOB 7 | #SBATCH --job-name="THE_JOB_NAME" 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | searchspace=$1 14 | dataset=$2 15 | predictor=$3 16 | start_seed=$4 17 | experiment=$5 18 | N_MODELS=2500 19 | 20 | SCRIPT_DIR="/home/jhaa/NASLib/scripts/vscode_remote_debugging" 21 | while read var value 22 | do 23 | export "$var"="$value" 24 | done < $SCRIPT_DIR/config.conf 25 | 26 | if [ -z "$searchspace" ] 27 | then 28 | echo "Search space argument not provided" 29 | exit 1 30 | fi 31 | 32 | if [ -z "$dataset" ] 33 | then 34 | echo "Dataset argument not provided" 35 | exit 1 36 | fi 37 | 38 | if [ -z "$predictor" ] 39 | then 40 | echo "Predictor argument not provided" 41 | exit 1 42 | fi 43 | 44 | if [ -z "$start_seed" ] 45 | then 46 | echo "Start 
seed not provided" 47 | exit 1 48 | fi 49 | 50 | if [ -z "$experiment" ] 51 | then 52 | echo "experiment not provided" 53 | exit 1 54 | fi 55 | 56 | start=`date +%s` 57 | python naslib/runners/zc/benchmarks/runner.py --config-file naslib/configs/${experiment}/${predictor}/${searchspace}-${start_seed}/${dataset}/config_${start_seed}.yaml start_idx ${SLURM_ARRAY_TASK_ID} n_models $N_MODELS 58 | 59 | end=`date +%s` 60 | runtime=$((end-start)) 61 | 62 | echo Runtime: $runtime 63 | -------------------------------------------------------------------------------- /scripts/zc/benchmarks/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | ./scripts/zc/benchmarks/run_nb101.sh $experiment 6 | ./scripts/zc/benchmarks/run_nb201.sh $experiment 7 | ./scripts/zc/benchmarks/run_nb301.sh $experiment 8 | ./scripts/zc/benchmarks/run_tnb101.sh $experiment 9 | -------------------------------------------------------------------------------- /scripts/zc/correlation/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p bosch_cpu-cascadelake #ml_gpu-rtx2080 #bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -o logs/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 4 | #SBATCH -e logs/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -a 0-4 # array size 6 | #SBATCH --mem=5G 7 | #SBATCH --job-name="THE_JOB_NAME" 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | searchspace=$1 14 | dataset=$2 15 | predictor=$3 16 | start_seed=$4 17 | experiment=$5 18 | 19 | if [ -z "$searchspace" ] 20 | then 21 | echo "Search space argument not provided" 22 | exit 1 23 | fi 24 | 25 | if [ -z "$dataset" ] 26 | then 27 | echo "Dataset argument not provided" 28 | exit 1 29 | fi 30 | 31 | if [ -z "$predictor" ] 32 | then 33 | echo "Predictor argument not provided" 34 | exit 1 35 | fi 36 | 37 | if [ -z "$start_seed" ] 38 | then 39 | echo "Start seed not provided" 40 | exit 1 41 | fi 42 | 43 | if [ -z "$experiment" ] 44 | then 45 | echo "experiment not provided" 46 | exit 1 47 | fi 48 | 49 | # Change the configs root folder: the config-generation scripts write their output to configs/, while a naslib/configs folder already exists.
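# NOTE: the seed below is pinned via test_id=0, so all array tasks (0-4) evaluate the same config; the commented SLURM_ARRAY_TASK_ID line that follows appears to be the intended per-task seeding for full runs.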
50 | start=`date +%s` 51 | test_id=0 52 | # seed=$(($start_seed + ${SLURM_ARRAY_TASK_ID})) 53 | seed=$(($start_seed + ${test_id})) 54 | python naslib/runners/zc/runner.py --config-file naslib/configs/${experiment}/${predictor}/${searchspace}-${start_seed}/${dataset}/config_${seed}.yaml 55 | end=`date +%s` 56 | runtime=$((end-start)) 57 | 58 | echo Runtime: $runtime 59 | -------------------------------------------------------------------------------- /scripts/zc/correlation/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | ./scripts/zc/correlation/run_nb101.sh $experiment 6 | ./scripts/zc/correlation/run_nb201.sh $experiment 7 | ./scripts/zc/correlation/run_nb301.sh $experiment 8 | ./scripts/zc/correlation/run_tnb101.sh $experiment 9 | -------------------------------------------------------------------------------- /scripts/zc/correlation/run_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | predictor=$2 5 | start_seed=9000 6 | 7 | if [ -z "$experiment" ] 8 | then 9 | echo "Experiment argument not provided" 10 | exit 1 11 | fi 12 | 13 | if [ -z "$predictor" ]; 14 | then 15 | predictors=(fisher grad_norm grasp jacov snip synflow epe_nas flops params plain l2_norm nwot zen) 16 | else 17 | predictors=($predictor) 18 | fi 19 | 20 | searchspace=nasbench101 21 | datasets=(cifar10) 22 | 23 | for dataset in "${datasets[@]}" 24 | do 25 | for pred in "${predictors[@]}" 26 | do 27 | sed -i "s/THE_JOB_NAME/${searchspace}-${dataset}-${pred}/" scripts/zc/correlation/run.sh 28 | echo $searchspace $dataset $pred 29 | sbatch ./scripts/zc/correlation/run.sh $searchspace $dataset $pred $start_seed $experiment --bosch 30 | sed -i "s/${searchspace}-${dataset}-${pred}/THE_JOB_NAME/" scripts/zc/correlation/run.sh 31 | done 32 | 33 | echo "" 34 | done -------------------------------------------------------------------------------- /scripts/zc/correlation/run_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | predictor=$2 5 | start_seed=9000 6 | 7 | if [ -z "$experiment" ] 8 | then 9 | echo "Experiment argument not provided" 10 | exit 1 11 | fi 12 | 13 | if [ -z "$predictor" ]; 14 | then 15 | predictors=(fisher grad_norm grasp jacov snip synflow epe_nas flops params plain l2_norm nwot zen) 16 | else 17 | predictors=($predictor) 18 | fi 19 | 20 | searchspace=nasbench201 21 | datasets=(cifar10 cifar100 ImageNet16-120) 22 | 23 | for dataset in "${datasets[@]}" 24 | do 25 | for pred in "${predictors[@]}" 26 | do 27 | sed -i "s/THE_JOB_NAME/${searchspace}-${dataset}-${pred}/" ./scripts/zc/correlation/run.sh 28 | echo $searchspace $dataset $pred 29 | sbatch ./scripts/zc/correlation/run.sh $searchspace $dataset $pred $start_seed $experiment --bosch 30 | sed -i "s/${searchspace}-${dataset}-${pred}/THE_JOB_NAME/" ./scripts/zc/correlation/run.sh 31 | done 32 | 33 | echo "" 34 | done -------------------------------------------------------------------------------- /scripts/zc/correlation/run_nb301.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | predictor=$2 5 | start_seed=9000 6 | 7 | if [ -z "$experiment" ] 8 | then 9 | echo "Experiment argument not provided" 10 | exit 1 11 | fi 12 | 13 | if [ -z "$predictor" ]; 14 | then 15 | predictors=(fisher grad_norm grasp jacov snip synflow epe_nas flops 
params plain l2_norm nwot zen) 16 | else 17 | predictors=($predictor) 18 | fi 19 | 20 | searchspace=nasbench301 21 | datasets=(cifar10) 22 | 23 | for dataset in "${datasets[@]}" 24 | do 25 | for pred in "${predictors[@]}" 26 | do 27 | sed -i "s/THE_JOB_NAME/${searchspace}-${dataset}-${pred}/" ./scripts/zc/correlation/run.sh 28 | echo $searchspace $dataset $pred 29 | sbatch ./scripts/zc/correlation/run.sh $searchspace $dataset $pred $start_seed $experiment --bosch 30 | sed -i "s/${searchspace}-${dataset}-${pred}/THE_JOB_NAME/" ./scripts/zc/correlation/run.sh 31 | done 32 | 33 | echo "" 34 | done -------------------------------------------------------------------------------- /scripts/zc/correlation/run_one.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p testdlc_gpu-rtx2080 #bosch_cpu-cascadelake #,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -o logs/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 4 | #SBATCH -e logs/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -a 0 # array size 6 | #SBATCH --mem=16GB 7 | #SBATCH --job-name="ZC_ONE_RUN" 8 | 9 | echo "Workingdir: $PWD"; 10 | echo "Started at $(date)"; 11 | echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION"; 12 | 13 | searchspace=$1 14 | predictor=$3 15 | dataset=$2 16 | seed=$4 17 | experiment=correlation 18 | 19 | if [ -z "$searchspace" ] 20 | then 21 | echo "Search space argument not provided" 22 | exit 1 23 | fi 24 | 25 | if [ -z "$dataset" ] 26 | then 27 | echo "Dataset argument not provided" 28 | exit 1 29 | fi 30 | 31 | if [ -z "$predictor" ] 32 | then 33 | echo "Predictor argument not provided" 34 | exit 1 35 | fi 36 | 37 | if [ -z "$seed" ] 38 | then 39 | echo "Seed not provided" 40 | exit 1 41 | fi 42 | 43 | if [ -z "$experiment" ] 44 | then 45 | echo "experiment not provided" 46 | exit 1 47 | fi 48 | 49 | start=`date +%s` 50 | 51 | python naslib/runners/zc/runner.py --config-file configs/${experiment}/${predictor}/${searchspace}-9000/${dataset}/config_${seed}.yaml 52 | 53 | end=`date +%s` 54 | runtime=$((end-start)) 55 | 56 | echo Runtime: $runtime 57 | -------------------------------------------------------------------------------- /scripts/zc/correlation/run_tnb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | predictor=$2 5 | start_seed=9000 6 | 7 | if [ -z "$experiment" ] 8 | then 9 | echo "Experiment argument not provided" 10 | exit 1 11 | fi 12 | 13 | if [ -z "$predictor" ]; 14 | then 15 | predictors=(fisher grad_norm grasp jacov snip flops params plain l2_norm nwot zen) 16 | else 17 | predictors=($predictor) 18 | fi 19 | 20 | searchspaces=(transbench101_micro transbench101_macro) 21 | datasets=(autoencoder class_object class_scene normal jigsaw room_layout segmentsemantic) 22 | 23 | 24 | for searchspace in "${searchspaces[@]}" 25 | do 26 | for dataset in "${datasets[@]}" 27 | do 28 | for pred in "${predictors[@]}" 29 | do 30 | sed -i "s/THE_JOB_NAME/${searchspace}-${dataset}-${pred}/" ./scripts/zc/correlation/run.sh 31 | echo $searchspace $dataset $pred 32 | sbatch ./scripts/zc/correlation/run.sh $searchspace $dataset $pred $start_seed $experiment --bosch 33 | sed -i "s/${searchspace}-${dataset}-${pred}/THE_JOB_NAME/" ./scripts/zc/correlation/run.sh 34 | done 35 | 
36 | echo "" 37 | done 38 | done -------------------------------------------------------------------------------- /scripts/zc/xgb_correlation/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -p testdlc_gpu-rtx2080 #bosch_cpu-cascadelake #,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue) 3 | #SBATCH -o logs/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value 4 | #SBATCH -e logs/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value 5 | #SBATCH -a 0 # array size 6 | #SBATCH --mem=5G 7 | #SBATCH --job-name="XGB_ZC_CORRELATION" 8 | 9 | searchspace=$1 10 | dataset=$2 11 | train_size=train_size_$3 12 | start_seed=$4 13 | experiment=$5 14 | k=k_$6 15 | n_seeds=100 16 | 17 | if [ -z "$searchspace" ] 18 | then 19 | echo "Search space argument not provided" 20 | exit 1 21 | fi 22 | 23 | if [ -z "$dataset" ] 24 | then 25 | echo "Dataset argument not provided" 26 | exit 1 27 | fi 28 | 29 | if [ -z "$train_size" ] 30 | then 31 | echo "Train size argument not provided" 32 | exit 1 33 | fi 34 | 35 | if [ -z "$start_seed" ] 36 | then 37 | echo "Start seed not provided" 38 | exit 1 39 | fi 40 | 41 | if [ -z "$experiment" ] 42 | then 43 | echo "Experiment argument not provided" 44 | exit 1 45 | fi 46 | 47 | start=`date +%s` 48 | for t in $(seq 0 $(($n_seeds - 1))) 49 | do 50 | seed=$(($start_seed + $t)) 51 | python naslib/runners/zc/bbo/xgb_runner.py --config-file naslib/configs/${experiment}/${train_size}/$k/${searchspace}-${start_seed}/${dataset}/config_${seed}.yaml 52 | done 53 | 54 | end=`date +%s` 55 | runtime=$((end-start)) 56 | 57 | echo Runtime: $runtime 58 | -------------------------------------------------------------------------------- /scripts/zc/xgb_correlation/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | experiment=$1 4 | 5 | if [ -z "$experiment" ] 6 | then 7 | echo "Experiment argument not provided" 8 | exit 1 9 | fi 10 | 11 | ./scripts/zc/xgb_correlation/run_nb101.sh $experiment 12 | ./scripts/zc/xgb_correlation/run_nb201.sh $experiment 13 | ./scripts/zc/xgb_correlation/run_nb301.sh $experiment 14 | ./scripts/zc/xgb_correlation/run_tnb101.sh $experiment 15 | -------------------------------------------------------------------------------- /scripts/zc/xgb_correlation/run_nb101.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87 135 209 323 500) 4 | train_sizes=(1000) #(1000) #(10 15 23 36 56 87 135 209 323 500) 5 | searchspace=nasbench101 6 | datasets=(cifar10) 7 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13) 8 | start_seed=9000 9 | n_seeds=10 10 | 11 | experiment=$1 12 | 13 | if [ -z "$experiment" ] 14 | then 15 | echo "Experiment argument not provided" 16 | exit 1 17 | fi 18 | 19 | for k in "${ks[@]}" 20 | do 21 | for dataset in "${datasets[@]}" 22 | do 23 | for size in "${train_sizes[@]}" 24 | do 25 | sbatch ./scripts/zc/xgb_correlation/run.sh $searchspace $dataset $size $start_seed $experiment $k --bosch 26 | done 27 | 28 | done 29 | done 30 | -------------------------------------------------------------------------------- /scripts/zc/xgb_correlation/run_nb201.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87
--------------------------------------------------------------------------------
/scripts/zc/xgb_correlation/run_nb101.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
4 | train_sizes=(1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
5 | searchspace=nasbench101
6 | datasets=(cifar10)
7 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13)
8 | start_seed=9000
9 | n_seeds=10
10 | 
11 | experiment=$1
12 | 
13 | if [ -z "$experiment" ]
14 | then
15 |     echo "Experiment argument not provided"
16 |     exit 1
17 | fi
18 | 
19 | for k in "${ks[@]}"
20 | do
21 |     for dataset in "${datasets[@]}"
22 |     do
23 |         for size in "${train_sizes[@]}"
24 |         do
25 |             sbatch ./scripts/zc/xgb_correlation/run.sh $searchspace $dataset $size $start_seed $experiment $k --bosch
26 |         done
27 | 
28 |     done
29 | done
30 | 
--------------------------------------------------------------------------------
/scripts/zc/xgb_correlation/run_nb201.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
4 | train_sizes=(1000)
5 | searchspace=nasbench201
6 | # datasets=(cifar10 cifar100 ImageNet16-120)
7 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13)
8 | datasets=(cifar100)
9 | start_seed=9000
10 | 
11 | experiment=$1
12 | 
13 | if [ -z "$experiment" ]
14 | then
15 |     echo "Experiment argument not provided"
16 |     exit 1
17 | fi
18 | 
19 | for k in "${ks[@]}"
20 | do
21 |     for dataset in "${datasets[@]}"
22 |     do
23 |         for size in "${train_sizes[@]}"
24 |         do
25 |             sbatch ./scripts/zc/xgb_correlation/run.sh $searchspace $dataset $size $start_seed $experiment $k --bosch
26 |         done
27 | 
28 |     done
29 | done
30 | 
--------------------------------------------------------------------------------
/scripts/zc/xgb_correlation/run_nb301.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
4 | train_sizes=(1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
5 | searchspace=nasbench301
6 | datasets=(cifar10)
7 | ks=(1 2 3 4 5 6 7 8 9 10 11 12 13)
8 | start_seed=9000
9 | 
10 | experiment=$1
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | for k in "${ks[@]}"
19 | do
20 |     for dataset in "${datasets[@]}"
21 |     do
22 |         for size in "${train_sizes[@]}"
23 |         do
24 |             sbatch ./scripts/zc/xgb_correlation/run.sh $searchspace $dataset $size $start_seed $experiment $k --bosch
25 |         done
26 | 
27 |     done
28 | done
29 | 
--------------------------------------------------------------------------------
/scripts/zc/xgb_correlation/run_tnb101.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # train_sizes=(5 8 14 24 42 71 121 205 347 589 1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
4 | train_sizes=(1000) #(1000) #(10 15 23 36 56 87 135 209 323 500)
5 | searchspaces=(transbench101_macro) # transbench101_micro)
6 | ks=(1 2 3 4 5 6 7 8 9 10 11)
7 | datasets=(autoencoder) # class_object class_scene autoencoder normal room_layout segmentsemantic)
8 | start_seed=9000
9 | 
10 | experiment=$1
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | for k in "${ks[@]}"
19 | do
20 |     for searchspace in "${searchspaces[@]}"
21 |     do
22 |         for dataset in "${datasets[@]}"
23 |         do
24 |             for size in "${train_sizes[@]}"
25 |             do
26 |                 sbatch ./scripts/zc/xgb_correlation/run.sh $searchspace $dataset $size $start_seed $experiment $k --bosch
27 |             done
28 | 
29 |         done
30 |     done
31 | done
32 | 
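To make the argument plumbing above concrete: each sbatch call selects one YAML per seed through run.sh's path template. With searchspace nasbench201, dataset cifar10, train size 1000, start seed 9000, k=3, and a hypothetical experiment name xgb_corr, seed 9005 would resolve to:

    naslib/configs/xgb_corr/train_size_1000/k_3/nasbench201-9000/cifar10/config_9005.yaml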
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -p bosch_cpu-cascadelake #,ml_gpu-rtx2080 #ml_gpu-rtx2080 # bosch_gpu-rtx2080 #alldlc_gpu-rtx2080 # partition (queue)
3 | #SBATCH -o logs/%x.%A-%a.%N.out # STDOUT %A will be replaced by the SLURM_ARRAY_JOB_ID value
4 | #SBATCH -e logs/%x.%A-%a.%N.err # STDERR %A will be replaced by the SLURM_ARRAY_JOB_ID value
5 | #SBATCH -a 0-399:10 # array size
6 | #SBATCH --job-name="THE_JOB_NAME"
7 | #SBATCH --mem=5G
8 | echo "Workingdir: $PWD";
9 | echo "Started at $(date)";
10 | #echo "Running job $SLURM_JOB_NAME using $SLURM_JOB_CPUS_PER_NODE cpus per node with given JID $SLURM_JOB_ID on queue $SLURM_JOB_PARTITION";
11 | 
12 | searchspace=$1
13 | dataset=$2
14 | start_seed=$3
15 | n_seeds=$4
16 | experiment=$5
17 | optimizer=bananas
18 | zc_usage=$6
19 | zc_source=$7
20 | 
21 | if [ -z "$searchspace" ]
22 | then
23 |     echo "Search space argument not provided"
24 |     exit 1
25 | fi
26 | 
27 | if [ -z "$dataset" ]
28 | then
29 |     echo "Dataset argument not provided"
30 |     exit 1
31 | fi
32 | 
33 | if [ -z "$start_seed" ]
34 | then
35 |     echo "Start seed not provided"
36 |     exit 1
37 | fi
38 | 
39 | if [ -z "$n_seeds" ]
40 | then
41 |     echo "n_seeds not provided"
42 |     exit 1
43 | fi
44 | 
45 | if [ -z "$experiment" ]
46 | then
47 |     echo "experiment not provided"
48 |     exit 1
49 | fi
50 | 
51 | if [ -z "$zc_usage" ]
52 | then
53 |     echo "zc_usage not provided"
54 |     exit 1
55 | fi
56 | 
57 | if [ -z "$zc_source" ]
58 | then
59 |     echo "zc_source not provided"
60 |     exit 1
61 | fi
62 | 
63 | start=`date +%s`
64 | 
65 | for i in $(seq 0 $(($n_seeds - 1)))
66 | do
67 |     echo "running experiment for config_$(($start_seed + $i)).yaml"
68 |     python naslib/runners/bbo/runner.py --config-file naslib/configs/${experiment}/${zc_usage}/${zc_source}/${optimizer}/${searchspace}-${start_seed}/${dataset}/config_$(($start_seed + $i)).yaml
69 | done
70 | 
71 | end=`date +%s`
72 | runtime=$((end-start))
73 | 
74 | echo Runtime: $runtime
75 | 
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run_all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | experiment=$1
4 | zc_usage=$2
5 | zc_source=$3
6 | n_seeds=10
7 | 
8 | if [ -z "$experiment" ]
9 | then
10 |     echo "Experiment argument not provided"
11 |     exit 1
12 | fi
13 | 
14 | if [ -z "$zc_usage" ]
15 | then
16 |     echo "zc_usage argument not provided"
17 |     exit 1
18 | fi
19 | 
20 | if [ -z "$zc_source" ]
21 | then
22 |     echo "zc_source argument not provided"
23 |     exit 1
24 | fi
25 | 
26 | bash scripts/zc/zc_ensembles/run_nb101.sh $experiment $zc_usage $zc_source $n_seeds
27 | bash scripts/zc/zc_ensembles/run_nb201.sh $experiment $zc_usage $zc_source $n_seeds
28 | bash scripts/zc/zc_ensembles/run_nb301.sh $experiment $zc_usage $zc_source $n_seeds
29 | bash scripts/zc/zc_ensembles/run_tnb101.sh $experiment $zc_usage $zc_source $n_seeds
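A usage sketch for the BANANAS ensemble runner above (my_zc_exp, with_zc, and benchmark are hypothetical placeholder values for experiment, zc_usage, and zc_source; the valid values depend on which config files were generated under naslib/configs/):

    # All four benchmarks, 10 seeds each:
    bash scripts/zc/zc_ensembles/run_all.sh my_zc_exp with_zc benchmark

    # One search space / dataset pair
    # (argument order: searchspace dataset start_seed n_seeds experiment zc_usage zc_source):
    bash scripts/zc/zc_ensembles/run.sh nasbench201 cifar10 9000 10 my_zc_exp with_zc benchmark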
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run_nb101.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | searchspace=nasbench101
4 | datasets=(cifar10)
5 | start_seed=9000
6 | 
7 | experiment=$1
8 | zc_usage=$2
9 | zc_source=$3
10 | n_seeds=$4
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | if [ -z "$zc_usage" ]
19 | then
20 |     echo "zc_usage argument not provided"
21 |     exit 1
22 | fi
23 | 
24 | if [ -z "$zc_source" ]
25 | then
26 |     echo "zc_source argument not provided"
27 |     exit 1
28 | fi
29 | 
30 | if [ -z "$n_seeds" ]
31 | then
32 |     echo "n_seeds argument not provided"
33 |     exit 1
34 | fi
35 | 
36 | 
37 | for dataset in "${datasets[@]}"
38 | do
39 |     echo $searchspace $dataset
40 |     bash ./scripts/zc/zc_ensembles/run.sh $searchspace $dataset $start_seed $n_seeds $experiment $zc_usage $zc_source
41 | done
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run_nb201.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | searchspace=nasbench201
4 | datasets=(cifar10 cifar100 ImageNet16-120)
5 | start_seed=9000
6 | 
7 | experiment=$1
8 | zc_usage=$2
9 | zc_source=$3
10 | n_seeds=$4
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | if [ -z "$zc_usage" ]
19 | then
20 |     echo "zc_usage argument not provided"
21 |     exit 1
22 | fi
23 | 
24 | if [ -z "$zc_source" ]
25 | then
26 |     echo "zc_source argument not provided"
27 |     exit 1
28 | fi
29 | 
30 | if [ -z "$n_seeds" ]
31 | then
32 |     echo "n_seeds argument not provided"
33 |     exit 1
34 | fi
35 | 
36 | 
37 | for dataset in "${datasets[@]}"
38 | do
39 |     echo $searchspace $dataset
40 |     bash ./scripts/zc/zc_ensembles/run.sh $searchspace $dataset $start_seed $n_seeds $experiment $zc_usage $zc_source
41 | done
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run_nb301.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | searchspace=nasbench301
4 | datasets=(cifar10)
5 | start_seed=9000
6 | 
7 | experiment=$1
8 | zc_usage=$2
9 | zc_source=$3
10 | n_seeds=$4
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | if [ -z "$zc_usage" ]
19 | then
20 |     echo "zc_usage argument not provided"
21 |     exit 1
22 | fi
23 | 
24 | if [ -z "$zc_source" ]
25 | then
26 |     echo "zc_source argument not provided"
27 |     exit 1
28 | fi
29 | 
30 | if [ -z "$n_seeds" ]
31 | then
32 |     echo "n_seeds argument not provided"
33 |     exit 1
34 | fi
35 | 
36 | 
37 | for dataset in "${datasets[@]}"
38 | do
39 |     echo $searchspace $dataset
40 |     bash ./scripts/zc/zc_ensembles/run.sh $searchspace $dataset $start_seed $n_seeds $experiment $zc_usage $zc_source
41 | done
--------------------------------------------------------------------------------
/scripts/zc/zc_ensembles/run_tnb101.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | searchspace=transbench101_micro # transbench101_macro
4 | datasets=(autoencoder class_object class_scene normal jigsaw room_layout segmentsemantic)
5 | start_seed=9000
6 | 
7 | experiment=$1
8 | zc_usage=$2
9 | zc_source=$3
10 | n_seeds=$4
11 | 
12 | if [ -z "$experiment" ]
13 | then
14 |     echo "Experiment argument not provided"
15 |     exit 1
16 | fi
17 | 
18 | if [ -z "$zc_usage" ]
19 | then
20 |     echo "zc_usage argument not provided"
21 |     exit 1
22 | fi
23 | 
24 | if [ -z "$zc_source" ]
25 | then
26 |     echo "zc_source argument not provided"
27 |     exit 1
28 | fi
29 | 
30 | if [ -z "$n_seeds" ]
31 | then
32 |     echo "n_seeds argument not provided"
33 |     exit 1
34 | fi
35 | 
36 | 
37 | for dataset in "${datasets[@]}"
38 | do
39 |     echo $searchspace $dataset
40 |     bash ./scripts/zc/zc_ensembles/run.sh $searchspace $dataset $start_seed $n_seeds $experiment $zc_usage $zc_source
41 | done
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description_file = README.md
3 | 
4 | [mypy]
5 | ignore_missing_imports = True
6 | follow_imports = skip
7 | disallow_untyped_decorators = True
8 | disallow_incomplete_defs = True
9 | disallow_untyped_defs = True
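With the [mypy] section living in setup.cfg as above, the configured type checks can be run from the repository root with a plain invocation (assuming mypy is installed; mypy reads setup.cfg automatically when no mypy.ini is present):

    mypy naslib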
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/__init__.py
--------------------------------------------------------------------------------
/tests/assets/config.yaml:
--------------------------------------------------------------------------------
1 | seed: 12
2 | out_dir: "tmp/test/hallo"
3 | search_space: darts
4 | optimizer: darts
5 | dataset: cifar10
6 | 
7 | search:
8 |   batch_size: 300
9 | 
10 | evaluation:
11 |   batch_size: 200
--------------------------------------------------------------------------------
/tests/assets/nb101_dummy.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/assets/nb101_dummy.pkl
--------------------------------------------------------------------------------
/tests/assets/nb201_test_set_info.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/assets/nb201_test_set_info.npy
--------------------------------------------------------------------------------
/tests/assets/nb201_test_set_times.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/assets/nb201_test_set_times.npy
--------------------------------------------------------------------------------
/tests/assets/nb201_test_set_x.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/assets/nb201_test_set_x.npy
--------------------------------------------------------------------------------
/tests/assets/nb201_test_set_y.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/automl/NASLib/8cb5d2ba1e29784de43039d9824c68e88fb1a1da/tests/assets/nb201_test_set_y.npy
--------------------------------------------------------------------------------
/tests/assets/test_predictor.yaml:
--------------------------------------------------------------------------------
1 | experiment_type: single
2 | search_space: nasbench201
3 | dataset: cifar10
4 | predictor: mlp
5 | uniform_random: 1
6 | test_size: 10
7 | train_size_single: 20
8 | train_size_list: [5, 8, 14, 24, 42, 71, 121, 205]
9 | fidelity_single: 5
10 | fidelity_list: [1, 2, 3, 5, 7, 9, 13, 19, 26, 37, 52, 73]
11 | out_dir: run
12 | max_hpo_time: 0
13 | seed: 1000
14 | search:
15 |   seed: 1000
16 |   batch_size: 256
17 |   data_size: 25000
18 |   cutout: False
19 |   cutout_length: 16
20 |   cutout_prob: 1.0
21 |   train_portion: 0.7
--------------------------------------------------------------------------------
/tests/test_optimizer_factory.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import torch.nn as nn
4 | import torch.optim as optim
5 | 
6 | from naslib.utils.pytorch_helper import create_optimizer
7 | 
8 | 
9 | class OptimizerFactoryTest(unittest.TestCase):
10 |     def setUp(self):
11 |         self.model = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
12 | 
13 |     def test_params_type_correct(self):
14 |         create_optimizer("sgd", self.model.parameters())
15 | 
16 |     def test_params_type_false(self):
17 |         with self.assertRaises(ValueError):
18 |             create_optimizer("sgd", self.model)
19 | 
20 |     def test_sgd(self):
21 |         self._test_optimizer_helper('sgd', optim.SGD, lr=0.026, weight_decay=0, momentum=0.91)
22 | 
23 |     def test_adam(self):
24 |         self._test_optimizer_helper('Adam', optim.Adam, lr=0.026, weight_decay=0, betas=(0.5, 0.999))
25 | 
26 |     # Helper functions
27 |     def _test_optimizer_helper(self, opt, optimizer_type, lr, weight_decay, **kwargs):
28 |         optimizer = create_optimizer(
29 |             opt,
30 |             self.model.parameters(),
31 |             lr,
32 |             weight_decay,
33 |             **kwargs
34 |         )
35 | 
36 |         self.assertIsInstance(optimizer, optimizer_type)
37 |         self.assertEqual(optimizer.defaults["lr"], lr)
38 |         self.assertEqual(optimizer.defaults["weight_decay"], weight_decay)
39 |         for item in kwargs:
40 |             self.assertEqual(optimizer.defaults[item], kwargs[item])
41 | 
42 | 
43 | if __name__ == '__main__':
44 |     unittest.main()
45 | 
--------------------------------------------------------------------------------
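The factory test above is self-contained and can be run directly (assuming PyTorch and NASLib are installed in the active environment):

    python -m unittest tests.test_optimizer_factory -v
--------------------------------------------------------------------------------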