├── .github
├── CODE-OF-CONDUCT.md
├── CONTRIBUTING.md
├── ISSUE_TEMPLATE
│ ├── bug-report.md
│ └── question.md
└── workflows
│ ├── test-basic.yaml
│ ├── test-misc.yaml
│ └── test-super-layer_model.yaml
├── .gitignore
├── .gitmodules
├── .latent-data
├── .gitignore
├── init-configs
│ ├── README.md
│ ├── bashrc
│ ├── setup.sh
│ ├── tmux.conf
│ ├── vimrc
│ └── zshrc
└── splits
│ └── .gitignore
├── CHANGE-LOG.md
├── LICENSE.md
├── README.md
├── configs
├── NeurIPS-2019
│ ├── C010-ResNet110.config
│ ├── C010-ResNet164.config
│ ├── C010-ResNet20.config
│ ├── C010-ResNet32.config
│ ├── C010-ResNet56.config
│ ├── C100-ResNet110.config
│ ├── C100-ResNet164.config
│ ├── C100-ResNet20.config
│ ├── C100-ResNet32.config
│ ├── C100-ResNet56.config
│ ├── ImageNet-ResNet18V1.config
│ └── ImageNet-ResNet50V1.config
├── archs
│ ├── CIFAR-ResNet08.config
│ ├── CIFAR-ResNet1001.config
│ ├── CIFAR-ResNet110.config
│ ├── CIFAR-ResNet164.config
│ ├── CIFAR-ResNet20.config
│ ├── CIFAR-ResNet32.config
│ ├── CIFAR-ResNet56.config
│ ├── CIFAR-SIM05.config
│ ├── CIFAR-WRN28-10.config
│ ├── ImageNet-ResNet101V1.config
│ ├── ImageNet-ResNet101V2.config
│ ├── ImageNet-ResNet152V1.config
│ ├── ImageNet-ResNet152V2.config
│ ├── ImageNet-ResNet18V1.config
│ ├── ImageNet-ResNet18V2.config
│ ├── ImageNet-ResNet200V1.config
│ ├── ImageNet-ResNet200V2.config
│ ├── ImageNet-ResNet34V1.config
│ ├── ImageNet-ResNet34V2.config
│ ├── ImageNet-ResNet50V1.config
│ ├── ImageNet-ResNet50V2.config
│ ├── ImageNet-ResNext50-32x4dV2.config
│ ├── NAS-CIFAR-DARTS.config
│ ├── NAS-CIFAR-GDAS_V1.config
│ ├── NAS-CIFAR-NAS.config
│ ├── NAS-CIFAR-SETN.config
│ ├── NAS-CIFAR-none.config
│ ├── NAS-IMAGENET-DARTS_V2.config
│ ├── NAS-IMAGENET-GDAS_V1.config
│ ├── NAS-IMAGENET-SETN.config
│ ├── NAS-IMAGENET-SETN1.config
│ ├── NAS-IMAGENET-SETN2.config
│ ├── NAS-IMAGENET-SETN3.config
│ ├── NAS-IMAGENET-SETN4.config
│ └── NAS-IMAGENET-none.config
├── compares
│ ├── CIFAR010-TAS-R110.config
│ ├── CIFAR010-TAS-R56.config
│ ├── CIFAR100-FIX-R32.config
│ ├── CIFAR100-RAND-R32.config
│ ├── CIFAR100-TAS-R110.config
│ ├── CIFAR100-TAS-R32.config
│ └── CIFAR100-TAS-R56.config
├── nas-benchmark
│ ├── CIFAR.config
│ ├── ImageNet-16.config
│ ├── ImageNet16-120-split.txt
│ ├── LESS.config
│ ├── algos
│ │ ├── DARTS.config
│ │ ├── ENAS.config
│ │ ├── GDAS.config
│ │ ├── R-EA.config
│ │ ├── RANDOM.config
│ │ ├── SETN.config
│ │ └── weight-sharing.config
│ ├── cifar-split.txt
│ ├── cifar100-test-split.txt
│ ├── hyper-opts
│ │ ├── 01E.config
│ │ ├── 12E.config
│ │ ├── 200E.config
│ │ └── 90E.config
│ └── imagenet-16-120-test-split.txt
├── opts
│ ├── CIFAR-E300-W5-L1-COS.config
│ ├── CIFAR-E300-W5-L4-COS.config
│ ├── CIFAR-E600-W5-L1-COS.config
│ ├── CIFAR-Fast-Random.config
│ ├── CIFAR-Slow-Random.config
│ ├── Com-Paddle-NAS.config
│ ├── Com-Paddle-RES.config
│ ├── ImageNet-E120-Cos-Smooth.config
│ ├── ImageNet-E120-Cos-Soft.config
│ ├── ImageNet-E120-Step-Soft.config
│ ├── NAS-CIFAR-V2.config
│ ├── NAS-CIFAR.config
│ └── NAS-IMAGENET.config
├── qlib
│ ├── workflow_config_TabNet_Alpha360.yaml
│ ├── workflow_config_alstm_Alpha360.yaml
│ ├── workflow_config_doubleensemble_Alpha360.yaml
│ ├── workflow_config_gru_Alpha360.yaml
│ ├── workflow_config_lightgbm_Alpha360.yaml
│ ├── workflow_config_lstm_Alpha360.yaml
│ ├── workflow_config_mlp_Alpha360.yaml
│ ├── workflow_config_naive_v1_Alpha360.yaml
│ ├── workflow_config_naive_v2_Alpha360.yaml
│ ├── workflow_config_sfm_Alpha360.yaml
│ ├── workflow_config_transformer_Alpha360.yaml
│ ├── workflow_config_transformer_basic_Alpha360.yaml
│ └── workflow_config_xgboost_Alpha360.yaml
├── search-KD-opts
│ ├── ImageNet-E120-Cos-Soft.config
│ └── ImageNet-E150-MobileFast.config
├── search-archs
│ ├── DARTS-NASNet-CIFAR.config
│ ├── GDAS-NASNet-CIFAR.config
│ ├── GDASFRC-NASNet-CIFAR.config
│ ├── ImageNet-MobileNetV2-X.config
│ ├── ImageNet-ResNet18V1.config
│ ├── ImageNet-ResNet34V1.config
│ └── ImageNet-ResNet50V1.config
├── search-opts
│ ├── CIFAR.config
│ ├── CIFARX.config
│ ├── CIFARXX.config
│ ├── DARTS-NASNet-CIFAR.config
│ ├── GDAS-NASNet-CIFAR.config
│ ├── ImageNet-MobileFast.config
│ └── ImageNet-ResNet.config
├── temps
│ ├── T-MobileNetV2-X.config
│ ├── T-ResNet18.config
│ ├── T-ResNet50.config
│ ├── X-ResNet18.config
│ └── X-ResNet50.config
├── yaml.data
│ ├── cifar10.test
│ └── cifar10.train
├── yaml.loss
│ ├── cross-entropy
│ ├── top1-ce
│ └── top1.acc
├── yaml.model
│ └── vit-cifar10.s0
└── yaml.opt
│ └── vit.cifar
├── docs
├── BASELINE.md
├── CVPR-2019-GDAS.md
├── ICCV-2019-SETN.md
├── ICLR-2019-DARTS.md
├── NAS-Bench-201-PURE.md
├── NAS-Bench-201.md
├── NeurIPS-2019-TAS.md
├── README_CN.md
└── requirements.txt
├── exps
├── NAS-Bench-201-algos
│ ├── BOHB.py
│ ├── DARTS-V1.py
│ ├── DARTS-V2.py
│ ├── ENAS.py
│ ├── GDAS.py
│ ├── RANDOM-NAS.py
│ ├── RANDOM.py
│ ├── README.md
│ ├── R_EA.py
│ ├── SETN.py
│ └── reinforce.py
├── NAS-Bench-201
│ ├── check.py
│ ├── dist-setup.py
│ ├── functions.py
│ ├── main.py
│ ├── show-best.py
│ ├── statistics-v2.py
│ ├── statistics.py
│ ├── test-correlation.py
│ └── visualize.py
├── NATS-Bench
│ ├── Analyze-time.py
│ ├── draw-correlations.py
│ ├── draw-fig2_5.py
│ ├── draw-fig6.py
│ ├── draw-fig7.py
│ ├── draw-fig8.py
│ ├── draw-ranks.py
│ ├── draw-table.py
│ ├── main-sss.py
│ ├── main-tss.py
│ ├── show-dataset.py
│ ├── sss-collect.py
│ ├── sss-file-manager.py
│ ├── test-nats-api.py
│ ├── tss-collect-patcher.py
│ ├── tss-collect.py
│ └── tss-file-manager.py
├── NATS-algos
│ ├── README.md
│ ├── bohb.py
│ ├── random_wo_share.py
│ ├── regularized_ea.py
│ ├── reinforce.py
│ ├── run-all.sh
│ ├── search-cell.py
│ └── search-size.py
├── TAS
│ ├── prepare.py
│ ├── search-shape.py
│ └── search-transformable.py
├── basic
│ ├── KD-main.py
│ ├── basic-eval.py
│ ├── basic-main.py
│ └── xmain.py
├── experimental
│ ├── GeMOSA
│ │ ├── baselines
│ │ │ ├── maml-ft.py
│ │ │ ├── maml-nof.py
│ │ │ ├── slbm-ft.py
│ │ │ └── slbm-nof.py
│ │ ├── basic-his.py
│ │ ├── basic-prev.py
│ │ ├── basic-same.py
│ │ ├── main.py
│ │ ├── meta_model.py
│ │ ├── meta_model_ablation.py
│ │ └── vis-synthetic.py
│ ├── example-nas-bench.py
│ ├── test-dks.py
│ ├── test-dynamic.py
│ ├── test-flops.py
│ ├── test-nas-plot.py
│ ├── test-resnest.py
│ ├── test-ww-bench.py
│ ├── test-ww.py
│ ├── vis-nats-bench-algos.py
│ ├── vis-nats-bench-ws.py
│ └── visualize-nas-bench-x.py
└── trading
│ ├── baselines.py
│ ├── organize_results.py
│ └── workflow_tt.py
├── notebooks
├── NATS-Bench
│ ├── BayesOpt.ipynb
│ ├── find-largest.ipynb
│ ├── issue-96.ipynb
│ └── issue-97.ipynb
├── Q
│ ├── qlib-data-play.ipynb
│ └── workflow-test.ipynb
├── TOT
│ ├── ES-Model-DC.ipynb
│ ├── ES-Model-Drop.ipynb
│ ├── Time-Curve.ipynb
│ └── time-curve.py
└── spaces-xmisc
│ ├── random-search-transformer.ipynb
│ ├── scheduler.ipynb
│ ├── synthetic-data.ipynb
│ ├── synthetic-env.ipynb
│ ├── synthetic-visualize-env.ipynb
│ └── test-transformer-encoder.ipynb
├── scripts-search
├── NAS-Bench-201-algos
│ ├── BOHB.sh
│ ├── DARTS-V1.sh
│ ├── DARTS-V2.sh
│ ├── ENAS.sh
│ ├── GDAS.sh
│ ├── R-EA.sh
│ ├── RANDOM-NAS.sh
│ ├── README.md
│ ├── REINFORCE.sh
│ ├── Random.sh
│ ├── SETN.sh
│ └── hps
│ │ ├── DARTS-test-Gradient.sh
│ │ └── GRID-RL.sh
├── NAS-Bench-201
│ ├── build.sh
│ ├── meta-gen.sh
│ ├── train-a-net.sh
│ └── train-models.sh
├── NASNet-space-search-by-DARTS1V.sh
├── NASNet-space-search-by-GDAS-FRC.sh
├── NASNet-space-search-by-GDAS.sh
├── NASNet-space-search-by-SETN.sh
├── NATS
│ └── search-size.sh
├── search-depth-cifar.sh
├── search-depth-gumbel.sh
├── search-shape-cifar.sh
├── search-width-cifar.sh
└── search-width-gumbel.sh
├── scripts
├── NATS-Bench
│ ├── train-shapes.sh
│ └── train-topology.sh
├── TAS
│ └── prepare.sh
├── base-train.sh
├── black.sh
├── experimental
│ └── train-vit.sh
├── nas-infer-train.sh
├── retrain-searched-net.sh
├── tas-infer-train.sh
└── trade
│ ├── baseline.sh
│ ├── tsf-all.sh
│ ├── tsf-time.sh
│ └── tsf.sh
├── setup.py
├── tests
├── test_basic_space.py
├── test_import.py
├── test_loader.py
├── test_math_static.py
├── test_misc_scheduler.py
├── test_super_att.py
├── test_super_container.py
├── test_super_mlp.py
├── test_super_norm.py
├── test_super_rearrange.py
├── test_super_vit.py
├── test_synthetic_env.py
├── test_synthetic_utils.py
├── test_tas.py
├── test_torch.sh
└── test_torch_gpu_bugs.py
└── xautodl
├── __init__.py
├── config_utils
├── __init__.py
├── args_utils.py
├── attention_args.py
├── basic_args.py
├── cls_init_args.py
├── cls_kd_args.py
├── config_utils.py
├── pruning_args.py
├── random_baseline.py
├── search_args.py
├── search_single_args.py
└── share_args.py
├── datasets
├── DownsampledImageNet.py
├── LandmarkDataset.py
├── SearchDatasetWrap.py
├── __init__.py
├── get_dataset_with_transform.py
├── landmark_utils
│ ├── __init__.py
│ └── point_meta.py
├── math_base_funcs.py
├── math_core.py
├── math_dynamic_funcs.py
├── math_dynamic_generator.py
├── math_static_funcs.py
├── synthetic_core.py
├── synthetic_env.py
├── synthetic_utils.py
└── test_utils.py
├── log_utils
├── __init__.py
├── logger.py
├── meter.py
├── pickle_wrap.py
└── time_utils.py
├── models
├── CifarDenseNet.py
├── CifarResNet.py
├── CifarWideResNet.py
├── ImageNet_MobileNetV2.py
├── ImageNet_ResNet.py
├── SharedUtils.py
├── __init__.py
├── cell_infers
│ ├── __init__.py
│ ├── cells.py
│ ├── nasnet_cifar.py
│ └── tiny_network.py
├── cell_operations.py
├── cell_searchs
│ ├── __init__.py
│ ├── _test_module.py
│ ├── generic_model.py
│ ├── genotypes.py
│ ├── search_cells.py
│ ├── search_model_darts.py
│ ├── search_model_darts_nasnet.py
│ ├── search_model_enas.py
│ ├── search_model_enas_utils.py
│ ├── search_model_gdas.py
│ ├── search_model_gdas_frc_nasnet.py
│ ├── search_model_gdas_nasnet.py
│ ├── search_model_random.py
│ ├── search_model_setn.py
│ └── search_model_setn_nasnet.py
├── clone_weights.py
├── initialization.py
├── shape_infers
│ ├── InferCifarResNet.py
│ ├── InferCifarResNet_depth.py
│ ├── InferCifarResNet_width.py
│ ├── InferImagenetResNet.py
│ ├── InferMobileNetV2.py
│ ├── InferTinyCellNet.py
│ ├── __init__.py
│ └── shared_utils.py
└── shape_searchs
│ ├── SearchCifarResNet.py
│ ├── SearchCifarResNet_depth.py
│ ├── SearchCifarResNet_width.py
│ ├── SearchImagenetResNet.py
│ ├── SearchSimResNet_width.py
│ ├── SoftSelect.py
│ ├── __init__.py
│ └── generic_size_tiny_cell_model.py
├── nas_infer_model
├── DXYs
│ ├── CifarNet.py
│ ├── ImageNet.py
│ ├── __init__.py
│ ├── base_cells.py
│ ├── construct_utils.py
│ ├── genotypes.py
│ └── head_utils.py
├── __init__.py
└── operations.py
├── procedures
├── __init__.py
├── advanced_main.py
├── basic_main.py
├── eval_funcs.py
├── funcs_nasbench.py
├── metric_utils.py
├── optimizers.py
├── q_exps.py
├── search_main.py
├── search_main_v2.py
├── simple_KD_main.py
└── starts.py
├── spaces
├── __init__.py
├── basic_op.py
└── basic_space.py
├── trade_models
├── __init__.py
├── naive_v1_model.py
├── naive_v2_model.py
├── quant_transformer.py
└── transformers.py
├── utils
├── __init__.py
├── affine_utils.py
├── evaluation_utils.py
├── flop_benchmark.py
├── gpu_manager.py
├── hash_utils.py
├── nas_utils.py
├── qlib_utils.py
├── str_utils.py
├── temp_sync.py
└── weight_watcher.py
├── xlayers
├── __init__.py
├── misc_utils.py
├── super_activations.py
├── super_attention.py
├── super_attention_v2.py
├── super_container.py
├── super_core.py
├── super_dropout.py
├── super_linear.py
├── super_module.py
├── super_norm.py
├── super_positional_embedding.py
├── super_rearrange.py
├── super_trade_stem.py
├── super_transformer.py
├── super_utils.py
└── weight_init.py
├── xmisc
├── __init__.py
├── logger_utils.py
├── meter_utils.py
├── module_utils.py
├── sampler_utils.py
├── scheduler_utils.py
├── time_utils.py
├── torch_utils.py
└── yaml_utils.py
└── xmodels
├── __init__.py
├── core.py
├── transformers.py
└── transformers_quantum.py
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to AutoDL-Projects
2 |
3 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
4 |
5 | The following is a set of guidelines for contributing to AutoDL-Projects.
6 |
7 | ## Table Of Contents
8 |
9 | [How Can I Contribute?](#how-can-i-contribute)
10 | * [Reporting Bugs](#reporting-bugs)
11 | * [Suggesting Enhancements](#suggesting-enhancements)
12 | * [Your First Code Contribution](#your-first-code-contribution)
13 |
14 | ## How Can I Contribute?
15 |
16 | ### Reporting Bugs
17 |
18 | This section guides you through submitting a bug report for AutoDL-Projects.
19 | Following these guidelines helps maintainers and the community understand your report :pencil:, reproduce the behavior :computer:, and find related reports :mag_right:.
20 |
21 | When you are creating a bug report, please include as many details as possible.
22 | Fill out [the required template](https://github.com/D-X-Y/AutoDL-Projects/blob/main/.github/ISSUE_TEMPLATE/bug-report.md). The information it asks for helps us resolve issues faster.
23 |
24 | > **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one.
25 |
26 | ### Suggesting Enhancements
27 |
28 | Please feel free to email me (dongxuanyi888@gmail.com).
29 |
30 | ### Your First Code Contribution
31 |
32 | Please feel free to open a Pull Request.
33 |
34 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Please provide a small script to reproduce the behavior:
15 | ```
16 | codes to reproduce the bug
17 | ```
18 | Please let me know your OS, Python version, PyTorch version.
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Questions w.r.t. an AutoDL algorithm
3 | about: Ask questions about or discuss on some algorithms
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Which Algorithm**
11 | Please put the title of the AutoDL algorithm that you want to ask here.
12 |
13 | **Describe the Question**
14 | A clear and concise description of what your question is.
15 |
16 |
--------------------------------------------------------------------------------
/.github/workflows/test-basic.yaml:
--------------------------------------------------------------------------------
1 | name: Test Spaces
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | branches:
8 | - main
9 |
10 |
11 | jobs:
12 | build:
13 | strategy:
14 | matrix:
15 | os: [ubuntu-18.04, ubuntu-20.04, macos-latest]
16 | python-version: [3.6, 3.7, 3.8, 3.9]
17 |
18 | runs-on: ${{ matrix.os }}
19 | steps:
20 | - uses: actions/checkout@v2
21 |
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 |
27 | - name: Lint with Black
28 | run: |
29 | python -m pip install black
30 | python --version
31 | python -m black --version
32 | echo $PWD ; ls
33 | python -m black ./exps -l 88 --check --diff --verbose
34 | python -m black ./tests -l 88 --check --diff --verbose
35 | python -m black ./xautodl/x* -l 88 --check --diff --verbose
36 | python -m black ./xautodl/spaces -l 88 --check --diff --verbose
37 | python -m black ./xautodl/trade_models -l 88 --check --diff --verbose
38 | python -m black ./xautodl/procedures -l 88 --check --diff --verbose
39 | python -m black ./xautodl/config_utils -l 88 --check --diff --verbose
40 | python -m black ./xautodl/log_utils -l 88 --check --diff --verbose
41 |
42 | - name: Install XAutoDL from source
43 | run: |
44 | pip install .
45 |
46 | - name: Test Search Space
47 | run: |
48 | python -m pip install pytest
49 | python -m pip install torch torchvision
50 | python -m pip install parameterized
51 | echo $PWD
52 | echo "Show what we have here:"
53 | ls
54 | python --version
55 | python -m pytest ./tests/test_import.py -s
56 | python -m pytest ./tests/test_basic_space.py -s
57 | shell: bash
58 |
59 | - name: Test Math
60 | run: |
61 | python -m pytest ./tests/test_math*.py -s
62 | shell: bash
63 |
64 | - name: Test Synthetic Data
65 | run: |
66 | python -m pytest ./tests/test_synthetic*.py -s
67 | shell: bash
68 |
--------------------------------------------------------------------------------
/.github/workflows/test-misc.yaml:
--------------------------------------------------------------------------------
1 | name: Test Xmisc
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | branches:
8 | - main
9 |
10 |
11 | jobs:
12 | build:
13 | strategy:
14 | matrix:
15 | os: [ubuntu-18.04, ubuntu-20.04, macos-latest]
16 | python-version: [3.6, 3.7, 3.8, 3.9]
17 |
18 | runs-on: ${{ matrix.os }}
19 | steps:
20 | - uses: actions/checkout@v2
21 |
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 |
27 | - name: Install XAutoDL from source
28 | run: |
29 | pip install .
30 |
31 | - name: Test Xmisc
32 | run: |
33 | python -m pip install pytest
34 | python -m pip install torch torchvision
35 | python -m pip install parameterized
36 | echo $PWD
37 | echo "Show what we have here:"
38 | ls
39 | python --version
40 | python -m pytest ./tests/test_misc* -s
41 | shell: bash
42 |
--------------------------------------------------------------------------------
/.github/workflows/test-super-layer_model.yaml:
--------------------------------------------------------------------------------
1 | name: Test Super Model
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | branches:
8 | - main
9 |
10 |
11 | jobs:
12 | build:
13 | strategy:
14 | matrix:
15 | os: [ubuntu-18.04, ubuntu-20.04, macos-latest]
16 | python-version: [3.6, 3.7, 3.8, 3.9]
17 |
18 | runs-on: ${{ matrix.os }}
19 | steps:
20 | - uses: actions/checkout@v2
21 |
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 |
27 | - name: Install XAutoDL from source
28 | run: |
29 | pip install .
30 |
31 | - name: Test Super Model
32 | run: |
33 | python -m pip install pytest
34 | python -m pip install parameterized
35 | python -m pip install torch torchvision
36 | python -m pytest ./tests/test_super_*.py
37 | shell: bash
38 |
39 | - name: Test TAS (NeurIPS 2019)
40 | run: |
41 | python -m pytest ./tests/test_tas.py
42 | shell: bash
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | # Byte-compiled / optimized / DLL files
3 | __pycache__/
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
91 | # Pycharm project
92 | .idea
93 | snapshots
94 | *.pytorch
95 | *.tar.bz
96 | data
97 | .*.swp
98 | main_main.py
99 | *.pdf
100 | */*.pdf
101 |
102 | # Device
103 | scripts-nas/.nfs00*
104 | */.nfs00*
105 | *.DS_Store
106 |
107 | # logs and snapshots
108 | output
109 | logs
110 |
111 | # snapshot
112 | a.pth
113 | cal-merge*.sh
114 | GPU-*.sh
115 | cal.sh
116 | aaa
117 | cx.sh
118 |
119 | NAS-Bench-*-v1_0.pth
120 | lib/NAS-Bench-*-v1_0.pth
121 | others/TF
122 | scripts-search/l2s-algos
123 | TEMP-L.sh
124 |
125 | .nfs00*
126 | *.swo
127 | */*.swo
128 |
129 | # Visual Studio Code
130 | .vscode
131 | mlruns*
132 | outputs
133 |
134 | pytest_cache
135 | *.pkl
136 | *.pth
137 |
138 | *.tgz
139 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule ".latent-data/qlib"]
2 | path = .latent-data/qlib
3 | url = https://github.com/microsoft/qlib.git
4 | [submodule ".latent-data/NATS-Bench"]
5 | path = .latent-data/NATS-Bench
6 | url = https://github.com/D-X-Y/NATS-Bench.git
7 | [submodule ".latent-data/NAS-Bench-201"]
8 | path = .latent-data/NAS-Bench-201
9 | url = https://github.com/D-X-Y/NAS-Bench-201.git
10 |
--------------------------------------------------------------------------------
/.latent-data/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 |
--------------------------------------------------------------------------------
/.latent-data/init-configs/README.md:
--------------------------------------------------------------------------------
1 | # Git Commands
2 |
3 | ```
4 | git clone --recurse-submodules git@github.com:D-X-Y/AutoDL-Projects.git
5 |
6 | git submodule init
7 | git submodule update
8 | git pull origin main
9 |
10 | git submodule update --remote --recursive
11 | ```
12 |
13 | Code-style (black) and test checks for Q-lib:
14 | ```
15 | python -m black __init__.py -l 120
16 |
17 | pytest -W ignore::DeprecationWarning qlib/tests/test_all_pipeline.py
18 | ```
19 |
20 |
21 | ```
22 | conda update --all
23 | ```
24 |
25 | ## [phillytools](https://phillytools.azurewebsites.net/master/get_started/2_installation.html)
26 |
27 | ```
28 | conda create -n pt6 python=3.7
29 |
30 | conda activate pt6
31 |
32 | pip install -U phillytools --extra-index-url https://msrpypi.azurewebsites.net/stable/7e404de797f4e1eeca406c1739b00867 --extra-index-url https://azuremlsdktestpypi.azureedge.net/K8s-Compute/D58E86006C65
33 | ```
34 |
--------------------------------------------------------------------------------
/.latent-data/init-configs/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | script=$(readlink -f "$0")
4 | script_dir=$(dirname "$script")
5 | echo "script-directory: $script_dir"
6 |
7 | cp ${script_dir}/tmux.conf ~/.tmux.conf
8 | cp ${script_dir}/vimrc ~/.vimrc
9 | cp ${script_dir}/bashrc ~/.bashrc
10 | cp ${script_dir}/condarc ~/.condarc
11 |
12 | wget https://repo.anaconda.com/miniconda/Miniconda3-4.7.12.1-Linux-x86_64.sh
13 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
14 | wget https://repo.anaconda.com/archive/Anaconda3-2021.05-Linux-x86_64.sh
15 |
--------------------------------------------------------------------------------
/.latent-data/init-configs/vimrc:
--------------------------------------------------------------------------------
1 | color desert
2 | "Set the background color; each color scheme has two variants: light and dark
3 | "set background=light
4 | ""Enable syntax highlighting
5 | syntax enable
6 | set background=dark
7 | "colorscheme solarized
8 | colorscheme desert
9 | let g:solarized_termcolors=256
10 |
11 | if has('gui_running')
12 | set background=light
13 | else
14 | set background=dark
15 | endif
16 | "Show line numbers
17 | "set number
18 | ""Indentation has three options: cindent (C style), smartindent ("smart" mode, not actually that smart), and autoindent (simply match the previous line)
19 | set autoindent
20 | "On Windows, vim's backspace defaults to vi-compatible mode, which does not match common usage; the line below fixes it
21 | "set backspace=indent,eol,start
22 | ""Replace tabs with spaces
23 | "set expandtab
24 | "A tab occupies 4 spaces
25 | "set tabstop=4
26 | ""Default indent width is 4 spaces
27 | "set shiftwidth=4
28 | "Incremental search
29 | "set incsearch
30 | ""Highlight search matches
31 | set hlsearch
32 | set statusline=[%F]%y%r%m%*%=[Line:%l/%L,Column:%c][%p%%]
33 | set laststatus=2
34 | set ruler
35 | "Chinese text may sometimes display as garbled characters; the following commands fix it
36 | "let &termencoding=&encoding
37 | "set fileencodings=utf-8,gbk
38 | ""Detect file types — a setting required by many plugins
39 | :filetype on
40 | :filetype plugin on
41 | :filetype indent on
42 | "The lines below are very useful: run different commands depending on the file type
43 | ""For example: for c/c++ file types
44 | :autocmd FileType c,cpp,cuda :set foldmethod=syntax
45 | :autocmd FileType c,cpp,cuda :set number
46 | :autocmd FileType c,cpp,cuda :set cindent
47 | :autocmd FileType c,cpp,cuda :set expandtab
48 | :autocmd FileType c,cpp,cuda :set tabstop=4
49 | :autocmd FileType c,cpp,cuda :set shiftwidth=4
50 | "For example: for python file types
51 | :autocmd FileType python :set number
52 | :autocmd FileType python :set foldmethod=syntax
53 | :autocmd FileType python :set paste
54 | :autocmd FileType python :set expandtab
55 | :autocmd FileType python :set tabstop=2
56 | :autocmd FileType python :set shiftwidth=2
57 | "For example: for matlab file types
58 | :autocmd FileType matlab :set number
59 | :autocmd FileType matlab :set foldmethod=syntax
60 | :autocmd FileType matlab :set paste
61 | :autocmd FileType matlab :set expandtab
62 | :autocmd FileType matlab :set tabstop=2
63 | :autocmd FileType matlab :set shiftwidth=2
64 |
--------------------------------------------------------------------------------
/.latent-data/splits/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 |
--------------------------------------------------------------------------------
/CHANGE-LOG.md:
--------------------------------------------------------------------------------
1 | # This file shows the major updates of this repo.
2 |
3 | - [2020.04.11] [4ef9531](https://github.com/D-X-Y/AutoDL-Projects/tree/4ef9531) Add change log as `CHANGE-LOG.md`.
4 | - [2019.12.20] [69ca086](https://github.com/D-X-Y/AutoDL-Projects/tree/69ca086) Release NAS-Bench-201.
5 | - [2019.09.28] [f8f3f38](https://github.com/D-X-Y/AutoDL-Projects/tree/f8f3f38) TAS and SETN codes were publicly released.
6 | - [2019.01.31] [13e908f](https://github.com/D-X-Y/AutoDL-Projects/tree/13e908f) GDAS codes were publicly released.
7 | - [2020.07.01] [a45808b](https://github.com/D-X-Y/AutoDL-Projects/tree/a45808b) Upgrade NAS-API to the 2.0 version.
8 | - [2020.09.16] [7052265](https://github.com/D-X-Y/AutoDL-Projects/tree/7052265) Create NATS-BENCH.
9 | - [2020.10.15] [446262a](https://github.com/D-X-Y/AutoDL-Projects/tree/446262a) Update NATS-BENCH to version 1.0
10 | - [2020.12.20] [dae387a](https://github.com/D-X-Y/AutoDL-Projects/tree/dae387a) Update NATS-BENCH to version 1.1
11 | - [2021.05.18] [98fadf8](https://github.com/D-X-Y/AutoDL-Projects/tree/98fadf8) Before moving to `xautodl`
12 | - [2021.05.21] [df99173](https://github.com/D-X-Y/AutoDL-Projects/tree/df99173) `xautodl` is close to ready
13 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) since 2019.01.01, author: Xuanyi Dong (GitHub: https://github.com/D-X-Y)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C010-ResNet110.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "110"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "14", "16", "14", "16", "16", "16", "16", "16", "14", "16", "16", "16", "12", "16", "16", "16", "9", "16", "8", "16", "4", "16", "4", "4", "4", "16", "4", "4", "4", "4", "6", "6", "4", "6", "11", "4", "32", "32", "32", "32", "32", "32", "32", "32", "28", "32", "32", "28", "22", "22", "22", "32", "32", "25", "28", "9", "9", "28", "12", "9", "12", "32", "9", "9", "22", "12", "16", "9", "12", "9", "9", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "38", "64", "25", "19", "19", "19", "19", "19", "25", "32", "19", "19", "25", "25", "19", "19", "38", "38", "19", "19", "51"]],
11 | "xblocks" : ["int" , ["11", "11", "9"]],
12 | "estimated_FLOP" : ["float" , "117.498238"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C010-ResNet164.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "164"],
5 | "module" : ["str" , "ResNetBottleneck"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "8", "16", "64", "6", "16", "64", "14", "16", "64", "8", "16", "64", "4", "16", "64", "6", "16", "64", "6", "16", "64", "11", "11", "64", "4", "14", "64", "4", "4", "57", "4", "16", "64", "9", "12", "57", "4", "16", "64", "4", "8", "57", "6", "6", "51", "6", "4", "44", "6", "4", "57", "6", "6", "19", "32", "32", "128", "32", "32", "128", "32", "32", "128", "9", "32", "128", "32", "32", "128", "25", "28", "115", "12", "32", "128", "32", "32", "128", "32", "32", "128", "32", "32", "102", "28", "32", "128", "16", "32", "128", "28", "19", "128", "32", "9", "51", "16", "12", "102", "12", "22", "115", "9", "12", "51", "12", "16", "38", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "57", "204", "64", "25", "179", "19", "25", "204", "44", "19", "153", "38", "25", "76", "19", "32", "128", "19", "51", "76", "57", "32", "76"]],
11 | "xblocks" : ["int" , ["13", "15", "13"]],
12 | "estimated_FLOP" : ["float" , "173.023672"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C010-ResNet20.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "20"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "6", "4", "4", "4", "4", "4", "32", "32", "12", "19", "32", "28", "64", "64", "64", "64", "64", "44"]],
11 | "xblocks" : ["int" , ["3", "3", "3"]],
12 | "estimated_FLOP" : ["float" , "22.444472"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C010-ResNet32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "32"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "6", "4", "12", "4", "4", "16", "9", "9", "6", "14", "32", "32", "9", "19", "28", "9", "32", "19", "32", "9", "64", "64", "64", "64", "64", "64", "64", "32", "38", "32"]],
11 | "xblocks" : ["int" , ["5", "5", "5"]],
12 | "estimated_FLOP" : ["float" , "34.945344"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C010-ResNet56.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "56"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "16", "16", "14", "11", "9", "16", "12", "16", "6", "16", "4", "8", "4", "14", "6", "4", "4", "4", "32", "32", "32", "32", "32", "32", "22", "28", "32", "32", "19", "9", "19", "16", "9", "25", "16", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "51", "19", "19", "32", "19", "19", "32", "19", "25"]],
11 | "xblocks" : ["int" , ["5", "5", "5"]],
12 | "estimated_FLOP" : ["float" , "57.93305"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C100-ResNet110.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "110"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "14", "16", "11", "14", "16", "16", "11", "16", "9", "14", "12", "16", "16", "16", "8", "16", "14", "16", "12", "4", "11", "16", "4", "4", "4", "16", "12", "4", "8", "4", "9", "4", "6", "14", "4", "4", "32", "32", "32", "32", "28", "28", "32", "32", "32", "32", "32", "28", "32", "28", "25", "32", "32", "32", "9", "9", "32", "32", "9", "25", "28", "32", "28", "9", "9", "32", "12", "12", "9", "22", "12", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "44", "64", "57", "19", "19", "19", "19", "25", "19", "25", "19", "25", "19", "19", "25", "19", "19", "19", "25", "25", "19"]],
11 | "xblocks" : ["int" , ["13", "9", "11"]],
12 | "estimated_FLOP" : ["float" , "117.653164"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C100-ResNet164.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "164"],
5 | "module" : ["str" , "ResNetBottleneck"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "16", "16", "57", "6", "11", "64", "4", "6", "51", "6", "9", "64", "4", "8", "64", "4", "14", "64", "4", "8", "64", "4", "8", "64", "6", "12", "64", "6", "16", "64", "8", "16", "64", "14", "12", "64", "4", "16", "64", "4", "14", "64", "11", "16", "64", "4", "14", "64", "11", "4", "64", "4", "4", "19", "25", "32", "128", "28", "32", "115", "28", "32", "128", "25", "32", "128", "32", "32", "128", "25", "32", "128", "12", "32", "128", "25", "32", "128", "28", "32", "128", "25", "28", "128", "32", "32", "128", "28", "19", "128", "32", "32", "128", "19", "28", "128", "9", "19", "128", "28", "9", "89", "28", "19", "128", "9", "16", "38", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "256", "64", "64", "204", "64", "64", "179", "64", "64", "102", "64", "64", "102", "44", "19", "76", "19", "19", "76", "19", "38", "76", "25", "38", "153", "44", "25", "230"]],
11 | "xblocks" : ["int" , ["15", "15", "15"]],
12 | "estimated_FLOP" : ["float" , "165.583512"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C100-ResNet20.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "20"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "4", "4", "4", "4", "6", "4", "32", "32", "9", "19", "32", "28", "64", "64", "64", "64", "64", "64"]],
11 | "xblocks" : ["int" , ["3", "3", "3"]],
12 | "estimated_FLOP" : ["float" , "22.433792"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C100-ResNet32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "32"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "4", "4", "6", "11", "6", "4", "8", "4", "4", "4", "32", "32", "9", "28", "28", "28", "28", "28", "32", "32", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64"]],
11 | "xblocks" : ["int" , ["5", "5", "5"]],
12 | "estimated_FLOP" : ["float" , "42.47"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/C100-ResNet56.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "56"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "16", "9", "14", "16", "14", "16", "8", "16", "8", "14", "4", "4", "4", "8", "4", "6", "4", "4", "32", "32", "32", "28", "32", "32", "32", "22", "32", "32", "32", "9", "25", "19", "25", "12", "9", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "51", "19", "19", "19", "19", "25", "38", "19", "19"]],
11 | "xblocks" : ["int" , ["5", "5", "7"]],
12 | "estimated_FLOP" : ["float" , "59.472556"]
13 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/ImageNet-ResNet18V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "BasicBlock"],
5 | "layers" : ["int" , ["2", "2", "2", "2"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "zero_init_residual" : ["bool" , "1"],
8 | "class_num" : ["int" , "1000"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "64", "25", "64", "38", "19", "128", "128", "38", "38", "256", "256", "256", "256", "512", "512", "512", "512"]],
11 | "xblocks" : ["int" , ["1", "1", "2", "2"]],
12 | "super_type" : ["str" , "infer-shape"],
13 | "estimated_FLOP" : ["float" , "1120.44032"]
14 | }
--------------------------------------------------------------------------------
/configs/NeurIPS-2019/ImageNet-ResNet50V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "Bottleneck"],
5 | "layers" : ["int" , ["3", "4", "6", "3"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "zero_init_residual" : ["bool" , "1"],
8 | "class_num" : ["int" , "1000"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "45", "45", "30", "102", "33", "60", "154", "68", "70", "180", "38", "38", "307", "38", "38", "410", "64", "128", "358", "38", "51", "256", "76", "76", "512", "76", "76", "512", "179", "256", "614", "100", "102", "307", "179", "230", "614", "204", "102", "307", "153", "153", "1228", "512", "512", "1434", "512", "512", "1844"]],
11 | "xblocks" : ["int" , ["3", "4", "5", "3"]],
12 | "super_type" : ["str" , "infer-shape"],
13 | "estimated_FLOP" : ["float" , "2291.316289"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet08.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 8],
5 | "module" : ["str", "ResNetBasicblock"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet1001.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 1001],
5 | "module" : ["str", "ResNetBottleneck"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet110.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 110],
5 | "module" : ["str", "ResNetBasicblock"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet164.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 164],
5 | "module" : ["str", "ResNetBottleneck"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet20.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 20],
5 | "module" : ["str", "ResNetBasicblock"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 32],
5 | "module" : ["str", "ResNetBasicblock"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-ResNet56.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "resnet"],
4 | "depth" : ["int", 56],
5 | "module" : ["str", "ResNetBasicblock"],
6 | "super_type": ["str" , "basic"],
7 | "zero_init_residual" : ["bool", "0"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-SIM05.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "simres"],
4 | "depth" : ["int", 5],
5 | "super_type": ["str" , "basic"],
6 | "zero_init_residual" : ["bool", "0"]
7 | }
8 |
--------------------------------------------------------------------------------
/configs/archs/CIFAR-WRN28-10.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "cifar"],
3 | "arch" : ["str", "wideresnet"],
4 | "depth" : ["int", 28],
5 | "wide_factor":["int", 10],
6 | "dropout" : ["bool", 0],
7 | "super_type": ["str" , "basic"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet101V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,23,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet101V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,23,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet152V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,8,36,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet152V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,8,36,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet18V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [2,2,2,2]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet18V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [2,2,2,2]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet200V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,24,36,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet200V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,24,36,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet34V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet34V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet50V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNet50V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 1],
9 | "width_per_group" : ["int", 64],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/ImageNet-ResNext50-32x4dV2.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 1],
7 | "zero_init_residual" : ["bool", "1"],
8 | "groups" : ["int", 32],
9 | "width_per_group" : ["int", 4],
10 | "norm_layer" : ["none", "None"]
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/configs/archs/NAS-CIFAR-DARTS.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "DARTS"],
4 | "dataset" : ["str", "cifar"],
5 | "ichannel" : ["int", 36],
6 | "layers" : ["int", 6],
7 | "stem_multi": ["int", 3],
8 | "auxiliary" : ["bool", 1],
9 | "drop_path_prob": ["float", 0.2]
10 | }
11 |
--------------------------------------------------------------------------------
/configs/archs/NAS-CIFAR-GDAS_V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "GDAS_V1"],
4 | "dataset" : ["str", "cifar"],
5 | "ichannel" : ["int", 36],
6 | "layers" : ["int", 6],
7 | "stem_multi": ["int", 3],
8 | "auxiliary" : ["bool", 1],
9 | "drop_path_prob": ["float", 0.2]
10 | }
11 |
--------------------------------------------------------------------------------
/configs/archs/NAS-CIFAR-NAS.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "NASNet"],
4 | "dataset" : ["str", "cifar"],
5 | "ichannel" : ["int", 33],
6 | "layers" : ["int", 6],
7 | "stem_multi": ["int", 3],
8 | "auxiliary" : ["bool", 1],
9 | "drop_path_prob": ["float", 0.2]
10 | }
11 |
--------------------------------------------------------------------------------
/configs/archs/NAS-CIFAR-SETN.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "cifar"],
5 | "ichannel" : ["int", 36],
6 | "layers" : ["int", 6],
7 | "stem_multi": ["int", 3],
8 | "auxiliary" : ["bool", 1],
9 | "drop_path_prob": ["float", 0.2]
10 | }
11 |
--------------------------------------------------------------------------------
/configs/archs/NAS-CIFAR-none.config:
--------------------------------------------------------------------------------
1 | {
2 | "super_type": ["str", "infer-nasnet.cifar"],
3 | "genotype" : ["none", "none"],
4 | "dataset" : ["str", "cifar"],
5 | "ichannel" : ["int", 33],
6 | "layers" : ["int", 6],
7 | "stem_multi": ["int", 3],
8 | "auxiliary" : ["bool", 1],
9 | "drop_path_prob": ["float", 0.2]
10 | }
11 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-DARTS_V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "DARTS_V2"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 48],
6 | "layers" : ["int", 4],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-GDAS_V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "GDAS_V1"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 50],
6 | "layers" : ["int", 4],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-SETN.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 58],
6 | "layers" : ["int", 2],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-SETN1.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 73],
6 | "layers" : ["int", 1],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-SETN2.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 58],
6 | "layers" : ["int", 2],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-SETN3.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 49],
6 | "layers" : ["int", 3],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-SETN4.config:
--------------------------------------------------------------------------------
1 | {
2 | "arch" : ["str", "dxys"],
3 | "genotype" : ["str", "SETN"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 44],
6 | "layers" : ["int", 4],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/archs/NAS-IMAGENET-none.config:
--------------------------------------------------------------------------------
1 | {
2 | "super_type": ["str", "infer-nasnet.imagenet"],
3 | "genotype" : ["none", "none"],
4 | "dataset" : ["str", "imagenet"],
5 | "ichannel" : ["int", 50],
6 | "layers" : ["int", 4],
7 | "auxiliary" : ["bool", 1],
8 | "drop_path_prob": ["float", 0]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/compares/CIFAR010-TAS-R110.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "110"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "14", "16", "14", "16", "16", "16", "16", "16", "14", "16", "16", "16", "12", "16", "16", "16", "9", "16", "8", "16", "4", "16", "4", "4", "4", "16", "4", "4", "4", "4", "6", "6", "4", "6", "11", "4", "32", "32", "32", "32", "32", "32", "32", "32", "28", "32", "32", "28", "22", "22", "22", "32", "32", "25", "28", "9", "9", "28", "12", "9", "12", "32", "9", "9", "22", "12", "16", "9", "12", "9", "9", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "38", "64", "25", "19", "19", "19", "19", "19", "25", "32", "19", "19", "25", "25", "19", "19", "38", "38", "19", "19", "51"]],
11 | "xblocks" : ["int" , ["11", "11", "9"]],
12 | "estimated_FLOP" : ["float" , "117.498238"]
13 | }
--------------------------------------------------------------------------------
/configs/compares/CIFAR010-TAS-R56.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "56"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "10"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "16", "16", "14", "14", "14", "16", "12", "16", "6", "16", "4", "4", "4", "14", "4", "4", "6", "4", "32", "32", "32", "32", "32", "32", "22", "25", "16", "32", "19", "9", "9", "16", "25", "12", "16", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "51", "19", "19", "32", "51", "25", "32", "19", "19"]],
11 | "xblocks" : ["int" , ["5", "5", "5"]],
12 | "estimated_FLOP" : ["float" , "57.52595"]
13 | }
--------------------------------------------------------------------------------
/configs/compares/CIFAR100-FIX-R32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "32"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-width"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "xchannels" : ["int" , ["3", "12", "12", "12", "12", "12", "12", "12", "12", "12", "12", "12", "25", "25", "25", "25", "25", "25", "25", "25", "25", "25", "50", "50", "50", "50", "50", "50", "50", "50", "50", "50"]],
10 | "estimated_FLOP" : ["float" , "41.095816"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/compares/CIFAR100-RAND-R32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "32"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-width"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "xchannels" : ["int" , ["3", "4", "11", "11", "11", "12", "14", "16", "8", "9", "6", "12", "28", "32", "28", "32", "12", "25", "28", "22", "28", "25", "57", "19", "38", "64", "64", "51", "57", "64", "64", "57"]],
10 | "estimated_FLOP" : ["float" , "42.908996"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/compares/CIFAR100-TAS-R110.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "110"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "14", "16", "11", "14", "16", "16", "11", "16", "9", "14", "12", "16", "16", "16", "8", "16", "14", "16", "12", "4", "11", "16", "4", "4", "4", "16", "12", "4", "8", "4", "9", "4", "6", "14", "4", "4", "32", "32", "32", "32", "28", "28", "32", "32", "32", "32", "32", "28", "32", "28", "25", "32", "32", "32", "9", "9", "32", "32", "9", "25", "28", "32", "28", "9", "9", "32", "12", "12", "9", "22", "12", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64", "44", "64", "57", "19", "19", "19", "19", "25", "19", "25", "19", "25", "19", "19", "25", "19", "19", "19", "25", "25", "19"]],
11 | "xblocks" : ["int" , ["13", "9", "11"]],
12 | "estimated_FLOP" : ["float" , "117.653164"]
13 | }
--------------------------------------------------------------------------------
/configs/compares/CIFAR100-TAS-R32.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "32"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-width"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "xchannels" : ["int" , ["3", "16", "4", "4", "4", "14", "6", "4", "8", "4", "4", "4", "32", "32", "9", "28", "28", "28", "28", "28", "32", "32", "64", "64", "64", "64", "64", "64", "64", "64", "64", "64"]],
10 | "estimated_FLOP" : ["float" , "42.493184"]
11 | }
12 |
--------------------------------------------------------------------------------
/configs/compares/CIFAR100-TAS-R56.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "cifar"],
3 | "arch" : ["str" , "resnet"],
4 | "depth" : ["int" , "56"],
5 | "module" : ["str" , "ResNetBasicblock"],
6 | "super_type" : ["str" , "infer-shape"],
7 | "zero_init_residual" : ["bool" , "0"],
8 | "class_num" : ["int" , "100"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "16", "16", "9", "14", "16", "14", "16", "8", "16", "8", "14", "4", "4", "4", "8", "4", "6", "4", "4", "32", "32", "32", "28", "32", "32", "32", "22", "32", "32", "32", "9", "25", "19", "25", "12", "9", "9", "64", "64", "64", "64", "64", "64", "64", "64", "64", "51", "19", "19", "19", "19", "25", "38", "19", "19"]],
11 | "xblocks" : ["int" , ["5", "5", "7"]],
12 | "estimated_FLOP" : ["float" , "59.472556"]
13 | }
--------------------------------------------------------------------------------
/configs/nas-benchmark/CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "200"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/ImageNet-16.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "200"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/LESS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "12"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/DARTS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "50"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/ENAS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.05"],
4 | "eta_min" : ["float", "0.0005"],
5 | "epochs" : ["int", "250"],
6 | "T_max" : ["int", "10"],
7 | "warmup" : ["int", "0"],
8 | "optim" : ["str", "SGD"],
9 | "decay" : ["float", "0.00025"],
10 | "momentum" : ["float", "0.9"],
11 | "nesterov" : ["bool", "1"],
12 | "controller_lr" : ["float", "0.001"],
13 | "controller_betas": ["float", [0, 0.999]],
14 | "controller_eps" : ["float", 0.001],
15 | "criterion": ["str", "Softmax"],
16 | "batch_size": ["int", "128"]
17 | }
18 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/GDAS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "250"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/R-EA.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.001"],
4 | "epochs" : ["int", "25"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.025"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/RANDOM.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "250"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"],
13 | "test_batch_size": ["int", "512"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/SETN.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "250"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"],
13 | "test_batch_size": ["int", "512"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/algos/weight-sharing.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "100"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"],
13 | "test_batch_size": ["int", "512"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/hyper-opts/01E.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "1"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/hyper-opts/12E.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "12"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
--------------------------------------------------------------------------------
/configs/nas-benchmark/hyper-opts/200E.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "200"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/nas-benchmark/hyper-opts/90E.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "90"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/CIFAR-E300-W5-L1-COS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "300"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "-1"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/CIFAR-E300-W5-L4-COS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "300"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.4"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"]
12 | }
13 |
--------------------------------------------------------------------------------
/configs/opts/CIFAR-E600-W5-L1-COS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "600"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"]
12 | }
13 |
--------------------------------------------------------------------------------
/configs/opts/CIFAR-Fast-Random.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "100"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "-1"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/CIFAR-Slow-Random.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "300"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "-1"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/Com-Paddle-NAS.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "595"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.025"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "0.4"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/Com-Paddle-RES.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "300"],
5 |   "warmup"   : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 |   "nesterov" : ["bool", "1"],
11 |   "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "-1"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/ImageNet-E120-Cos-Smooth.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "120"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0001"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "SmoothSoftmax"],
12 | "label_smooth": ["float", 0.1],
13 | "auxiliary": ["float", "-1"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/opts/ImageNet-E120-Cos-Soft.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "120"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0001"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "-1"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/ImageNet-E120-Step-Soft.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "multistep"],
3 | "epochs" : ["int", "120"],
4 | "warmup" : ["int", "5"],
5 | "optim" : ["str", "SGD"],
6 | "LR" : ["float", "0.1"],
7 |   "milestones": ["int", [30, 60, 90]],
8 | "gammas" : ["float", [0.1, 0.1, 0.1]],
9 | "decay" : ["float", "0.0001"],
10 | "momentum" : ["float", "0.9"],
11 | "nesterov" : ["bool", "1"],
12 | "criterion": ["str", "Softmax"],
13 | "auxiliary": ["float", "-1"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/opts/NAS-CIFAR-V2.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "595"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.025"],
8 | "decay" : ["float", "0.0003"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "0.4"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/NAS-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "295"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.025"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "auxiliary": ["float", "0.4"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/opts/NAS-IMAGENET.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "245"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.00003"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "SmoothSoftmax"],
12 | "label_smooth": ["float", 0.1],
13 | "auxiliary" : ["float", "0.4"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/qlib/workflow_config_naive_v1_Alpha360.yaml:
--------------------------------------------------------------------------------
1 | qlib_init:
2 | provider_uri: "~/.qlib/qlib_data/cn_data"
3 | region: cn
4 | market: &market all
5 | benchmark: &benchmark SH000300
6 | data_handler_config: &data_handler_config
7 | start_time: 2008-01-01
8 | end_time: 2020-08-01
9 | fit_start_time: 2008-01-01
10 | fit_end_time: 2014-12-31
11 | instruments: *market
12 | infer_processors: []
13 | learn_processors: []
14 | label: ["Ref($close, -2) / Ref($close, -1) - 1"]
15 | port_analysis_config: &port_analysis_config
16 | strategy:
17 | class: TopkDropoutStrategy
18 | module_path: qlib.contrib.strategy.strategy
19 | kwargs:
20 | topk: 50
21 | n_drop: 5
22 | backtest:
23 | verbose: False
24 | limit_threshold: 0.095
25 | account: 100000000
26 | benchmark: *benchmark
27 | deal_price: close
28 | open_cost: 0.0005
29 | close_cost: 0.0015
30 | min_cost: 5
31 | task:
32 | model:
33 | class: NAIVE_V1
34 | module_path: trade_models.naive_v1_model
35 | kwargs:
36 | d_feat: 6
37 | dataset:
38 | class: DatasetH
39 | module_path: qlib.data.dataset
40 | kwargs:
41 | handler:
42 | class: Alpha360
43 | module_path: qlib.contrib.data.handler
44 | kwargs: *data_handler_config
45 | segments:
46 | train: [2008-01-01, 2014-12-31]
47 | valid: [2015-01-01, 2016-12-31]
48 | test: [2017-01-01, 2020-08-01]
49 | record:
50 | - class: SignalRecord
51 | module_path: qlib.workflow.record_temp
52 | kwargs: {}
53 | - class: SignalMseRecord
54 | module_path: qlib.contrib.workflow.record_temp
55 | kwargs: {}
56 | - class: SigAnaRecord
57 | module_path: qlib.workflow.record_temp
58 | kwargs:
59 | ana_long_short: False
60 | ann_scaler: 252
61 | - class: PortAnaRecord
62 | module_path: qlib.workflow.record_temp
63 | kwargs:
64 | config: *port_analysis_config
65 |
--------------------------------------------------------------------------------
/configs/qlib/workflow_config_naive_v2_Alpha360.yaml:
--------------------------------------------------------------------------------
1 | qlib_init:
2 | provider_uri: "~/.qlib/qlib_data/cn_data"
3 | region: cn
4 | market: &market all
5 | benchmark: &benchmark SH000300
6 | data_handler_config: &data_handler_config
7 | start_time: 2008-01-01
8 | end_time: 2020-08-01
9 | fit_start_time: 2008-01-01
10 | fit_end_time: 2014-12-31
11 | instruments: *market
12 | infer_processors: []
13 | learn_processors: []
14 | label: ["Ref($close, -2) / Ref($close, -1) - 1"]
15 | port_analysis_config: &port_analysis_config
16 | strategy:
17 | class: TopkDropoutStrategy
18 | module_path: qlib.contrib.strategy.strategy
19 | kwargs:
20 | topk: 50
21 | n_drop: 5
22 | backtest:
23 | verbose: False
24 | limit_threshold: 0.095
25 | account: 100000000
26 | benchmark: *benchmark
27 | deal_price: close
28 | open_cost: 0.0005
29 | close_cost: 0.0015
30 | min_cost: 5
31 | task:
32 | model:
33 | class: NAIVE_V2
34 | module_path: trade_models.naive_v2_model
35 | kwargs:
36 | d_feat: 6
37 | dataset:
38 | class: DatasetH
39 | module_path: qlib.data.dataset
40 | kwargs:
41 | handler:
42 | class: Alpha360
43 | module_path: qlib.contrib.data.handler
44 | kwargs: *data_handler_config
45 | segments:
46 | train: [2008-01-01, 2014-12-31]
47 | valid: [2015-01-01, 2016-12-31]
48 | test: [2017-01-01, 2020-08-01]
49 | record:
50 | - class: SignalRecord
51 | module_path: qlib.workflow.record_temp
52 | kwargs: {}
53 | - class: SignalMseRecord
54 | module_path: qlib.contrib.workflow.record_temp
55 | kwargs: {}
56 | - class: SigAnaRecord
57 | module_path: qlib.workflow.record_temp
58 | kwargs:
59 | ana_long_short: False
60 | ann_scaler: 252
61 | - class: PortAnaRecord
62 | module_path: qlib.workflow.record_temp
63 | kwargs:
64 | config: *port_analysis_config
65 |
--------------------------------------------------------------------------------
/configs/search-KD-opts/ImageNet-E120-Cos-Soft.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "125"],
5 | "T_max" : ["int", "120"],
6 | "warmup" : ["int", "5"],
7 | "optim" : ["str", "SGD"],
8 | "LR" : ["float", "0.1"],
9 | "decay" : ["float", "0.0001"],
10 | "momentum" : ["float", "0.9"],
11 | "nesterov" : ["bool", "1"],
12 | "criterion": ["str", "Softmax"],
13 | "auxiliary": ["float", "-1"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/search-KD-opts/ImageNet-E150-MobileFast.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "155"],
5 | "T_max" : ["int", "150"],
6 | "warmup" : ["int", "0"],
7 | "gamma" : ["float", "0.98"],
8 | "optim" : ["str", "SGD"],
9 | "LR" : ["float", "0.05"],
10 | "decay" : ["float", "0.00004"],
11 | "momentum" : ["float", "0.9"],
12 | "nesterov" : ["bool", "0"],
13 | "criterion": ["str", "Softmax"],
14 | "auxiliary": ["float", "-1"]
15 | }
16 |
--------------------------------------------------------------------------------
/configs/search-archs/DARTS-NASNet-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "super_type" : ["str", "nasnet-super"],
3 | "name" : ["str", "DARTS"],
4 | "C" : ["int", "16" ],
5 | "N" : ["int", "2" ],
6 | "steps" : ["int", "4" ],
7 | "multiplier" : ["int", "4" ],
8 | "stem_multiplier" : ["int", "3" ]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/search-archs/GDAS-NASNet-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "super_type" : ["str", "nasnet-super"],
3 | "name" : ["str", "GDAS"],
4 | "C" : ["int", "16" ],
5 | "N" : ["int", "2" ],
6 | "steps" : ["int", "4" ],
7 | "multiplier" : ["int", "4" ],
8 | "stem_multiplier" : ["int", "3" ]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/search-archs/GDASFRC-NASNet-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "super_type" : ["str", "nasnet-super"],
3 | "name" : ["str", "GDAS_FRC"],
4 | "C" : ["int", "16" ],
5 | "N" : ["int", "2" ],
6 | "steps" : ["int", "4" ],
7 | "multiplier" : ["int", "4" ],
8 | "stem_multiplier" : ["int", "3" ]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/search-archs/ImageNet-MobileNetV2-X.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "MobileNetV2"],
4 | "width_mult" : ["float", 1.0],
5 | "dropout" : ["float", 0.0],
6 | "input_channel" : ["int", 32],
7 | "last_channel" : ["int", 1280],
8 | "block_name" : ["str", "InvertedResidual"]
9 | }
10 |
--------------------------------------------------------------------------------
/configs/search-archs/ImageNet-ResNet18V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [2,2,2,2]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/search-archs/ImageNet-ResNet34V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "BasicBlock"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/search-archs/ImageNet-ResNet50V1.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "resnet"],
4 | "block_name" : ["str", "Bottleneck"],
5 | "layers" : ["int", [3,4,6,3]],
6 | "deep_stem" : ["bool", 0],
7 | "zero_init_residual" : ["bool", "1"]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/search-opts/CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "300"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "arch_LR" : ["float", "0.001"],
13 | "arch_decay" : ["float", "0.001"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/search-opts/CIFARX.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "600"],
5 | "warmup" : ["int", "0"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "arch_LR" : ["float", "0.001"],
13 | "arch_decay" : ["float", "0.001"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/search-opts/CIFARXX.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "900"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "arch_LR" : ["float", "0.001"],
13 | "arch_decay" : ["float", "0.001"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/search-opts/DARTS-NASNet-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "50"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0003"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "64"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/search-opts/GDAS-NASNet-CIFAR.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "LR" : ["float", "0.025"],
4 | "eta_min" : ["float", "0.001"],
5 | "epochs" : ["int", "250"],
6 | "warmup" : ["int", "0"],
7 | "optim" : ["str", "SGD"],
8 | "decay" : ["float", "0.0005"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "batch_size": ["int", "256"]
13 | }
14 |
--------------------------------------------------------------------------------
/configs/search-opts/ImageNet-MobileFast.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "150"],
5 | "warmup" : ["int", "0"],
6 | "gamma" : ["float", "0.98"],
7 | "optim" : ["str", "SGD"],
8 | "LR" : ["float", "0.05"],
9 | "decay" : ["float", "0.00004"],
10 | "momentum" : ["float", "0.9"],
11 | "nesterov" : ["bool", "0"],
12 | "criterion": ["str", "Softmax"],
13 | "arch_LR" : ["float", "0.001"],
14 | "arch_decay" : ["float", "0.001"]
15 | }
16 |
--------------------------------------------------------------------------------
/configs/search-opts/ImageNet-ResNet.config:
--------------------------------------------------------------------------------
1 | {
2 | "scheduler": ["str", "cos"],
3 | "eta_min" : ["float", "0.0"],
4 | "epochs" : ["int", "115"],
5 | "warmup" : ["int", "5"],
6 | "optim" : ["str", "SGD"],
7 | "LR" : ["float", "0.1"],
8 | "decay" : ["float", "0.0001"],
9 | "momentum" : ["float", "0.9"],
10 | "nesterov" : ["bool", "1"],
11 | "criterion": ["str", "Softmax"],
12 | "arch_LR" : ["float", "0.001"],
13 | "arch_decay" : ["float", "0.001"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/temps/T-MobileNetV2-X.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str", "imagenet"],
3 | "arch" : ["str", "MobileNetV2"],
4 | "dropout" : ["float", 0.0],
5 | "super_type" : ["str", "infer-shape"],
6 | "xchannels" : ["str", "3-32 32-16 16-96-24 24-86-24 24-86-32 32-96-32 32-77-32 32-77-32 32-231-48 48-269-48 48-269-64 64-269-64 64-192-67 67-246-77 77-288-96 96-346-96 96-576-160 160-480-128 128-576-320 320-768"],
7 | "xblocks" : ["int", [1, 2, 3, 3, 3, 3, 1]]
8 | }
9 |
--------------------------------------------------------------------------------
/configs/temps/T-ResNet18.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "BasicBlock"],
5 | "layers" : ["int" , ["2", "2", "2", "2"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "class_num" : ["int" , "1000"],
8 | "search_mode" : ["str" , "shape"],
9 | "xchannels" : ["int" , ["3", "48", "64", "38", "25", "57", "102", "89", "102", "115", "128", "230", "230", "256", "153", "358", "358", "460"]],
10 | "xblocks" : ["int" , ["2", "2", "2", "2"]],
11 | "super_type" : ["str" , "infer-shape"],
12 | "estimated_FLOP" : ["float" , "953.381522"],
13 | "zero_init_residual" : ["bool" , "1"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/temps/T-ResNet50.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "Bottleneck"],
5 | "layers" : ["int" , ["3", "4", "6", "3"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "zero_init_residual" : ["bool" , "1"],
8 | "class_num" : ["int" , "1000"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "45", "45", "30", "102", "33", "60", "154", "68", "70", "180", "38", "38", "307", "38", "38", "410", "64", "128", "358", "38", "51", "256", "76", "76", "512", "76", "76", "512", "179", "256", "614", "100", "102", "307", "179", "230", "614", "204", "102", "307", "153", "153", "1228", "512", "512", "1434", "512", "512", "1844"]],
11 | "xblocks" : ["int" , ["3", "4", "5", "3"]],
12 | "super_type" : ["str" , "infer-shape"],
13 | "estimated_FLOP" : ["float" , "2291.316289"]
14 | }
15 |
--------------------------------------------------------------------------------
/configs/temps/X-ResNet18.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "BasicBlock"],
5 | "layers" : ["int" , ["2", "2", "2", "2"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "zero_init_residual" : ["bool" , "1"],
8 | "class_num" : ["int" , "1000"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "64", "25", "64", "38", "19", "128", "128", "38", "38", "256", "256", "256", "256", "512", "512", "512", "512"]],
11 | "xblocks" : ["int" , ["1", "1", "2", "2"]],
12 | "super_type" : ["str" , "infer-shape"],
13 | "estimated_FLOP" : ["float" , "1120.44032"]
14 | }
--------------------------------------------------------------------------------
/configs/temps/X-ResNet50.config:
--------------------------------------------------------------------------------
1 | {
2 | "dataset" : ["str" , "imagenet"],
3 | "arch" : ["str" , "resnet"],
4 | "block_name" : ["str" , "Bottleneck"],
5 | "layers" : ["int" , ["3", "4", "6", "3"]],
6 | "deep_stem" : ["bool" , "0"],
7 | "zero_init_residual" : ["bool" , "1"],
8 | "class_num" : ["int" , "1000"],
9 | "search_mode" : ["str" , "shape"],
10 | "xchannels" : ["int" , ["3", "64", "64", "64", "230", "25", "19", "128", "19", "38", "76", "128", "128", "512", "38", "38", "204", "38", "51", "256", "51", "51", "153", "256", "256", "1024", "256", "256", "1024", "256", "256", "921", "256", "256", "1024", "76", "76", "614", "102", "76", "307", "512", "512", "2048", "512", "512", "2048", "512", "512", "2048"]],
11 | "xblocks" : ["int" , ["1", "1", "2", "3"]],
12 | "super_type" : ["str" , "infer-shape"],
13 | "estimated_FLOP" : ["float" , "1680.93696"]
14 | }
--------------------------------------------------------------------------------
/configs/yaml.data/cifar10.test:
--------------------------------------------------------------------------------
1 | class_or_func: CIFAR10
2 | module_path: torchvision.datasets
3 | args: []
4 | kwargs:
5 | train: False
6 | download: True
7 | transform:
8 | class_or_func: Compose
9 | module_path: torchvision.transforms
10 | args:
11 | -
12 | - class_or_func: ToTensor
13 | module_path: torchvision.transforms
14 | args: []
15 | kwargs: {}
16 | - class_or_func: Normalize
17 | module_path: torchvision.transforms
18 | args: []
19 | kwargs:
20 | mean: [0.491, 0.482, 0.447]
21 | std: [0.247, 0.244, 0.262]
22 | kwargs: {}
23 |
--------------------------------------------------------------------------------
/configs/yaml.data/cifar10.train:
--------------------------------------------------------------------------------
1 | class_or_func: CIFAR10
2 | module_path: torchvision.datasets
3 | args: []
4 | kwargs:
5 | train: True
6 | download: True
7 | transform:
8 | class_or_func: Compose
9 | module_path: torchvision.transforms
10 | args:
11 | -
12 | - class_or_func: RandomHorizontalFlip
13 | module_path: torchvision.transforms
14 | args: []
15 | kwargs: {}
16 | - class_or_func: RandomCrop
17 | module_path: torchvision.transforms
18 | args: [32]
19 | kwargs: {padding: 4}
20 | - class_or_func: ToTensor
21 | module_path: torchvision.transforms
22 | args: []
23 | kwargs: {}
24 | - class_or_func: Normalize
25 | module_path: torchvision.transforms
26 | args: []
27 | kwargs:
28 | mean: [0.491, 0.482, 0.447]
29 | std: [0.247, 0.244, 0.262]
30 | kwargs: {}
31 |
--------------------------------------------------------------------------------
/configs/yaml.loss/cross-entropy:
--------------------------------------------------------------------------------
1 | class_or_func: CrossEntropyLoss
2 | module_path: torch.nn
3 | args: []
4 | kwargs: {}
5 |
--------------------------------------------------------------------------------
/configs/yaml.loss/top1-ce:
--------------------------------------------------------------------------------
1 | class_or_func: ComposeMetric
2 | module_path: xautodl.xmisc.meter_utils
3 | args:
4 | - class_or_func: Top1AccMetric
5 | module_path: xautodl.xmisc.meter_utils
6 | args: [False]
7 | kwargs: {}
8 | - class_or_func: CrossEntropyMetric
9 | module_path: xautodl.xmisc.meter_utils
10 | args: [False]
11 | kwargs: {}
12 | kwargs: {}
13 |
--------------------------------------------------------------------------------
/configs/yaml.loss/top1.acc:
--------------------------------------------------------------------------------
1 | class_or_func: Top1AccMetric
2 | module_path: xautodl.xmisc.meter_utils
3 | args: [False]
4 | kwargs: {}
5 |
--------------------------------------------------------------------------------
/configs/yaml.model/vit-cifar10.s0:
--------------------------------------------------------------------------------
1 | class_or_func: get_transformer
2 | module_path: xautodl.xmodels.transformers
3 | args: [vit-cifar10-p4-d4-h4-c32]
4 | kwargs: {}
5 |
--------------------------------------------------------------------------------
/configs/yaml.opt/vit.cifar:
--------------------------------------------------------------------------------
1 | class_or_func: Adam
2 | module_path: torch.optim
3 | args: []
4 | kwargs:
5 | betas: [0.9, 0.999]
6 | weight_decay: 0.1
7 | amsgrad: False
8 |
--------------------------------------------------------------------------------
/docs/ICCV-2019-SETN.md:
--------------------------------------------------------------------------------
1 | # [One-Shot Neural Architecture Search via Self-Evaluated Template Network](https://arxiv.org/abs/1910.05733)
2 |
3 |
4 |
5 | Highlight: we equip one-shot NAS with an architecture sampler and train network weights using uniform sampling.
6 |
7 | One-Shot Neural Architecture Search via Self-Evaluated Template Network is accepted by ICCV 2019.
8 |
9 |
10 | ## Requirements and Preparation
11 |
12 | Please install `Python>=3.6` and `PyTorch>=1.2.0`.
13 |
14 | ### Useful tools
15 | 1. Compute the number of parameters and FLOPs of a model:
16 | ```
17 | from utils import get_model_infos
18 | flop, param = get_model_infos(net, (1,3,32,32))
19 | ```
20 |
21 | 2. Different NAS-searched architectures are defined [here](https://github.com/D-X-Y/AutoDL-Projects/blob/main/lib/nas_infer_model/DXYs/genotypes.py).
22 |
23 |
24 | ## Usage
25 |
26 | Please use the following scripts to train the SETN-searched CNN on CIFAR-10, CIFAR-100, and ImageNet.
27 | ```
28 | CUDA_VISIBLE_DEVICES=0 bash ./scripts/nas-infer-train.sh cifar10 SETN 96 -1
29 | CUDA_VISIBLE_DEVICES=0 bash ./scripts/nas-infer-train.sh cifar100 SETN 96 -1
30 | CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./scripts/nas-infer-train.sh imagenet-1k SETN 256 -1
31 | ```
32 |
33 | ### Searching on the NAS-Bench-201 search space
34 | The searching codes of SETN on a small search space (NAS-Bench-201).
35 | ```
36 | CUDA_VISIBLE_DEVICES=0 bash ./scripts-search/algos/SETN.sh cifar10 1 -1
37 | ```
38 |
39 | **Searching on the NASNet search space** is not ready yet.
40 |
41 |
42 | # Citation
43 |
44 | If you find that this project helps your research, please consider citing the following paper:
45 | ```
46 | @inproceedings{dong2019one,
47 | title = {One-Shot Neural Architecture Search via Self-Evaluated Template Network},
48 | author = {Dong, Xuanyi and Yang, Yi},
49 | booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
50 | pages = {3681--3690},
51 | year = {2019}
52 | }
53 | ```
54 |
--------------------------------------------------------------------------------
/docs/ICLR-2019-DARTS.md:
--------------------------------------------------------------------------------
1 | # DARTS: Differentiable Architecture Search
2 |
3 | DARTS: Differentiable Architecture Search is accepted by ICLR 2019.
4 | In this paper, Hanxiao proposed a differentiable neural architecture search method, named as DARTS.
5 | Recently, DARTS has become very popular due to its simplicity and performance.
6 |
7 | ## Run DARTS on the NAS-Bench-201 search space
8 | ```
9 | CUDA_VISIBLE_DEVICES=0 bash ./scripts-search/algos/DARTS-V2.sh cifar10 1 -1
10 | CUDA_VISIBLE_DEVICES=0 bash ./scripts-search/algos/GDAS.sh cifar10 1 -1
11 | ```
12 |
13 | ## Run the first-order DARTS on the NASNet/DARTS search space
14 | This command will start to use the first-order DARTS to search architectures on the DARTS search space.
15 | ```
16 | CUDA_VISIBLE_DEVICES=0 bash ./scripts-search/DARTS1V-search-NASNet-space.sh cifar10 -1
17 | ```
18 |
19 | After searching, if you want to train the searched architecture found by the above scripts, you need to add the config of that architecture (will be printed in log) in [genotypes.py](https://github.com/D-X-Y/AutoDL-Projects/blob/main/lib/nas_infer_model/DXYs/genotypes.py).
20 | In the future, I will add a more elegant way to train the searched architecture from the DARTS search space.
21 |
22 |
23 | # Citation
24 |
25 | ```
26 | @inproceedings{liu2019darts,
27 | title = {{DARTS}: Differentiable architecture search},
28 | author = {Liu, Hanxiao and Simonyan, Karen and Yang, Yiming},
29 | booktitle = {International Conference on Learning Representations (ICLR)},
30 | year = {2019}
31 | }
32 | ```
33 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 |
4 | nats_bench>=1.4
5 | nas_201_api
6 |
--------------------------------------------------------------------------------
/exps/NAS-Bench-201-algos/README.md:
--------------------------------------------------------------------------------
1 | # NAS Algorithms evaluated in NAS-Bench-201
2 |
3 | The Python files in this folder are used to reproduce the results in our NAS-Bench-201 paper.
4 |
5 | We have upgraded the codes to be more general and extendable at [NATS-algos](https://github.com/D-X-Y/AutoDL-Projects/tree/main/exps/NATS-algos).
6 |
7 | **Notice**: On 24 May 2021, the code in the `AutoDL` repo was re-organized. If you find a `module not found` error, please let me know. I will fix it ASAP.
8 |
--------------------------------------------------------------------------------
/exps/NAS-Bench-201/dist-setup.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 #
3 | #####################################################
4 | # [2020.02.25] Initialize the API as v1.1
5 | # [2020.03.09] Upgrade the API to v1.2
6 | # [2020.03.16] Upgrade the API to v1.3
7 | # [2020.06.30] Upgrade the API to v2.0
8 | import os
9 | from setuptools import setup
10 |
11 |
12 | def read(fname="README.md"):
13 | with open(
14 | os.path.join(os.path.dirname(__file__), fname), encoding="utf-8"
15 | ) as cfile:
16 | return cfile.read()
17 |
18 |
19 | setup(
20 | name="nas_bench_201",
21 | version="2.0",
22 | author="Xuanyi Dong",
23 | author_email="dongxuanyi888@gmail.com",
24 | description="API for NAS-Bench-201 (a benchmark for neural architecture search).",
25 | license="MIT",
26 | keywords="NAS Dataset API DeepLearning",
27 | url="https://github.com/D-X-Y/NAS-Bench-201",
28 | packages=["nas_201_api"],
29 | long_description=read("README.md"),
30 | long_description_content_type="text/markdown",
31 | classifiers=[
32 | "Programming Language :: Python",
33 | "Topic :: Database",
34 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
35 | "License :: OSI Approved :: MIT License",
36 | ],
37 | )
38 |
--------------------------------------------------------------------------------
/exps/NAS-Bench-201/show-best.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.01 #
3 | ################################################################################################
4 | # python exps/NAS-Bench-201/show-best.py --api_path $HOME/.torch/NAS-Bench-201-v1_0-e61699.pth #
5 | ################################################################################################
6 | import argparse
7 | from pathlib import Path
8 |
9 | from nas_201_api import NASBench201API as API
10 |
11 | if __name__ == "__main__":
12 | parser = argparse.ArgumentParser("Analysis of NAS-Bench-201")
13 | parser.add_argument(
14 | "--api_path",
15 | type=str,
16 | default=None,
17 | help="The path to the NAS-Bench-201 benchmark file.",
18 | )
19 | args = parser.parse_args()
20 |
21 | meta_file = Path(args.api_path)
22 | assert meta_file.exists(), "invalid path for api : {:}".format(meta_file)
23 |
24 | api = API(str(meta_file))
25 |
26 | # This will show the results of the best architecture based on the validation set of each dataset.
27 | arch_index, accuracy = api.find_best("cifar10-valid", "x-valid", None, None, False)
28 | print("FOR CIFAR-010, using the hyper-parameters with 200 training epochs :::")
29 | print("arch-index={:5d}, arch={:}".format(arch_index, api.arch(arch_index)))
30 | api.show(arch_index)
31 | print("")
32 |
33 | arch_index, accuracy = api.find_best("cifar100", "x-valid", None, None, False)
34 | print("FOR CIFAR-100, using the hyper-parameters with 200 training epochs :::")
35 | print("arch-index={:5d}, arch={:}".format(arch_index, api.arch(arch_index)))
36 | api.show(arch_index)
37 | print("")
38 |
39 | arch_index, accuracy = api.find_best("ImageNet16-120", "x-valid", None, None, False)
40 | print("FOR ImageNet16-120, using the hyper-parameters with 200 training epochs :::")
41 | print("arch-index={:5d}, arch={:}".format(arch_index, api.arch(arch_index)))
42 | api.show(arch_index)
43 | print("")
44 |
--------------------------------------------------------------------------------
/exps/NATS-algos/README.md:
--------------------------------------------------------------------------------
1 | # NAS Algorithms evaluated in [NATS-Bench](https://arxiv.org/abs/2009.00437)
2 |
3 | The Python files in this folder are used to reproduce the results in ``NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size''.
4 |
5 | - [`search-size.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/search-size.py) contains codes for weight-sharing-based search on the size search space.
6 | - [`search-cell.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/search-cell.py) contains codes for weight-sharing-based search on the topology search space.
7 | - [`bohb.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/bohb.py) contains the BOHB algorithm for both size and topology search spaces.
8 | - [`random_wo_share.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/random_wo_share.py) contains the random search algorithm for both search spaces.
9 | - [`regularized_ea.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/regularized_ea.py) contains the REA algorithm for both search spaces.
10 | - [`reinforce.py`](https://github.com/D-X-Y/AutoDL-Projects/blob/main/exps/NATS-algos/reinforce.py) contains the REINFORCE algorithm for both search spaces.
11 |
12 | ## Requirements
13 |
14 | - `nats_bench`>=v1.2 : you can use `pip install nats_bench` to install or from [sources](https://github.com/D-X-Y/NATS-Bench)
15 | - `hpbandster` : if you want to run BOHB
16 |
17 | ## Citation
18 |
19 | If you find that this project helps your research, please consider citing the related paper:
20 | ```
21 | @article{dong2021nats,
22 | title = {{NATS-Bench}: Benchmarking NAS Algorithms for Architecture Topology and Size},
23 | author = {Dong, Xuanyi and Liu, Lu and Musial, Katarzyna and Gabrys, Bogdan},
24 | doi = {10.1109/TPAMI.2021.3054824},
25 | journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
26 | year = {2021},
27 | note = {\mbox{doi}:\url{10.1109/TPAMI.2021.3054824}}
28 | }
29 | ```
30 |
--------------------------------------------------------------------------------
/exps/experimental/test-dks.py:
--------------------------------------------------------------------------------
1 | from dks.base.activation_getter import (
2 | get_activation_function as _get_numpy_activation_function,
3 | )
4 | from dks.base.activation_transform import _get_activations_params
5 |
6 |
7 | def subnet_max_func(x, r_fn):
8 | depth = 7
9 | res_x = r_fn(x)
10 | x = r_fn(x)
11 | for _ in range(depth):
12 | x = r_fn(r_fn(x)) + x
13 | return max(x, res_x)
14 |
15 |
16 | def subnet_max_func_v2(x, r_fn):
17 | depth = 2
18 | res_x = r_fn(x)
19 |
20 | x = r_fn(x)
21 | for _ in range(depth):
22 | x = 0.8 * r_fn(r_fn(x)) + 0.2 * x
23 |
24 | return max(x, res_x)
25 |
26 |
27 | def get_transformed_activations(
28 | activation_names,
29 | method="TAT",
30 | dks_params=None,
31 | tat_params=None,
32 | max_slope_func=None,
33 | max_curv_func=None,
34 | subnet_max_func=None,
35 | activation_getter=_get_numpy_activation_function,
36 | ):
37 | params = _get_activations_params(
38 | activation_names,
39 | method=method,
40 | dks_params=dks_params,
41 | tat_params=tat_params,
42 | max_slope_func=max_slope_func,
43 | max_curv_func=max_curv_func,
44 | subnet_max_func=subnet_max_func,
45 | )
46 | return params
47 |
48 |
49 | params = get_transformed_activations(
50 | ["swish"], method="TAT", subnet_max_func=subnet_max_func
51 | )
52 | print(params)
53 |
54 | params = get_transformed_activations(
55 | ["leaky_relu"], method="TAT", subnet_max_func=subnet_max_func_v2
56 | )
57 | print(params)
58 |
--------------------------------------------------------------------------------
/exps/experimental/test-dynamic.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 #
3 | #####################################################
4 | # python test-dynamic.py
5 | #####################################################
6 | import sys
7 | from pathlib import Path
8 |
9 | lib_dir = (Path(__file__).parent / ".." / "..").resolve()
10 | print("LIB-DIR: {:}".format(lib_dir))
11 | if str(lib_dir) not in sys.path:
12 | sys.path.insert(0, str(lib_dir))
13 |
14 | from xautodl.datasets.math_core import ConstantFunc
15 | from xautodl.datasets.math_core import GaussianDGenerator
16 |
17 | mean_generator = ConstantFunc(0)
18 | cov_generator = ConstantFunc(1)
19 |
20 | generator = GaussianDGenerator([mean_generator], [[cov_generator]], (-1, 1))
21 | generator(0, 10)
22 |
--------------------------------------------------------------------------------
/exps/experimental/test-flops.py:
--------------------------------------------------------------------------------
1 | import sys, time, random, argparse
2 | from copy import deepcopy
3 | import torchvision.models as models
4 | from pathlib import Path
5 |
6 | lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
7 | if str(lib_dir) not in sys.path:
8 | sys.path.insert(0, str(lib_dir))
9 |
10 | from utils import get_model_infos
11 |
12 | # from models.ImageNet_MobileNetV2 import MobileNetV2
13 | from torchvision.models.mobilenet import MobileNetV2
14 |
15 |
16 | def main(width_mult):
17 | # model = MobileNetV2(1001, width_mult, 32, 1280, 'InvertedResidual', 0.2)
18 | model = MobileNetV2(width_mult=width_mult)
19 | print(model)
20 | flops, params = get_model_infos(model, (2, 3, 224, 224))
21 | print("FLOPs : {:}".format(flops))
22 | print("Params : {:}".format(params))
23 | print("-" * 50)
24 |
25 |
26 | if __name__ == "__main__":
27 | main(1.0)
28 | main(1.4)
29 |
--------------------------------------------------------------------------------
/exps/experimental/test-resnest.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.06 #
3 | #####################################################
4 | # python exps/experimental/test-resnest.py
5 | #####################################################
6 | import sys, time, torch, random, argparse
7 | from PIL import ImageFile
8 |
9 | ImageFile.LOAD_TRUNCATED_IMAGES = True
10 | from copy import deepcopy
11 | from pathlib import Path
12 |
13 | lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
14 | if str(lib_dir) not in sys.path:
15 | sys.path.insert(0, str(lib_dir))
16 | from utils import get_model_infos
17 |
18 | torch.hub.list("zhanghang1989/ResNeSt", force_reload=True)
19 |
20 | for model_name, xshape in [
21 | ("resnest50", (1, 3, 224, 224)),
22 | ("resnest101", (1, 3, 256, 256)),
23 | ("resnest200", (1, 3, 320, 320)),
24 | ("resnest269", (1, 3, 416, 416)),
25 | ]:
26 | # net = torch.hub.load('zhanghang1989/ResNeSt', model_name, pretrained=True)
27 | net = torch.hub.load("zhanghang1989/ResNeSt", model_name, pretrained=False)
28 | print("Model : {:}, input shape : {:}".format(model_name, xshape))
29 | flops, param = get_model_infos(net, xshape)
30 | print("flops : {:.3f}M".format(flops))
31 | print("params : {:.3f}M".format(param))
32 |
--------------------------------------------------------------------------------
/exps/experimental/test-ww.py:
--------------------------------------------------------------------------------
1 | import sys, time, random, argparse
2 | from copy import deepcopy
3 | import torchvision.models as models
4 | from pathlib import Path
5 |
6 | from xautodl.utils import weight_watcher
7 |
8 |
9 | def main():
10 | # model = models.vgg19_bn(pretrained=True)
11 | # _, summary = weight_watcher.analyze(model, alphas=False)
12 | # for key, value in summary.items():
13 | # print('{:10s} : {:}'.format(key, value))
14 |
15 | _, summary = weight_watcher.analyze(models.vgg13(pretrained=True), alphas=False)
16 | print("vgg-13 : {:}".format(summary["lognorm"]))
17 | _, summary = weight_watcher.analyze(models.vgg13_bn(pretrained=True), alphas=False)
18 | print("vgg-13-BN : {:}".format(summary["lognorm"]))
19 | _, summary = weight_watcher.analyze(models.vgg16(pretrained=True), alphas=False)
20 | print("vgg-16 : {:}".format(summary["lognorm"]))
21 | _, summary = weight_watcher.analyze(models.vgg16_bn(pretrained=True), alphas=False)
22 | print("vgg-16-BN : {:}".format(summary["lognorm"]))
23 | _, summary = weight_watcher.analyze(models.vgg19(pretrained=True), alphas=False)
24 | print("vgg-19 : {:}".format(summary["lognorm"]))
25 | _, summary = weight_watcher.analyze(models.vgg19_bn(pretrained=True), alphas=False)
26 | print("vgg-19-BN : {:}".format(summary["lognorm"]))
27 |
28 |
29 | if __name__ == "__main__":
30 | main()
31 |
--------------------------------------------------------------------------------
/notebooks/NATS-Bench/issue-97.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "name": "stdout",
10 | "output_type": "stream",
11 | "text": [
12 | "[2021-03-09 08:44:19] Try to use the default NATS-Bench (size) path from fast_mode=True and path=None.\n"
13 | ]
14 | }
15 | ],
16 | "source": [
17 | "from nats_bench import create\n",
18 | "import numpy as np\n",
19 | "\n",
20 | "# Create the API for size search space\n",
21 | "api = create(None, 'sss', fast_mode=True, verbose=False)"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 2,
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stdout",
31 | "output_type": "stream",
32 | "text": [
33 | "There are 32768 architectures on the size search space\n"
34 | ]
35 | }
36 | ],
37 | "source": [
38 | "print('There are {:} architectures on the size search space'.format(len(api)))\n",
39 | "\n",
40 | "c2acc = dict()\n",
41 | "for index in range(len(api)):\n",
42 | " info = api.get_more_info(index, 'cifar10', hp='90')\n",
43 | " config = api.get_net_config(index, 'cifar10')\n",
44 | " c2acc[config['channels']] = info['test-accuracy']"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 4,
50 | "metadata": {},
51 | "outputs": [
52 | {
53 | "name": "stdout",
54 | "output_type": "stream",
55 | "text": [
56 | "91.08546417236329\n"
57 | ]
58 | }
59 | ],
60 | "source": [
61 | "print(np.mean(list(c2acc.values())))"
62 | ]
63 | }
64 | ],
65 | "metadata": {
66 | "kernelspec": {
67 | "display_name": "Python 3",
68 | "language": "python",
69 | "name": "python3"
70 | },
71 | "language_info": {
72 | "codemirror_mode": {
73 | "name": "ipython",
74 | "version": 3
75 | },
76 | "file_extension": ".py",
77 | "mimetype": "text/x-python",
78 | "name": "python",
79 | "nbconvert_exporter": "python",
80 | "pygments_lexer": "ipython3",
81 | "version": "3.8.3"
82 | }
83 | },
84 | "nbformat": 4,
85 | "nbformat_minor": 4
86 | }
87 |
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/BOHB.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # bash ./scripts-search/NAS-Bench-201-algos/BOHB.sh -1
3 | echo script name: $0
4 | echo $# arguments
5 | if [ "$#" -ne 2 ] ;then
6 | echo "Input illegal number of parameters " $#
7 | echo "Need 2 parameters for dataset and seed"
8 | exit 1
9 | fi
10 | if [ "$TORCH_HOME" = "" ]; then
11 | echo "Must set TORCH_HOME envoriment variable for data dir saving"
12 | exit 1
13 | else
14 | echo "TORCH_HOME : $TORCH_HOME"
15 | fi
16 |
17 | dataset=$1
18 | seed=$2
19 | channel=16
20 | num_cells=5
21 | max_nodes=4
22 | space=nas-bench-201
23 | #benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
24 | benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth
25 |
26 | save_dir=./output/search-cell-${space}/BOHB-${dataset}
27 |
28 | OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/BOHB.py \
29 | --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
30 | --dataset ${dataset} \
31 | --search_space_name ${space} \
32 | --arch_nas_dataset ${benchmark_file} \
33 | --time_budget 12000 \
34 | --n_iters 50 --num_samples 4 --random_fraction 0.0 --bandwidth_factor 3 \
35 | --workers 4 --print_freq 200 --rand_seed ${seed}
36 |
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/DARTS-V1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # bash ./scripts-search/NAS-Bench-201-algos/DARTS-V1.sh cifar10 0 -1
3 | echo script name: $0
4 | echo $# arguments
5 | if [ "$#" -ne 3 ] ;then
6 | echo "Input illegal number of parameters " $#
7 | echo "Need 3 parameters for dataset, tracking_status, and seed"
8 | exit 1
9 | fi
10 | if [ "$TORCH_HOME" = "" ]; then
11 | echo "Must set TORCH_HOME envoriment variable for data dir saving"
12 | exit 1
13 | else
14 | echo "TORCH_HOME : $TORCH_HOME"
15 | fi
16 |
17 | dataset=$1
18 | BN=$2
19 | seed=$3
20 | channel=16
21 | num_cells=5
22 | max_nodes=4
23 | space=nas-bench-201
24 |
25 | if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
26 | data_path="$TORCH_HOME/cifar.python"
27 | else
28 | data_path="$TORCH_HOME/cifar.python/ImageNet16"
29 | fi
30 | #benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
31 | benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth
32 |
33 | save_dir=./output/search-cell-${space}/DARTS-V1-${dataset}-BN${BN}
34 |
35 | OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/DARTS-V1.py \
36 | --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
37 | --dataset ${dataset} --data_path ${data_path} \
38 | --search_space_name ${space} \
39 | --config_path configs/nas-benchmark/algos/DARTS.config \
40 | --arch_nas_dataset ${benchmark_file} \
41 | --track_running_stats ${BN} \
42 | --arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
43 | --workers 4 --print_freq 200 --rand_seed ${seed}
44 |
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/DARTS-V2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # bash ./scripts-search/NAS-Bench-201-algos/DARTS-V2.sh cifar10 0 -1
3 | echo script name: $0
4 | echo $# arguments
5 | if [ "$#" -ne 3 ] ;then
6 | echo "Input illegal number of parameters " $#
7 | echo "Need 3 parameters for dataset, tracking_status, and seed"
8 | exit 1
9 | fi
10 | if [ "$TORCH_HOME" = "" ]; then
11 | echo "Must set TORCH_HOME envoriment variable for data dir saving"
12 | exit 1
13 | else
14 | echo "TORCH_HOME : $TORCH_HOME"
15 | fi
16 |
17 | dataset=$1
18 | BN=$2
19 | seed=$3
20 | channel=16
21 | num_cells=5
22 | max_nodes=4
23 | space=nas-bench-201
24 |
25 | if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
26 | data_path="$TORCH_HOME/cifar.python"
27 | else
28 | data_path="$TORCH_HOME/cifar.python/ImageNet16"
29 | fi
30 | #benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
31 | benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth
32 |
33 | save_dir=./output/search-cell-${space}/DARTS-V2-${dataset}-BN${BN}
34 |
35 | OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/DARTS-V2.py \
36 | --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
37 | --dataset ${dataset} --data_path ${data_path} \
38 | --search_space_name ${space} \
39 | --config_path configs/nas-benchmark/algos/DARTS.config \
40 | --arch_nas_dataset ${benchmark_file} \
41 | --track_running_stats ${BN} \
42 | --arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
43 | --workers 4 --print_freq 200 --rand_seed ${seed}
44 |
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/ENAS.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Efficient Neural Architecture Search via Parameter Sharing, ICML 2018
# usage: bash ./scripts-search/NAS-Bench-201-algos/ENAS.sh cifar10 0 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, BN-tracking-status, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/ENAS-${dataset}-BN${BN}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/ENAS.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--track_running_stats ${BN} \
	--config_path ./configs/nas-benchmark/algos/ENAS.config \
	--controller_entropy_weight 0.0001 \
	--controller_bl_dec 0.99 \
	--controller_train_steps 50 \
	--controller_num_aggregate 20 \
	--controller_num_samples 100 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/GDAS.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# GDAS search on the NAS-Bench-201 topology search space.
# usage: bash ./scripts-search/NAS-Bench-201-algos/GDAS.sh cifar10 0 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, BN-tracking, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/GDAS-${dataset}-BN${BN}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/GDAS.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--config_path configs/nas-benchmark/algos/GDAS.config \
	--tau_max 10 --tau_min 0.1 --track_running_stats ${BN} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/R-EA.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Regularized Evolution for Image Classifier Architecture Search, AAAI 2019
# usage: bash ./scripts-search/NAS-Bench-201-algos/R-EA.sh cifar10 3 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, EA sample size, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for the-dataset-name, the-ea-sample-size and the-seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

#dataset=cifar10
dataset=$1
sample_size=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/R-EA-${dataset}-SS${sample_size}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/R_EA.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--time_budget 12000 \
	--ea_cycles 200 --ea_population 10 --ea_sample_size ${sample_size} --ea_fast_by_api 1 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/RANDOM-NAS.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Random Search and Reproducibility for Neural Architecture Search, UAI 2019
# usage: bash ./scripts-search/NAS-Bench-201-algos/RANDOM-NAS.sh cifar10 0 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, BN-tracking-status, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/RANDOM-NAS-${dataset}-BN${BN}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/RANDOM-NAS.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--track_running_stats ${BN} \
	--arch_nas_dataset ${benchmark_file} \
	--config_path ./configs/nas-benchmark/algos/RANDOM.config \
	--select_num 100 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/README.md:
--------------------------------------------------------------------------------
1 | # 10 NAS algorithms in NAS-Bench-201
2 |
3 | Each script in this folder corresponds to one NAS algorithm; you can simply run it with a single command.
4 |
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/REINFORCE.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# REINFORCE-based architecture search on NAS-Bench-201.
# usage: bash ./scripts-search/NAS-Bench-201-algos/REINFORCE.sh cifar10 0.001 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, learning rate, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, LR, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
LR=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/REINFORCE-${dataset}-${LR}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/reinforce.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--time_budget 12000 \
	--learning_rate ${LR} --EMA_momentum 0.9 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/Random.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Plain random search baseline on NAS-Bench-201.
# usage: bash ./scripts-search/NAS-Bench-201-algos/Random.sh cifar10 -1
echo script name: $0
echo $# arguments
# require exactly 2 positional arguments: dataset and random seed
if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 2 parameters for dataset and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
seed=$2
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/RAND-${dataset}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/RANDOM.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--time_budget 12000 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/SETN.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019
# usage: bash ./scripts-search/NAS-Bench-201-algos/SETN.sh cifar10 0 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, BN-tracking-status, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=$2
seed=$3
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/SETN-${dataset}-BN${BN}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/SETN.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--arch_nas_dataset ${benchmark_file} \
	--config_path configs/nas-benchmark/algos/SETN.config \
	--track_running_stats ${BN} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--select_num 100 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/hps/DARTS-test-Gradient.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# DARTS-V1 hyper-parameter study: sweep the gradient-clipping threshold.
# usage: bash ./scripts-search/NAS-Bench-201-algos/hps/DARTS-test-Gradient.sh cifar10 0 5
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, gradient clip
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, tracking_status, and gradient_clip"
  exit 1
fi
# TORCH_HOME must point at the directory holding datasets and benchmark files
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=$2
gradient_clip=$3
seed=-1
# fixed NAS-Bench-201 search-space hyper-parameters
channel=16
num_cells=5
max_nodes=4
space=nas-bench-201

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi
#benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_0-e61699.pth
benchmark_file=${TORCH_HOME}/NAS-Bench-201-v1_1-096897.pth

save_dir=./output/search-cell-${space}/DARTS-V1-${dataset}-BN${BN}-Gradient${gradient_clip}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/DARTS-V1.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--config_path configs/nas-benchmark/algos/DARTS.config \
	--arch_nas_dataset ${benchmark_file} \
	--track_running_stats ${BN} --gradient_clip ${gradient_clip} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201-algos/hps/GRID-RL.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Grid search over learning rates for the REINFORCE NAS algorithm.
# usage: bash ./scripts-search/NAS-Bench-201-algos/hps/GRID-RL.sh [dataset]
echo script name: $0

# BUG FIX: REINFORCE.sh requires 3 arguments (dataset, LR, seed); the previous
# invocation passed only (LR, seed), so every run exited with an argument error.
# Default to cifar10 so existing zero-argument invocations keep working.
dataset=${1:-cifar10}

#lrs="0.01 0.02 0.1 0.2 0.5 1.0 1.5 2.0 2.5 3.0"
lrs="0.01 0.02 0.1 0.2 0.5"

for lr in ${lrs}
do
  bash ./scripts-search/NAS-Bench-201-algos/REINFORCE.sh ${dataset} ${lr} -1
done
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201/build.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Stage the nas_201_api package into ./output/nas_bench_201_package for PyPI release.
# bash scripts-search/NAS-Bench-201/build.sh
echo script name: $0
echo $# arguments
# this script takes no arguments
if [ "$#" -ne 0 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 0 parameters"
  exit 1
fi

save_dir=./output/nas_bench_201_package
echo "Prepare to build the package in ${save_dir}"
# start from a clean staging directory
rm -rf ${save_dir}
mkdir -p ${save_dir}

#cp NAS-Bench-201.md ${save_dir}/README.md
# drop lines 125-187 of NAS-Bench-201.md from the packaged README
# (presumably repo-internal sections -- TODO confirm the line range is still valid)
sed '125,187d' NAS-Bench-201.md > ${save_dir}/README.md
cp LICENSE.md ${save_dir}/LICENSE.md
cp -r lib/nas_201_api ${save_dir}/
rm -rf ${save_dir}/nas_201_api/__pycache__
cp exps/NAS-Bench-201/dist-setup.py ${save_dir}/setup.py

cd ${save_dir}
# manual follow-up steps: build the distribution and upload to (test) PyPI
# python setup.py sdist bdist_wheel
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# twine upload dist/*
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201/meta-gen.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Generate the meta information (architecture enumeration) for the NAS benchmark.
# usage: bash scripts-search/NAS-Bench-201/meta-gen.sh NAS-BENCH-201 4
echo script name: $0
echo $# arguments
# require exactly 2 positional arguments
if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 2 parameters for save-dir-name and maximum-node-in-cell"
  exit 1
fi

name=$1   # save-dir name component
node=$2   # maximum number of nodes in a cell

save_dir=./output/${name}-${node}

# quote the user-supplied values so a name containing spaces cannot split the command line
python ./exps/NAS-Bench-201/main.py --mode meta --save_dir "${save_dir}" --max_node "${node}"
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201/train-a-net.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Train a single named architecture on all benchmark datasets with three seeds.
# usage: bash ./scripts-search/NAS-Bench-201/train-a-net.sh resnet 16 5
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: network name, channels, number of cells
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for network, channel, num-of-cells"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

model=$1
channel=$2
num_cells=$3

save_dir=./output/NAS-BENCH-201-4/

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201/main.py \
	--mode specific-${model} --save_dir ${save_dir} --max_node 4 \
	--datasets cifar10 cifar10 cifar100 ImageNet16-120 \
	--use_less 0 \
	--splits 1 0 0 0 \
	--xpaths $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python/ImageNet16 \
	--channel ${channel} --num_cells ${num_cells} \
	--workers 4 \
	--seeds 777 888 999
--------------------------------------------------------------------------------
/scripts-search/NAS-Bench-201/train-models.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Train a range of benchmark architectures (or re-train one specific index).
# usage: bash ./scripts-search/NAS-Bench-201/train-models.sh 0/1 0 100 -1 '777 888 999'
echo script name: $0
echo $# arguments
# require exactly 5 positional arguments
if [ "$#" -ne 5 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 5 parameters for use-less-or-not, start-index, end-index, arch-index, and seeds"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

use_less=$1
xstart=$2
xend=$3
arch_index=$4
all_seeds=$5

save_dir=./output/NAS-BENCH-201-4/

# arch_index == -1 means "train the new range"; otherwise re-cover one architecture
if [ "${arch_index}" == "-1" ]; then
  mode=new
else
  mode=cover
fi

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201/main.py \
	--mode ${mode} --save_dir ${save_dir} --max_node 4 \
	--use_less ${use_less} \
	--datasets cifar10 cifar10 cifar100 ImageNet16-120 \
	--splits 1 0 0 0 \
	--xpaths $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python/ImageNet16 \
	--channel 16 --num_cells 5 \
	--workers 4 \
	--srange ${xstart} ${xend} --arch_index ${arch_index} \
	--seeds ${all_seeds}
--------------------------------------------------------------------------------
/scripts-search/NASNet-space-search-by-DARTS1V.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# DARTS (first-order) search on the NASNet/DARTS search space.
# usage: bash ./scripts-search/NASNet-space-search-by-DARTS1V.sh cifar10 -1
echo script name: $0
echo $# arguments
# require exactly 2 positional arguments: dataset and random seed
if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 2 parameters for dataset, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
BN=1
seed=$2
# fixed search hyper-parameters for the DARTS space
channel=16
num_cells=5
max_nodes=4
space=darts

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/search-cell-${space}/DARTS-V1-${dataset}-BN${BN}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/DARTS-V1.py \
	--save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--config_path configs/search-opts/DARTS-NASNet-CIFAR.config \
	--model_config configs/search-archs/DARTS-NASNet-CIFAR.config \
	--track_running_stats ${BN} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NASNet-space-search-by-GDAS-FRC.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# GDAS-FRC search on the NASNet/DARTS search space.
# usage: bash ./scripts-search/NASNet-space-search-by-GDAS-FRC.sh cifar10 0 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, track_running_stats, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
track_running_stats=$2
seed=$3
space=darts

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/search-cell-${space}/GDAS-FRC-${dataset}-BN${track_running_stats}

OMP_NUM_THREADS=4 python ./exps/algos/GDAS.py \
	--save_dir ${save_dir} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--config_path configs/search-opts/GDAS-NASNet-CIFAR.config \
	--model_config configs/search-archs/GDASFRC-NASNet-CIFAR.config \
	--tau_max 10 --tau_min 0.1 --track_running_stats ${track_running_stats} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NASNet-space-search-by-GDAS.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# GDAS search on the NASNet/DARTS search space.
# usage: bash ./scripts-search/NASNet-space-search-by-GDAS.sh cifar10 1 -1
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, track_running_stats, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
track_running_stats=$2
seed=$3
space=darts

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/search-cell-${space}/GDAS-${dataset}-BN${track_running_stats}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/GDAS.py \
	--save_dir ${save_dir} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--config_path configs/search-opts/GDAS-NASNet-CIFAR.config \
	--model_config configs/search-archs/GDAS-NASNet-CIFAR.config \
	--tau_max 10 --tau_min 0.1 --track_running_stats ${track_running_stats} \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NASNet-space-search-by-SETN.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# SETN search on the NASNet/DARTS search space.
# usage: bash ./scripts-search/NASNet-space-search-by-SETN.sh cifar10 1 -1
# [TO BE DONE]
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: dataset, BN-tracking flag, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for dataset, track_running_stats, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
track_running_stats=$2
seed=$3
space=darts

# CIFAR data and ImageNet16 live in different sub-directories under TORCH_HOME
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/search-cell-${space}/SETN-${dataset}-BN${track_running_stats}

OMP_NUM_THREADS=4 python ./exps/NAS-Bench-201-algos/SETN.py \
	--save_dir ${save_dir} \
	--dataset ${dataset} --data_path ${data_path} \
	--search_space_name ${space} \
	--config_path configs/search-opts/SETN-NASNet-CIFAR.config \
	--model_config configs/search-archs/SETN-NASNet-CIFAR.config \
	--track_running_stats ${track_running_stats} \
	--select_num 1000 \
	--arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
	--workers 4 --print_freq 200 --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/NATS/search-size.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run the three size-search algorithms (tas, mask_gumbel, mask_rl) on all datasets.
# usage: bash scripts-search/NATS/search-size.sh 0 0.3 777
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: GPU device, warmup ratio, random seed
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for GPU-device, warmup-ratio, and seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

device=$1
ratio=$2
seed=$3

# TAS (transformable architecture search)
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar10        --data_path $TORCH_HOME/cifar.python            --algo tas --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar100       --data_path $TORCH_HOME/cifar.python            --algo tas --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo tas --warmup_ratio ${ratio} --rand_seed ${seed}

# channel-mask search with Gumbel softmax
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar10        --data_path $TORCH_HOME/cifar.python            --algo mask_gumbel --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar100       --data_path $TORCH_HOME/cifar.python            --algo mask_gumbel --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo mask_gumbel --warmup_ratio ${ratio} --rand_seed ${seed}

# channel-mask search with reinforcement learning
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar10        --data_path $TORCH_HOME/cifar.python            --algo mask_rl --arch_weight_decay 0 --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset cifar100       --data_path $TORCH_HOME/cifar.python            --algo mask_rl --arch_weight_decay 0 --warmup_ratio ${ratio} --rand_seed ${seed}
CUDA_VISIBLE_DEVICES=${device} python ./exps/NATS-algos/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo mask_rl --arch_weight_decay 0 --warmup_ratio ${ratio} --rand_seed ${seed}
--------------------------------------------------------------------------------
/scripts-search/search-depth-gumbel.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Depth search with Gumbel softmax: thin wrapper fixing LR=0.1 and warmup=5.
# usage: bash ./scripts-search/search-depth-gumbel.sh cifar10 ResNet110 CIFARX 0.57 777
set -e
echo script name: $0
echo $# arguments
# require exactly 5 positional arguments
if [ "$#" -ne 5 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 5 parameters for the dataset and the-model-name and the-optimizer and FLOP-ratio and the-random-seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
model=$2
optim=$3
expected_FLOP_ratio=$4
rseed=$5

# delegate to the generic depth-search script with fixed LR (0.1) and warmup (5)
bash ./scripts-search/search-depth-cifar.sh ${dataset} ${model} ${optim} 0.1 5 ${expected_FLOP_ratio} ${rseed}
--------------------------------------------------------------------------------
/scripts-search/search-width-gumbel.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Width search with Gumbel softmax: thin wrapper fixing LR=0.1 and warmup=5.
# usage: bash ./scripts-search/search-width-gumbel.sh cifar10 ResNet110 CIFARX 0.57 777
set -e
echo script name: $0
echo $# arguments
# require exactly 5 positional arguments
if [ "$#" -ne 5 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 5 parameters for the dataset and the-model-name and the-optimizer and FLOP-ratio and the-random-seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
model=$2
optim=$3
expected_FLOP_ratio=$4
rseed=$5

# delegate to the generic width-search script with fixed LR (0.1) and warmup (5)
bash ./scripts-search/search-width-cifar.sh ${dataset} ${model} ${optim} 0.1 5 ${expected_FLOP_ratio} ${rseed}
--------------------------------------------------------------------------------
/scripts/NATS-Bench/train-topology.sh:
--------------------------------------------------------------------------------
#!/bin/bash
##############################################################################
# NATS-Bench: Benchmarking NAS algorithms for Architecture Topology and Size #
##############################################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.01                          #
##############################################################################
# [saturn1] CUDA_VISIBLE_DEVICES=0 bash scripts/NATS-Bench/train-topology.sh 00000-02000 200 "777 888 999"
# [saturn1] CUDA_VISIBLE_DEVICES=0 bash scripts/NATS-Bench/train-topology.sh 02000-04000 200 "777 888 999"
# [saturn1] CUDA_VISIBLE_DEVICES=1 bash scripts/NATS-Bench/train-topology.sh 04000-06000 200 "777 888 999"
# [saturn1] CUDA_VISIBLE_DEVICES=1 bash scripts/NATS-Bench/train-topology.sh 06000-08000 200 "777 888 999"
#
# CUDA_VISIBLE_DEVICES=0 bash scripts/NATS-Bench/train-topology.sh 00000-05000 12 777
# bash ./scripts/NATS-Bench/train-topology.sh 05001-10000 12 777
# bash ./scripts/NATS-Bench/train-topology.sh 10001-14500 12 777
# bash ./scripts/NATS-Bench/train-topology.sh 14501-15624 12 777
#
##############################################################################
echo script name: $0
echo $# arguments
# require exactly 3 positional arguments: index range, hyper-parameter option, seeds
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for start-and-end, hyper-parameters-opt-file, and seeds"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

srange=$1      # architecture index range, e.g. 00000-02000
opt=$2         # hyper-parameter option file id (e.g. 12 or 200 epochs)
all_seeds=$3   # space-separated random seeds
cpus=4

save_dir=./output/NATS-Bench-topology/

OMP_NUM_THREADS=${cpus} python exps/NATS-Bench/main-tss.py \
	--mode new --srange ${srange} --hyper ${opt} --save_dir ${save_dir} \
	--datasets cifar10 cifar10 cifar100 ImageNet16-120 \
	--splits 1 0 0 0 \
	--xpaths $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python \
		 $TORCH_HOME/cifar.python/ImageNet16 \
	--workers ${cpus} \
	--seeds ${all_seeds}
--------------------------------------------------------------------------------
/scripts/TAS/prepare.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Prepare train/validation split files for TAS searching.
# usage: bash ./scripts/TAS/prepare.sh
# CONSISTENCY FIX: every sibling script validates TORCH_HOME before use; without
# this check an unset TORCH_HOME silently resolves dataset roots to /cifar.python.
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
fi
#datasets='cifar10 cifar100 imagenet-1k'
#ratios='0.5 0.8 0.9'
ratios='0.5'
save_dir=./.latent-data/splits

for ratio in ${ratios}
do
  python ./exps/TAS/prepare.py --name cifar10     --root $TORCH_HOME/cifar.python --save ${save_dir}/cifar10-${ratio}.pth     --ratio ${ratio}
  python ./exps/TAS/prepare.py --name cifar100    --root $TORCH_HOME/cifar.python --save ${save_dir}/cifar100-${ratio}.pth    --ratio ${ratio}
  python ./exps/TAS/prepare.py --name imagenet-1k --root $TORCH_HOME/ILSVRC2012   --save ${save_dir}/imagenet-1k-${ratio}.pth --ratio ${ratio}
done
--------------------------------------------------------------------------------
/scripts/base-train.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Train a basic (non-searched) CIFAR model with the given recipe.
# usage: bash ./scripts/base-train.sh cifar10 ResNet110 E300 L1 256 -1
echo script name: $0
echo $# arguments
# require exactly 6 positional arguments
if [ "$#" -ne 6 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 6 parameters for the dataset and the-model-name and epochs and LR and the-batch-size and the-random-seed"
  exit 1
fi
# TORCH_HOME must point at the directory holding the datasets
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
model=$2
epoch=$3
LR=$4
batch=$5
rseed=$6


save_dir=./output/basic/${dataset}/${model}-${epoch}-${LR}-${batch}

python --version

OMP_NUM_THREADS=4 python ./exps/basic/basic-main.py --dataset ${dataset} \
	--data_path $TORCH_HOME/cifar.python \
	--model_config ./configs/archs/CIFAR-${model}.config \
	--optim_config ./configs/opts/CIFAR-${epoch}-W5-${LR}-COS.config \
	--procedure    basic \
	--save_dir     ${save_dir} \
	--cutout_length -1 \
	--batch_size  ${batch} --rand_seed ${rseed} --workers 4 \
	--eval_frequency 1 --print_freq 100 --print_freq_eval 200
--------------------------------------------------------------------------------
/scripts/black.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# bash ./scripts/black.sh
# Format the code base with black and remove caches / build artifacts.

# script=$(readlink -f "$0")
# scriptpath=$(dirname "$script")
# echo $scriptpath

# delete Python cache files
# NOTE: use a null-delimited pipeline so paths containing spaces are handled
# safely, and `xargs -r` so rm is not invoked at all when nothing matches.
# `\.pyc$` is anchored like `\.pyo$` to avoid matching unrelated names.
find . -print0 | grep -zE "(__pycache__|\.pyc$|\.DS_Store|\.pyo$)" | xargs -0 -r rm -rf

black ./tests/
black ./xautodl/procedures
black ./xautodl/datasets
black ./xautodl/xlayers
black ./exps/trading
rm -rf ./xautodl.egg-info
rm -rf ./build
rm -rf ./dist
rm -rf ./.pytest_cache
--------------------------------------------------------------------------------
/scripts/experimental/train-vit.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# bash ./scripts/experimental/train-vit.sh cifar10 -1
# Train an experimental ViT model on the given dataset.
echo script name: $0
echo $# arguments
if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 2 parameters for dataset and random-seed"
  exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
  # fixed typo: "envoriment" -> "environment"
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
rseed=$2

save_dir=./outputs/${dataset}/vit-experimental

python --version

# NOTE(review): the model config is hard-coded to vit-cifar10.s0 even though
# ${dataset} is a parameter — confirm this is intentional for other datasets.
python ./exps/basic/xmain.py --save_dir ${save_dir} --rand_seed ${rseed} \
  --train_data_config ./configs/yaml.data/${dataset}.train \
  --valid_data_config ./configs/yaml.data/${dataset}.test \
  --data_path $TORCH_HOME/cifar.python \
  --model_config ./configs/yaml.model/vit-cifar10.s0 \
  --optim_config ./configs/yaml.opt/vit.cifar \
  --loss_config ./configs/yaml.loss/cross-entropy \
  --metric_config ./configs/yaml.loss/top-ce \
  --batch_size 256 \
  --lr 0.003 --weight_decay 0.3 --scheduler warm-cos --steps 10000
--------------------------------------------------------------------------------
/scripts/nas-infer-train.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# bash ./scripts/nas-infer-train.sh cifar10 SETN 256 -1
# Retrain (infer-train) a NAS-discovered architecture on the given dataset.
echo script name: $0
echo $# arguments
if [ "$#" -ne 4 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 4 parameters for dataset, the-model-name, the-batch-size and the-random-seed"
  exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
  # fixed typo: "envoriment" -> "environment"
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
model=$2
batch=$3
rseed=$4

if [ ${dataset} == 'cifar10' ] || [ ${dataset} == 'cifar100' ]; then
  xpath=$TORCH_HOME/cifar.python
  base=CIFAR
  workers=4
  cutout_length=16
elif [ ${dataset} == 'imagenet-1k' ]; then
  xpath=$TORCH_HOME/ILSVRC2012
  base=IMAGENET
  workers=28
  cutout_length=-1
else
  # BUGFIX: print the diagnostic BEFORE exiting — the original exited first,
  # making the echo unreachable.
  echo 'Unknown dataset: '${dataset}
  exit 1
fi

SAVE_ROOT="./output"

save_dir=${SAVE_ROOT}/nas-infer/${dataset}-${model}-${batch}

python --version

python ./exps/basic/basic-main.py --dataset ${dataset} \
  --data_path ${xpath} --model_source nas \
  --model_config ./configs/archs/NAS-${base}-${model}.config \
  --optim_config ./configs/opts/NAS-${base}.config \
  --procedure basic \
  --save_dir ${save_dir} \
  --cutout_length ${cutout_length} \
  --batch_size ${batch} --rand_seed ${rseed} --workers ${workers} \
  --eval_frequency 1 --print_freq 500 --print_freq_eval 1000
--------------------------------------------------------------------------------
/scripts/retrain-searched-net.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# bash ./scripts/retrain-searched-net.sh cifar10 ${NAME} ${PATH} 256 -1
# Retrain a searched architecture loaded from an external checkpoint file.
echo script name: $0
echo $# arguments
if [ "$#" -ne 5 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 5 parameters for dataset, the save dir base name, the model path, the batch size, the random seed"
  exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
  # fixed typo: "envoriment" -> "environment"
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
save_name=$2
model_path=$3
batch=$4
rseed=$5

if [ ${dataset} == 'cifar10' ] || [ ${dataset} == 'cifar100' ]; then
  xpath=$TORCH_HOME/cifar.python
  base=CIFAR
  workers=4
  cutout_length=16
elif [ ${dataset} == 'imagenet-1k' ]; then
  xpath=$TORCH_HOME/ILSVRC2012
  base=IMAGENET
  workers=28
  cutout_length=-1
else
  # BUGFIX: print the diagnostic BEFORE exiting — the original exited first,
  # making the echo unreachable.
  echo 'Unknown dataset: '${dataset}
  exit 1
fi

SAVE_ROOT="./output"

save_dir=${SAVE_ROOT}/nas-infer/${dataset}-BS${batch}-${save_name}

python --version

python ./exps/basic/basic-main.py --dataset ${dataset} \
  --data_path ${xpath} --model_source autodl-searched \
  --model_config ./configs/archs/NAS-${base}-none.config \
  --optim_config ./configs/opts/NAS-${base}.config \
  --extra_model_path ${model_path} \
  --procedure basic \
  --save_dir ${save_dir} \
  --cutout_length ${cutout_length} \
  --batch_size ${batch} --rand_seed ${rseed} --workers ${workers} \
  --eval_frequency 1 --print_freq 500 --print_freq_eval 1000
--------------------------------------------------------------------------------
/scripts/tas-infer-train.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# bash ./scripts/tas-infer-train.sh cifar10 C100-ResNet32 -1
# Train the TAS-searched network twice: normal training and KD training.
set -e
echo script name: $0
echo $# arguments
if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 3 parameters for the dataset and the-config-name and the-random-seed"
  exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
  # fixed typo: "envoriment" -> "environment"
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
model=$2
rseed=$3
batch=256

save_dir=./output/search-shape/TAS-INFER-${dataset}-${model}

if [ ${dataset} == 'cifar10' ] || [ ${dataset} == 'cifar100' ]; then
  xpath=$TORCH_HOME/cifar.python
  opt_config=./configs/opts/CIFAR-E300-W5-L1-COS.config
  workers=4
elif [ ${dataset} == 'imagenet-1k' ]; then
  xpath=$TORCH_HOME/ILSVRC2012
  #opt_config=./configs/opts/ImageNet-E120-Cos-Smooth.config
  opt_config=./configs/opts/RImageNet-E120-Cos-Soft.config
  workers=28
else
  echo 'Unknown dataset: '${dataset}
  exit 1
fi

python --version

# normal training
xsave_dir=${save_dir}-NMT
OMP_NUM_THREADS=4 python ./exps/basic/basic-main.py --dataset ${dataset} \
  --data_path ${xpath} \
  --model_config ./configs/NeurIPS-2019/${model}.config \
  --optim_config ${opt_config} \
  --procedure basic \
  --save_dir ${xsave_dir} \
  --cutout_length -1 \
  --batch_size ${batch} --rand_seed ${rseed} --workers ${workers} \
  --eval_frequency 1 --print_freq 100 --print_freq_eval 200

# KD training
xsave_dir=${save_dir}-KDT
OMP_NUM_THREADS=4 python ./exps/basic/KD-main.py --dataset ${dataset} \
  --data_path ${xpath} \
  --model_config ./configs/NeurIPS-2019/${model}.config \
  --optim_config ${opt_config} \
  --KD_checkpoint ./.latent-data/basemodels/${dataset}/${model}.pth \
  --procedure Simple-KD \
  --save_dir ${xsave_dir} \
  --KD_alpha 0.9 --KD_temperature 4 \
  --cutout_length -1 \
  --batch_size ${batch} --rand_seed ${rseed} --workers ${workers} \
  --eval_frequency 1 --print_freq 100 --print_freq_eval 200
--------------------------------------------------------------------------------
/scripts/trade/baseline.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Run the trading baseline algorithms one by one on a given market.
#
# bash scripts/trade/baseline.sh 0 csi300
# bash scripts/trade/baseline.sh 1 csi100
# bash scripts/trade/baseline.sh 1 all
#
set -e
echo script name: $0
echo $# arguments

if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  exit 1
fi

gpu=$1
market=$2

# algorithms="NAIVE-V1 NAIVE-V2 MLP GRU LSTM ALSTM XGBoost LightGBM SFM TabNet DoubleE"
algorithms=(XGBoost LightGBM SFM TabNet DoubleE)

for alg in "${algorithms[@]}"; do
  python exps/trading/baselines.py --alg ${alg} --gpu ${gpu} --market ${market}
done
--------------------------------------------------------------------------------
/scripts/trade/tsf-all.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Sweep TSF models over all depths (1-8) for each channel width.
# Args: <gpu> <market> <drop>
#
# bash scripts/trade/tsf-all.sh 0 csi300 0_0
# bash scripts/trade/tsf-all.sh 0 csi300 0.1_0
# bash scripts/trade/tsf-all.sh 1 all 0_0
#
set -e
echo script name: $0
echo $# arguments

if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  exit 1
fi

gpu=$1
market=$2
drop=$3

channels="6 12 24 32 48 64"
#depths="1 2 3 4 5 6 7 8"

# One baselines.py invocation per channel width, passing all eight depths
# at once so they can share the dataset (--shared_dataset True).
for channel in ${channels}
do
  python exps/trading/baselines.py --alg TSF-1x${channel}-drop${drop} \
                                         TSF-2x${channel}-drop${drop} \
                                         TSF-3x${channel}-drop${drop} \
                                         TSF-4x${channel}-drop${drop} \
                                         TSF-5x${channel}-drop${drop} \
                                         TSF-6x${channel}-drop${drop} \
                                         TSF-7x${channel}-drop${drop} \
                                         TSF-8x${channel}-drop${drop} \
      --gpu ${gpu} --market ${market} --shared_dataset True
done
--------------------------------------------------------------------------------
/scripts/trade/tsf-time.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Run a base TSF algorithm across several training start dates.
# Args: <gpu> <market> <base-alg-name>
#
# bash scripts/trade/tsf-time.sh 0 csi300 TSF-2x24-drop0_0
# bash scripts/trade/tsf-time.sh 1 csi100 TSF-2x24-drop0_0
# bash scripts/trade/tsf-time.sh 1 all TSF-2x24-drop0_0
#
set -e
echo script name: $0
echo $# arguments

if [ "$#" -ne 3 ] ;then
  echo "Input illegal number of parameters " $#
  exit 1
fi

gpu=$1
market=$2
base=$3
# Training start dates appended (via "s<date>") to the base algorithm name.
xtimes="2008-01-01 2008-07-01 2009-01-01 2009-07-01 2010-01-01 2011-01-01 2012-01-01 2013-01-01"

for xtime in ${xtimes}
do

  python exps/trading/baselines.py --alg ${base}s${xtime} --gpu ${gpu} --market ${market} --shared_dataset False

done
--------------------------------------------------------------------------------
/scripts/trade/tsf.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Run TSF models of a fixed depth across all channel widths.
#
# bash scripts/trade/tsf.sh 0 csi300 3 0_0
# bash scripts/trade/tsf.sh 0 csi300 3 0.1_0
# bash scripts/trade/tsf.sh 1 csi100 3 0.2_0
# bash scripts/trade/tsf.sh 1 all 3 0.1_0
#
set -e
echo script name: $0
echo $# arguments

if [ "$#" -ne 4 ] ;then
  echo "Input illegal number of parameters " $#
  exit 1
fi

gpu=$1
market=$2
depth=$3
drop=$4

channels=(6 12 24 32 48 64)

for channel in "${channels[@]}"; do
  python exps/trading/baselines.py --alg TSF-${depth}x${channel}-drop${drop} --gpu ${gpu} --market ${market}
done
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.05 #
#####################################################
"""The setup function for pypi."""
# The following is to make nats_bench available on Python Package Index (PyPI)
#
# conda install -c conda-forge twine  # Use twine to upload nats_bench to pypi
#
# python setup.py sdist bdist_wheel
# python setup.py --help-commands
# twine check dist/*
#
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# twine upload dist/*
# https://pypi.org/project/xautodl
#
# TODO(xuanyidong): upload it to conda
#
# [2021.06.01] v0.9.9
# [2021.08.14] v1.0.0
#
import os
from setuptools import setup, find_packages

# Package metadata used by setup() below.
NAME = "xautodl"
REQUIRES_PYTHON = ">=3.6"
DESCRIPTION = "Automated Deep Learning Package"

VERSION = "1.0.0"


def read(fname="README.md"):
    """Read a file located next to this setup.py and return its text (UTF-8)."""
    with open(
        os.path.join(os.path.dirname(__file__), fname), encoding="utf-8"
    ) as cfile:
        return cfile.read()


# What packages are required for this module to be executed?
REQUIRED = ["numpy>=1.16.5", "pyyaml>=5.0.0", "fvcore"]

# Ship only the xautodl package; exclude tests, scripts and experiment code.
packages = find_packages(
    exclude=("tests", "scripts", "scripts-search", "lib*", "exps*")
)
print("packages: {:}".format(packages))

setup(
    name=NAME,
    version=VERSION,
    author="Xuanyi Dong",
    author_email="dongxuanyi888@gmail.com",
    description=DESCRIPTION,
    license="MIT Licence",
    keywords="NAS Dataset API DeepLearning",
    url="https://github.com/D-X-Y/AutoDL-Projects",
    packages=packages,
    install_requires=REQUIRED,
    python_requires=REQUIRES_PYTHON,
    # The PyPI long description is the project README rendered as markdown.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Topic :: Database",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: MIT License",
    ],
)
--------------------------------------------------------------------------------
/tests/test_import.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest ./tests/test_import.py #
5 | #####################################################
def test_import():
    """Smoke test: every public xautodl subpackage imports without error."""
    # Same modules, same order as before — only grouped into two statements.
    from xautodl import (
        config_utils,
        datasets,
        log_utils,
        models,
        nas_infer_model,
        procedures,
        trade_models,
        utils,
    )
    from xautodl import (
        xlayers,
        xmisc,
        xmodels,
        spaces,
    )

    print("Check all imports done")
22 |
--------------------------------------------------------------------------------
/tests/test_loader.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest tests/test_loader.py -s #
5 | #####################################################
6 | import unittest
7 | import tempfile
8 | import torch
9 |
10 | from xautodl.datasets import get_datasets
11 |
12 |
def test_simple():
    """Build the cifar10 datasets in a temp dir and iterate one DataLoader."""
    xdir = tempfile.mkdtemp()
    train_data, valid_data, xshape, class_num = get_datasets("cifar10", xdir, -1)
    print(train_data)
    print(valid_data)

    xloader = torch.utils.data.DataLoader(
        train_data, batch_size=256, shuffle=True, num_workers=4, pin_memory=True
    )
    print(xloader)
    print(next(iter(xloader)))

    for i, data in enumerate(xloader):
        print(i)


# BUGFIX: the original called test_simple() unconditionally at module level,
# which ran the (slow, data-downloading) test at import time and duplicated it
# under pytest. Guard it so it only runs when executed as a script.
if __name__ == "__main__":
    test_simple()
30 |
--------------------------------------------------------------------------------
/tests/test_math_static.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest tests/test_math_static.py -s #
5 | #####################################################
6 | import unittest
7 |
8 | from xautodl.datasets.math_core import QuadraticSFunc
9 | from xautodl.datasets.math_core import ConstantFunc
10 |
11 |
class TestConstantFunc(unittest.TestCase):
    """Verify that ConstantFunc evaluates to its constant for every input."""

    def test_simple(self):
        const_fn = ConstantFunc(0.1)
        # The constant 0.1 must be returned regardless of the argument.
        for value in range(100):
            self.assertEqual(const_fn(value), 0.1)
19 |
20 |
class TestQuadraticSFunc(unittest.TestCase):
    """Check QuadraticSFunc against hand-computed values of x^2 + 2x + 1."""

    def test_simple(self):
        func = QuadraticSFunc({0: 1, 1: 2, 2: 1})
        print(func)
        for point in (0, 0.5, 1):
            print("f({:})={:}".format(point, func(point)))
        tolerance = 1e-7
        # Expected values at x = 0, 0.5, 1 for the polynomial above.
        expected = {0: 1.0, 0.5: 0.5 * 0.5 + 2 * 0.5 + 1, 1: 1 + 2 + 1}
        for point, value in expected.items():
            self.assertTrue(abs(func(point) - value) < tolerance)
33 |
--------------------------------------------------------------------------------
/tests/test_super_rearrange.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest ./tests/test_super_rearrange.py -s #
5 | #####################################################
6 | import unittest
7 |
8 | import torch
9 | from xautodl import xlayers
10 |
11 |
class TestSuperReArrange(unittest.TestCase):
    """Test the super re-arrange layer."""

    def test_super_re_arrange(self):
        # Split a (b, c, h, w) image into 4x4 patches and flatten each patch.
        rearrange = xlayers.SuperReArrange(
            "b c (h p1) (w p2) -> b (h w) (c p1 p2)", p1=4, p2=4
        )
        batch = torch.rand((8, 4, 32, 32))
        print("The tensor shape: {:}".format(batch.shape))
        print(rearrange)
        result = rearrange(batch)
        print("The output tensor shape: {:}".format(result.shape))
        # 32x32 image with 4x4 patches -> 64 patches of 4*4*4 values each.
        self.assertEqual(tuple(result.shape), (8, 32 * 32 // 16, 4 * 4 * 4))
25 |
--------------------------------------------------------------------------------
/tests/test_super_vit.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest ./tests/test_super_vit.py -s #
5 | #####################################################
6 | import unittest
7 | from parameterized import parameterized
8 |
9 | import torch
10 | from xautodl.xmodels import transformers
11 | from xautodl.utils.flop_benchmark import count_parameters
12 |
13 |
class TestSuperViT(unittest.TestCase):
    """Test the super vision transformer models."""

    def test_super_vit(self):
        # Forward a small batch through the base ViT-16 model.
        net = transformers.get_transformer("vit-base-16")
        batch = torch.rand((2, 3, 224, 224))
        print("The tensor shape: {:}".format(batch.shape))
        # print(net)
        result = net(batch)
        print("The output tensor shape: {:}".format(result.shape))

    @parameterized.expand(
        [
            ["vit-cifar10-p4-d4-h4-c32", 32],
            ["vit-base-16", 224],
            ["vit-large-16", 224],
            ["vit-huge-14", 224],
        ]
    )
    def test_imagenet(self, name, resolution):
        # Build each named ViT config, run a forward pass, and report its size.
        batch = torch.rand((2, 3, resolution, resolution))
        config = transformers.name2config[name]
        net = transformers.get_transformer(config)
        result = net(batch)
        size = count_parameters(net, "mb", True)
        print(
            "{:10s} : size={:.2f}MB, out-shape: {:}".format(
                name, size, tuple(result.shape)
            )
        )
44 |
--------------------------------------------------------------------------------
/tests/test_synthetic_env.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #
3 | #####################################################
4 | # pytest tests/test_synthetic_env.py -s #
5 | #####################################################
6 | import unittest
7 |
8 | from xautodl.datasets.synthetic_core import get_synthetic_env
9 |
10 |
class TestSynethicEnv(unittest.TestCase):
    """Test the synthetic environment generators (v1-v4)."""

    def test_simple(self):
        # Each env yields (timestamp, (x, y)); x must be 1000 samples of
        # the generator's dimensionality.
        for version in ("v1", "v2", "v3", "v4"):
            env = get_synthetic_env(version=version)
            print(env)
            for timestamp, (x, y) in env:
                self.assertEqual(x.shape, (1000, env._data_generator.ndim))
21 |
--------------------------------------------------------------------------------
/tests/test_synthetic_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest tests/test_synthetic_utils.py -s #
5 | #####################################################
6 | import unittest
7 |
8 | from xautodl.datasets.synthetic_core import TimeStamp
9 |
10 |
class TestTimeStamp(unittest.TestCase):
    """Test the timestamp generator."""

    def test_simple(self):
        # NOTE(review): `mode` is iterated but never passed to TimeStamp —
        # presumably it should parameterize the generator; confirm upstream.
        for mode in (None, "train", "valid", "test"):
            generator = TimeStamp(0, 1)
            print(generator)
            for idx, (index, xtime) in enumerate(generator):
                self.assertTrue(index == idx)
                if idx == 0:
                    # The first timestamp must be exactly the lower bound.
                    self.assertTrue(xtime == 0)
                if idx + 1 == len(generator):
                    # The last timestamp must be (approximately) the upper bound.
                    self.assertTrue(abs(xtime - 1) < 1e-8)
24 |
--------------------------------------------------------------------------------
/tests/test_tas.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | import torch
5 | import torch.nn as nn
6 | import unittest
7 |
8 | from xautodl.models.shape_searchs.SoftSelect import ChannelWiseInter
9 |
10 |
class TestTASFunc(unittest.TestCase):
    """Test the TAS channel-wise interpolation function."""

    def test_channel_interplation(self):
        """Check that the 'v1' and 'v2' interpolation modes agree exactly."""
        tensors = torch.rand((16, 128, 7, 7))

        # BUGFIX: the original used (out_v1 == out_v2).any(), which passes if
        # even a single element matches; the intent is element-wise equality
        # of the two implementations, i.e. torch.equal.
        for oc in range(200, 210):  # up-sampling the channel dimension
            out_v1 = ChannelWiseInter(tensors, oc, "v1")
            out_v2 = ChannelWiseInter(tensors, oc, "v2")
            assert torch.equal(out_v1, out_v2)
        for oc in range(48, 160):  # down- and up-sampling around 128 channels
            out_v1 = ChannelWiseInter(tensors, oc, "v1")
            out_v2 = ChannelWiseInter(tensors, oc, "v2")
            assert torch.equal(out_v1, out_v2)
25 |
--------------------------------------------------------------------------------
/tests/test_torch.sh:
--------------------------------------------------------------------------------
# bash ./tests/test_torch.sh
# Run the GPU pickle round-trip tests: first create/save the model (with
# CUDA visible), then reload it in a CPU-only process.

pytest ./tests/test_torch_gpu_bugs.py::test_create -s
CUDA_VISIBLE_DEVICES="" pytest ./tests/test_torch_gpu_bugs.py::test_load -s
--------------------------------------------------------------------------------
/tests/test_torch_gpu_bugs.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | # pytest ./tests/test_torch_gpu_bugs.py::test_create
5 | #
6 | # CUDA_VISIBLE_DEVICES="" pytest ./tests/test_torch_gpu_bugs.py::test_load
7 | #####################################################
8 | import os, sys, time, torch
9 | import pickle
10 | import tempfile
11 | from pathlib import Path
12 |
13 | root_dir = (Path(__file__).parent / ".." / "..").resolve()
14 |
15 | from xautodl.trade_models.quant_transformer import QuantTransformer
16 |
17 |
def test_create():
    """Test the basic quant-model."""
    # Skip silently when no GPU is present — the model is built on GPU 0.
    if not torch.cuda.is_available():
        return
    model = QuantTransformer(GPU=0)
    cache_dir = root_dir / "tests" / ".pytest_cache"
    cache_dir.mkdir(parents=True, exist_ok=True)
    out_file = cache_dir / "quant-model.pkl"
    # Move to CPU first so the pickle does not capture CUDA storage.
    model.to("cpu")
    with out_file.open("wb") as stream:
        pickle.dump(model, stream)
    print("save into {:}".format(out_file))
33 |
34 |
def test_load():
    """Reload the pickled quant-model written by test_create and inspect it."""
    cache_file = root_dir / "tests" / ".pytest_cache" / "quant-model.pkl"
    with cache_file.open("rb") as stream:
        restored = pickle.load(stream)
    print(restored.model)
    print(restored.train_optimizer)
41 |
--------------------------------------------------------------------------------
/xautodl/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.05 #
3 | #####################################################
4 | # An Automated Deep Learning Package to support #
5 | # research activities. #
6 | #####################################################
7 |
8 |
def version():
    """Return the current version string of xautodl.

    Release history:
        0.9.9 - 2021.06.01
        1.0.0 - 2021.08.14
    """
    return "1.0.0"
13 |
--------------------------------------------------------------------------------
/xautodl/config_utils/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | # general config related functions
5 | from .config_utils import load_config, dict2config, configure2str
6 |
7 | # the args setting for different experiments
8 | from .basic_args import obtain_basic_args
9 | from .attention_args import obtain_attention_args
10 | from .random_baseline import obtain_RandomSearch_args
11 | from .cls_kd_args import obtain_cls_kd_args
12 | from .cls_init_args import obtain_cls_init_args
13 | from .search_single_args import obtain_search_single_args
14 | from .search_args import obtain_search_args
15 |
16 | # for network pruning
17 | from .pruning_args import obtain_pruning_args
18 |
19 | # utils for args
20 | from .args_utils import arg_str2bool
21 |
--------------------------------------------------------------------------------
/xautodl/config_utils/args_utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
def arg_str2bool(v):
    """Parse an argparse flag value into a bool.

    Accepts real booleans unchanged, and the usual textual spellings
    (case-insensitive): yes/true/t/y/1 -> True, no/false/f/n/0 -> False.

    Raises:
        argparse.ArgumentTypeError: for unrecognized strings and — unlike the
            original, which crashed with AttributeError on `v.lower()` — for
            any non-str, non-bool input.
    """
    if isinstance(v, bool):
        return v
    if isinstance(v, str):
        text = v.lower()
        if text in ("yes", "true", "t", "y", "1"):
            return True
        if text in ("no", "false", "f", "n", "0"):
            return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
13 |
--------------------------------------------------------------------------------
/xautodl/config_utils/attention_args.py:
--------------------------------------------------------------------------------
1 | import random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_attention_args():
    """Build and parse the CLI arguments for attention-model training.

    Returns the parsed namespace with a random seed filled in when the
    user did not provide a non-negative one.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument("--resume", type=str, help="Resume path.")
    arg_parser.add_argument("--init_model", type=str, help="The initialization model path.")
    arg_parser.add_argument(
        "--model_config", type=str, help="The path to the model configuration"
    )
    arg_parser.add_argument(
        "--optim_config", type=str, help="The path to the optimizer configuration"
    )
    arg_parser.add_argument("--procedure", type=str, help="The procedure basic prefix.")
    # Attention-specific knobs.
    arg_parser.add_argument("--att_channel", type=int, help=".")
    arg_parser.add_argument("--att_spatial", type=str, help=".")
    arg_parser.add_argument("--att_active", type=str, help=".")
    add_shared_args(arg_parser)
    # Optimization options
    arg_parser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    parsed = arg_parser.parse_args()

    # Draw a random seed when none (or a negative sentinel) was supplied.
    if parsed.rand_seed is None or parsed.rand_seed < 0:
        parsed.rand_seed = random.randint(1, 100000)
    assert parsed.save_dir is not None, "save-path argument can not be None"
    return parsed
33 |
--------------------------------------------------------------------------------
/xautodl/config_utils/basic_args.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
3 | ##################################################
4 | import random, argparse
5 | from .share_args import add_shared_args
6 |
7 |
def obtain_basic_args():
    """Build and parse the CLI arguments for basic classification training.

    Returns the parsed namespace; fills in a random seed when the user did
    not provide a non-negative one, and requires --save_dir to be set.
    """
    parser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--resume", type=str, help="Resume path.")
    parser.add_argument("--init_model", type=str, help="The initialization model path.")
    parser.add_argument(
        "--model_config", type=str, help="The path to the model configuration"
    )
    parser.add_argument(
        "--optim_config", type=str, help="The path to the optimizer configuration"
    )
    parser.add_argument("--procedure", type=str, help="The procedure basic prefix.")
    parser.add_argument(
        "--model_source",
        type=str,
        default="normal",
        # fixed typo in user-facing help text: "defination" -> "definition"
        help="The source of model definition.",
    )
    parser.add_argument(
        "--extra_model_path",
        type=str,
        default=None,
        help="The extra model ckp file (help to indicate the searched architecture).",
    )
    add_shared_args(parser)
    # Optimization options
    parser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    args = parser.parse_args()

    # Draw a random seed when none (or a negative sentinel) was supplied.
    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, "save-path argument can not be None"
    return args
45 |
--------------------------------------------------------------------------------
/xautodl/config_utils/cls_init_args.py:
--------------------------------------------------------------------------------
1 | import random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_cls_init_args():
    """Build and parse the CLI arguments for init-checkpoint training.

    Returns the parsed namespace with a random seed filled in when the
    user did not provide a non-negative one.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument("--resume", type=str, help="Resume path.")
    arg_parser.add_argument("--init_model", type=str, help="The initialization model path.")
    arg_parser.add_argument(
        "--model_config", type=str, help="The path to the model configuration"
    )
    arg_parser.add_argument(
        "--optim_config", type=str, help="The path to the optimizer configuration"
    )
    arg_parser.add_argument("--procedure", type=str, help="The procedure basic prefix.")
    arg_parser.add_argument(
        "--init_checkpoint", type=str, help="The checkpoint path to the initial model."
    )
    add_shared_args(arg_parser)
    # Optimization options
    arg_parser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    parsed = arg_parser.parse_args()

    # Draw a random seed when none (or a negative sentinel) was supplied.
    if parsed.rand_seed is None or parsed.rand_seed < 0:
        parsed.rand_seed = random.randint(1, 100000)
    assert parsed.save_dir is not None, "save-path argument can not be None"
    return parsed
33 |
--------------------------------------------------------------------------------
/xautodl/config_utils/cls_kd_args.py:
--------------------------------------------------------------------------------
1 | import random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_cls_kd_args():
    """Parse command-line options for knowledge-distillation classification training."""
    xparser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # String (mostly path-like) options, in their original order.
    for flag, message in (
        ("--resume", "Resume path."),
        ("--init_model", "The initialization model path."),
        ("--model_config", "The path to the model configuration"),
        ("--optim_config", "The path to the optimizer configuration"),
        ("--procedure", "The procedure basic prefix."),
        ("--KD_checkpoint", "The teacher checkpoint in knowledge distillation."),
    ):
        xparser.add_argument(flag, type=str, help=message)
    # Distillation hyper-parameters.
    xparser.add_argument(
        "--KD_alpha", type=float, help="The alpha parameter in knowledge distillation."
    )
    xparser.add_argument(
        "--KD_temperature",
        type=float,
        help="The temperature parameter in knowledge distillation.",
    )
    # parser.add_argument('--KD_feature', type=float, help='Knowledge distillation at the feature level.')
    add_shared_args(xparser)
    # Optimization options
    xparser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    xargs = xparser.parse_args()

    if xargs.rand_seed is None or xargs.rand_seed < 0:
        xargs.rand_seed = random.randint(1, 100000)
    assert xargs.save_dir is not None, "save-path argument can not be None"
    return xargs
44 |
--------------------------------------------------------------------------------
/xautodl/config_utils/pruning_args.py:
--------------------------------------------------------------------------------
1 | import os, sys, time, random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_pruning_args():
    """Parse command-line options for channel-pruning training (with optional KD).

    Validates that --keep_ratio lies in (0, 1] and that --save_dir is provided.
    """
    xparser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Path-like string options, in their original order.
    for flag, message in (
        ("--resume", "Resume path."),
        ("--init_model", "The initialization model path."),
        ("--model_config", "The path to the model configuration"),
        ("--optim_config", "The path to the optimizer configuration"),
        ("--procedure", "The procedure basic prefix."),
    ):
        xparser.add_argument(flag, type=str, help=message)
    xparser.add_argument(
        "--keep_ratio",
        type=float,
        help="The left channel ratio compared to the original network.",
    )
    xparser.add_argument("--model_version", type=str, help="The network version.")
    # Knowledge-distillation hyper-parameters.
    xparser.add_argument(
        "--KD_alpha", type=float, help="The alpha parameter in knowledge distillation."
    )
    xparser.add_argument(
        "--KD_temperature",
        type=float,
        help="The temperature parameter in knowledge distillation.",
    )
    # Regularization weights (feature / conv).
    xparser.add_argument("--Regular_W_feat", type=float, help="The .")
    xparser.add_argument("--Regular_W_conv", type=float, help="The .")
    add_shared_args(xparser)
    # Optimization options
    xparser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    xargs = xparser.parse_args()

    if xargs.rand_seed is None or xargs.rand_seed < 0:
        xargs.rand_seed = random.randint(1, 100000)
    assert xargs.save_dir is not None, "save-path argument can not be None"
    assert (
        xargs.keep_ratio > 0 and xargs.keep_ratio <= 1
    ), "invalid keep ratio : {:}".format(xargs.keep_ratio)
    return xargs
49 |
--------------------------------------------------------------------------------
/xautodl/config_utils/random_baseline.py:
--------------------------------------------------------------------------------
1 | import os, sys, time, random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_RandomSearch_args():
    """Parse command-line options for the random-architecture-search baseline."""
    xparser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    xparser.add_argument("--resume", type=str, help="Resume path.")
    xparser.add_argument(
        "--init_model", type=str, help="The initialization model path."
    )
    xparser.add_argument(
        "--expect_flop", type=float, help="The expected flop keep ratio."
    )
    xparser.add_argument(
        "--arch_nums",
        type=int,
        help="The maximum number of running random arch generating..",
    )
    xparser.add_argument(
        "--model_config", type=str, help="The path to the model configuration"
    )
    xparser.add_argument(
        "--optim_config", type=str, help="The path to the optimizer configuration"
    )
    # NOTE(review): this help text looks copy-pasted from --optim_config;
    # confirm the intended description of --random_mode.
    xparser.add_argument(
        "--random_mode",
        type=str,
        choices=["random", "fix"],
        help="The path to the optimizer configuration",
    )
    xparser.add_argument("--procedure", type=str, help="The procedure basic prefix.")
    add_shared_args(xparser)
    # Optimization options
    xparser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    xargs = xparser.parse_args()

    if xargs.rand_seed is None or xargs.rand_seed < 0:
        xargs.rand_seed = random.randint(1, 100000)
    assert xargs.save_dir is not None, "save-path argument can not be None"
    # assert args.flop_ratio_min < args.flop_ratio_max, 'flop-ratio {:} vs {:}'.format(args.flop_ratio_min, args.flop_ratio_max)
    return xargs
45 |
--------------------------------------------------------------------------------
/xautodl/config_utils/search_single_args.py:
--------------------------------------------------------------------------------
1 | import os, sys, time, random, argparse
2 | from .share_args import add_shared_args
3 |
4 |
def obtain_search_single_args():
    """Parse command-line options for single-shape (width) search with Gumbel softmax.

    Returns:
        argparse.Namespace with the parsed options; a random seed is filled in
        when --rand_seed is unset/negative, and the Gumbel-tau and FLOP
        constraints are validated.
    """
    parser = argparse.ArgumentParser(
        description="Train a classification model on typical image classification datasets.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--resume", type=str, help="Resume path.")
    parser.add_argument(
        "--model_config", type=str, help="The path to the model configuration"
    )
    parser.add_argument(
        "--optim_config", type=str, help="The path to the optimizer configuration"
    )
    parser.add_argument("--split_path", type=str, help="The split file path.")
    parser.add_argument("--search_shape", type=str, help="The shape to be searched.")
    # parser.add_argument('--arch_para_pure', type=int, help='The architecture-parameter pure or not.')
    parser.add_argument(
        "--gumbel_tau_max", type=float, help="The maximum tau for Gumbel."
    )
    parser.add_argument(
        "--gumbel_tau_min", type=float, help="The minimum tau for Gumbel."
    )
    parser.add_argument("--procedure", type=str, help="The procedure basic prefix.")
    parser.add_argument("--FLOP_ratio", type=float, help="The expected FLOP ratio.")
    parser.add_argument("--FLOP_weight", type=float, help="The loss weight for FLOP.")
    parser.add_argument(
        "--FLOP_tolerant", type=float, help="The tolerant range for FLOP."
    )
    add_shared_args(parser)
    # Optimization options
    parser.add_argument(
        "--batch_size", type=int, default=2, help="Batch size for training."
    )
    args = parser.parse_args()

    if args.rand_seed is None or args.rand_seed < 0:
        args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, "save-path argument can not be None"
    assert args.gumbel_tau_max is not None and args.gumbel_tau_min is not None
    # BUG FIX: the failure message referenced bare `FLOP_tolerant`, a NameError
    # that would mask the real assertion failure; use `args.FLOP_tolerant`.
    assert (
        args.FLOP_tolerant is not None and args.FLOP_tolerant > 0
    ), "invalid FLOP_tolerant : {:}".format(args.FLOP_tolerant)
    # assert args.arch_para_pure is not None, 'arch_para_pure is not None: {:}'.format(args.arch_para_pure)
    # args.arch_para_pure = bool(args.arch_para_pure)
    return args
49 |
--------------------------------------------------------------------------------
/xautodl/config_utils/share_args.py:
--------------------------------------------------------------------------------
1 | import os, sys, time, random, argparse
2 |
3 |
def add_shared_args(parser):
    """Register the options shared by every training/search entry point.

    Covers dataset location, printing/checkpoint frequencies, data-loader
    workers, and the manual random seed (-1 means "pick one at random").
    """
    # Data Generation
    parser.add_argument("--dataset", type=str, help="The dataset name.")
    parser.add_argument("--data_path", type=str, help="The dataset name.")
    parser.add_argument(
        "--cutout_length", type=int, help="The cutout length, negative means not use."
    )
    # Printing
    parser.add_argument(
        "--print_freq", type=int, default=100, help="print frequency (default: 200)"
    )
    parser.add_argument(
        "--print_freq_eval",
        type=int,
        default=100,
        help="print frequency (default: 200)",
    )
    # Checkpoints
    parser.add_argument(
        "--eval_frequency",
        type=int,
        default=1,
        help="evaluation frequency (default: 200)",
    )
    parser.add_argument(
        "--save_dir", type=str, help="Folder to save checkpoints and log."
    )
    # Acceleration
    parser.add_argument(
        "--workers",
        type=int,
        default=8,
        help="number of data loading workers (default: 8)",
    )
    # Random Seed
    parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
40 |
--------------------------------------------------------------------------------
/xautodl/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | from .get_dataset_with_transform import get_datasets, get_nas_search_loaders
5 | from .SearchDatasetWrap import SearchDataset
6 |
--------------------------------------------------------------------------------
/xautodl/datasets/landmark_utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .point_meta import PointMeta2V, apply_affine2point, apply_boundary
2 |
--------------------------------------------------------------------------------
/xautodl/datasets/math_base_funcs.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | import math
5 | import abc
6 | import copy
7 | import numpy as np
8 |
9 |
class MathFunc(abc.ABC):
    """The math function -- a virtual class defining some APIs.

    A subclass holds ``freedom`` parameters (indexed ``0 .. freedom-1``) and
    implements ``__call__`` to evaluate the function at ``x``.
    """

    def __init__(self, freedom: int, params=None, xstr: str = "x"):
        """Create a function with ``freedom`` free parameters.

        Args:
            freedom: number of free parameters.
            params: optional indexable of initial parameter values (0..freedom-1).
            xstr: display name of the function's variable.
        """
        # initialize as empty
        self._params = dict()
        for i in range(freedom):
            self._params[i] = None
        self._freedom = freedom
        if params is not None:
            self.set(params)
        self._xstr = str(xstr)
        # check_valid() is a no-op while this flag is True.
        self._skip_check = True

    def set(self, params):
        """Deep-copy ``params[0..freedom-1]`` into the internal parameter dict."""
        for key in range(self._freedom):
            param = copy.deepcopy(params[key])
            self._params[key] = param

    def check_valid(self):
        """Raise ValueError if any parameter is unset (disabled by _skip_check)."""
        if not self._skip_check:
            for key in range(self._freedom):
                value = self._params[key]
                if value is None:
                    raise ValueError("The {:} is None".format(key))

    @property
    def xstr(self):
        """The display name of the function's variable."""
        return self._xstr

    def reset_xstr(self, xstr):
        """Change the display name of the function's variable."""
        self._xstr = str(xstr)

    def output_shape(self, input_shape):
        # By default the function is treated as element-wise: same shape out.
        return input_shape

    @abc.abstractmethod
    def __call__(self, x):
        raise NotImplementedError

    @abc.abstractmethod
    def noise_call(self, x, std):
        """Evaluate at ``x`` and add Gaussian noise with standard deviation ``std``."""
        clean_y = self.__call__(x)
        if isinstance(clean_y, np.ndarray):
            noise_y = clean_y + np.random.normal(scale=std, size=clean_y.shape)
        else:
            # BUG FIX: corrected the misspelled error message ("Unkonwn").
            raise ValueError("Unknown type: {:}".format(type(clean_y)))
        return noise_y

    def __repr__(self):
        # BUG FIX: `freedom` was referenced as a bare name (NameError at repr
        # time); use the instance attribute instead.
        return "{name}(freedom={freedom})".format(
            name=self.__class__.__name__, freedom=self._freedom
        )
63 |
--------------------------------------------------------------------------------
/xautodl/datasets/math_core.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.05 #
3 | #####################################################
4 | from .math_static_funcs import (
5 | LinearSFunc,
6 | QuadraticSFunc,
7 | CubicSFunc,
8 | QuarticSFunc,
9 | ConstantFunc,
10 | ComposedSinSFunc,
11 | ComposedCosSFunc,
12 | )
13 | from .math_dynamic_funcs import (
14 | LinearDFunc,
15 | QuadraticDFunc,
16 | SinQuadraticDFunc,
17 | BinaryQuadraticDFunc,
18 | )
19 | from .math_dynamic_generator import UniformDGenerator, GaussianDGenerator
20 |
--------------------------------------------------------------------------------
/xautodl/datasets/test_utils.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | import os
5 |
6 |
7 | def test_imagenet_data(imagenet):
8 | total_length = len(imagenet)
9 | assert (
10 | total_length == 1281166 or total_length == 50000
11 | ), "The length of ImageNet is wrong : {}".format(total_length)
12 | map_id = {}
13 | for index in range(total_length):
14 | path, target = imagenet.imgs[index]
15 | folder, image_name = os.path.split(path)
16 | _, folder = os.path.split(folder)
17 | if folder not in map_id:
18 | map_id[folder] = target
19 | else:
20 | assert map_id[folder] == target, "Class : {} is not {}".format(
21 | folder, target
22 | )
23 | assert image_name.find(folder) == 0, "{} is wrong.".format(path)
24 | print("Check ImageNet Dataset OK")
25 |
--------------------------------------------------------------------------------
/xautodl/log_utils/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | # every package does not rely on pytorch or tensorflow
5 | # I tried to list all dependency here: os, sys, time, numpy, (possibly) matplotlib
6 | ##################################################
7 | from .logger import Logger, PrintLogger
8 | from .meter import AverageMeter
9 | from .time_utils import (
10 | time_for_file,
11 | time_string,
12 | time_string_short,
13 | time_print,
14 | convert_secs2time,
15 | )
16 | from .pickle_wrap import pickle_save, pickle_load
17 |
--------------------------------------------------------------------------------
/xautodl/log_utils/pickle_wrap.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | import pickle
5 | from pathlib import Path
6 |
7 |
def pickle_save(obj, path):
    """Serialize *obj* to *path* with pickle, creating parent directories as needed."""
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open("wb") as stream:
        pickle.dump(obj, stream)
14 |
15 |
def pickle_load(path):
    """Deserialize and return the pickled object stored at *path*.

    Raises:
        ValueError: if *path* does not exist.
    """
    xpath = Path(path)
    if not xpath.exists():
        raise ValueError("{:} does not exists".format(path))
    with xpath.open("rb") as stream:
        return pickle.load(stream)
22 |
--------------------------------------------------------------------------------
/xautodl/log_utils/time_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | import time, sys
5 | import numpy as np
6 |
7 |
def time_for_file():
    """Return a GMT timestamp usable in file names, e.g. '05-Jan-at-12-30-59'."""
    return "{:}".format(time.strftime("%d-%h-at-%H-%M-%S", time.gmtime(time.time())))
11 |
12 |
def time_string():
    """Return the current GMT time formatted as '[YYYY-MM-DD HH:MM:SS]'."""
    return "[{:}]".format(time.strftime("%Y-%m-%d %X", time.gmtime(time.time())))
17 |
18 |
def time_string_short():
    """Return the current GMT date as 'YYYYMMDD'."""
    return "{:}".format(time.strftime("%Y%m%d", time.gmtime(time.time())))
23 |
24 |
def time_print(string, is_print=True):
    """Print *string* prefixed with the current timestamp when *is_print* is truthy."""
    if not is_print:
        return
    print("{} : {}".format(time_string(), string))
28 |
29 |
def convert_secs2time(epoch_time, return_str=False):
    """Convert a duration in seconds to hours/minutes/seconds.

    Args:
        epoch_time: duration in seconds (floats are truncated).
        return_str: when True, return the string '[HH:MM:SS]' instead of a tuple.

    Returns:
        (hours, minutes, seconds) tuple, or the formatted string.
    """
    need_hour, remainder = divmod(int(epoch_time), 3600)
    need_mins, need_secs = divmod(remainder, 60)
    if return_str:
        # FIX: the original bound the result to `str`, shadowing the builtin.
        return "[{:02d}:{:02d}:{:02d}]".format(need_hour, need_mins, need_secs)
    else:
        return need_hour, need_mins, need_secs
39 |
40 |
def print_log(print_string, log):
    """Emit *print_string* via a Logger-like object when one is given.

    If *log* exposes a ``log`` method it is used; otherwise the message is
    printed and, when *log* is a writable stream, also written and flushed.
    """
    # if isinstance(log, Logger): log.log('{:}'.format(print_string))
    if hasattr(log, "log"):
        log.log("{:}".format(print_string))
        return
    print("{:}".format(print_string))
    if log is not None:
        log.write("{:}\n".format(print_string))
        log.flush()
50 |
--------------------------------------------------------------------------------
/xautodl/models/SharedUtils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | import torch
5 | import torch.nn as nn
6 |
7 |
def additive_func(A, B):
    """Add two tensors whose batch dims match; the one with fewer channels
    (dim 1) is added into the leading channels of a clone of the wider one."""
    assert A.dim() == B.dim() and A.size(0) == B.size(0), "{:} vs {:}".format(
        A.size(), B.size()
    )
    chan_a, chan_b = A.size(1), B.size(1)
    if chan_a == chan_b:
        return A + B
    # Clone the wider tensor and accumulate the narrower into its prefix.
    wide, narrow = (A, B) if chan_a > chan_b else (B, A)
    out = wide.clone()
    out[:, : min(chan_a, chan_b)] += narrow
    return out
23 |
24 |
def change_key(key, value):
    """Return a callable (e.g. for nn.Module.apply) that sets attribute *key*
    to *value* on any object that already defines it."""

    def _apply(module):
        if hasattr(module, key):
            setattr(module, key, value)

    return _apply
31 |
32 |
def parse_channel_info(xstring):
    """Parse 'a-b c-d ...' into a list of int lists, e.g. '3-4 5-6' -> [[3, 4], [5, 6]]."""
    return [[int(token) for token in chunk.split("-")] for chunk in xstring.split(" ")]
38 |
--------------------------------------------------------------------------------
/xautodl/models/cell_infers/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | from .tiny_network import TinyNetwork
5 | from .nasnet_cifar import NASNetonCIFAR
6 |
--------------------------------------------------------------------------------
/xautodl/models/cell_searchs/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | # The macro structure is defined in NAS-Bench-201
5 | from .search_model_darts import TinyNetworkDarts
6 | from .search_model_gdas import TinyNetworkGDAS
7 | from .search_model_setn import TinyNetworkSETN
8 | from .search_model_enas import TinyNetworkENAS
9 | from .search_model_random import TinyNetworkRANDOM
10 | from .generic_model import GenericNAS201Model
11 | from .genotypes import Structure as CellStructure, architectures as CellArchitectures
12 |
13 | # NASNet-based macro structure
14 | from .search_model_gdas_nasnet import NASNetworkGDAS
15 | from .search_model_gdas_frc_nasnet import NASNetworkGDAS_FRC
16 | from .search_model_darts_nasnet import NASNetworkDARTS
17 |
18 |
# Registry of NAS-Bench-201 (tiny) super networks, keyed by algorithm name.
nas201_super_nets = {
    "DARTS-V1": TinyNetworkDarts,
    "DARTS-V2": TinyNetworkDarts,
    "GDAS": TinyNetworkGDAS,
    "SETN": TinyNetworkSETN,
    "ENAS": TinyNetworkENAS,
    "RANDOM": TinyNetworkRANDOM,
    "generic": GenericNAS201Model,
}

# Registry of NASNet-style (macro) super networks, keyed by algorithm name.
nasnet_super_nets = {
    "GDAS": NASNetworkGDAS,
    "GDAS_FRC": NASNetworkGDAS_FRC,
    "DARTS": NASNetworkDARTS,
}
34 |
--------------------------------------------------------------------------------
/xautodl/models/cell_searchs/_test_module.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | import torch
5 | from search_model_enas_utils import Controller
6 |
7 |
def main():
    # Smoke test: Controller(6, 4) — presumably (num edges, num ops); verify
    # against search_model_enas_utils. Sampling once checks the forward pass runs.
    controller = Controller(6, 4)
    predictions = controller()


if __name__ == "__main__":
    main()
15 |
--------------------------------------------------------------------------------
/xautodl/models/initialization.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
def initialize_resnet(m):
    """Weight initializer for ResNet-style networks (pass to ``model.apply``).

    Conv2d: Kaiming-normal (fan-out, ReLU) weights, zero bias.
    BatchNorm2d: unit weight, zero bias.
    Linear: N(0, 0.01) weights, zero bias.
    """
    if isinstance(m, nn.Conv2d):
        # He initialization suited to ReLU activations.
        nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
        nn.init.constant_(m.bias, 0)
17 |
--------------------------------------------------------------------------------
/xautodl/models/shape_infers/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | from .InferCifarResNet_width import InferWidthCifarResNet
5 | from .InferImagenetResNet import InferImagenetResNet
6 | from .InferCifarResNet_depth import InferDepthCifarResNet
7 | from .InferCifarResNet import InferCifarResNet
8 | from .InferMobileNetV2 import InferMobileNetV2
9 | from .InferTinyCellNet import DynamicShapeTinyNet
10 |
--------------------------------------------------------------------------------
/xautodl/models/shape_infers/shared_utils.py:
--------------------------------------------------------------------------------
def parse_channel_info(xstring):
    """Parse a space-separated list of '-'-joined ints, e.g. '16-32 64-64' -> [[16, 32], [64, 64]]."""
    return [list(map(int, chunk.split("-"))) for chunk in xstring.split(" ")]
6 |
--------------------------------------------------------------------------------
/xautodl/models/shape_searchs/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ##################################################
4 | from .SearchCifarResNet_width import SearchWidthCifarResNet
5 | from .SearchCifarResNet_depth import SearchDepthCifarResNet
6 | from .SearchCifarResNet import SearchShapeCifarResNet
7 | from .SearchSimResNet_width import SearchWidthSimResNet
8 | from .SearchImagenetResNet import SearchShapeImagenetResNet
9 | from .generic_size_tiny_cell_model import GenericNAS301Model
10 |
--------------------------------------------------------------------------------
/xautodl/nas_infer_model/DXYs/__init__.py:
--------------------------------------------------------------------------------
1 | # Performance-Aware Template Network for One-Shot Neural Architecture Search
2 | from .CifarNet import NetworkCIFAR as CifarNet
3 | from .ImageNet import NetworkImageNet as ImageNet
4 | from .genotypes import Networks
5 | from .genotypes import build_genotype_from_dict
6 |
--------------------------------------------------------------------------------
/xautodl/nas_infer_model/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | # I write this package to make AutoDL-Projects to be compatible with the old GDAS projects.
5 | # Ideally, this package will be merged into lib/models/cell_infers in future.
6 | # Currently, this package is used to reproduce the results in GDAS (Searching for A Robust Neural Architecture in Four GPU Hours, CVPR 2019).
7 | ##################################################
8 |
9 | import os, torch
10 |
11 |
def obtain_nas_infer_model(config, extra_model_path=None):
    """Build an inference network (CIFAR / ImageNet) from a NAS configuration.

    Args:
        config: namespace with `arch`, `genotype`, `dataset` and the
            architecture hyper-parameters read below (`ichannel`, `layers`,
            `auxiliary`, `class_num`, and `stem_multi` for CIFAR).
        extra_model_path: checkpoint holding searched genotypes; required when
            `config.genotype` is None.

    Raises:
        ValueError: for an unknown arch/dataset, or a missing checkpoint path.
    """
    if config.arch == "dxys":
        from .DXYs import CifarNet, ImageNet, Networks
        from .DXYs import build_genotype_from_dict

        if config.genotype is None:
            # Recover the genotype from the checkpoint.
            # BUG FIX: also reject a None path — previously the check was
            # skipped when extra_model_path was None, letting torch.load(None)
            # fail with an obscure error. (Also fixed the "confiig" typo.)
            if extra_model_path is None or not os.path.isfile(extra_model_path):
                raise ValueError(
                    "When genotype in config is None, extra_model_path must be set as a path instead of {:}".format(
                        extra_model_path
                    )
                )
            xdata = torch.load(extra_model_path)
            current_epoch = xdata["epoch"]
            # Use the genotype discovered at the last completed epoch.
            genotype_dict = xdata["genotypes"][current_epoch - 1]
            genotype = build_genotype_from_dict(genotype_dict)
        else:
            genotype = Networks[config.genotype]
        if config.dataset == "cifar":
            return CifarNet(
                config.ichannel,
                config.layers,
                config.stem_multi,
                config.auxiliary,
                genotype,
                config.class_num,
            )
        elif config.dataset == "imagenet":
            return ImageNet(
                config.ichannel,
                config.layers,
                config.auxiliary,
                genotype,
                config.class_num,
            )
        else:
            raise ValueError("invalid dataset : {:}".format(config.dataset))
    else:
        raise ValueError("invalid nas arch type : {:}".format(config.arch))
52 |
--------------------------------------------------------------------------------
/xautodl/procedures/__init__.py:
--------------------------------------------------------------------------------
1 | ##################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
3 | ######################################################################
4 | # This folder is deprecated, which is re-organized in "xalgorithms". #
5 | ######################################################################
6 | from .starts import prepare_seed
7 | from .starts import prepare_logger
8 | from .starts import get_machine_info
9 | from .starts import save_checkpoint
10 | from .starts import copy_checkpoint
11 | from .optimizers import get_optim_scheduler
12 | from .funcs_nasbench import evaluate_for_seed as bench_evaluate_for_seed
13 | from .funcs_nasbench import pure_evaluate as bench_pure_evaluate
14 | from .funcs_nasbench import get_nas_bench_loaders
15 |
16 |
def get_procedures(procedure):
    """Look up the (train, valid) procedure functions registered for *procedure*.

    Valid keys: 'basic', 'search', 'Simple-KD', 'search-v2'.
    """
    from .basic_main import basic_train, basic_valid
    from .search_main import search_train, search_valid
    from .search_main_v2 import search_train_v2
    from .simple_KD_main import simple_KD_train, simple_KD_valid

    # Registry-style dispatch: each name maps to its (train, valid) pair.
    registry = {
        "basic": (basic_train, basic_valid),
        "search": (search_train, search_valid),
        "Simple-KD": (simple_KD_train, simple_KD_valid),
        "search-v2": (search_train_v2, search_valid),
    }
    return registry[procedure]
39 |
--------------------------------------------------------------------------------
/xautodl/procedures/eval_funcs.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.04 #
3 | #####################################################
4 | import abc
5 |
6 |
def obtain_accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)

    # (batch, maxk) indices of the top-scoring classes, transposed to (maxk, batch)
    # so that row j holds the j-th ranked prediction per sample.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    hit_matrix = pred.eq(target.view(1, -1).expand_as(pred))

    return [
        hit_matrix[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch_size)
        for k in topk
    ]
21 |
--------------------------------------------------------------------------------
/xautodl/spaces/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.01 #
3 | #####################################################
# Define complex search space for AutoDL           #
5 | #####################################################
6 |
7 | from .basic_space import Categorical
8 | from .basic_space import Continuous
9 | from .basic_space import Integer
10 | from .basic_space import Space
11 | from .basic_space import VirtualNode
12 | from .basic_op import has_categorical
13 | from .basic_op import has_continuous
14 | from .basic_op import is_determined
15 | from .basic_op import get_determined_value
16 | from .basic_op import get_min
17 | from .basic_op import get_max
18 |
--------------------------------------------------------------------------------
/xautodl/trade_models/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | from .transformers import get_transformer
5 |
--------------------------------------------------------------------------------
/xautodl/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
4 | # This directory contains some ad-hoc functions, classes, etc.
5 | # It will be re-formulated in the future.
6 | #####################################################
7 | from .evaluation_utils import obtain_accuracy
8 | from .gpu_manager import GPUManager
9 | from .flop_benchmark import get_model_infos, count_parameters, count_parameters_in_MB
10 | from .affine_utils import normalize_points, denormalize_points
11 | from .affine_utils import identity2affine, solve2theta, affine2image
12 | from .hash_utils import get_md5_file
13 | from .str_utils import split_str2indexes
14 | from .str_utils import show_mean_var
15 |
--------------------------------------------------------------------------------
/xautodl/utils/evaluation_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
def obtain_accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values; max(topk) must not exceed num_classes.

    Returns:
        A list of 1-element tensors, one per k, each holding the top-k
        accuracy in percent over the batch.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Top-maxk class indices per sample, transposed to shape (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUGFIX: correct[:k] is non-contiguous after .t(), so .view(-1)
        # raises RuntimeError on recent PyTorch; .reshape(-1) handles both.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
18 |
--------------------------------------------------------------------------------
/xautodl/utils/hash_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import hashlib
3 |
4 |
def get_md5_file(file_path, post_truncated=5):
    """Return the MD5 hex digest of a file's contents.

    Args:
        file_path: path to an existing file.
        post_truncated: if not None, return only the LAST this-many hex
            characters of the digest (preserves the original behavior).

    Raises:
        ValueError: if ``file_path`` does not exist.
    """
    if not os.path.exists(file_path):
        raise ValueError("[get_md5_file] {:} does not exist".format(file_path))
    md5_hash = hashlib.md5()
    # BUGFIX: the original never closed the file handle; use a context manager.
    with open(file_path, "rb") as xfile:
        md5_hash.update(xfile.read())
    digest = md5_hash.hexdigest()
    if post_truncated is None:
        return digest
    return digest[-post_truncated:]
18 |
--------------------------------------------------------------------------------
/xautodl/utils/str_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def split_str2indexes(string: str, max_check: int, length_limit=5):
    """Parse a comma-separated list of "lo-hi" ranges into a set of indexes.

    Each range is inclusive on both ends; every bound must satisfy
    0 <= lo <= hi < max_check. When ``length_limit`` is not None, both
    bounds must be zero-padded strings of exactly that length.
    """
    if not isinstance(string, str):
        raise ValueError("Invalid scheme for {:}".format(string))
    compact = "".join(string.split())  # strip all whitespace
    indexes = set()
    for token in compact.split(","):
        bounds = token.split("-")
        if len(bounds) != 2:
            raise ValueError("invalid srange : {:}".format(bounds))
        if length_limit is not None:
            assert (
                len(bounds[0]) == len(bounds[1]) == length_limit
            ), "invalid srange : {:}".format(bounds)
        lower, upper = int(bounds[0]), int(bounds[1])
        if not (0 <= lower <= upper < max_check):
            raise ValueError("{:} vs {:} vs {:}".format(lower, upper, max_check))
        indexes.update(range(lower, upper + 1))
    return indexes
25 |
26 |
def show_mean_var(xlist):
    """Print the mean and std of *xlist* in a LaTeX-like "mean$_{\\pm std}$" form.

    Args:
        xlist: sequence of numbers (anything ``np.array`` accepts).
    """
    values = np.array(xlist)
    # BUGFIX: "\p" is an invalid escape sequence (warning on modern Python);
    # a raw string keeps the printed bytes identical while silencing it.
    print(
        "{:.2f}".format(values.mean())
        + r"$_{{\pm}{"
        + "{:.2f}".format(values.std())
        + "}}$"
    )
35 |
--------------------------------------------------------------------------------
/xautodl/utils/temp_sync.py:
--------------------------------------------------------------------------------
1 | # To be deleted.
2 | import copy
3 | import torch
4 |
5 | from xlayers.super_core import SuperSequential, SuperMLPv1
6 | from xlayers.super_core import SuperSimpleNorm
7 | from xlayers.super_core import SuperLinear
8 |
9 |
def optimize_fn(xs, ys, device="cpu", max_iter=2000, max_lr=0.1):
    """Fit a small MLP regressor to 1-D data (xs -> ys) with Adam + MSE.

    Args:
        xs, ys: sequences of scalars; reshaped into (-1, 1) float tensors.
        device: torch device string for both data and model.
        max_iter: number of optimization steps (assumed > 0; see NOTE below).
        max_lr: initial learning rate, decayed by 0.3 at 25%/50%/75% of max_iter.

    Returns:
        (model, loss_func, best_loss): the model loaded with the best-seen
        weights, the MSE criterion, and the lowest observed training loss.
    """
    xs = torch.FloatTensor(xs).view(-1, 1).to(device)
    ys = torch.FloatTensor(ys).view(-1, 1).to(device)

    # Input normalization using the empirical mean/std, then a 1-200-100-1 MLP.
    model = SuperSequential(
        SuperSimpleNorm(xs.mean().item(), xs.std().item()),
        SuperLinear(1, 200),
        torch.nn.LeakyReLU(),
        SuperLinear(200, 100),
        torch.nn.LeakyReLU(),
        SuperLinear(100, 1),
    ).to(device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr, amsgrad=True)
    loss_func = torch.nn.MSELoss()
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[
            int(max_iter * 0.25),
            int(max_iter * 0.5),
            int(max_iter * 0.75),
        ],
        gamma=0.3,
    )

    best_loss, best_param = None, None
    for _iter in range(max_iter):
        # Full-batch training: every step uses the entire dataset.
        preds = model(xs)

        optimizer.zero_grad()
        loss = loss_func(preds, ys)
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        # Snapshot the weights whenever the training loss improves.
        if best_loss is None or best_loss > loss.item():
            best_loss = loss.item()
            best_param = copy.deepcopy(model.state_dict())

        # print('loss={:}, best-loss={:}'.format(loss.item(), best_loss))
    # NOTE(review): if max_iter == 0 this loads None and crashes — the caller
    # is assumed to pass max_iter > 0; confirm before reusing elsewhere.
    model.load_state_dict(best_param)
    return model, loss_func, best_loss
52 |
53 |
def evaluate_fn(model, xs, ys, loss_fn, device="cpu"):
    """Evaluate *model* on (xs, ys) without gradients.

    Returns a tuple of (flat numpy predictions, scalar loss value).
    """
    with torch.no_grad():
        inputs = torch.FloatTensor(xs).view(-1, 1).to(device)
        targets = torch.FloatTensor(ys).view(-1, 1).to(device)
        predictions = model(inputs)
        loss = loss_fn(predictions, targets)
    return predictions.view(-1).cpu().numpy(), loss.item()
62 |
--------------------------------------------------------------------------------
/xautodl/xlayers/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
3 | #####################################################
# This file is expected to be self-contained, except
# for importing from spaces to include search space.
6 | #####################################################
7 | from .super_core import *
8 |
--------------------------------------------------------------------------------
/xautodl/xlayers/super_core.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
3 | #####################################################
4 | from .super_module import SuperRunMode
5 | from .super_module import IntSpaceType
6 | from .super_module import LayerOrder
7 |
8 | from .super_module import SuperModule
9 | from .super_container import SuperSequential
10 | from .super_linear import SuperLinear
11 | from .super_linear import SuperMLPv1, SuperMLPv2
12 |
13 | from .super_norm import SuperSimpleNorm
14 | from .super_norm import SuperLayerNorm1D
15 | from .super_norm import SuperSimpleLearnableNorm
16 | from .super_norm import SuperIdentity
17 | from .super_dropout import SuperDropout
18 | from .super_dropout import SuperDrop
19 |
# Registry mapping config-string names to the Super* normalization layers
# imported above (all from ``super_norm``).
super_name2norm = {
    "simple_norm": SuperSimpleNorm,
    "simple_learn_norm": SuperSimpleLearnableNorm,
    "layer_norm_1d": SuperLayerNorm1D,
    "identity": SuperIdentity,
}
26 |
27 | from .super_attention import SuperSelfAttention
28 | from .super_attention import SuperQKVAttention
29 | from .super_attention_v2 import SuperQKVAttentionV2
30 | from .super_transformer import SuperTransformerEncoderLayer
31 |
32 | from .super_activations import SuperReLU
33 | from .super_activations import SuperLeakyReLU
34 | from .super_activations import SuperTanh
35 | from .super_activations import SuperGELU
36 | from .super_activations import SuperSigmoid
37 |
# Registry mapping config-string names to the Super* activation layers
# imported above (all from ``super_activations``).
super_name2activation = {
    "relu": SuperReLU,
    "sigmoid": SuperSigmoid,
    "gelu": SuperGELU,
    "leaky_relu": SuperLeakyReLU,
    "tanh": SuperTanh,
}
45 |
46 |
47 | from .super_trade_stem import SuperAlphaEBDv1
48 | from .super_positional_embedding import SuperDynamicPositionE
49 | from .super_positional_embedding import SuperPositionalEncoder
50 |
51 | from .super_rearrange import SuperReArrange
52 |
--------------------------------------------------------------------------------
/xautodl/xmisc/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | """The module and yaml related functions."""
5 | from .module_utils import call_by_dict
6 | from .module_utils import call_by_yaml
7 | from .module_utils import nested_call_by_dict
8 | from .module_utils import nested_call_by_yaml
9 | from .yaml_utils import load_yaml
10 |
11 | from .torch_utils import count_parameters
12 |
13 | from .logger_utils import Logger
14 |
15 | """The data sampler related classes."""
16 | from .sampler_utils import BatchSampler
17 |
18 | """The meter related classes."""
19 | from .meter_utils import AverageMeter
20 |
21 | """The scheduler related classes."""
22 | from .scheduler_utils import CosineParamScheduler, WarmupParamScheduler, LRMultiplier
23 |
24 |
def get_scheduler(indicator, lr):
    """Build a learning-rate multiplier schedule by name.

    Args:
        indicator: schedule name; only "warm-cos" is currently supported.
        lr: base learning rate; the cosine decays from lr down to lr * 1e-3.

    Raises:
        ValueError: for any unrecognized indicator.
    """
    if indicator != "warm-cos":
        raise ValueError("Unknown indicator: {:}".format(indicator))
    return WarmupParamScheduler(
        CosineParamScheduler(lr, lr * 1e-3),
        warmup_factor=0.001,
        warmup_length=0.05,
        warmup_method="linear",
    )
37 |
--------------------------------------------------------------------------------
/xautodl/xmisc/logger_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | import sys
5 | from pathlib import Path
6 |
7 | from .time_utils import time_for_file, time_string
8 |
9 |
class Logger:
    """A logger used in xautodl.

    Writes messages to ``<root_dir>/logs/<prefix><timestamp>.log`` and
    optionally echoes them to stdout.
    """

    def __init__(self, root_dir, prefix="", log_time=True):
        """Create a summary writer logging to log_dir.

        Args:
            root_dir: directory under which a ``logs`` sub-directory is made.
            prefix: string prepended to the log file's name.
            log_time: if True, prefix every message with a timestamp.
        """
        self.root_dir = Path(root_dir)
        self.log_dir = self.root_dir / "logs"
        self.log_dir.mkdir(parents=True, exist_ok=True)

        self._prefix = prefix
        self._log_time = log_time
        self.logger_path = self.log_dir / "{:}{:}.log".format(
            self._prefix, time_for_file()
        )
        self._logger_file = open(self.logger_path, "w")

    @property
    def logger(self):
        # The underlying writable file object.
        return self._logger_file

    def log(self, string, save=True, stdout=False):
        """Emit *string* to stdout and (when save=True) append it to the log file."""
        string = "{:} {:}".format(time_string(), string) if self._log_time else string
        if stdout:
            # NOTE(review): this branch writes no trailing newline, unlike
            # print() below — looks intentional for progress-style output.
            sys.stdout.write(string)
            sys.stdout.flush()
        else:
            print(string)
        if save:
            self._logger_file.write("{:}\n".format(string))
            self._logger_file.flush()

    def close(self):
        """Close the log file and, if one was attached externally, the writer."""
        self._logger_file.close()
        # BUGFIX: ``self.writer`` is never created in __init__, so the original
        # unconditional attribute access raised AttributeError on every close().
        writer = getattr(self, "writer", None)
        if writer is not None:
            writer.close()

    def __repr__(self):
        return "{name}(dir={log_dir}, prefix={_prefix}, log_time={_log_time})".format(
            name=self.__class__.__name__, **self.__dict__
        )
50 |
--------------------------------------------------------------------------------
/xautodl/xmisc/sampler_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | import random
5 |
6 |
class BatchSampler:
    """A batch sampler used for single machine training.

    Reshuffles the index pool at the start of every pass over the dataset
    and yields ``steps`` batches of ``batch`` indexes each; any trailing
    remainder (when the dataset size is not a multiple of batch) is skipped.
    """

    def __init__(self, dataset, batch, steps):
        self._num_per_epoch = len(dataset)
        self._iter_per_epoch = self._num_per_epoch // batch
        self._steps = steps
        self._batch = batch
        if self._num_per_epoch < self._batch:
            raise ValueError(
                "The dataset size must be larger than batch={:}".format(batch)
            )
        self._indexes = list(range(self._num_per_epoch))

    def __iter__(self):
        """Yield batches of randomly sampled indexes, one per step."""
        for step in range(self._steps):
            offset = step % self._iter_per_epoch
            if offset == 0:
                # New pass over the data: reshuffle the whole index pool.
                random.shuffle(self._indexes)
            start = offset * self._batch
            yield self._indexes[start : start + self._batch]

    def __len__(self):
        # Total number of batches this sampler will produce.
        return self._steps
33 |
--------------------------------------------------------------------------------
/xautodl/xmisc/time_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | import time
5 |
6 |
def time_for_file():
    """Return the current UTC time formatted for embedding in file names."""
    return time.strftime("%d-%h-at-%H-%M-%S", time.gmtime(time.time()))
10 |
11 |
def time_string():
    """Return the current UTC time as a bracketed "[YYYY-mm-dd HH:MM:SS]" string."""
    now = time.gmtime(time.time())
    return "[{:}]".format(time.strftime("%Y-%m-%d %X", now))
16 |
17 |
def convert_secs2time(epoch_time, return_str=False):
    """Convert a duration in seconds into hours/minutes/seconds.

    Args:
        epoch_time: duration in seconds (int or float).
        return_str: if True, return a "[HH:MM:SS]" string instead of a tuple.

    Returns:
        Either (hours, minutes, seconds) as ints, or the formatted string.
    """
    need_hour = int(epoch_time / 3600)
    need_mins = int((epoch_time - 3600 * need_hour) / 60)
    need_secs = int(epoch_time - 3600 * need_hour - 60 * need_mins)
    if return_str:
        # BUGFIX: the original bound this to a local named ``str``, shadowing
        # the builtin; renamed without changing the returned value.
        return "[{:02d}:{:02d}:{:02d}]".format(need_hour, need_mins, need_secs)
    return need_hour, need_mins, need_secs
27 |
--------------------------------------------------------------------------------
/xautodl/xmisc/torch_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | import torch
5 | import torch.nn as nn
6 | import numpy as np
7 |
8 |
def count_parameters(model_or_parameters, unit="mb"):
    """Count the number of parameters in a module / parameter / collection.

    Args:
        model_or_parameters: an ``nn.Module``, a single ``nn.Parameter``,
            a list/tuple of any of these, or an iterable of tensors.
        unit: "kb"/"k", "mb"/"m", "gb"/"g" to scale the count, or None for
            the raw count.

    Returns:
        The (possibly scaled) parameter count.

    Raises:
        ValueError: for an unrecognized non-None unit.
    """
    if isinstance(model_or_parameters, nn.Module):
        counts = sum(np.prod(v.size()) for v in model_or_parameters.parameters())
    elif isinstance(model_or_parameters, nn.Parameter):
        # BUGFIX: referenced the misspelled name ``models_or_parameters``
        # (NameError) in the original.
        counts = model_or_parameters.numel()
    elif isinstance(model_or_parameters, (list, tuple)):
        # BUGFIX: same misspelling here; recurse with unit=None for raw counts.
        counts = sum(count_parameters(x, None) for x in model_or_parameters)
    else:
        counts = sum(np.prod(v.size()) for v in model_or_parameters)
    # BUGFIX: the original called unit.lower() before checking for None, so
    # unit=None (used by the recursive list branch) raised AttributeError.
    if unit is None:
        return counts
    unit = unit.lower()
    if unit in ("kb", "k"):
        counts /= 1e3
    elif unit in ("mb", "m"):
        counts /= 1e6
    elif unit in ("gb", "g"):
        counts /= 1e9
    else:
        raise ValueError("Unknown unit: {:}".format(unit))
    return counts
27 |
--------------------------------------------------------------------------------
/xautodl/xmisc/yaml_utils.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | import os
5 | import yaml
6 |
7 |
def load_yaml(path):
    """Safely parse the YAML file at *path*; raise ValueError if it is not a file."""
    if not os.path.isfile(path):
        raise ValueError("{:} is not a file.".format(path))
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
14 |
--------------------------------------------------------------------------------
/xautodl/xmodels/__init__.py:
--------------------------------------------------------------------------------
1 | #####################################################
2 | # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.06 #
3 | #####################################################
4 | # The models in this folder is written with xlayers #
5 | #####################################################
6 |
7 | from .core import *
8 |
--------------------------------------------------------------------------------