├── .gitignore ├── README.md ├── assets ├── codebase.png ├── f_supported.jpg └── teaser.png ├── configs ├── _base_ │ ├── datasets │ │ ├── bair.py │ │ ├── bridgedata.py │ │ ├── cityscapes.py │ │ ├── enso.py │ │ ├── human.py │ │ ├── kitti.py │ │ ├── kth.py │ │ ├── mnist.py │ │ ├── nuscenes.py │ │ ├── robonet.py │ │ ├── sevir.py │ │ ├── taxibj.py │ │ ├── traffic4cast2021.py │ │ └── weatherbench128x256_69.py │ ├── default_runtime.py │ ├── models │ │ ├── convlstm.py │ │ ├── e3dlstm.py │ │ ├── earthformer.py │ │ ├── mau.py │ │ ├── mcvd.py │ │ ├── phydnet.py │ │ ├── predrnnpp.py │ │ ├── predrnnv1.py │ │ ├── predrnnv2.py │ │ ├── simvpv1.py │ │ ├── simvpv2.py │ │ └── tau.py │ └── schedules │ │ ├── schedule_100e.py │ │ ├── schedule_1e6i.py │ │ ├── schedule_200e.py │ │ ├── schedule_2e6i.py │ │ ├── schedule_50e.py │ │ └── schedule_5e5i.py ├── bair │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e4.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bair_scheduler.py │ │ └── bs_64_lr_1e4.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e4.py │ ├── simvpv2 │ │ └── bs_64_lr_1e4.py │ └── tau │ │ └── bs_64_lr_1e4.py ├── bridgedata │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e3.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bridgedata_scheduler.py │ │ └── bs_128_lr_4e4.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e3.py │ ├── simvpv2 │ │ └── bs_64_lr_1e3.py │ └── tau │ │ └── bs_64_lr_1e3.py ├── cityscapes │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e4.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bs_64_lr_1e4.py │ │ └── cityscapes_scheduler.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e4.py │ ├── simvpv2 │ │ └── bs_64_lr_1e4.py │ └── tau │ │ └── bs_64_lr_1e4.py ├── enso │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e4.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bs_64_lr_1e4.py │ │ └── enso_scheduler.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e4.py │ ├── simvpv2 │ │ └── bs_64_lr_1e4.py │ └── tau │ │ └── bs_64_lr_1e4.py ├── human │ ├── convlstm │ │ └── bs_16_lr_1e4.py │ ├── e3dlstm │ │ └── bs_16_lr_1e4.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_16_lr_1e4.py │ ├── mcvd │ │ ├── bs_64_lr_1e4.py │ │ └── human_scheduler.py │ ├── phydnet │ │ └── bs_16_lr_1e4.py │ ├── predrnnpp │ │ └── bs_16_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_16_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_16_lr_1e4.py │ ├── simvpv1 │ │ └── bs_16_lr_1e3.py │ ├── simvpv2 │ │ └── bs_16_lr_1e3.py │ └── tau │ │ └── bs_16_lr_1e3.py ├── kitti │ ├── convlstm │ │ └── bs_16_lr_1e3.py │ ├── e3dlstm │ │ └── bs_16_lr_1e3.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_16_lr_1e3.py │ ├── mcvd │ │ ├── bs_64_lr_2e4.py │ │ └── kitti_scheduler.py │ ├── 
phydnet │ │ └── bs_16_lr_1e3.py │ ├── predrnnpp │ │ └── bs_16_lr_1e3.py │ ├── predrnnv1 │ │ └── bs_16_lr_1e3.py │ ├── predrnnv2 │ │ └── bs_16_lr_1e3.py │ ├── simvpv1 │ │ └── bs_16_lr_5e3.py │ ├── simvpv2 │ │ └── bs_16_lr_5e3.py │ └── tau │ │ └── bs_16_lr_5e3.py ├── kth │ ├── convlstm │ │ └── bs_16_lr_4e5.py │ ├── e3dlstm │ │ └── bs_8_lr_5e4.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_16_lr_5e4.py │ ├── mcvd │ │ ├── bs_64_lr_2e4.py │ │ └── kth_scheduler.py │ ├── phydnet │ │ └── bs_16_lr_1e3.py │ ├── predrnnpp │ │ └── bs_16_lr_4e5.py │ ├── predrnnv1 │ │ └── bs_16_lr_4e5.py │ ├── predrnnv2 │ │ └── bs_16_lr_4e5.py │ ├── simvpv1 │ │ └── bs_16_lr_1e3.py │ ├── simvpv2 │ │ └── bs_16_lr_1e3.py │ └── tau │ │ └── bs_16_lr_1e3.py ├── mnist │ ├── convlstm │ │ └── bs_16_lr_5e4.py │ ├── e3dlstm │ │ └── bs_16_lr_1e4.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_16_lr_1e3.py │ ├── mcvd │ │ ├── bs_64_lr_2e4.py │ │ └── mnist_scheduler.py │ ├── phydnet │ │ └── bs_16_lr_1e3.py │ ├── predrnnpp │ │ └── bs_16_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_16_lr_5e4.py │ ├── predrnnv2 │ │ └── bs_16_lr_5e4.py │ ├── simvpv1 │ │ └── bs_16_lr_1e3.py │ ├── simvpv2 │ │ └── bs_16_lr_1e3.py │ └── tau │ │ └── bs_16_lr_1e3.py ├── nuscenes │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e3.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bs_128_lr_4e4.py │ │ └── nuscenes_scheduler.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e3.py │ ├── simvpv2 │ │ └── bs_64_lr_1e3.py │ └── tau │ │ └── bs_64_lr_1e3.py ├── robonet │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e3.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bs_128_lr_4e4.py │ │ └── robonet_scheduler.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e3.py │ ├── simvpv2 │ │ └── bs_64_lr_1e3.py │ └── tau │ │ └── bs_64_lr_1e3.py ├── sevir │ ├── convlstm │ │ └── bs_32_lr_1e3.py │ ├── e3dlstm │ │ └── bs_32_lr_1e3.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_32_lr_1e3.py │ ├── mcvd │ │ ├── bs_32_lr_1e4.py │ │ └── sevir_scheduler.py │ ├── phydnet │ │ └── bs_32_lr_1e3.py │ ├── predrnnpp │ │ └── bs_32_lr_1e3.py │ ├── predrnnv1 │ │ └── bs_32_lr_1e3.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e3.py │ ├── simvpv1 │ │ └── bs_32_lr_1e4.py │ ├── simvpv2 │ │ └── bs_32_lr_1e4.py │ └── tau │ │ └── bs_32_lr_1e4.py ├── taxibj │ ├── convlstm │ │ └── bs_16_lr_5e4.py │ ├── e3dlstm │ │ └── bs_64_lr_2e4.py │ ├── earthformer │ │ └── bs_32_lr_1e3.py │ ├── mau │ │ └── bs_16_lr_5e4.py │ ├── mcvd │ │ ├── bs_64_lr_1e4.py │ │ └── taxibj_scheduler.py │ ├── phydnet │ │ └── bs_16_lr_5e4.py │ ├── predrnnpp │ │ └── bs_16_lr_1e4.py │ ├── predrnnv1 │ │ └── bs_16_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_16_lr_1e4.py │ ├── simvpv1 │ │ └── bs_16_lr_1e3.py │ ├── simvpv2 │ │ └── bs_16_lr_1e3.py │ └── tau │ │ └── bs_16_lr_13.py ├── traffic4cast2021 │ ├── convlstm │ │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ │ └── bs_64_lr_1e4.py │ ├── earthformer │ │ └── bs_64_lr_1e4.py │ ├── mau │ │ └── bs_64_lr_1e4.py │ ├── mcvd │ │ ├── bs_64_lr_1e4.py │ │ └── traffic4cast2021_scheduler.py │ ├── phydnet │ │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ │ └── bs_64_lr_1e4.py │ 
├── predrnnv1 │ │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ │ └── bs_64_lr_1e4.py │ ├── simvpv2 │ │ └── bs_64_lr_1e4.py │ └── tau │ │ └── bs_64_lr_1e4.py └── weatherbench │ ├── convlstm │ └── bs_64_lr_1e4.py │ ├── e3dlstm │ └── bs_64_lr_1e4.py │ ├── earthformer │ └── bs_64_lr_1e4.py │ ├── mau │ └── bs_64_lr_1e4.py │ ├── mcvd │ ├── bs_64_lr_1e4.py │ └── weatherbench_scheduler.py │ ├── phydnet │ └── bs_64_lr_1e4.py │ ├── predrnnpp │ └── bs_64_lr_1e4.py │ ├── predrnnv1 │ └── bs_64_lr_1e4.py │ ├── predrnnv2 │ └── bs_64_lr_1e4.py │ ├── simvpv1 │ └── bs_64_lr_1e4.py │ ├── simvpv2 │ └── bs_64_lr_1e4.py │ └── tau │ └── bs_64_lr_1e4.py ├── data └── annotations │ ├── KITTI │ ├── kitti_test.json │ ├── kitti_train.json │ └── kitti_val.json │ ├── bridgedata │ ├── bridgedata_all_train_val_test.json │ ├── bridgedata_extrapolation.json │ ├── bridgedata_rbs.json │ ├── bridgedata_test.json │ ├── bridgedata_train.json │ ├── bridgedata_val.json │ ├── new_scene_new_task.json │ ├── new_scene_ori_task.json │ └── ori_scene_new_task.json │ ├── human │ ├── human_test.txt │ ├── human_train.txt │ └── human_val.txt │ ├── nuscenes │ ├── nuscenes_test.json │ ├── nuscenes_train.json │ └── nuscenes_val.json │ ├── robonet │ ├── robonet_test.json │ ├── robonet_train.json │ └── robonet_val.json │ └── traffic4cast2021 │ ├── traffic4cast2021_moscow.json │ ├── traffic4cast2021_train.json │ ├── traffic4cast2021_val.json │ └── traiifc4cast2021_test.json ├── pre_download ├── cartopy │ └── download.sh ├── i3d │ ├── convert_tf_pretrained.py │ └── download.sh └── inception │ └── download.sh ├── predbench ├── __init__.py ├── datasets │ ├── __init__.py │ ├── bair.py │ ├── bridgedata.py │ ├── caltech.py │ ├── cityscapes.py │ ├── enso.py │ ├── human.py │ ├── kitti.py │ ├── kth.py │ ├── movingmnist.py │ ├── nuscenes.py │ ├── robonet.py │ ├── samplers │ │ ├── __init__.py │ │ └── infinite_sampler.py │ ├── sevir.py │ ├── taxibj.py │ ├── traffic4cast.py │ ├── transforms │ │ ├── __init__.py │ │ ├── augmentations.py │ │ ├── compose.py │ │ ├── formatting.py │ │ ├── loading.py │ │ └── utils.py │ └── weatherbench_np.py ├── engine │ ├── __init__.py │ ├── hooks │ │ ├── __init__.py │ │ ├── epoch_record_hook.py │ │ ├── iter_record_hook.py │ │ ├── save_result_hook.py │ │ ├── viz_heatmap_data │ │ │ ├── __init__.py │ │ │ ├── viz_enso.py │ │ │ ├── viz_sevir.py │ │ │ ├── viz_taxibj.py │ │ │ ├── viz_traffic4cast2021.py │ │ │ └── viz_weatherbench.py │ │ └── viz_video_hook.py │ ├── optimizers │ │ ├── __init__.py │ │ └── multi_optimizer_constructor.py │ ├── runner │ │ ├── __init__.py │ │ └── infinite_epoch_loop.py │ ├── schedulers │ │ ├── __init__.py │ │ └── onecycle_scheduler.py │ └── visualization │ │ ├── __init__.py │ │ └── vis_backend.py ├── evaluation │ ├── __init__.py │ ├── error_metrics.py │ ├── perception_metrics │ │ ├── __init__.py │ │ ├── fid │ │ │ ├── __init__.py │ │ │ ├── fid.py │ │ │ └── inception_v3.py │ │ ├── fvd │ │ │ ├── __init__.py │ │ │ ├── fvd.py │ │ │ └── i3d_pytorch.py │ │ ├── isc.py │ │ ├── lpips.py │ │ └── perception_metrics.py │ ├── similarity_metrics.py │ └── weather_metrics │ │ ├── __init__.py │ │ ├── enso.py │ │ └── weather_metrics.py ├── models │ ├── __init__.py │ ├── data_processors │ │ ├── __init__.py │ │ ├── image_processor.py │ │ └── video_processor.py │ ├── forecasters │ │ ├── __init__.py │ │ ├── cnn_model.py │ │ ├── diffusion_model.py │ │ ├── rnn_model.py │ │ └── transformer_model.py │ ├── losses │ │ ├── __init__.py │ │ ├── ce_loss.py │ │ ├── dsm_loss.py │ │ └── weighted_loss.py │ ├── modules 
│ │ ├── __init__.py │ │ ├── cnn_modules │ │ │ ├── __init__.py │ │ │ ├── metavp_model.py │ │ │ └── metavp_modules │ │ │ │ ├── __init__.py │ │ │ │ ├── metavp_enc_dec.py │ │ │ │ ├── metavp_layers │ │ │ │ ├── __init__.py │ │ │ │ ├── hornet.py │ │ │ │ ├── moganet.py │ │ │ │ ├── poolformer.py │ │ │ │ ├── uniformer.py │ │ │ │ └── van.py │ │ │ │ ├── metavp_midnet.py │ │ │ │ └── modules.py │ │ ├── diffusion_modules │ │ │ ├── __init__.py │ │ │ ├── mcvd_modules │ │ │ │ ├── __init__.py │ │ │ │ ├── layers.py │ │ │ │ ├── layers3d.py │ │ │ │ ├── layerspp.py │ │ │ │ ├── ncsnpp_more.py │ │ │ │ ├── normalization.py │ │ │ │ └── up_or_down_sampling.py │ │ │ ├── pndm.py │ │ │ └── samplers.py │ │ ├── rnn_modules │ │ │ ├── __init__.py │ │ │ ├── convlstm.py │ │ │ ├── e3dlstm.py │ │ │ ├── lstm_cells │ │ │ │ ├── __init__.py │ │ │ │ ├── convlstm_cells.py │ │ │ │ ├── e3dlstm_cells.py │ │ │ │ ├── mau_cells.py │ │ │ │ ├── phydnet_cells.py │ │ │ │ ├── predrnnpp_cells.py │ │ │ │ ├── predrnnv1_cells.py │ │ │ │ └── predrnnv2_cells.py │ │ │ ├── mau.py │ │ │ ├── phydnet.py │ │ │ ├── predrnnpp.py │ │ │ ├── predrnnv1.py │ │ │ ├── predrnnv2.py │ │ │ └── rnn_base.py │ │ └── transformer_modules │ │ │ ├── __init__.py │ │ │ ├── cuboid_transformer_modules.py │ │ │ ├── cuboid_transformer_patterns.py │ │ │ ├── earthformer.py │ │ │ └── utils.py │ └── operators │ │ ├── __init__.py │ │ ├── conv2d_gradfix.py │ │ ├── fused_act.py │ │ ├── fused_bias_act.cpp │ │ ├── fused_bias_act_kernel.cu │ │ ├── upfirdn2d.cpp │ │ ├── upfirdn2d.py │ │ └── upfirdn2d_kernel.cu ├── registry.py └── version.py ├── requirements.txt ├── test.py ├── tools ├── complexity_analysis.py ├── generate_scripts.py ├── remove_ckpt_module.py └── scripts │ ├── bair │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e4.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e4.sh │ └── tau │ │ └── bs_64_lr_1e4.sh │ ├── bridgedata │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e3.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_128_lr_4e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e3.sh │ └── tau │ │ └── bs_64_lr_1e3.sh │ ├── cityscapes │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e4.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e4.sh │ └── tau │ │ └── bs_64_lr_1e4.sh │ ├── enso │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e4.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e4.sh │ └── tau 
│ │ └── bs_64_lr_1e4.sh │ ├── human │ ├── convlstm │ │ └── bs_16_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_16_lr_1e4.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_16_lr_1e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_16_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_16_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_16_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_16_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_16_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_16_lr_1e3.sh │ └── tau │ │ └── bs_16_lr_1e3.sh │ ├── kitti │ ├── convlstm │ │ └── bs_16_lr_1e3.sh │ ├── e3dlstm │ │ └── bs_16_lr_1e3.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_16_lr_1e3.sh │ ├── mcvd │ │ └── bs_64_lr_2e4.sh │ ├── phydnet │ │ └── bs_16_lr_1e3.sh │ ├── predrnnpp │ │ └── bs_16_lr_1e3.sh │ ├── predrnnv1 │ │ └── bs_16_lr_1e3.sh │ ├── predrnnv2 │ │ └── bs_16_lr_1e3.sh │ ├── simvpv1 │ │ └── bs_16_lr_5e3.sh │ ├── simvpv2 │ │ └── bs_16_lr_5e3.sh │ └── tau │ │ └── bs_16_lr_5e3.sh │ ├── kth │ ├── convlstm │ │ └── bs_16_lr_4e5.sh │ ├── e3dlstm │ │ └── bs_8_lr_5e4.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_16_lr_5e4.sh │ ├── mcvd │ │ └── bs_64_lr_2e4.sh │ ├── phydnet │ │ └── bs_16_lr_1e3.sh │ ├── predrnnpp │ │ └── bs_16_lr_4e5.sh │ ├── predrnnv1 │ │ └── bs_16_lr_4e5.sh │ ├── predrnnv2 │ │ └── bs_16_lr_4e5.sh │ ├── simvpv1 │ │ └── bs_16_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_16_lr_1e3.sh │ └── tau │ │ └── bs_16_lr_1e3.sh │ ├── mnist │ ├── convlstm │ │ └── bs_16_lr_5e4.sh │ ├── e3dlstm │ │ └── bs_16_lr_1e4.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_16_lr_1e3.sh │ ├── mcvd │ │ └── bs_64_lr_2e4.sh │ ├── phydnet │ │ └── bs_16_lr_1e3.sh │ ├── predrnnpp │ │ └── bs_16_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_16_lr_5e4.sh │ ├── predrnnv2 │ │ └── bs_16_lr_5e4.sh │ ├── simvpv1 │ │ └── bs_16_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_16_lr_1e3.sh │ └── tau │ │ └── bs_16_lr_1e3.sh │ ├── nuscenes │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e3.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_128_lr_4e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e3.sh │ └── tau │ │ └── bs_64_lr_1e3.sh │ ├── robonet │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e3.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_128_lr_4e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e3.sh │ └── tau │ │ └── bs_64_lr_1e3.sh │ ├── sevir │ ├── convlstm │ │ └── bs_32_lr_1e3.sh │ ├── e3dlstm │ │ └── bs_32_lr_1e3.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_32_lr_1e3.sh │ ├── mcvd │ │ └── bs_32_lr_1e4.sh │ ├── phydnet │ │ └── bs_32_lr_1e3.sh │ ├── predrnnpp │ │ └── bs_32_lr_1e3.sh │ ├── predrnnv1 │ │ └── bs_32_lr_1e3.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e3.sh │ ├── simvpv1 │ │ └── bs_32_lr_1e4.sh │ ├── simvpv2 │ │ └── bs_32_lr_1e4.sh │ └── tau │ │ └── bs_32_lr_1e4.sh │ ├── taxibj │ ├── convlstm │ │ └── bs_16_lr_5e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_2e4.sh │ ├── earthformer │ │ └── bs_32_lr_1e3.sh │ ├── mau │ │ └── bs_16_lr_5e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_16_lr_5e4.sh │ ├── 
predrnnpp │ │ └── bs_16_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_16_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_16_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_16_lr_1e3.sh │ ├── simvpv2 │ │ └── bs_16_lr_1e3.sh │ └── tau │ │ └── bs_16_lr_13.sh │ ├── traffic4cast2021 │ ├── convlstm │ │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ │ └── bs_64_lr_1e4.sh │ ├── earthformer │ │ └── bs_64_lr_1e4.sh │ ├── mau │ │ └── bs_64_lr_1e4.sh │ ├── mcvd │ │ └── bs_64_lr_1e4.sh │ ├── phydnet │ │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ │ └── bs_64_lr_1e4.sh │ ├── simvpv2 │ │ └── bs_64_lr_1e4.sh │ └── tau │ │ └── bs_64_lr_1e4.sh │ └── weatherbench │ ├── convlstm │ └── bs_64_lr_1e4.sh │ ├── e3dlstm │ └── bs_64_lr_1e4.sh │ ├── earthformer │ └── bs_64_lr_1e4.sh │ ├── mau │ └── bs_64_lr_1e4.sh │ ├── mcvd │ └── bs_64_lr_1e4.sh │ ├── phydnet │ └── bs_64_lr_1e4.sh │ ├── predrnnpp │ └── bs_64_lr_1e4.sh │ ├── predrnnv1 │ └── bs_64_lr_1e4.sh │ ├── predrnnv2 │ └── bs_64_lr_1e4.sh │ ├── simvpv1 │ └── bs_64_lr_1e4.sh │ ├── simvpv2 │ └── bs_64_lr_1e4.sh │ └── tau │ └── bs_64_lr_1e4.sh └── train.py /assets/codebase.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenEarthLab/PredBench/9066d9162f75bb51a059e31b188c3839233990f1/assets/codebase.png -------------------------------------------------------------------------------- /assets/f_supported.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenEarthLab/PredBench/9066d9162f75bb51a059e31b188c3839233990f1/assets/f_supported.jpg -------------------------------------------------------------------------------- /assets/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OpenEarthLab/PredBench/9066d9162f75bb51a059e31b188c3839233990f1/assets/teaser.png -------------------------------------------------------------------------------- /configs/_base_/default_runtime.py: -------------------------------------------------------------------------------- 1 | default_scope = 'predbench' 2 | 3 | 4 | default_hooks = dict( 5 | runtime_info=dict(type='RuntimeInfoHook'), 6 | timer=dict(type='IterTimerHook'), 7 | logger=dict(type='LoggerHook', interval=100), 8 | param_scheduler=dict(type='ParamSchedulerHook'), 9 | checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1, save_best='error/mse', rule='less'), 10 | sampler_seed=dict(type='DistSamplerSeedHook'), 11 | sync_buffers=dict(type='SyncBuffersHook') 12 | ) 13 | 14 | custom_hooks = [dict(type='VizVideoHook')] 15 | 16 | load_from = None 17 | resume = False 18 | 19 | randomness=dict( 20 | seed=42, 21 | deterministic=False, 22 | diff_rank_seed=True, # ddp mode 23 | ) 24 | 25 | env_cfg = dict( 26 | cudnn_benchmark=True, 27 | mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), 28 | dist_cfg=dict(backend='nccl'), 29 | ) 30 | 31 | -------------------------------------------------------------------------------- /configs/_base_/models/convlstm.py: -------------------------------------------------------------------------------- 1 | num_hidden = [128,128,128,128] 2 | filter_size = 5 3 | stride = 1 4 | patch_size = 2 5 | layer_norm = 0 6 | 7 | 8 | RNN=dict( 9 | type='ConvLSTM', 10 | input_shape=(10, 3, 64, 64), # (T, C, H, W) 11 | num_layers=len(num_hidden), 12 | num_hidden=num_hidden, 13 | patch_size=patch_size, 14 | 
filter_size=filter_size, 15 | stride=stride, 16 | layer_norm=layer_norm, 17 | ) -------------------------------------------------------------------------------- /configs/_base_/models/e3dlstm.py: -------------------------------------------------------------------------------- 1 | num_hidden = [64,64,64,64] 2 | filter_size = [2, 5, 5] 3 | stride = 1 4 | patch_size = 4 5 | layer_norm = 0 6 | 7 | RNN=dict( 8 | type='E3DLSTM', 9 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 10 | num_layers=len(num_hidden), 11 | num_hidden=num_hidden, 12 | patch_size=patch_size, 13 | filter_size=filter_size, 14 | stride=stride, 15 | layer_norm=layer_norm, 16 | ) -------------------------------------------------------------------------------- /configs/_base_/models/mau.py: -------------------------------------------------------------------------------- 1 | num_hidden = [64,64,64,64] 2 | filter_size = 5 3 | stride = 1 4 | patch_size = 1 5 | sr_size = 2 6 | tau = 5 7 | cell_mode = 'normal' 8 | model_mode = 'normal' 9 | 10 | 11 | RNN=dict( 12 | type='MAU', 13 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 14 | num_layers=len(num_hidden), 15 | num_hidden=num_hidden, 16 | patch_size=patch_size, 17 | filter_size=filter_size, 18 | stride=stride, 19 | sr_size=sr_size, 20 | tau=tau, 21 | cell_mode=cell_mode, 22 | model_mode=model_mode, 23 | ) -------------------------------------------------------------------------------- /configs/_base_/models/phydnet.py: -------------------------------------------------------------------------------- 1 | num_hidden = [] 2 | patch_size = 1 3 | sr_size = 2 4 | 5 | 6 | RNN=dict( 7 | type='PhyDNet', 8 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 9 | num_layers=len(num_hidden), 10 | num_hidden=num_hidden, 11 | patch_size=patch_size, 12 | sr_size=sr_size, 13 | ) -------------------------------------------------------------------------------- /configs/_base_/models/predrnnpp.py: -------------------------------------------------------------------------------- 1 | num_hidden = [128,128,128,128] 2 | filter_size = 5 3 | stride = 1 4 | patch_size = 2 5 | layer_norm = 0 6 | 7 | RNN=dict( 8 | type='PredRNNpp', 9 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 10 | num_layers=len(num_hidden), 11 | num_hidden=num_hidden, 12 | patch_size=patch_size, 13 | filter_size=filter_size, 14 | stride=stride, 15 | layer_norm=layer_norm, 16 | ) -------------------------------------------------------------------------------- /configs/_base_/models/predrnnv1.py: -------------------------------------------------------------------------------- 1 | num_hidden = [128,128,128,128] 2 | filter_size = 5 3 | stride = 1 4 | patch_size = 2 5 | layer_norm = 0 6 | 7 | RNN=dict( 8 | type='PredRNNv1', 9 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 10 | num_layers=len(num_hidden), 11 | num_hidden=num_hidden, 12 | patch_size=patch_size, 13 | filter_size=filter_size, 14 | stride=stride, 15 | layer_norm=layer_norm, 16 | ) -------------------------------------------------------------------------------- /configs/_base_/models/predrnnv2.py: -------------------------------------------------------------------------------- 1 | num_hidden = [128,128,128,128] 2 | filter_size = 5 3 | stride = 1 4 | patch_size = 2 5 | layer_norm = 0 6 | 7 | 8 | RNN=dict( 9 | type='PredRNNv2', 10 | input_shape=(10, 3, 63, 64), # (T, C, H, W) 11 | num_layers=len(num_hidden), 12 | num_hidden=num_hidden, 13 | patch_size=patch_size, 14 | filter_size=filter_size, 15 | stride=stride, 16 | layer_norm=layer_norm, 17 | decouple_beta=0.01, 18 | ) 
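Note: these `_base_` model fragments only define a reusable `RNN`/`CNN` dict; the per-dataset leaf configs later in this dump pull them in through `_base_` inheritance and override fields such as `input_shape`. A minimal sketch of inspecting a merged leaf config, assuming the standard `mmengine` config API (the repo's `default_scope`, hook names and registry strongly suggest MMEngine, but this snippet is not taken from the repo itself); the config path is one of the files listed in the tree above:

from mmengine.config import Config

# Load a leaf config; its _base_ list pulls in the dataset, model,
# schedule and runtime fragments shown in this dump.
cfg = Config.fromfile('configs/bair/tau/bs_64_lr_1e4.py')

# Fields defined in the _base_ model fragment are visible after merging,
# with leaf-level overrides (e.g. CNN.input_shape) already applied.
print(cfg.model['type'])                 # 'CNNModel'
print(cfg.model['CNN']['type'])          # 'MetaVPModel'
print(cfg.model['CNN']['input_shape'])   # taken from the dataset fragment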
-------------------------------------------------------------------------------- /configs/_base_/models/simvpv1.py: -------------------------------------------------------------------------------- 1 | 2 | spatio_kernel_enc = 3 3 | spatio_kernel_dec = 3 4 | hid_S = 64 5 | hid_T = 512 6 | N_T = 6 7 | N_S = 4 8 | 9 | CNN = dict( 10 | type = 'MetaVPModel', 11 | input_shape = (10, 3, 64, 64), # (T, C, H, W) 12 | hid_S = hid_S, 13 | hid_T = hid_T, 14 | N_S = N_S, 15 | N_T = N_T, 16 | model_type='incepu', 17 | spatio_kernel_enc=spatio_kernel_enc, 18 | spatio_kernel_dec=spatio_kernel_dec, 19 | act_inplace=True 20 | ) 21 | 22 | -------------------------------------------------------------------------------- /configs/_base_/models/simvpv2.py: -------------------------------------------------------------------------------- 1 | 2 | spatio_kernel_enc = 3 3 | spatio_kernel_dec = 3 4 | hid_S = 64 5 | hid_T = 512 6 | N_T = 6 7 | N_S = 4 8 | 9 | CNN = dict( 10 | type = 'MetaVPModel', 11 | input_shape = (10, 3, 64, 64), # (T, C, H, W) 12 | hid_S = hid_S, 13 | hid_T = hid_T, 14 | N_S = N_S, 15 | N_T = N_T, 16 | model_type='gsta', 17 | spatio_kernel_enc=spatio_kernel_enc, 18 | spatio_kernel_dec=spatio_kernel_dec, 19 | act_inplace=True 20 | ) 21 | 22 | -------------------------------------------------------------------------------- /configs/_base_/models/tau.py: -------------------------------------------------------------------------------- 1 | spatio_kernel_enc = 3 2 | spatio_kernel_dec = 3 3 | hid_S = 64 4 | hid_T = 512 5 | N_T = 6 6 | N_S = 4 7 | 8 | CNN = dict( 9 | type = 'MetaVPModel', 10 | input_shape = (10, 3, 64, 64), # (T, C, H, W) 11 | hid_S = hid_S, 12 | hid_T = hid_T, 13 | N_S = N_S, 14 | N_T = N_T, 15 | model_type='tau', 16 | spatio_kernel_enc=spatio_kernel_enc, 17 | spatio_kernel_dec=spatio_kernel_dec, 18 | act_inplace=True 19 | ) 20 | 21 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_100e.py: -------------------------------------------------------------------------------- 1 | # training schedule 2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=1) 3 | val_cfg = dict(type='ValLoop') 4 | test_cfg = dict(type='TestLoop') 5 | 6 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_1e6i.py: -------------------------------------------------------------------------------- 1 | # training schedule 2 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=int(1e6), val_begin=1, val_interval=int(2e4)) 3 | val_cfg = dict(type='ValLoop') 4 | test_cfg = dict(type='TestLoop') 5 | 6 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_200e.py: -------------------------------------------------------------------------------- 1 | 2 | # training schedule 3 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=200, val_begin=1, val_interval=1) 4 | val_cfg = dict(type='ValLoop') 5 | test_cfg = dict(type='TestLoop') 6 | 7 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_2e6i.py: -------------------------------------------------------------------------------- 1 | # training schedule 2 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=int(2e6), val_begin=1, val_interval=int(2e4)) 3 | val_cfg = dict(type='ValLoop') 4 | test_cfg = dict(type='TestLoop') 5 | 6 | 
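Note: the schedule fragments differ only in the training loop: the `schedule_*e.py` files use `EpochBasedTrainLoop` with `max_epochs`, while the `schedule_*i.py` files use `IterBasedTrainLoop` with `max_iters` (used by the MCVD scheduler configs below). A leaf config selects one via `_base_` and can override individual keys; under the usual MMEngine merge semantics, dict fields are updated recursively, so a partial `train_cfg` changes only the listed keys. A hypothetical override sketch (not a file from this repo):

# Hypothetical leaf-config fragment: reuse the 100-epoch schedule but
# validate every 5 epochs instead of every epoch.
_base_ = ['../../_base_/schedules/schedule_100e.py']

train_cfg = dict(val_interval=5)   # merged into the base train_cfg,
                                   # keeping type and max_epochs intact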
-------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_50e.py: -------------------------------------------------------------------------------- 1 | # training schedule 2 | train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=1) 3 | val_cfg = dict(type='ValLoop') 4 | test_cfg = dict(type='TestLoop') 5 | 6 | -------------------------------------------------------------------------------- /configs/_base_/schedules/schedule_5e5i.py: -------------------------------------------------------------------------------- 1 | # training schedule 2 | train_cfg = dict(type='IterBasedTrainLoop', max_iters=int(5e5), val_begin=1, val_interval=int(2e4)) 3 | val_cfg = dict(type='ValLoop') 4 | test_cfg = dict(type='TestLoop') 5 | 6 | -------------------------------------------------------------------------------- /configs/bair/mcvd/bair_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/bair.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/bair/simvpv1/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/bair.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='bair')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_bair_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # 
dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/bair/simvpv2/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/bair.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='bair')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_bair_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/bair/tau/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/bair.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='bair')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_bair_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | 
# model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/bridgedata/mcvd/bridgedata_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/bridgedata.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_1e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='bridgedata', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/bridgedata/tau/bs_64_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/bridgedata.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='bridgedata')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_bridgedata_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | 
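Note: all of these leaf configs leave `OneCycleLR` with `total_steps=None` and rely on the in-config comment "len(train_dataset)*n_epoch/(batch_size*n_gpu)" being computed at runtime. A rough sketch of that arithmetic; the sample count and GPU count below are illustrative placeholders, not values from the repo:

def one_cycle_total_steps(num_train_samples, max_epochs,
                          batch_size_per_gpu, num_gpus):
    # Iterations per epoch with drop_last-style integer division,
    # scaled by the number of epochs in the schedule.
    iters_per_epoch = num_train_samples // (batch_size_per_gpu * num_gpus)
    return iters_per_epoch * max_epochs

# e.g. a 100-epoch schedule at batch_size=32 on 2 GPUs over 10,000 clips:
print(one_cycle_total_steps(10_000, 100, 32, 2))   # 15600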
-------------------------------------------------------------------------------- /configs/cityscapes/mcvd/cityscapes_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/cityscapes.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/cityscapes/tau/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/cityscapes.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='cityscapes')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_cityscapes_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/enso/mcvd/enso_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/enso.py', 5 | '../../_base_/default_runtime.py', 6 | 
'../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='enso', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | dict(type='WeatherMetrics', metric_list=['nino3.4']), 25 | ] 26 | test_evaluator=[ 27 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 28 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 29 | dict(type='WeatherMetrics', metric_list=['nino3.4']), 30 | ] 31 | 32 | 33 | data_processor=dict( 34 | type='VideoProcessor', 35 | input_len=_base_.input_len, 36 | output_len=_base_.output_len, 37 | pred_len=_base_.pred_len, 38 | mean=img_norm_cfg['mean'], 39 | std=img_norm_cfg['std'], 40 | ) 41 | -------------------------------------------------------------------------------- /configs/enso/simvpv1/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/enso.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='enso')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_enso_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/enso/simvpv2/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/enso.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | 
clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='enso')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_enso_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/enso/tau/bs_64_lr_1e4.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/enso.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='enso')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_enso_bs_64_lr_1e4' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/human/mcvd/human_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/human.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | 
val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/human/simvpv1/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/human.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_50e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='human')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_human_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/human/simvpv2/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/human.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_50e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='human')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_human_bs_16_lr_1e3' 
35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/human/tau/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/human.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_50e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='human')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_human_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/kitti/mcvd/kitti_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/kitti.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | 
output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/kitti/simvpv1/bs_16_lr_5e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kitti.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 5e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kitti')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_kitti_bs_16_lr_5e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/kitti/simvpv2/bs_16_lr_5e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kitti.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 5e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kitti')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_kitti_bs_16_lr_5e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | 
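Note: `auto_scale_lr = dict(enable=True, base_batch_size=...)` in these configs follows the usual linear scaling rule: the optimizer learning rate is scaled by the ratio of the effective batch size (per-GPU batch size times number of GPUs) to `base_batch_size`. A small sketch, assuming the standard MMEngine behaviour; the GPU count is illustrative:

def scaled_lr(base_lr, base_batch_size, batch_size_per_gpu, num_gpus):
    # Linear scaling rule applied when auto_scale_lr is enabled.
    effective_batch_size = batch_size_per_gpu * num_gpus
    return base_lr * effective_batch_size / base_batch_size

# The kitti configs above use base_lr=5e-3 with base_batch_size=16;
# training on 2 GPUs at batch_size=16 each would roughly double the LR.
print(scaled_lr(5e-3, 16, 16, 2))   # 0.01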
-------------------------------------------------------------------------------- /configs/kitti/tau/bs_16_lr_5e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kitti.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 5e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kitti')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_kitti_bs_16_lr_5e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/kth/mcvd/kth_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/kth.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5], std=[127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/kth/simvpv1/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kth.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | 
base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kth')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_kth_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/kth/simvpv2/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kth.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kth')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_kth_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/kth/tau/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/kth.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 
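    # (assumption) AmpOptimWrapper autocasts the forward/backward pass to the dtype above;
    # bfloat16 keeps the fp32 exponent range, so it avoids the loss scaling that fp16 training needs.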
15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='kth')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_kth_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/mnist/mcvd/mnist_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/mnist.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5], std=[127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/mnist/simvpv1/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/mnist.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_200e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', 
data_type='mnist')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_mnist_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /configs/mnist/simvpv2/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/mnist.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_200e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='mnist')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_mnist_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/mnist/tau/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/mnist.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_200e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='mnist')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | 
name='tau_mnist_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/nuscenes/mcvd/nuscenes_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/nuscenes.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_1e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/nuscenes/simvpv1/bs_64_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/nuscenes.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-4 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='nuscenes')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_nuscenes_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 
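# (assumption) _base_.CNN is the SimVP backbone dict from ../../_base_/models/simvpv1.py and
# _base_.input_shape comes from ../../_base_/datasets/nuscenes.py, so the two lines above only
# bind the shared backbone to the nuScenes clip geometry before it is passed to CNNModel below.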
53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/nuscenes/tau/bs_64_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/nuscenes.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='nuscenes')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_nuscenes_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/robonet/mcvd/robonet_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/robonet.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_1e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | dict(type='PerceptionMetrics', metric_list=['lpips', 'fvd'], collect_device='gpu', norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/robonet/simvpv1/bs_64_lr_1e3.py: 
-------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/robonet.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='robonet')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_robonet_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/robonet/simvpv2/bs_64_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/robonet.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='robonet')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_robonet_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/robonet/tau/bs_64_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | 
'../../_base_/datasets/robonet.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 32 9 | base_batch_size = 64 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='robonet')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_robonet_bs_64_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/sevir/mcvd/sevir_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/sevir.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_1e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='sevir', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[127.5], std=[127.5], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 24 | dict(type='WeatherMetrics', metric_list=['bias', 'csi', 'pod', 'sucr'], threshold_list=[16, 74, 133, 160, 181, 219],), 25 | ] 26 | test_evaluator=[ 27 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 28 | dict(type='WeatherMetrics', metric_list=['bias', 'csi', 'pod', 'sucr'], threshold_list=[16, 74, 133, 160, 181, 219],), 29 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 30 | ] 31 | 32 | 33 | data_processor=dict( 34 | type='VideoProcessor', 35 | input_len=_base_.input_len, 36 | output_len=_base_.output_len, 37 | pred_len=_base_.pred_len, 38 | mean=img_norm_cfg['mean'], 39 | std=img_norm_cfg['std'], 40 | ) 41 | -------------------------------------------------------------------------------- /configs/taxibj/mcvd/taxibj_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/taxibj.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_5e5i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | 
by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='taxibj', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_norm_cfg = dict( 19 | mean=[625., 625.,], std=[625., 625.,], to_bgr=False 20 | ) 21 | 22 | val_evaluator=[ 23 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse', 'mape', 'wmape'], norm_01=False), 24 | ] 25 | test_evaluator=[ 26 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse', 'mape', 'wmape'], norm_01=False), 27 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 28 | ] 29 | 30 | 31 | data_processor=dict( 32 | type='VideoProcessor', 33 | input_len=_base_.input_len, 34 | output_len=_base_.output_len, 35 | pred_len=_base_.pred_len, 36 | mean=img_norm_cfg['mean'], 37 | std=img_norm_cfg['std'], 38 | ) 39 | -------------------------------------------------------------------------------- /configs/taxibj/simvpv1/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/taxibj.py', 4 | '../../_base_/models/simvpv1.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='taxibj')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv1_taxibj_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/taxibj/simvpv2/bs_16_lr_1e3.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/taxibj.py', 4 | '../../_base_/models/simvpv2.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 
22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='taxibj')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='simvpv2_taxibj_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/taxibj/tau/bs_16_lr_13.py: -------------------------------------------------------------------------------- 1 | _base_ = [ 2 | '../../_base_/default_runtime.py', 3 | '../../_base_/datasets/taxibj.py', 4 | '../../_base_/models/tau.py', 5 | '../../_base_/schedules/schedule_100e.py', 6 | ] 7 | 8 | batch_size = 16 9 | base_batch_size = 16 10 | base_lr = 1e-3 11 | auto_scale_lr = dict(enable=True, base_batch_size=base_batch_size) 12 | optim_wrapper = dict( 13 | type='AmpOptimWrapper', 14 | dtype='bfloat16', 15 | optimizer=dict(type='Adam', lr=base_lr), 16 | clip_grad=None 17 | ) 18 | param_scheduler = [ 19 | dict(type='OneCycleLR', 20 | eta_max=base_lr, total_steps=None, # automatically compute max iterations: len(train_dataset)*n_epoch/(batch_size*n_gpu) 21 | final_div_factor=int(base_lr*int(1e6)), by_epoch=False 22 | ) 23 | ] 24 | 25 | 26 | 27 | custom_hooks = [dict(type='VizVideoHook', data_type='taxibj')] 28 | visualizer=dict( 29 | type='Visualizer', 30 | vis_backends=[ 31 | dict(type='WandbVisBackend', 32 | init_kwargs=dict( 33 | project='PredBench', 34 | name='tau_taxibj_bs_16_lr_1e3' 35 | ) 36 | ), 37 | ], 38 | ) 39 | 40 | 41 | 42 | # dataset settings 43 | train_dataloader = dict(batch_size=batch_size,) 44 | val_dataloader=dict(batch_size=batch_size,) 45 | test_dataloader=dict(batch_size=batch_size,) 46 | 47 | 48 | 49 | # model 50 | CNN = _base_.CNN 51 | CNN.input_shape = _base_.input_shape 52 | 53 | model = dict(type='CNNModel', 54 | data_processor=_base_.data_processor, 55 | CNN=CNN, 56 | loss_fn=dict(type='WeightedLoss') 57 | ) 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /configs/traffic4cast2021/mcvd/traffic4cast2021_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/traffic4cast2021.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_2e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='traffic4cast2021', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | img_C = _base_.img_C # img_C = 8 19 | img_norm_cfg = dict( 20 | mean=[127.5 for _ in range(img_C)], std=[127.5 for _ in range(img_C)], to_bgr=False 21 | ) 22 | 23 | val_evaluator=[ 24 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse', 'mape', 'wmape'], norm_01=False), 25 | ] 26 | test_evaluator=[ 
27 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse', 'mape', 'wmape'], norm_01=False), 28 | dict(type='SimilarityMetrics', is_img=True, metric_list=['ssim', 'psnr', 'snr'], norm_01=False), 29 | ] 30 | 31 | 32 | data_processor=dict( 33 | type='VideoProcessor', 34 | input_len=_base_.input_len, 35 | output_len=_base_.output_len, 36 | pred_len=_base_.pred_len, 37 | mean=img_norm_cfg['mean'], 38 | std=img_norm_cfg['std'], 39 | ) 40 | -------------------------------------------------------------------------------- /configs/weatherbench/mcvd/weatherbench_scheduler.py: -------------------------------------------------------------------------------- 1 | 2 | # dataset settings 3 | _base_ = [ 4 | '../../_base_/datasets/weatherbench128x256_69.py', 5 | '../../_base_/default_runtime.py', 6 | '../../_base_/models/mcvd.py', 7 | '../../_base_/schedules/schedule_1e6i.py', 8 | ] 9 | 10 | default_hooks = dict( 11 | checkpoint=dict(type='CheckpointHook', 12 | by_epoch=False, interval=int(1e4), max_keep_ckpts=1, save_best='error/mse', rule='less' 13 | ), 14 | ) 15 | 16 | custom_hooks = [dict(type='VizVideoHook', data_type='weatherbench', viz_stages=['val','test']), dict(type='EMAHook', momentum=0.001)] 17 | 18 | val_evaluator=[ 19 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 20 | ] 21 | test_evaluator=[ 22 | dict(type='ErrorMetrics', metric_list=['mae', 'mse', 'rmse'], norm_01=False), 23 | dict(type='WeatherMetrics', metric_list=['wmae', 'wmse', 'wrmse', 'acc'], latitude=_base_.latitude, metric_channels=_base_.metric_channels), 24 | ] 25 | 26 | 27 | data_processor=dict( 28 | type='VideoProcessor', 29 | input_len=_base_.input_len, 30 | output_len=_base_.output_len, 31 | pred_len=_base_.pred_len, 32 | mean=_base_.img_norm_cfg['mean'], 33 | std=_base_.img_norm_cfg['std'], 34 | ) 35 | -------------------------------------------------------------------------------- /data/annotations/KITTI/kitti_test.json: -------------------------------------------------------------------------------- 1 | { 2 | "2011_09_26_drive_0018_sync": 270, 3 | "2011_09_26_drive_0022_sync": 800, 4 | "2011_09_26_drive_0061_sync": 703, 5 | "2011_09_26_drive_0079_sync": 100, 6 | "2011_09_28_drive_0001_sync": 106, 7 | "2011_09_29_drive_0004_sync": 339 8 | } -------------------------------------------------------------------------------- /data/annotations/KITTI/kitti_val.json: -------------------------------------------------------------------------------- 1 | { 2 | "2011_09_26_drive_0001_sync": 108, 3 | "2011_09_26_drive_0036_sync": 803, 4 | "2011_09_26_drive_0057_sync": 361, 5 | "2011_09_26_drive_0086_sync": 706, 6 | "2011_09_26_drive_0113_sync": 87, 7 | "2011_09_30_drive_0016_sync": 279 8 | } -------------------------------------------------------------------------------- /pre_download/i3d/download.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | python pre_download/i3d/convert_tf_pretrained.py 400 4 | 5 | # It will automatically download the i3D model pretrained on Kinetics-400 and convert it to PyTorch, resulting in `i3d_pretrained_400.pt`.
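The mcvd scheduler configs above all hand VideoProcessor a per-channel mean and std of 127.5 (TaxiBJ uses 625 for its traffic counts), which maps raw uint8 pixel values from [0, 255] into roughly [-1, 1]. VideoProcessor itself is not part of this listing, so the snippet below is only a sketch of the arithmetic those numbers imply, together with the inverse mapping a visualization hook or image-space metric would need.

import numpy as np

MEAN = np.array([127.5, 127.5, 127.5], dtype=np.float32)
STD = np.array([127.5, 127.5, 127.5], dtype=np.float32)

def normalize(frames: np.ndarray) -> np.ndarray:
    """uint8 frames of shape (T, H, W, C) -> float32 in roughly [-1, 1], channel-wise."""
    return (frames.astype(np.float32) - MEAN) / STD

def denormalize(frames: np.ndarray) -> np.ndarray:
    """Inverse mapping for visualization/metrics, rounded and clipped back to uint8."""
    return np.clip(np.rint(frames * STD + MEAN), 0, 255).astype(np.uint8)

clip = np.random.randint(0, 256, size=(10, 64, 64, 3), dtype=np.uint8)
x = normalize(clip)                          # values now lie in [-1.0, 1.0]
assert np.array_equal(denormalize(x), clip)  # exactly invertible after rounding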
-------------------------------------------------------------------------------- /pre_download/inception/download.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | wget -c "https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth" -O pt_inception-2015-12-05-6726825d.pth -------------------------------------------------------------------------------- /predbench/__init__.py: -------------------------------------------------------------------------------- 1 | import mmcv 2 | 3 | from .version import __version__, short_version 4 | 5 | def digit_version(version_str): 6 | digit_version = [] 7 | for x in version_str.split('.'): 8 | if x.isdigit(): 9 | digit_version.append(int(x)) 10 | elif x.find('rc') != -1: 11 | patch_version = x.split('rc') 12 | digit_version.append(int(patch_version[0]) - 1) 13 | digit_version.append(int(patch_version[1])) 14 | return digit_version 15 | 16 | 17 | mmcv_minimum_version = '2.0.0' 18 | mmcv_maximum_version = '2.0.0' 19 | mmcv_version = digit_version(mmcv.__version__) 20 | 21 | 22 | assert (mmcv_version >= digit_version(mmcv_minimum_version) 23 | and mmcv_version <= digit_version(mmcv_maximum_version)), \ 24 | f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 25 | f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' 26 | 27 | __all__ = ['__version__', 'short_version'] -------------------------------------------------------------------------------- /predbench/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .transforms import * 2 | 3 | 4 | from .movingmnist import MovingMNISTDataset 5 | from .kitti import KittiDataset 6 | from .caltech import CaltechPedestrianDataset 7 | from .bair import BAIRDataset 8 | from .bridgedata import BridgeDataDataset 9 | from .robonet import RoboNetDataset 10 | from .cityscapes import CityScapesDataset 11 | from .enso import ENSODataset 12 | from .sevir import SEVIRDataset 13 | from .human import HumanDataset 14 | from .nuscenes import NuScenesDataset 15 | from .taxibj import TaxiBJDataset 16 | from .traffic4cast import Traffic4CastDataset 17 | from .kth import KTHDataset 18 | from .weatherbench_np import WeatherBenchDatasetNp 19 | -------------------------------------------------------------------------------- /predbench/datasets/bair.py: -------------------------------------------------------------------------------- 1 | from typing import List, Sequence 2 | import numpy as np 3 | from predbench.registry import DATASETS 4 | from mmengine.dataset import BaseDataset 5 | import numpy as np 6 | 7 | 8 | @DATASETS.register_module() 9 | class BAIRDataset(BaseDataset): 10 | def __init__(self, 11 | clip_len=20, 12 | frame_interval=1, 13 | 14 | ann_file: str = '', 15 | metainfo: dict = None, 16 | data_root: str = '', 17 | data_prefix: dict = dict(img_path=''), 18 | filter_cfg: dict = None, 19 | indices: Sequence[int] = None, 20 | serialize_data: bool = True, 21 | pipeline: List[dict] = ..., 22 | test_mode: bool = False, 23 | lazy_init: bool = False, 24 | max_refetch: int = 1000 25 | ): 26 | 27 | self.clip_len = clip_len 28 | self.frame_interval = frame_interval 29 | super().__init__(ann_file, metainfo, data_root, data_prefix, filter_cfg, indices, serialize_data, pipeline, test_mode, lazy_init, max_refetch) 30 | 31 | 32 | def load_data_list(self): 33 | """Load annotation file to get video information.""" 34 | data_list = [] 35 | data = 
np.load(self.data_root) 36 | for idx in range(data.shape[0]): 37 | data_info = dict( 38 | array = data[idx], 39 | total_frames = data[idx].shape[0], 40 | ) 41 | data_list.append(data_info) 42 | print(len(data_list)) 43 | return data_list 44 | -------------------------------------------------------------------------------- /predbench/datasets/samplers/__init__.py: -------------------------------------------------------------------------------- 1 | from .infinite_sampler import InfiniteSampler 2 | -------------------------------------------------------------------------------- /predbench/datasets/samplers/infinite_sampler.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Sized 2 | import math 3 | from mmengine.dataset.sampler import InfiniteSampler as MMEngine_InfiniteSampler 4 | from predbench.registry import DATA_SAMPLERS 5 | 6 | 7 | @DATA_SAMPLERS.register_module() 8 | class InfiniteSampler(MMEngine_InfiniteSampler): 9 | def __init__(self, dataset: Sized, shuffle: bool = True, seed: int = None) -> None: 10 | super().__init__(dataset, shuffle, seed) 11 | 12 | def __len__(self) -> int: 13 | return math.ceil(self.size / self.world_size) 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /predbench/datasets/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | from .augmentations import * 2 | from .compose import * 3 | from .formatting import * 4 | from .loading import * 5 | from .utils import * 6 | -------------------------------------------------------------------------------- /predbench/engine/__init__.py: -------------------------------------------------------------------------------- 1 | from .optimizers import * -------------------------------------------------------------------------------- /predbench/engine/hooks/__init__.py: -------------------------------------------------------------------------------- 1 | from .viz_heatmap_data import * 2 | 3 | from .iter_record_hook import * 4 | from .epoch_record_hook import * 5 | from .viz_video_hook import * 6 | from .save_result_hook import * 7 | 8 | -------------------------------------------------------------------------------- /predbench/engine/hooks/epoch_record_hook.py: -------------------------------------------------------------------------------- 1 | from predbench.registry import HOOKS 2 | 3 | from mmengine.hooks import Hook 4 | from mmengine.hooks.hook import DATA_BATCH 5 | 6 | @HOOKS.register_module() 7 | class EpochRecordHook(Hook): 8 | def before_train_iter(self, runner, batch_idx: int, data_batch: DATA_BATCH = None): 9 | """ 10 | set model._epoch to runner._train_loop._epoch 11 | """ 12 | runner.model._epoch = runner._train_loop._epoch 13 | -------------------------------------------------------------------------------- /predbench/engine/hooks/iter_record_hook.py: -------------------------------------------------------------------------------- 1 | from predbench.registry import HOOKS 2 | 3 | from mmengine.hooks import Hook 4 | from mmengine.hooks.hook import DATA_BATCH 5 | 6 | @HOOKS.register_module() 7 | class IterRecordHook(Hook): 8 | def before_train_iter(self, runner, batch_idx: int, data_batch: DATA_BATCH = None): 9 | """ 10 | set model.iter_num to runner._train_loop._iter 11 | """ 12 | runner.model._iter = runner._train_loop._iter 13 | -------------------------------------------------------------------------------- 
/predbench/engine/hooks/viz_heatmap_data/__init__.py: -------------------------------------------------------------------------------- 1 | from .viz_sevir import * 2 | from .viz_enso import * 3 | from .viz_weatherbench import * 4 | from .viz_taxibj import * 5 | from .viz_traffic4cast2021 import * -------------------------------------------------------------------------------- /predbench/engine/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .multi_optimizer_constructor import * -------------------------------------------------------------------------------- /predbench/engine/runner/__init__.py: -------------------------------------------------------------------------------- 1 | from .infinite_epoch_loop import InfiniteEpochBasedTrainLoop 2 | -------------------------------------------------------------------------------- /predbench/engine/runner/infinite_epoch_loop.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional, Sequence, Tuple, Union 2 | from torch.utils.data import DataLoader 3 | import math 4 | from mmengine.runner.loops import IterBasedTrainLoop as MMengine_IterBasedTrainLoop 5 | from mmengine.dist.utils import get_world_size 6 | from mmengine.runner.utils import calc_dynamic_intervals 7 | 8 | from predbench.registry import LOOPS 9 | 10 | @LOOPS.register_module() 11 | class InfiniteEpochBasedTrainLoop(MMengine_IterBasedTrainLoop): 12 | def __init__(self, 13 | runner, 14 | dataloader, 15 | max_epochs, 16 | val_begin: int = 1, 17 | val_interval: int = 1000, 18 | dynamic_intervals: List[Tuple[int, int]] = None 19 | ) -> None: 20 | super().__init__( 21 | runner, dataloader, max_iters=0, val_begin=val_begin, 22 | val_interval=0, dynamic_intervals=dynamic_intervals 23 | ) 24 | world_size = get_world_size() 25 | self.n_iter_per_epoch = math.ceil(len(self.dataloader.dataset) / (world_size * self.dataloader.batch_size)) 26 | self._max_epochs = max_epochs 27 | self.val_interval = val_interval * self.n_iter_per_epoch 28 | self._max_iters = max_epochs * self.n_iter_per_epoch 29 | 30 | self.dynamic_milestones, self.dynamic_intervals = \ 31 | calc_dynamic_intervals( 32 | self.val_interval, dynamic_intervals) 33 | 34 | def run_iter(self, data_batch: Sequence[dict]) -> None: 35 | super().run_iter(data_batch) 36 | if self._iter % self.n_iter_per_epoch == 0: 37 | self._epoch += 1 38 | -------------------------------------------------------------------------------- /predbench/engine/schedulers/__init__.py: -------------------------------------------------------------------------------- 1 | from .onecycle_scheduler import * -------------------------------------------------------------------------------- /predbench/engine/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from .vis_backend import WandbVisBackend -------------------------------------------------------------------------------- /predbench/engine/visualization/vis_backend.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Union 2 | from mmengine.visualization.vis_backend import WandbVisBackend as MMEngine_WandbVisBackend 3 | 4 | 5 | from predbench.registry import VISBACKENDS 6 | 7 | 8 | @VISBACKENDS.register_module() 9 | class WandbVisBackend(MMEngine_WandbVisBackend): 10 | def __init__(self, 11 | save_dir: str, 12 | init_kwargs: Optional[dict] = None, 13 | 
define_metric_cfg: Union[dict, list, None] = None, 14 | commit: Optional[bool] = True, 15 | log_code_name: Optional[str] = None, 16 | watch_kwargs: Optional[dict] = None 17 | ): 18 | super().__init__(save_dir, init_kwargs, define_metric_cfg, commit, log_code_name, watch_kwargs) 19 | 20 | -------------------------------------------------------------------------------- /predbench/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | from .error_metrics import * 2 | from .similarity_metrics import * 3 | from .perception_metrics import * 4 | from .weather_metrics import * 5 | 6 | -------------------------------------------------------------------------------- /predbench/evaluation/perception_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .perception_metrics import * 2 | from .fvd import * 3 | from .fid import * 4 | from .lpips import * 5 | from .isc import * -------------------------------------------------------------------------------- /predbench/evaluation/perception_metrics/fid/__init__.py: -------------------------------------------------------------------------------- 1 | from .fid import * 2 | from .inception_v3 import * -------------------------------------------------------------------------------- /predbench/evaluation/perception_metrics/fvd/__init__.py: -------------------------------------------------------------------------------- 1 | from .fvd import * 2 | from .i3d_pytorch import * -------------------------------------------------------------------------------- /predbench/evaluation/perception_metrics/lpips.py: -------------------------------------------------------------------------------- 1 | from lpips import LPIPS 2 | import torch 3 | import einops 4 | 5 | 6 | @torch.no_grad() 7 | class cal_LPIPS: 8 | ''' 9 | gt and pred has the shape of 10 | [n t c h w] (video) 11 | [n c h w] (image) 12 | ''' 13 | def __init__(self, net='alex', use_gpu=False): 14 | assert net in ['alex', 'squeeze', 'vgg'] 15 | self.lpips_model = LPIPS(net=net).eval() 16 | self.use_gpu = use_gpu 17 | if torch.cuda.is_available() and self.use_gpu: 18 | self.lpips_model = self.lpips_model.cuda() 19 | 20 | def __call__(self, pred, gt): 21 | pred = torch.maximum(pred, torch.min(gt)) 22 | pred = torch.minimum(pred, torch.max(gt)) 23 | if torch.cuda.is_available() and self.use_gpu: 24 | pred, gt = pred.cuda(), gt.cuda() 25 | else: 26 | pred, gt = pred.cpu(), gt.cpu() 27 | if len(pred.shape) == 5: 28 | pred = einops.rearrange(pred, 'n t c h w -> (n t) c h w') 29 | gt = einops.rearrange(gt, 'n t c h w -> (n t) c h w') 30 | return self.lpips_model(pred, gt).mean().detach().cpu() 31 | -------------------------------------------------------------------------------- /predbench/evaluation/weather_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .enso import * 2 | from .weather_metrics import * -------------------------------------------------------------------------------- /predbench/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_processors import * 2 | from .forecasters import * 3 | from .losses import * 4 | from .modules import * 5 | from .operators import * 6 | 7 | -------------------------------------------------------------------------------- /predbench/models/data_processors/__init__.py: -------------------------------------------------------------------------------- 1 
| from .video_processor import * 2 | from .image_processor import * 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /predbench/models/forecasters/__init__.py: -------------------------------------------------------------------------------- 1 | from .cnn_model import * 2 | from .rnn_model import * 3 | from .diffusion_model import * 4 | from .transformer_model import * 5 | 6 | 7 | -------------------------------------------------------------------------------- /predbench/models/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from .weighted_loss import * 2 | from .dsm_loss import * 3 | from .ce_loss import * 4 | 5 | -------------------------------------------------------------------------------- /predbench/models/losses/ce_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from typing import List 3 | import torch.nn.functional as F 4 | import torch 5 | 6 | from predbench.registry import MODELS 7 | 8 | 9 | @MODELS.register_module() 10 | class CrossEntropyLoss(nn.Module): 11 | def __init__(self, ignore_index, label_smoothing, loss_weight=1.0): 12 | super().__init__() 13 | self.loss_fn = nn.CrossEntropyLoss(ignore_index=ignore_index, label_smoothing=label_smoothing) 14 | self.loss_weight = loss_weight 15 | 16 | def forward(self, logits, labels): 17 | losses = dict(ce_loss = self.loss_fn(logits, labels)) 18 | return losses 19 | 20 | -------------------------------------------------------------------------------- /predbench/models/modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .cnn_modules import * 2 | from .rnn_modules import * 3 | from .diffusion_modules import * 4 | from .transformer_modules import * 5 | 6 | # __all__ = [ 7 | # 'ConvLSTMCell', 'CausalLSTMCell', 'GHU', 'SpatioTemporalLSTMCellv1', 'SpatioTemporalLSTMCellv2', 8 | # 'MIMBlock', 'MIMN', 'Eidetic3DLSTMCell', 'tf_Conv3d', 'zig_rev_predictor', 'autoencoder', 9 | # 'PhyCell', 'PhyD_ConvLSTM', 'PhyD_EncoderRNN', 'PredNetConvLSTMCell', 'K2M', 'MAUCell', 10 | # 'BasicConv2d', 'ConvSC', 'GroupConv2d', 11 | # 'ConvNeXtSubBlock', 'ConvMixerSubBlock', 'GASubBlock', 'gInception_ST', 12 | # 'HorNetSubBlock', 'MLPMixerSubBlock', 'MogaSubBlock', 'PoolFormerSubBlock', 13 | # 'SwinSubBlock', 'UniformerSubBlock', 'VANSubBlock', 'ViTSubBlock', 'TAUSubBlock', 14 | # 'Routing', 'MVFB', 'RoundSTE', 'warp' 15 | # ] -------------------------------------------------------------------------------- /predbench/models/modules/cnn_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .metavp_modules import * 2 | from .metavp_model import * -------------------------------------------------------------------------------- /predbench/models/modules/cnn_modules/metavp_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .metavp_layers import * 2 | from .modules import * 3 | from .metavp_midnet import * 4 | from .metavp_enc_dec import * 5 | -------------------------------------------------------------------------------- /predbench/models/modules/cnn_modules/metavp_modules/metavp_layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .hornet import HorBlock 2 | from .moganet import ChannelAggregationFFN, MultiOrderGatedAggregation, MultiOrderDWConv 3 | from 
.poolformer import PoolFormerBlock 4 | from .uniformer import CBlock, SABlock 5 | from .van import DWConv, MixMlp, VANBlock 6 | 7 | 8 | __all__ = [ 9 | 'HorBlock', 'ChannelAggregationFFN', 'MultiOrderGatedAggregation', 'MultiOrderDWConv', 10 | 'PoolFormerBlock', 'CBlock', 'SABlock', 'DWConv', 'MixMlp', 'VANBlock', 11 | ] -------------------------------------------------------------------------------- /predbench/models/modules/diffusion_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .samplers import * 2 | from .mcvd_modules import UNetMore_DDPM -------------------------------------------------------------------------------- /predbench/models/modules/diffusion_modules/mcvd_modules/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright 2020 The Google Research Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | from .ncsnpp_more import UNetMore_DDPM -------------------------------------------------------------------------------- /predbench/models/modules/rnn_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .lstm_cells import * 2 | from .rnn_base import * 3 | from .convlstm import * 4 | from .e3dlstm import * 5 | from .predrnnv1 import * 6 | from .predrnnpp import * 7 | from .predrnnv2 import * 8 | from .mau import * 9 | from .phydnet import * -------------------------------------------------------------------------------- /predbench/models/modules/rnn_modules/lstm_cells/__init__.py: -------------------------------------------------------------------------------- 1 | from .convlstm_cells import ConvLSTMCell 2 | from .predrnnv1_cells import SpatioTemporalLSTMCellv1 3 | from .predrnnpp_cells import CausalLSTMCell, GHU 4 | from .predrnnv2_cells import SpatioTemporalLSTMCellv2 5 | from .e3dlstm_cells import Eidetic3DLSTMCell, tf_Conv3d 6 | from .mau_cells import MAUCell 7 | from .phydnet_cells import PhyCell, PhyD_ConvLSTM, PhyD_EncoderRNN, K2M 8 | -------------------------------------------------------------------------------- /predbench/models/modules/rnn_modules/rnn_base.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | from abc import abstractmethod, ABCMeta 4 | 5 | class RNNBase(nn.Module, metaclass=ABCMeta): 6 | def __init__(self, 7 | input_shape, 8 | num_layers, 9 | num_hidden, 10 | patch_size, 11 | ): 12 | super().__init__() 13 | 14 | self.input_len, self.C_img, self.H_img, self.W_img = input_shape 15 | self.num_layers = num_layers 16 | self.num_hidden = num_hidden 17 | self.patch_size = patch_size 18 | 19 | self.H_patch = self.H_img // patch_size 20 | self.W_patch = self.W_img // patch_size 21 | self.C_patch = patch_size * patch_size * self.C_img 22 | 23 | 24 | @abstractmethod 25 | def forward(self): 26 | pass 
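RNNBase above derives H_patch = H_img // patch_size, W_patch = W_img // patch_size and C_patch = patch_size * patch_size * C_img, i.e. the space-to-depth ("patchify") reshape that PredRNN-style recurrent models apply before their LSTM cells. The repo's own reshape utility is not shown in this listing and its channel ordering may differ, so the einops sketch below (einops is already a dependency, see the LPIPS wrapper earlier) is only an illustration of those shapes.

import torch
import einops

def patchify(video: torch.Tensor, patch_size: int) -> torch.Tensor:
    """(N, T, C, H, W) -> (N, T, C*p*p, H//p, W//p): fold each p x p pixel block into channels."""
    return einops.rearrange(video, 'n t c (h p1) (w p2) -> n t (p1 p2 c) h w',
                            p1=patch_size, p2=patch_size)

def unpatchify(patches: torch.Tensor, patch_size: int) -> torch.Tensor:
    """Inverse reshape back to (N, T, C, H, W)."""
    return einops.rearrange(patches, 'n t (p1 p2 c) h w -> n t c (h p1) (w p2)',
                            p1=patch_size, p2=patch_size)

x = torch.rand(2, 10, 1, 128, 128)    # N, T, C_img, H_img, W_img for a grayscale clip
p = patchify(x, patch_size=4)         # -> (2, 10, 16, 32, 32): C_patch, H_patch, W_patch
assert torch.equal(unpatchify(p, patch_size=4), x)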
-------------------------------------------------------------------------------- /predbench/models/modules/transformer_modules/__init__.py: -------------------------------------------------------------------------------- 1 | from .cuboid_transformer_modules import * 2 | from .cuboid_transformer_patterns import * 3 | from .earthformer import * 4 | from .utils import * -------------------------------------------------------------------------------- /predbench/models/operators/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_act import FusedLeakyReLU, fused_leaky_relu 2 | from .upfirdn2d import upfirdn2d 3 | -------------------------------------------------------------------------------- /predbench/models/operators/fused_bias_act.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include <ATen/ATen.h> 3 | #include <torch/extension.h> 4 | 5 | torch::Tensor fused_bias_act_op(const torch::Tensor &input, 6 | const torch::Tensor &bias, 7 | const torch::Tensor &refer, int act, int grad, 8 | float alpha, float scale); 9 | 10 | #define CHECK_CUDA(x) \ 11 | TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 12 | #define CHECK_CONTIGUOUS(x) \ 13 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 14 | #define CHECK_INPUT(x) \ 15 | CHECK_CUDA(x); \ 16 | CHECK_CONTIGUOUS(x) 17 | 18 | torch::Tensor fused_bias_act(const torch::Tensor &input, 19 | const torch::Tensor &bias, 20 | const torch::Tensor &refer, int act, int grad, 21 | float alpha, float scale) { 22 | CHECK_INPUT(input); 23 | CHECK_INPUT(bias); 24 | 25 | at::DeviceGuard guard(input.device()); 26 | 27 | return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); 28 | } 29 | 30 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 31 | m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); 32 | } -------------------------------------------------------------------------------- /predbench/models/operators/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | #include <ATen/ATen.h> 2 | #include <torch/extension.h> 3 | 4 | torch::Tensor upfirdn2d_op(const torch::Tensor &input, 5 | const torch::Tensor &kernel, int up_x, int up_y, 6 | int down_x, int down_y, int pad_x0, int pad_x1, 7 | int pad_y0, int pad_y1); 8 | 9 | #define CHECK_CUDA(x) \ 10 | TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CONTIGUOUS(x) \ 12 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 13 | #define CHECK_INPUT(x) \ 14 | CHECK_CUDA(x); \ 15 | CHECK_CONTIGUOUS(x) 16 | 17 | torch::Tensor upfirdn2d(const torch::Tensor &input, const torch::Tensor &kernel, 18 | int up_x, int up_y, int down_x, int down_y, int pad_x0, 19 | int pad_x1, int pad_y0, int pad_y1) { 20 | CHECK_INPUT(input); 21 | CHECK_INPUT(kernel); 22 | 23 | at::DeviceGuard guard(input.device()); 24 | 25 | return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, 26 | pad_y0, pad_y1); 27 | } 28 | 29 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 30 | m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); 31 | } -------------------------------------------------------------------------------- /predbench/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.0.1' 2 | short_version = __version__ 3 | 4 | 5 | def parse_version_info(version_str): 6 | version_info = [] 7 | for x in version_str.split('.'): 8 | if x.isdigit(): 9 | version_info.append(int(x)) 10 | elif x.find('rc') != -1: 11 | patch_version =
x.split('rc') 12 | version_info.append(int(patch_version[0])) 13 | version_info.append(f'rc{patch_version[1]}') 14 | return tuple(version_info) 15 | 16 | 17 | version_info = parse_version_info(__version__) -------------------------------------------------------------------------------- /tools/remove_ckpt_module.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import OrderedDict 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser(description='Change model state dict name') 6 | parser.add_argument( 7 | 'ckpt_path', help='the path of the original model', 8 | ) 9 | parser.add_argument( 10 | 'new_ckpt_path', help='save the new model in this path' 11 | ) 12 | 13 | def remove_module_in_state_dict(ckpt_path, new_ckpt_path): 14 | 15 | ckpt = torch.load(ckpt_path, map_location='cpu') 16 | 17 | print(list(ckpt.keys())) 18 | # ['meta', 'state_dict', 'message_hub', 'optimizer', 'param_schedulers'] 19 | # print(sd['meta']) 20 | # print(sd['message_hub']) 21 | # print(sd['param_schedulers']) 22 | 23 | sd = ckpt['state_dict'] 24 | new_sd = OrderedDict() 25 | for k, v in sd.items(): 26 | if 'module' in k: 27 | new_k = k.replace('module.', '') 28 | else: 29 | new_k = k 30 | new_sd[new_k] = v 31 | 32 | ckpt['state_dict'] = new_sd 33 | 34 | torch.save(ckpt, new_ckpt_path) 35 | 36 | key_zip = zip(new_sd.keys(), sd.keys()) 37 | for keys in key_zip: 38 | print(keys) 39 | 40 | 41 | if __name__ == '__main__': 42 | args = parser.parse_args() 43 | ckpt_path = args.ckpt_path 44 | new_ckpt_path = args.new_ckpt_path 45 | remove_module_in_state_dict(ckpt_path, new_ckpt_path) -------------------------------------------------------------------------------- /tools/scripts/bair/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/earthformer/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m 
torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/earthformer/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/earthformer/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/earthformer/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 
1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/simvpv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/simvpv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/simvpv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/simvpv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/simvpv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/simvpv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/simvpv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/simvpv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bair/tau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bair/tau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bair/tau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bair/tau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | 
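The shell scripts above all follow the same two-step pattern: a distributed train.py run on a given config, followed by test.py on the resulting work_dir. As a minimal sketch (not a repository tool), the hypothetical helper below replays that pattern for every BAIR baseline; the model names, config paths, and work_dirs are taken from the scripts, while the helper itself is an assumption for illustration.

import os
import random
import subprocess

BAIR_MODELS = [
    "convlstm", "e3dlstm", "earthformer", "mau", "mcvd", "phydnet",
    "predrnnpp", "predrnnv1", "predrnnv2", "simvpv1", "simvpv2", "tau",
]

def run_bair(model, num_nodes=1, num_gpus=4):
    run = "bs_64_lr_1e4"
    work_dir = f"work_dirs/bair/{model}/{run}"
    # mirror the scripts: extend PYTHONPATH with the repo root, pick a random port
    env = dict(os.environ)
    env["PYTHONPATH"] = env.get("PYTHONPATH", "") + ":" + os.getcwd()
    master_port = str(12000 + random.randrange(20000))
    subprocess.run(
        ["python", "-m", "torch.distributed.run",
         f"--nnodes={num_nodes}", f"--nproc_per_node={num_gpus}",
         f"--master_port={master_port}", "train.py",
         "--config", f"configs/bair/{model}/{run}.py",
         "--work-dir", work_dir, "--launcher", "pytorch"],
        env=env, check=True)
    subprocess.run(
        ["python", "test.py", "--work-dir", work_dir,
         "--test-best", "--metric-only"],
        env=env, check=True)

if __name__ == "__main__":
    for name in BAIR_MODELS:
        run_bair(name)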
-------------------------------------------------------------------------------- /tools/scripts/bridgedata/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/earthformer/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/earthformer/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/bridgedata/earthformer/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/earthformer/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/mcvd/bs_128_lr_4e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} 
--nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/mcvd/bs_128_lr_4e4.py \ 10 | --work-dir work_dirs/bridgedata/mcvd/bs_128_lr_4e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/mcvd/bs_128_lr_4e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/bridgedata/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- 
/tools/scripts/bridgedata/simvpv1/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/simvpv1/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/bridgedata/simvpv1/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/simvpv1/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/simvpv2/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/simvpv2/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/bridgedata/simvpv2/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/simvpv2/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/bridgedata/tau/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/bridgedata/tau/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/bridgedata/tau/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/bridgedata/tau/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config 
configs/cityscapes/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/earthformer/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/earthformer/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/earthformer/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/earthformer/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/simvpv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/simvpv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/simvpv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/simvpv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/simvpv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/simvpv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/simvpv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 
| python test.py \ 14 | --work-dir work_dirs/cityscapes/simvpv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/cityscapes/tau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/cityscapes/tau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/cityscapes/tau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/cityscapes/tau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/earthformer/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/earthformer/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/earthformer/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/earthformer/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run 
--nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/simvpv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/simvpv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/simvpv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/simvpv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/simvpv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/simvpv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/simvpv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/simvpv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/enso/tau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/enso/tau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/enso/tau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/enso/tau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/convlstm/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/convlstm/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/convlstm/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/convlstm/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | 
-------------------------------------------------------------------------------- /tools/scripts/human/e3dlstm/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/e3dlstm/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/e3dlstm/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/e3dlstm/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/human/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/mau/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/mau/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/mau/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/mau/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/human/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/phydnet/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/phydnet/bs_16_lr_1e4.py 
\ 10 | --work-dir work_dirs/human/phydnet/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/phydnet/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/predrnnpp/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/predrnnpp/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/predrnnpp/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/predrnnpp/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/predrnnv1/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/predrnnv1/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/predrnnv1/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/predrnnv1/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/predrnnv2/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/predrnnv2/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/human/predrnnv2/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/predrnnv2/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/simvpv1/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/simvpv1/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/human/simvpv1/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/simvpv1/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/simvpv2/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export 
PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/simvpv2/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/human/simvpv2/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/simvpv2/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/human/tau/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/human/tau/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/human/tau/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/human/tau/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/convlstm/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/convlstm/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/convlstm/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/convlstm/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/e3dlstm/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/e3dlstm/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/e3dlstm/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/e3dlstm/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | 
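The config files and work_dirs used throughout these scripts follow a bs_{batch}_lr_{mantissa}e{exponent} naming convention, where the exponent appears to encode a negative power of ten (for example, bs_64_lr_2e4 for batch size 64 and learning rate 2e-4). The small parser below is a hypothetical sketch of that convention, not a repository utility, and the negative-exponent reading is an inference from the names.

import re

RUN_NAME = re.compile(r"bs_(\d+)_lr_(\d+)e(\d+)")

def parse_run_name(name):
    # decode e.g. "bs_16_lr_4e5" into (batch_size=16, lr=4e-5)
    match = RUN_NAME.fullmatch(name)
    if match is None:
        raise ValueError(f"unrecognized run name: {name}")
    batch_size = int(match.group(1))
    lr = float(f"{match.group(2)}e-{match.group(3)}")  # assumed negative exponent
    return batch_size, lr

print(parse_run_name("bs_16_lr_4e5"))  # batch size 16, lr 4e-5
print(parse_run_name("bs_64_lr_1e4"))  # batch size 64, lr 1e-4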
-------------------------------------------------------------------------------- /tools/scripts/kitti/mau/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/mau/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/mau/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/mau/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/mcvd/bs_64_lr_2e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/mcvd/bs_64_lr_2e4.py \ 10 | --work-dir work_dirs/kitti/mcvd/bs_64_lr_2e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/mcvd/bs_64_lr_2e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/phydnet/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/phydnet/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/phydnet/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/phydnet/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/predrnnpp/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/predrnnpp/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/predrnnpp/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/predrnnpp/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/predrnnv1/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/predrnnv1/bs_16_lr_1e3.py \ 10 
| --work-dir work_dirs/kitti/predrnnv1/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/predrnnv1/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/predrnnv2/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/predrnnv2/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kitti/predrnnv2/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/predrnnv2/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/simvpv1/bs_16_lr_5e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/simvpv1/bs_16_lr_5e3.py \ 10 | --work-dir work_dirs/kitti/simvpv1/bs_16_lr_5e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/simvpv1/bs_16_lr_5e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/simvpv2/bs_16_lr_5e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/simvpv2/bs_16_lr_5e3.py \ 10 | --work-dir work_dirs/kitti/simvpv2/bs_16_lr_5e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/simvpv2/bs_16_lr_5e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kitti/tau/bs_16_lr_5e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kitti/tau/bs_16_lr_5e3.py \ 10 | --work-dir work_dirs/kitti/tau/bs_16_lr_5e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kitti/tau/bs_16_lr_5e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/convlstm/bs_16_lr_4e5.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 
6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/convlstm/bs_16_lr_4e5.py \ 10 | --work-dir work_dirs/kth/convlstm/bs_16_lr_4e5 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/convlstm/bs_16_lr_4e5 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/e3dlstm/bs_8_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/e3dlstm/bs_8_lr_5e4.py \ 10 | --work-dir work_dirs/kth/e3dlstm/bs_8_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/e3dlstm/bs_8_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/kth/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/mau/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/mau/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/kth/mau/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/mau/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/mcvd/bs_64_lr_2e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/mcvd/bs_64_lr_2e4.py \ 10 | --work-dir work_dirs/kth/mcvd/bs_64_lr_2e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/mcvd/bs_64_lr_2e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/phydnet/bs_16_lr_1e3.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/phydnet/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kth/phydnet/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/phydnet/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/predrnnpp/bs_16_lr_4e5.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/predrnnpp/bs_16_lr_4e5.py \ 10 | --work-dir work_dirs/kth/predrnnpp/bs_16_lr_4e5 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/predrnnpp/bs_16_lr_4e5 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/predrnnv1/bs_16_lr_4e5.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/predrnnv1/bs_16_lr_4e5.py \ 10 | --work-dir work_dirs/kth/predrnnv1/bs_16_lr_4e5 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/predrnnv1/bs_16_lr_4e5 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/predrnnv2/bs_16_lr_4e5.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/predrnnv2/bs_16_lr_4e5.py \ 10 | --work-dir work_dirs/kth/predrnnv2/bs_16_lr_4e5 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/predrnnv2/bs_16_lr_4e5 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/simvpv1/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/simvpv1/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kth/simvpv1/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir 
work_dirs/kth/simvpv1/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/simvpv2/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/simvpv2/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kth/simvpv2/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/simvpv2/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/kth/tau/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/kth/tau/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/kth/tau/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/kth/tau/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/convlstm/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/convlstm/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/mnist/convlstm/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/convlstm/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/e3dlstm/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/e3dlstm/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/mnist/e3dlstm/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/e3dlstm/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} 
train.py \ 9 | --config configs/mnist/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/mau/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/mau/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/mau/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/mau/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/mcvd/bs_64_lr_2e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/mcvd/bs_64_lr_2e4.py \ 10 | --work-dir work_dirs/mnist/mcvd/bs_64_lr_2e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/mcvd/bs_64_lr_2e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/phydnet/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/phydnet/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/phydnet/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/phydnet/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/predrnnpp/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/predrnnpp/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/mnist/predrnnpp/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/predrnnpp/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/predrnnv1/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | 
export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/predrnnv1/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/mnist/predrnnv1/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/predrnnv1/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/predrnnv2/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/predrnnv2/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/mnist/predrnnv2/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/predrnnv2/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/simvpv1/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/simvpv1/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/simvpv1/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/simvpv1/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/simvpv2/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/simvpv2/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/simvpv2/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/simvpv2/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/mnist/tau/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/mnist/tau/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/mnist/tau/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/mnist/tau/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | 
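Usage sketch for the launcher scripts collected above (an assumption inferred from their contents, not documented usage): each script takes two optional positional arguments, the number of nodes (default 1) and the number of GPUs per node (default 4), and appears to expect the repository root as the working directory, since train.py, test.py, and the PYTHONPATH export all resolve against $(pwd).

# Hypothetical invocations, assuming the current directory is the repository root.
# $1 = number of nodes (defaults to 1), $2 = GPUs per node (defaults to 4).
bash tools/scripts/mnist/tau/bs_16_lr_1e3.sh          # defaults: 1 node x 4 GPUs
bash tools/scripts/mnist/tau/bs_16_lr_1e3.sh 1 2      # 1 node x 2 GPUs
# After training, each script re-runs test.py on the best checkpoint with --test-best --metric-only.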
-------------------------------------------------------------------------------- /tools/scripts/nuscenes/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/earthformer/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/earthformer/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/nuscenes/earthformer/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/earthformer/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/mcvd/bs_128_lr_4e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} 
--master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/mcvd/bs_128_lr_4e4.py \ 10 | --work-dir work_dirs/nuscenes/mcvd/bs_128_lr_4e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/mcvd/bs_128_lr_4e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/nuscenes/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/simvpv1/bs_64_lr_1e3.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/simvpv1/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/nuscenes/simvpv1/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/simvpv1/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/simvpv2/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/simvpv2/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/nuscenes/simvpv2/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/simvpv2/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/nuscenes/tau/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/nuscenes/tau/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/nuscenes/tau/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/nuscenes/tau/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 
12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/earthformer/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/earthformer/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/robonet/earthformer/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/earthformer/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/mcvd/bs_128_lr_4e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/mcvd/bs_128_lr_4e4.py \ 10 | --work-dir work_dirs/robonet/mcvd/bs_128_lr_4e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/mcvd/bs_128_lr_4e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m 
torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/predrnnv1/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/predrnnv1/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/predrnnv1/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/predrnnv2/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/predrnnv2/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/robonet/predrnnv2/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/predrnnv2/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/simvpv1/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/simvpv1/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/robonet/simvpv1/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/simvpv1/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/robonet/simvpv2/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/simvpv2/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/robonet/simvpv2/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/simvpv2/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- 
/tools/scripts/robonet/tau/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/robonet/tau/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/robonet/tau/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/robonet/tau/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/convlstm/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/convlstm/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/convlstm/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/convlstm/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/e3dlstm/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/e3dlstm/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/e3dlstm/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/e3dlstm/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/mau/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/mau/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/mau/bs_32_lr_1e3 \ 11 | 
--launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/mau/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/mcvd/bs_32_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/mcvd/bs_32_lr_1e4.py \ 10 | --work-dir work_dirs/sevir/mcvd/bs_32_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/mcvd/bs_32_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/phydnet/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/phydnet/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/phydnet/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/phydnet/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/predrnnpp/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/predrnnpp/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/predrnnpp/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/predrnnpp/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/predrnnv1/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/predrnnv1/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/predrnnv1/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/predrnnv1/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/predrnnv2/bs_64_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m 
torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/predrnnv2/bs_64_lr_1e3.py \ 10 | --work-dir work_dirs/sevir/predrnnv2/bs_64_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/predrnnv2/bs_64_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/simvpv1/bs_32_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/simvpv1/bs_32_lr_1e4.py \ 10 | --work-dir work_dirs/sevir/simvpv1/bs_32_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/simvpv1/bs_32_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/simvpv2/bs_32_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/simvpv2/bs_32_lr_1e4.py \ 10 | --work-dir work_dirs/sevir/simvpv2/bs_32_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/simvpv2/bs_32_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/sevir/tau/bs_32_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/sevir/tau/bs_32_lr_1e4.py \ 10 | --work-dir work_dirs/sevir/tau/bs_32_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/sevir/tau/bs_32_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/convlstm/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/convlstm/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/taxibj/convlstm/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/convlstm/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/e3dlstm/bs_64_lr_2e4.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/e3dlstm/bs_64_lr_2e4.py \ 10 | --work-dir work_dirs/taxibj/e3dlstm/bs_64_lr_2e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/e3dlstm/bs_64_lr_2e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/earthformer/bs_32_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/earthformer/bs_32_lr_1e3.py \ 10 | --work-dir work_dirs/taxibj/earthformer/bs_32_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/earthformer/bs_32_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/mau/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/mau/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/taxibj/mau/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/mau/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/taxibj/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/phydnet/bs_16_lr_5e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/phydnet/bs_16_lr_5e4.py \ 10 | --work-dir work_dirs/taxibj/phydnet/bs_16_lr_5e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 
14 | --work-dir work_dirs/taxibj/phydnet/bs_16_lr_5e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/predrnnpp/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/predrnnpp/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/taxibj/predrnnpp/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/predrnnpp/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/predrnnv1/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/predrnnv1/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/taxibj/predrnnv1/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/predrnnv1/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/predrnnv2/bs_16_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/predrnnv2/bs_16_lr_1e4.py \ 10 | --work-dir work_dirs/taxibj/predrnnv2/bs_16_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/predrnnv2/bs_16_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/simvpv1/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/simvpv1/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/taxibj/simvpv1/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/simvpv1/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/simvpv2/bs_16_lr_1e3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m 
torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/simvpv2/bs_16_lr_1e3.py \ 10 | --work-dir work_dirs/taxibj/simvpv2/bs_16_lr_1e3 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/simvpv2/bs_16_lr_1e3 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/taxibj/tau/bs_16_lr_13.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/taxibj/tau/bs_16_lr_13.py \ 10 | --work-dir work_dirs/taxibj/tau/bs_16_lr_13 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/taxibj/tau/bs_16_lr_13 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/convlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/convlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/convlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/convlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/e3dlstm/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/e3dlstm/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/e3dlstm/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/e3dlstm/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/earthformer/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/earthformer/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/earthformer/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/earthformer/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | 
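A note on the PYTHONPATH export that recurs in these launchers: inside double quotes, bash keeps the backslash-escaped braces literal, so the existing PYTHONPATH value is not expanded; the variable ends up holding the literal text plus the repository root, and only $(pwd) is effectively added to the import path. If carrying over a pre-set PYTHONPATH is the intent (an assumption, not confirmed by the scripts), the conventional unescaped form would be:

# Sketch of the unescaped expansion that preserves any existing PYTHONPATH entries.
export PYTHONPATH="${PYTHONPATH}:$(pwd)"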
-------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/mau/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/mau/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/mau/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/mau/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/mcvd/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/mcvd/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/mcvd/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/mcvd/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/phydnet/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/phydnet/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/phydnet/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/phydnet/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/predrnnpp/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 7 | 8 | python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \ 9 | --config configs/traffic4cast2021/predrnnpp/bs_64_lr_1e4.py \ 10 | --work-dir work_dirs/traffic4cast2021/predrnnpp/bs_64_lr_1e4 \ 11 | --launcher pytorch 12 | 13 | python test.py \ 14 | --work-dir work_dirs/traffic4cast2021/predrnnpp/bs_64_lr_1e4 \ 15 | --test-best --metric-only 16 | -------------------------------------------------------------------------------- /tools/scripts/traffic4cast2021/predrnnv1/bs_64_lr_1e4.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export MASTER_PORT=$((12000 + $RANDOM%20000)) 3 | export PYTHONPATH="$\{PYTHONPATH\}:$(pwd)" 4 | 5 | NUM_NODE=${1:-1} 6 | NUM_GPU=${2:-4} 

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/traffic4cast2021/predrnnv1/bs_64_lr_1e4.py \
    --work-dir work_dirs/traffic4cast2021/predrnnv1/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/traffic4cast2021/predrnnv1/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/traffic4cast2021/predrnnv2/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/traffic4cast2021/predrnnv2/bs_64_lr_1e4.py \
    --work-dir work_dirs/traffic4cast2021/predrnnv2/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/traffic4cast2021/predrnnv2/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/traffic4cast2021/simvpv1/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/traffic4cast2021/simvpv1/bs_64_lr_1e4.py \
    --work-dir work_dirs/traffic4cast2021/simvpv1/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/traffic4cast2021/simvpv1/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/traffic4cast2021/simvpv2/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/traffic4cast2021/simvpv2/bs_64_lr_1e4.py \
    --work-dir work_dirs/traffic4cast2021/simvpv2/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/traffic4cast2021/simvpv2/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/traffic4cast2021/tau/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/traffic4cast2021/tau/bs_64_lr_1e4.py \
    --work-dir work_dirs/traffic4cast2021/tau/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/traffic4cast2021/tau/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/convlstm/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/convlstm/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/convlstm/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/convlstm/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/e3dlstm/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/e3dlstm/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/e3dlstm/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/e3dlstm/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/earthformer/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/earthformer/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/earthformer/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/earthformer/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/mau/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/mau/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/mau/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/mau/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/mcvd/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"
NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/mcvd/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/mcvd/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/mcvd/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/phydnet/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/phydnet/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/phydnet/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/phydnet/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/predrnnpp/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/predrnnpp/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/predrnnpp/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/predrnnpp/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/predrnnv1/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/predrnnv1/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/predrnnv1/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/predrnnv1/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/predrnnv2/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/predrnnv2/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/predrnnv2/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/predrnnv2/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/simvpv1/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/simvpv1/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/simvpv1/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/simvpv1/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/simvpv2/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/simvpv2/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/simvpv2/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/simvpv2/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
/tools/scripts/weatherbench/tau/bs_64_lr_1e4.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
export MASTER_PORT=$((12000 + $RANDOM%20000))
export PYTHONPATH="${PYTHONPATH}:$(pwd)"

NUM_NODE=${1:-1}
NUM_GPU=${2:-4}

python -m torch.distributed.run --nnodes=${NUM_NODE} --nproc_per_node=${NUM_GPU} --master_port=${MASTER_PORT} train.py \
    --config configs/weatherbench/tau/bs_64_lr_1e4.py \
    --work-dir work_dirs/weatherbench/tau/bs_64_lr_1e4 \
    --launcher pytorch

python test.py \
    --work-dir work_dirs/weatherbench/tau/bs_64_lr_1e4 \
    --test-best --metric-only
--------------------------------------------------------------------------------
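All of the launch scripts above follow the same pattern: they accept an optional node count (default 1) and per-node GPU count (default 4) as positional arguments, start distributed training via torch.distributed.run, and then evaluate the best checkpoint with test.py. A minimal usage sketch, assuming the scripts are invoked from the repository root (they add $(pwd) to PYTHONPATH and reference train.py, test.py, and configs/ via relative paths):

    #!/usr/bin/env bash
    # Train and then evaluate the WeatherBench TAU config on 1 node with 4 GPUs;
    # results are written under work_dirs/weatherbench/tau/bs_64_lr_1e4.
    bash tools/scripts/weatherbench/tau/bs_64_lr_1e4.sh 1 4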