├── zero-shot ├── ours │ ├── utils │ │ ├── __init__.py │ │ └── seed_utils.py │ ├── .DS_Store │ ├── README.md │ └── train │ │ └── flags.py ├── NeurIPS2023-One-Fits-All │ ├── Imputation │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── masking.py │ │ │ └── metrics.py │ │ ├── data_provider │ │ │ └── __init__.py │ │ └── README.md │ ├── Classification │ │ ├── src │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ └── __init__.py │ │ │ ├── utils │ │ │ │ └── __init__.py │ │ │ └── datasets │ │ │ │ └── __init__.py │ │ ├── scripts │ │ │ ├── EthanolConcentration.sh │ │ │ ├── PEMS-SF.sh │ │ │ ├── FaceDetection.sh │ │ │ ├── UWaveGestureLibrary.sh │ │ │ ├── SelfRegulationSCP1.sh │ │ │ ├── SpokenArabicDigits.sh │ │ │ ├── Heartbeat.sh │ │ │ ├── JapaneseVowels.sh │ │ │ ├── SelfRegulationSCP2.sh │ │ │ └── Handwriting.sh │ │ └── README.md │ ├── Anomaly_Detection │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── masking.py │ │ │ └── metrics.py │ │ ├── data_provider │ │ │ └── __init__.py │ │ ├── scripts │ │ │ ├── SMD.sh │ │ │ ├── PSM.sh │ │ │ ├── MSL.sh │ │ │ ├── SWAT.sh │ │ │ └── SMAP.sh │ │ └── README.md │ ├── Few-shot_Learning │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── metrics.py │ │ ├── scripts │ │ │ ├── illness.sh │ │ │ ├── traffic.sh │ │ │ ├── ETTm2.sh │ │ │ ├── ETTm1.sh │ │ │ ├── weather.sh │ │ │ ├── electricity.sh │ │ │ ├── ETTh2.sh │ │ │ └── ETTh1.sh │ │ └── README.md │ ├── Zero-shot_Learning │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── masking.py │ │ │ └── metrics.py │ │ ├── data_provider │ │ │ └── temp_for_debug │ │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── masking.py │ │ │ │ └── metrics.py │ │ ├── scripts │ │ │ ├── GPT4TS_wo_pretrain_train.sh │ │ │ ├── GPT4TS_wo_pretrain_inference.sh │ │ │ ├── m4_inference.sh │ │ │ ├── train_base.sh │ │ │ ├── old │ │ │ │ ├── test.sh │ │ │ │ └── m4_electricity.sh │ │ │ ├── inference.sh │ │ │ ├── inference_scale_check.sh │ │ │ └── other_dfs_inference.sh │ │ └── README.md │ ├── Long-term_Forecasting │ │ ├── utils │ │ │ ├── __init__.py │ │ │ 
└── metrics.py │ │ ├── scripts │ │ │ ├── illness.sh │ │ │ ├── traffic.sh │ │ │ ├── ETTm2.sh │ │ │ ├── ETTm1.sh │ │ │ ├── weather.sh │ │ │ ├── electricity.sh │ │ │ ├── ETTh2.sh │ │ │ └── ETTh1.sh │ │ └── README.md │ ├── Short-term_Forecasting │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── masking.py │ │ │ └── metrics.py │ │ ├── data_provider │ │ │ └── __init__.py │ │ └── README.md │ ├── pic │ │ ├── main_result.png │ │ ├── anomaly_detection.png │ │ ├── few_shot_result.png │ │ ├── imputation_result.png │ │ ├── long_term_result.png │ │ ├── model_structure.png │ │ ├── short_term_result.png │ │ ├── zero_shot_result.png │ │ └── classification_result.png │ └── requirements.txt ├── .DS_Store └── ForecastPFN │ ├── .DS_Store │ └── README.md ├── few-shot ├── iTransformer │ ├── layers │ │ └── __init__.py │ ├── model │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── masking.py │ │ └── metrics.py │ ├── data_provider │ │ ├── __init__.py │ │ └── data_factory.py │ ├── requirements.txt │ ├── scripts │ │ ├── few_shot │ │ │ ├── other_datasets_2_iTransformer.sh │ │ │ ├── iTransformer.sh │ │ │ ├── other_datasets_1_iTransformer.sh │ │ │ ├── etth1_iTransformer.sh │ │ │ ├── etth2_iTransformer.sh │ │ │ ├── ettm1_iTransformer.sh │ │ │ ├── ettm2_iTransformer.sh │ │ │ ├── weather_iTransformer.sh │ │ │ ├── illness_iTransformer.sh │ │ │ ├── traffic_iTransformer.sh │ │ │ └── electricity_iTransformer.sh │ │ ├── boost_performance │ │ │ ├── ECL │ │ │ │ ├── iInformer.sh │ │ │ │ ├── iReformer.sh │ │ │ │ ├── iFlowformer.sh │ │ │ │ └── iTransformer.sh │ │ │ ├── Traffic │ │ │ │ ├── iInformer.sh │ │ │ │ ├── iReformer.sh │ │ │ │ ├── iFlowformer.sh │ │ │ │ └── iTransformer.sh │ │ │ └── Weather │ │ │ │ ├── iInformer.sh │ │ │ │ └── iReformer.sh │ │ └── multivariate_forecasting │ │ │ └── ETT │ │ │ ├── iTransformer_ETTh1.sh │ │ │ ├── iTransformer_ETTh2.sh │ │ │ ├── iTransformer_ETTm1.sh │ │ │ └── iTransformer_ETTm2.sh │ ├── README.md │ └── experiments │ │ └── exp_basic.py ├── 
NeurIPS2023-One-Fits-All_old │ ├── Few-shot_Learning │ │ ├── utils │ │ │ ├── __init__.py │ │ │ └── metrics.py │ │ ├── scripts │ │ │ ├── few-shot │ │ │ │ ├── ready │ │ │ │ │ ├── gpt0.sh │ │ │ │ │ ├── gpt6.sh │ │ │ │ │ ├── illness_gpt0.sh │ │ │ │ │ ├── illness_gpt6.sh │ │ │ │ │ ├── etth2_gpt0.sh │ │ │ │ │ ├── etth2_gpt6.sh │ │ │ │ │ ├── etth1_gpt0.sh │ │ │ │ │ └── etth1_gpt6.sh │ │ │ │ ├── other_datasets_2_gpt.sh │ │ │ │ ├── other_datasets_1_gpt.sh │ │ │ │ ├── traffic_gpt.sh │ │ │ │ ├── ili_gpt.sh │ │ │ │ ├── ettm1_gpt.sh │ │ │ │ ├── ettm2_gpt.sh │ │ │ │ ├── weather_gpt.sh │ │ │ │ ├── etth1_gpt.sh │ │ │ │ ├── etth2_gpt.sh │ │ │ │ └── electricity_gpt.sh │ │ │ ├── illness.sh │ │ │ ├── traffic.sh │ │ │ ├── ETTm2.sh │ │ │ ├── ETTm1.sh │ │ │ ├── weather.sh │ │ │ ├── electricity.sh │ │ │ ├── ETTh2.sh │ │ │ └── ETTh1.sh │ │ └── README.md │ └── requirements.txt ├── .DS_Store └── PatchTST │ ├── PatchTST_supervised │ ├── requirements.txt │ ├── scripts │ │ ├── few-shot │ │ │ ├── dlinear.sh │ │ │ ├── patchtst.sh │ │ │ ├── dlinear_other_datasets.sh │ │ │ ├── patchtst_other_datasets.sh │ │ │ ├── etth1-dlinear.sh │ │ │ ├── etth2-dlinear.sh │ │ │ ├── ettm1-dlinear.sh │ │ │ ├── ettm2-dlinear.sh │ │ │ ├── illness-dlinear.sh │ │ │ ├── traffic-dlinear.sh │ │ │ ├── electricity-dlinear.sh │ │ │ ├── etth1-patchtst.sh │ │ │ ├── etth2-patchtst.sh │ │ │ ├── ettm1-patchtst.sh │ │ │ └── illness-patchtst.sh │ │ ├── few-shot-horizon-6 │ │ │ ├── dlinear.sh │ │ │ ├── patchtst.sh │ │ │ ├── etth1-dlinear.sh │ │ │ ├── weather-dlinear.sh │ │ │ ├── etth2-dlinear.sh │ │ │ ├── ettm1-dlinear.sh │ │ │ ├── ettm2-dlinear.sh │ │ │ ├── traffic-dlinear.sh │ │ │ ├── electricity-dlinear.sh │ │ │ ├── illness-dlinear.sh │ │ │ ├── illness-patchtst.sh │ │ │ ├── illness-patchtst_2.sh │ │ │ ├── etth1-patchtst.sh │ │ │ ├── etth2-patchtst.sh │ │ │ ├── ettm1-patchtst.sh │ │ │ ├── ettm2-patchtst.sh │ │ │ ├── weather-patchtst.sh │ │ │ ├── traffic-patchtst.sh │ │ │ └── electricity-patchtst.sh │ │ └── PatchTST │ │ │ ├── 
etth1.sh │ │ │ ├── etth2.sh │ │ │ ├── weather.sh │ │ │ ├── illness.sh │ │ │ ├── ettm1.sh │ │ │ ├── ettm2.sh │ │ │ ├── traffic.sh │ │ │ ├── electricity.sh │ │ │ └── univariate │ │ │ ├── etth1.sh │ │ │ ├── etth2.sh │ │ │ ├── ettm1.sh │ │ │ └── ettm2.sh │ ├── utils │ │ ├── masking.py │ │ └── metrics.py │ ├── exp │ │ └── exp_basic.py │ └── data_provider │ │ └── data_factory.py │ ├── pic │ ├── model.png │ ├── table3.png │ ├── table4.png │ ├── table5.png │ ├── table6.png │ └── varying_L.png │ └── README.md ├── .gitignore ├── pic ├── share_of_wins_mae.pdf └── share_of_wins_mae.png └── LICENSE /zero-shot/ours/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/iTransformer/layers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/iTransformer/model/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/iTransformer/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/iTransformer/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.DS_Store* 2 | zero-shot/.DS_Store 3 | .gitignore -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Imputation/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/src/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Imputation/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/utils/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/src/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Short-term_Forecasting/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Short-term_Forecasting/data_provider/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /few-shot/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/.DS_Store -------------------------------------------------------------------------------- /zero-shot/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/.DS_Store -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/data_provider/temp_for_debug/utils/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pic/share_of_wins_mae.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/pic/share_of_wins_mae.pdf -------------------------------------------------------------------------------- /pic/share_of_wins_mae.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/pic/share_of_wins_mae.png -------------------------------------------------------------------------------- /zero-shot/ours/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/ours/.DS_Store -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | matplotlib 3 | pandas 4 | scikit-learn 5 | torch==1.11.0 -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/model.png -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/table3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/table3.png -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/table4.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/table4.png -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/table5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/table5.png -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/table6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/table6.png -------------------------------------------------------------------------------- /zero-shot/ForecastPFN/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/ForecastPFN/.DS_Store -------------------------------------------------------------------------------- /few-shot/PatchTST/pic/varying_L.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/few-shot/PatchTST/pic/varying_L.png -------------------------------------------------------------------------------- /few-shot/iTransformer/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas==1.5.3 2 | scikit-learn==1.2.2 3 | numpy==1.23.5 4 | matplotlib==3.7.0 5 | torch==2.0.0 6 | reformer-pytorch==1.4.4 7 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/main_result.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/main_result.png -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | sh scripts/few-shot/etth1-dlinear.sh 3 | sh scripts/few-shot/etth2-dlinear.sh -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | sh scripts/few-shot/etth1-patchtst.sh 3 | sh scripts/few-shot/etth2-patchtst.sh -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/anomaly_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/anomaly_detection.png -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/few_shot_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/few_shot_result.png -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/imputation_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/imputation_result.png -------------------------------------------------------------------------------- 
/zero-shot/NeurIPS2023-One-Fits-All/pic/long_term_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/long_term_result.png -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/model_structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/model_structure.png -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/short_term_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/short_term_result.png -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/zero_shot_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/zero_shot_result.png -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ready/gpt0.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | sh scripts/few-shot/etth1_gpt0.sh 3 | sh scripts/few-shot/etth2_gpt0.sh -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ready/gpt6.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=3 2 | sh 
scripts/few-shot/etth1_gpt6.sh 3 | sh scripts/few-shot/etth2_gpt6.sh -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/pic/classification_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sb-ai-lab/synthesize_or_not/main/zero-shot/NeurIPS2023-One-Fits-All/pic/classification_result.png -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/other_datasets_2_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | sh scripts/few_shot/traffic_iTransformer.sh 3 | sh scripts/few_shot/weather_iTransformer.sh -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/other_datasets_2_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | sh scripts/few-shot/traffic_gpt.sh 3 | sh scripts/few-shot/weather_gpt.sh -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | sh scripts/few_shot/etth1_iTransformer.sh 3 | sh scripts/few_shot/etth2_iTransformer.sh 4 | sh scripts/few_shot/illness_iTransformer.sh -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/other_datasets_1_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | sh scripts/few_shot/ettm1_iTransformer.sh 3 | sh scripts/few_shot/ettm2_iTransformer.sh 4 | sh scripts/few_shot/electricity_iTransformer.sh 
-------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/dlinear_other_datasets.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=3 2 | sh scripts/few-shot/ettm1-dlinear.sh 3 | sh scripts/few-shot/ettm2-dlinear.sh 4 | sh scripts/few-shot/electricity-dlinear.sh 5 | sh scripts/few-shot/traffic-dlinear.sh -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/patchtst_other_datasets.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=3 2 | sh scripts/few-shot/ettm1-patchtst.sh 3 | sh scripts/few-shot/ettm2-patchtst.sh 4 | sh scripts/few-shot/electricity-patchtst.sh 5 | sh scripts/few-shot/traffic-patchtst.sh -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/other_datasets_1_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | sh scripts/few-shot/etth1_gpt.sh 3 | sh scripts/few-shot/etth2_gpt.sh 4 | sh scripts/few-shot/ettm1_gpt.sh 5 | sh scripts/few-shot/ettm2_gpt.sh 6 | sh scripts/few-shot/electricity_gpt.sh -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/scripts/GPT4TS_wo_pretrain_train.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | model=GPT4TS_M4_Monthly 3 | python train.py \ 4 | --model $model \ 5 | --config_path ./configs/GPT4TS_wo_pretrain/ \ 6 | --checkpoints ./checkpoints/GPT4TS_wo_pretrain/ > ./logs/GPT4TS_wo_pretrain/$model.txt -------------------------------------------------------------------------------- 
/zero-shot/NeurIPS2023-One-Fits-All/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.2.0 2 | einops==0.4.1 3 | h5py==3.7.0 4 | keopscore==2.1 5 | opt-einsum==3.3.0 6 | pandas==1.4.2 7 | pytorch-wavelet 8 | PyWavelets==1.4.1 9 | scikit-image==0.19.3 10 | scikit-learn==1.0.2 11 | scipy==1.7.3 12 | statsmodels==0.13.2 13 | sympy==1.11.1 14 | torch==1.8.1 15 | transformers==4.30.1 16 | sktime==0.4.1 17 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.2.0 2 | einops==0.4.1 3 | h5py==3.7.0 4 | keopscore==2.1 5 | opt-einsum==3.3.0 6 | pandas==1.4.2 7 | pytorch-wavelet 8 | PyWavelets==1.4.1 9 | scikit-image==0.19.3 10 | scikit-learn==1.0.2 11 | scipy==1.7.3 12 | statsmodels==0.13.2 13 | sympy==1.11.1 14 | torch==1.8.1 15 | transformers==4.30.1 16 | sktime==0.4.1 17 | -------------------------------------------------------------------------------- /zero-shot/ours/utils/seed_utils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | 4 | import numpy as np 5 | 6 | def set_seed(seed): 7 | torch.manual_seed(seed) 8 | torch.cuda.manual_seed(seed) 9 | torch.cuda.manual_seed_all(seed) 10 | np.random.seed(seed) 11 | random.seed(seed) 12 | torch.manual_seed(seed) 13 | torch.backends.cudnn.benchmark = False 14 | torch.backends.cudnn.deterministic = True -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | sh scripts/few-shot-horizon-6/weather-dlinear.sh 3 | sh scripts/few-shot-horizon-6/etth1-dlinear.sh 4 | sh 
scripts/few-shot-horizon-6/etth2-dlinear.sh 5 | sh scripts/few-shot-horizon-6/ettm1-dlinear.sh 6 | sh scripts/few-shot-horizon-6/ettm2-dlinear.sh 7 | sh scripts/few-shot-horizon-6/electricity-dlinear.sh 8 | sh scripts/few-shot-horizon-6/illness-dlinear.sh 9 | sh scripts/few-shot-horizon-6/traffic-dlinear.sh -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | sh scripts/few-shot-horizon-6/weather-patchtst.sh 3 | sh scripts/few-shot-horizon-6/etth1-patchtst.sh 4 | sh scripts/few-shot-horizon-6/etth2-patchtst.sh 5 | sh scripts/few-shot-horizon-6/ettm1-patchtst.sh 6 | sh scripts/few-shot-horizon-6/ettm2-patchtst.sh 7 | sh scripts/few-shot-horizon-6/electricity-patchtst.sh 8 | sh scripts/few-shot-horizon-6/illness-patchtst.sh 9 | sh scripts/few-shot-horizon-6/traffic-patchtst.sh -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/scripts/SMD.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py \ 4 | --task_name anomaly_detection \ 5 | --is_training 1 \ 6 | --root_path ./dataset/SMD \ 7 | --model_id SMD \ 8 | --model GPT4TS \ 9 | --data SMD \ 10 | --features M \ 11 | --seq_len 100 \ 12 | --pred_len 0 \ 13 | --d_model 768 \ 14 | --d_ff 768 \ 15 | --gpt_layer 6 \ 16 | --enc_in 38 \ 17 | --c_out 38 \ 18 | --anomaly_ratio 0.5 \ 19 | --batch_size 128 \ 20 | --patch_size 1 \ 21 | --stride 1 \ 22 | --train_epochs 5 -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/scripts/PSM.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py \ 3 | --task_name anomaly_detection \ 4 | 
--is_training 1 \ 5 | --root_path ./dataset/PSM \ 6 | --model_id PSM \ 7 | --model GPT4TS \ 8 | --data PSM \ 9 | --features M \ 10 | --seq_len 100 \ 11 | --pred_len 0 \ 12 | --gpt_layer 6 \ 13 | --d_model 768 \ 14 | --d_ff 768 \ 15 | --patch_size 1 \ 16 | --stride 1 \ 17 | --enc_in 25 \ 18 | --c_out 25 \ 19 | --anomaly_ratio 1 \ 20 | --batch_size 128 \ 21 | --learning_rate 0.0001 \ 22 | --train_epochs 10 -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/scripts/GPT4TS_wo_pretrain_inference.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | model=GPT4TS_M4_Monthly 3 | for target_data in Weather ECL ILI Traffic ETTh1 ETTh2 ETTm1 ETTm2 4 | do 5 | python inference.py \ 6 | --model $model \ 7 | --target_data $target_data \ 8 | --checkpoints ./checkpoints/GPT4TS_wo_pretrain \ 9 | --test_on_val 0 \ 10 | --res_path ./results/GPT4TS_wo_pretrain/$model'_'$target_data'.csv' \ 11 | --source_scaling standard_scaler >> ./logs/GPT4TS_wo_pretrain/$model.txt 12 | done 13 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/scripts/MSL.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py \ 4 | --task_name anomaly_detection \ 5 | --is_training 1 \ 6 | --root_path ./dataset/MSL \ 7 | --model_id MSL \ 8 | --model GPT4TS \ 9 | --data MSL \ 10 | --features M \ 11 | --seq_len 100 \ 12 | --pred_len 0 \ 13 | --gpt_layer 6 \ 14 | --d_model 768 \ 15 | --d_ff 8 \ 16 | --patch_size 1 \ 17 | --stride 1 \ 18 | --enc_in 55 \ 19 | --c_out 55 \ 20 | --anomaly_ratio 2 \ 21 | --batch_size 128 \ 22 | --learning_rate 0.0001 \ 23 | --train_epochs 10 -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/scripts/SWAT.sh: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py \ 4 | --task_name anomaly_detection \ 5 | --is_training 1 \ 6 | --root_path ./dataset/SWaT \ 7 | --model_id SWAT \ 8 | --model GPT4TS \ 9 | --data SWAT \ 10 | --features M \ 11 | --seq_len 100 \ 12 | --pred_len 0 \ 13 | --gpt_layer 6 \ 14 | --d_model 768 \ 15 | --d_ff 128 \ 16 | --patch_size 1 \ 17 | --stride 1 \ 18 | --enc_in 51 \ 19 | --c_out 51 \ 20 | --anomaly_ratio 1 \ 21 | --batch_size 128 \ 22 | --learning_rate 0.0001 \ 23 | --train_epochs 10 -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/scripts/SMAP.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py \ 4 | --task_name anomaly_detection \ 5 | --is_training 1 \ 6 | --root_path ./dataset/SMAP \ 7 | --model_id SMAP \ 8 | --model GPT4TS \ 9 | --data SMAP \ 10 | --features M \ 11 | --seq_len 100 \ 12 | --pred_len 0 \ 13 | --gpt_layer 6 \ 14 | --d_model 768 \ 15 | --d_ff 768 \ 16 | --patch_size 1 \ 17 | --stride 1 \ 18 | --enc_in 25 \ 19 | --c_out 25 \ 20 | --anomaly_ratio 1 \ 21 | --batch_size 128 \ 22 | --learning_rate 0.0005 \ 23 | --train_epochs 10 24 | -------------------------------------------------------------------------------- /zero-shot/ours/README.md: -------------------------------------------------------------------------------- 1 | # Our synthetic data 2 | 3 | Here, you can read how to reproduce paper results for our model and synthetic series. 4 | 5 | ## Get Start 6 | 7 | - Install environment via ```conda env create -f time_series.yml```. 8 | - To reproduce few-shot resuls for benchmarks, use ```run_benchmarks_few_shot.py```. Please specify checkpoint and data location in ```CH_PATH``` and ```PATH```. 9 | - To reproduce zero-shot results for benchmarks and M4 dataset, use ```run_benchmarks_zero_shot.py``` and ```run_M.py```. 
Please specify checkpoint and data location in ```CH_PATH``` and ```PATH```. -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/EthanolConcentration.sh: -------------------------------------------------------------------------------- 1 | python src/main.py \ 2 | --output_dir experiments \ 3 | --comment "classification from Scratch" \ 4 | --name EthanolConcentration \ 5 | --records_file Classification_records.xls \ 6 | --data_dir ./datasets/EthanolConcentration \ 7 | --data_class tsra \ 8 | --pattern TRAIN \ 9 | --val_pattern TEST \ 10 | --epochs 50 \ 11 | --lr 0.001 \ 12 | --patch_size 8 \ 13 | --stride 8 \ 14 | --optimizer RAdam \ 15 | --d_model 768 \ 16 | --pos_encoding learnable \ 17 | --task classification \ 18 | --key_metric accuracy -------------------------------------------------------------------------------- /zero-shot/ours/train/flags.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | 4 | def parse_handle(): 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument('--pics_series_path', help="path for saving series visualization", type=str, default='./1st_step_new_params_grad_clip/pics_series') 7 | parser.add_argument('--model_path', help="path for checkpoints", type=str, default='./1st_step_new_params_grad_clip/checkpoints') 8 | parser.add_argument('--pics_loss_path', help="path for saving loss and lr visualization", type=str, default='./1st_step_new_params_grad_clip/pics_loss') 9 | parser.add_argument('--lr', help="learning rate", type=str, default=3e-3) 10 | return parser -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/PEMS-SF.sh: -------------------------------------------------------------------------------- 1 | 2 | for patch in 16 3 | do 4 | for stride in 8 5 | do 6 | for lr in 0.0005 7 
| do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name PEMS-SF \ 13 | --records_file Classification_records.xls \ 14 | --data_dir ./datasets/PEMS-SF \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy 27 | 28 | done 29 | done 30 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/scripts/m4_inference.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | for model in \ 3 | DLinear_M3_Yearly DLinear_M3_Quarterly DLinear_M3_Monthly DLinear_M3_Other \ 4 | PatchTST_M3_Yearly PatchTST_M3_Quarterly PatchTST_M3_Monthly PatchTST_M3_Other \ 5 | GPT4TS_M3_Yearly GPT4TS_M3_Quarterly GPT4TS_M3_Monthly GPT4TS_M3_Other 6 | do 7 | for target_data in M4_Yearly M4_Quarterly M4_Monthly M4_Weekly M4_Daily M4_Hourly 8 | do 9 | python inference.py \ 10 | --model $model \ 11 | --target_data $target_data \ 12 | --checkpoints ./checkpoints/ \ 13 | --test_on_val 0 \ 14 | --res_path ./results/M4_test/$model'_'$target_data'.csv' \ 15 | --source_scaling False >> ./logs/M4_test/$model.txt 16 | done 17 | done 18 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/FaceDetection.sh: -------------------------------------------------------------------------------- 1 | 2 | for patch in 4 3 | do 4 | for stride in 2 5 | do 6 | for lr in 0.0001 7 | do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name FaceDetection \ 13 | --records_file Classification_records.xls \ 14 | --data_dir 
./datasets/FaceDetection \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy 27 | 28 | done 29 | done 30 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/UWaveGestureLibrary.sh: -------------------------------------------------------------------------------- 1 | for lr in 0.001 2 | do 3 | for patch in 16 4 | do 5 | for stride in 16 6 | do 7 | 8 | python src/main.py \ 9 | --output_dir experiments \ 10 | --comment "classification from Scratch" \ 11 | --name UWaveGestureLibrary \ 12 | --records_file Classification_records.xls \ 13 | --data_dir ./datasets/UWaveGestureLibrary \ 14 | --data_class tsra \ 15 | --pattern TRAIN \ 16 | --val_pattern TEST \ 17 | --epochs 50 \ 18 | --lr 0.001 \ 19 | --patch_size 16 \ 20 | --stride 16 \ 21 | --optimizer RAdam \ 22 | --d_model 768 \ 23 | --pos_encoding learnable \ 24 | --task classification \ 25 | --key_metric accuracy 26 | 27 | done 28 | done 29 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/SelfRegulationSCP1.sh: -------------------------------------------------------------------------------- 1 | for lr in 0.002 2 | do 3 | for patch in 16 4 | do 5 | for stride in 16 6 | do 7 | 8 | python src/main.py \ 9 | --output_dir experiments \ 10 | --comment "classification from Scratch" \ 11 | --name SelfRegulationSCP1 \ 12 | --records_file Classification_records.xls \ 13 | --data_dir ./datasets/SelfRegulationSCP1 \ 14 | --data_class tsra \ 15 | --pattern TRAIN \ 16 | --val_pattern TEST \ 17 | --epochs 50 \ 18 | --lr $lr \ 19 | --patch_size $patch \ 20 | --stride $stride \ 21 | --optimizer RAdam \ 22 | 
--d_model 768 \ 23 | --pos_encoding learnable \ 24 | --task classification \ 25 | --key_metric accuracy 26 | 27 | done 28 | done 29 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/SpokenArabicDigits.sh: -------------------------------------------------------------------------------- 1 | for lr in 0.001 2 | do 3 | for patch in 8 4 | do 5 | for stride in 4 6 | do 7 | 8 | python src/main.py \ 9 | --output_dir experiments \ 10 | --comment "classification from Scratch" \ 11 | --name SpokenArabicDigits \ 12 | --records_file Classification_records.xls \ 13 | --data_dir ./datasets/SpokenArabicDigits \ 14 | --data_class tsra \ 15 | --pattern TRAIN \ 16 | --val_pattern TEST \ 17 | --epochs 50 \ 18 | --lr $lr \ 19 | --patch_size $patch \ 20 | --stride $stride \ 21 | --optimizer RAdam \ 22 | --d_model 768 \ 23 | --pos_encoding learnable \ 24 | --task classification \ 25 | --key_metric accuracy 26 | 27 | done 28 | done 29 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/Heartbeat.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | for lr in 0.0005 3 | do 4 | for patch in 32 5 | do 6 | for stride in 16 7 | do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name Heartbeat \ 13 | --records_file Classification_records.xls \ 14 | --data_dir ./datasets/Heartbeat \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy 27 | 28 | done 29 | done 30 | done 
-------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/JapaneseVowels.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | for lr in 0.0005 3 | do 4 | for patch in 4 5 | do 6 | for stride in 1 7 | do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name JapaneseVowels \ 13 | --records_file Classification_records.xls \ 14 | --data_dir ./datasets/JapaneseVowels \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy 27 | 28 | done 29 | done 30 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/scripts/train_base.sh: -------------------------------------------------------------------------------- 1 | for model in \ 2 | GPT4TS_M3_Yearly GPT4TS_M3_Quarterly GPT4TS_M3_Monthly GPT4TS_M3_Other \ 3 | GPT4TS_M4_Yearly GPT4TS_M4_Quarterly GPT4TS_M4_Monthly GPT4TS_M4_Weekly GPT4TS_M4_Daily GPT4TS_M4_Hourly \ 4 | DLinear_M3_Yearly DLinear_M3_Quarterly DLinear_M3_Monthly DLinear_M3_Other \ 5 | DLinear_M4_Yearly DLinear_M4_Quarterly DLinear_M4_Monthly DLinear_M4_Weekly DLinear_M4_Daily DLinear_M4_Hourly \ 6 | PatchTST_M3_Yearly PatchTST_M3_Quarterly PatchTST_M3_Monthly PatchTST_M3_Other \ 7 | PatchTST_M4_Yearly PatchTST_M4_Quarterly PatchTST_M4_Monthly PatchTST_M4_Weekly PatchTST_M4_Daily PatchTST_M4_Hourly 8 | do 9 | python train.py \ 10 | --model $model \ 11 | --config_path ./configs/ \ 12 | --checkpoints ./checkpoints/ 13 | done -------------------------------------------------------------------------------- 
/zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/SelfRegulationSCP2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | for lr in 0.0005 3 | do 4 | for patch in 16 5 | do 6 | for stride in 16 7 | do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name SelfRegulationSCP2 \ 13 | --records_file Classification_records.xls \ 14 | --data_dir ./datasets/SelfRegulationSCP2 \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy 27 | 28 | done 29 | done 30 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/scripts/Handwriting.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | for lr in 0.002 3 | do 4 | for patch in 8 5 | do 6 | for stride in 2 7 | do 8 | 9 | python src/main.py \ 10 | --output_dir experiments \ 11 | --comment "classification from Scratch" \ 12 | --name Handwriting \ 13 | --records_file Classification_records.xls \ 14 | --data_dir ./datasets/Handwriting \ 15 | --data_class tsra \ 16 | --pattern TRAIN \ 17 | --val_pattern TEST \ 18 | --epochs 50 \ 19 | --lr $lr \ 20 | --patch_size $patch \ 21 | --stride $stride \ 22 | --optimizer RAdam \ 23 | --d_model 768 \ 24 | --pos_encoding learnable \ 25 | --task classification \ 26 | --key_metric accuracy \ 27 | --lr_step 10,20,30,40 \ 28 | --lr_factor 0.6 29 | 30 | done 31 | done 32 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/README.md: -------------------------------------------------------------------------------- 
1 | # iTransformer 2 | 3 | The repo is the official implementation for the paper: [iTransformer: Inverted Transformers Are Effective for Time Series Forecasting](https://arxiv.org/abs/2310.06625). 4 | 5 | ## Get Start 6 | 7 | - Install environment from yml file ```conda env create -f conda_baseline.yml```. 8 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 9 | - You can find scripts to reproduce results for few-shot learning in ```/scripts/few_shot```. 10 | 11 | ``` 12 | @article{liu2023itransformer, 13 | title={iTransformer: Inverted Transformers Are Effective for Time Series Forecasting}, 14 | author={Liu, Yong and Hu, Tengge and Zhang, Haoran and Wu, Haixu and Wang, Shiyu and Ma, Lintao and Long, Mingsheng}, 15 | journal={arXiv preprint arXiv:2310.06625}, 16 | year={2023} 17 | } 18 | ``` 19 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/scripts/illness.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=104 4 | model=GPT4TS 5 | 6 | 7 | for pred_len in 24 36 48 60 8 | do 9 | for percent in 5 10 10 | do 11 | 12 | python main.py \ 13 | --root_path ./datasets/illness/ \ 14 | --data_path national_illness.csv \ 15 | --model_id illness_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 16 | --data custom \ 17 | --seq_len $seq_len \ 18 | --label_len 18 \ 19 | --pred_len $pred_len \ 20 | --batch_size 16 \ 21 | --learning_rate 0.0001 \ 22 | --train_epochs 10 \ 23 | --decay_fac 0.75 \ 24 | --d_model 768 \ 25 | --n_heads 4 \ 26 | --d_ff 768 \ 27 | --freq 0 \ 28 | --patch_size 24 \ 29 | --stride 2 \ 30 | --all 1 \ 31 | --percent $percent \ 32 | --gpt_layer 6 \ 33 | --itr 3 \ 34 | --model $model \ 35 | --is_gpt 1 36 | done 37 | done 38 | 39 | -------------------------------------------------------------------------------- 
/few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/illness.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=104 4 | model=GPT4TS 5 | 6 | 7 | for pred_len in 24 36 48 60 8 | do 9 | for percent in 5 10 10 | do 11 | 12 | python main.py \ 13 | --root_path ./datasets/illness/ \ 14 | --data_path national_illness.csv \ 15 | --model_id illness_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 16 | --data custom \ 17 | --seq_len $seq_len \ 18 | --label_len 18 \ 19 | --pred_len $pred_len \ 20 | --batch_size 16 \ 21 | --learning_rate 0.0001 \ 22 | --train_epochs 10 \ 23 | --decay_fac 0.75 \ 24 | --d_model 768 \ 25 | --n_heads 4 \ 26 | --d_ff 768 \ 27 | --freq 0 \ 28 | --patch_size 24 \ 29 | --stride 2 \ 30 | --all 1 \ 31 | --percent $percent \ 32 | --gpt_layer 6 \ 33 | --itr 3 \ 34 | --model $model \ 35 | --is_gpt 1 36 | done 37 | done 38 | 39 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/scripts/illness.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=104 4 | model=GPT4TS 5 | 6 | 7 | for pred_len in 24 36 48 60 8 | do 9 | for percent in 100 10 | do 11 | 12 | python main.py \ 13 | --root_path ./datasets/illness/ \ 14 | --data_path national_illness.csv \ 15 | --model_id illness_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 16 | --data custom \ 17 | --seq_len $seq_len \ 18 | --label_len 18 \ 19 | --pred_len $pred_len \ 20 | --batch_size 16 \ 21 | --learning_rate 0.0001 \ 22 | --train_epochs 10 \ 23 | --decay_fac 0.75 \ 24 | --d_model 768 \ 25 | --n_heads 4 \ 26 | --d_ff 768 \ 27 | --freq 0 \ 28 | --patch_size 24 \ 29 | --stride 2 \ 30 | --all 1 \ 31 | --percent $percent \ 32 | --gpt_layer 6 \ 33 | --itr 3 \ 34 | --model $model \ 35 | --is_gpt 1 36 | done 37 | done 38 | 39 | 
-------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/traffic.sh: -------------------------------------------------------------------------------- 1 | seq_len=512 2 | model=GPT4TS 3 | 4 | for percent in 5 10 5 | do 6 | for pred_len in 96 192 336 720 7 | do 8 | 9 | python main.py \ 10 | --root_path ./datasets/traffic/ \ 11 | --data_path traffic.csv \ 12 | --model_id traffic_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 13 | --data custom \ 14 | --seq_len $seq_len \ 15 | --label_len 48 \ 16 | --pred_len $pred_len \ 17 | --batch_size 2048 \ 18 | --learning_rate 0.001 \ 19 | --train_epochs 10 \ 20 | --decay_fac 0.75 \ 21 | --d_model 768 \ 22 | --n_heads 4 \ 23 | --d_ff 768 \ 24 | --freq 0 \ 25 | --patch_size 16 \ 26 | --stride 8 \ 27 | --all 1 \ 28 | --percent $percent \ 29 | --gpt_layer 6 \ 30 | --itr 3 \ 31 | --model $model \ 32 | --patience 3 \ 33 | --cos 1 \ 34 | --tmax 10 \ 35 | --is_gpt 1 36 | 37 | done 38 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/scripts/traffic.sh: -------------------------------------------------------------------------------- 1 | seq_len=512 2 | model=GPT4TS 3 | 4 | for percent in 5 10 5 | do 6 | for pred_len in 96 192 336 720 7 | do 8 | 9 | python main.py \ 10 | --root_path ./datasets/traffic/ \ 11 | --data_path traffic.csv \ 12 | --model_id traffic_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 13 | --data custom \ 14 | --seq_len $seq_len \ 15 | --label_len 48 \ 16 | --pred_len $pred_len \ 17 | --batch_size 2048 \ 18 | --learning_rate 0.001 \ 19 | --train_epochs 10 \ 20 | --decay_fac 0.75 \ 21 | --d_model 768 \ 22 | --n_heads 4 \ 23 | --d_ff 768 \ 24 | --freq 0 \ 25 | --patch_size 16 \ 26 | --stride 8 \ 27 | --all 1 \ 28 | --percent $percent \ 29 | --gpt_layer 6 \ 30 | --itr 3 \ 31 | --model $model \ 32 | --patience 3 \ 33 | --cos 1 
\ 34 | --tmax 10 \ 35 | --is_gpt 1 36 | 37 | done 38 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/scripts/traffic.sh: -------------------------------------------------------------------------------- 1 | seq_len=512 2 | model=GPT4TS 3 | 4 | for percent in 100 5 | do 6 | for pred_len in 96 192 336 720 7 | do 8 | 9 | python main.py \ 10 | --root_path ./datasets/traffic/ \ 11 | --data_path traffic.csv \ 12 | --model_id traffic_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 13 | --data custom \ 14 | --seq_len $seq_len \ 15 | --label_len 48 \ 16 | --pred_len $pred_len \ 17 | --batch_size 2048 \ 18 | --learning_rate 0.001 \ 19 | --train_epochs 10 \ 20 | --decay_fac 0.75 \ 21 | --d_model 768 \ 22 | --n_heads 4 \ 23 | --d_ff 768 \ 24 | --freq 0 \ 25 | --patch_size 16 \ 26 | --stride 8 \ 27 | --all 1 \ 28 | --percent $percent \ 29 | --gpt_layer 6 \ 30 | --itr 3 \ 31 | --model $model \ 32 | --patience 3 \ 33 | --cos 1 \ 34 | --tmax 10 \ 35 | --is_gpt 1 36 | 37 | done 38 | done 39 | -------------------------------------------------------------------------------- /few-shot/iTransformer/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask 
= indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/ETTm2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 729 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm2.csv \ 14 | --model_id ETTm2_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 0.002 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 1 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = 
_mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/scripts/ETTm2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 729 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm2.csv \ 14 | --model_id ETTm2_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 0.002 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 1 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/scripts/ETTm2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 100 7 | do 8 | for pred_len in 96 192 336 729 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm2.csv \ 14 | --model_id ETTm2_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 
0.0001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 1 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/ETTm1.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm1.csv \ 14 | --model_id ETTm1_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 0.001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 3 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | 41 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/scripts/ETTm1.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm1.csv \ 14 | --model_id ETTm1_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent 
\ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 0.001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 3 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | 41 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/scripts/weather.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/weather/ \ 13 | --data_path weather.csv \ 14 | --model_id weather_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data custom \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 512 \ 20 | --learning_rate 0.001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.9 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --lradj type3 \ 31 | --patch_size 16 \ 32 | --stride 8 \ 33 | --percent $percent \ 34 | --gpt_layer 6 \ 35 | --itr 3 \ 36 | --model $model \ 37 | --is_gpt 1 38 | 39 | done 40 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Imputation/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = 
torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/scripts/ETTm1.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 100 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/ETT-small/ \ 13 | --data_path ETTm1.csv \ 14 | --model_id ETTm1_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data ett_m \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 256 \ 20 | --learning_rate 0.0001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.75 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --patch_size 16 \ 31 | --stride 16 \ 32 | --percent $percent \ 33 | --gpt_layer 6 \ 34 | --itr 3 \ 35 | --model $model \ 36 | --cos 1 \ 37 | --is_gpt 1 38 | done 39 | done 40 | 41 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class 
TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/weather.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 5 10 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/weather/ \ 13 | --data_path weather.csv \ 14 | --model_id weather_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data custom \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 512 \ 20 | --learning_rate 0.001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.9 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --lradj type3 \ 31 | --patch_size 16 \ 32 | --stride 8 \ 33 | --percent $percent \ 34 | --gpt_layer 6 \ 35 | --itr 3 \ 36 | --model $model \ 37 | --is_gpt 1 38 | 39 | done 40 | done -------------------------------------------------------------------------------- 
/zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, scores, device="cpu"): 17 | _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) 18 | _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) 19 | indicator = _mask_ex[torch.arange(B)[:, None, None], 20 | torch.arange(H)[None, :, None], 21 | index, :].to(device) 22 | self._mask = indicator.view(scores.shape).to(device) 23 | 24 | @property 25 | def mask(self): 26 | return self._mask 27 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) - official relization. 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | 6 | ## Get Start 7 | 8 | - Install environment from yml file ```conda env create -f conda_baseline.yml```. 9 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 10 | - You can find scripts to reproduce results for few-shot learning in ```./Few-shot_Learning/scripts/few-shot```. 
11 | 12 | ## Citation 13 | 14 | ``` 15 | @inproceedings{zhou2023onefitsall, 16 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 17 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 18 | booktitle={NeurIPS}, 19 | year={2023} 20 | } 21 | ``` -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/scripts/weather.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | seq_len=512 4 | model=GPT4TS 5 | 6 | for percent in 100 7 | do 8 | for pred_len in 96 192 336 720 9 | do 10 | 11 | python main.py \ 12 | --root_path ./datasets/weather/ \ 13 | --data_path weather.csv \ 14 | --model_id weather_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \ 15 | --data custom \ 16 | --seq_len $seq_len \ 17 | --label_len 48 \ 18 | --pred_len $pred_len \ 19 | --batch_size 512 \ 20 | --learning_rate 0.0001 \ 21 | --train_epochs 10 \ 22 | --decay_fac 0.9 \ 23 | --d_model 768 \ 24 | --n_heads 4 \ 25 | --d_ff 768 \ 26 | --dropout 0.3 \ 27 | --enc_in 7 \ 28 | --c_out 7 \ 29 | --freq 0 \ 30 | --lradj type3 \ 31 | --patch_size 16 \ 32 | --stride 8 \ 33 | --percent $percent \ 34 | --gpt_layer 6 \ 35 | --itr 3 \ 36 | --model $model \ 37 | --is_gpt 1 38 | 39 | done 40 | done 41 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Short-term_Forecasting/utils/masking.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TriangularCausalMask(): 5 | def __init__(self, B, L, device="cpu"): 6 | mask_shape = [B, 1, L, L] 7 | with torch.no_grad(): 8 | self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) 9 | 10 | @property 11 | def mask(self): 12 | return self._mask 13 | 14 | 15 | class ProbMask(): 16 | def __init__(self, B, H, L, index, 
export CUDA_VISIBLE_DEVICES=0

seq_len=512
model=GPT4TS
# Fix: $gpt_layer was interpolated into --model_id but never defined.
gpt_layer=6

for pred_len in 96 192 336 720
do
for percent in 5 10
do

python main.py \
    --root_path ./datasets/electricity/ \
    --data_path electricity.csv \
    --model_id ECL_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \
    --data custom \
    --seq_len $seq_len \
    --label_len 48 \
    --pred_len $pred_len \
    --batch_size 2048 \
    --learning_rate 0.0001 \
    --train_epochs 10 \
    --decay_fac 0.75 \
    --d_model 768 \
    --n_heads 4 \
    --d_ff 768 \
    --dropout 0.3 \
    --enc_in 7 \
    --c_out 7 \
    --freq 0 \
    --patch_size 16 \
    --stride 8 \
    --percent $percent \
    --gpt_layer $gpt_layer \
    --itr 3 \
    --model $model \
    --cos 1 \
    --tmax 10 \
    --is_gpt 1
done
done
export CUDA_VISIBLE_DEVICES=0

seq_len=512
model=GPT4TS
# Fix: $gpt_layer was interpolated into --model_id but never defined.
gpt_layer=6

for pred_len in 96 192 336 720
do
for percent in 100
do

python main.py \
    --root_path ./datasets/electricity/ \
    --data_path electricity.csv \
    --model_id ECL_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \
    --data custom \
    --seq_len $seq_len \
    --label_len 48 \
    --pred_len $pred_len \
    --batch_size 2048 \
    --learning_rate 0.0001 \
    --train_epochs 10 \
    --decay_fac 0.75 \
    --d_model 768 \
    --n_heads 4 \
    --d_ff 768 \
    --dropout 0.3 \
    --enc_in 7 \
    --c_out 7 \
    --freq 0 \
    --patch_size 16 \
    --stride 8 \
    --percent $percent \
    --gpt_layer $gpt_layer \
    --itr 3 \
    --model $model \
    --cos 1 \
    --tmax 10 \
    --is_gpt 1
done
done
model=GPT4TS

seq_len=6
pred_len=14
test_seq_len=6
test_pred_len=14

# Fix: --test_seq_len previously passed $seq_len while $test_seq_len was
# defined but unused. Values coincide today (both 6), but the intent is to
# allow them to differ. NOTE(review): model_id "m3Darly" looks like a typo
# for "m3Daily" — left unchanged since it names existing checkpoints/logs.
python main_test.py \
    --root_path ./data/m4/ \
    --test_root_path ./data/m3/ \
    --data_path m4_daily.tsf \
    --test_data_path m3_monthly.tsf \
    --model_id m3Darly'_'$model \
    --data tsf_data \
    --seq_len $seq_len \
    --pred_len $pred_len \
    --test_seq_len $test_seq_len \
    --test_pred_len $test_pred_len \
    --label_len 0 \
    --batch_size 512 \
    --test_batch_size 128 \
    --learning_rate 0.001 \
    --train_epochs 20 \
    --decay_fac 0.75 \
    --d_model 768 \
    --n_heads 16 \
    --d_ff 512 \
    --loss_func smape \
    --dropout 0 \
    --gpt_layer 6 \
    --itr 1 \
    --model $model \
    --patch_size 1 \
    --stride 1 \
    --print_int 1000 \
    --train_all 1 \
    --percent 100
export CUDA_VISIBLE_DEVICES=0

seq_len=336
model=GPT4TS
# Fix: $gpt_layer was interpolated into --model_id but never defined.
gpt_layer=6

for percent in 5 10
do
for pred_len in 96 192 336 720
do

python main.py \
    --root_path ./datasets/ETT-small/ \
    --data_path ETTh2.csv \
    --model_id ETTh2_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \
    --data ett_h \
    --seq_len $seq_len \
    --label_len 168 \
    --pred_len $pred_len \
    --batch_size 256 \
    --decay_fac 0.5 \
    --learning_rate 0.001 \
    --train_epochs 10 \
    --d_model 768 \
    --n_heads 4 \
    --d_ff 768 \
    --dropout 1 \
    --enc_in 7 \
    --c_out 7 \
    --freq 0 \
    --patch_size 16 \
    --stride 8 \
    --percent $percent \
    --gpt_layer $gpt_layer \
    --itr 1 \
    --model $model \
    --cos 1 \
    --tmax 20 \
    --pretrain 1 \
    --is_gpt 1

done
done
export CUDA_VISIBLE_DEVICES=0

seq_len=336
model=GPT4TS
# Fix: $gpt_layer was interpolated into --model_id but never defined.
gpt_layer=6

for percent in 100
do
for pred_len in 96 192 336 720
do

python main.py \
    --root_path ./datasets/ETT-small/ \
    --data_path ETTh2.csv \
    --model_id ETTh2_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \
    --data ett_h \
    --seq_len $seq_len \
    --label_len 168 \
    --pred_len $pred_len \
    --batch_size 256 \
    --decay_fac 0.5 \
    --learning_rate 0.0001 \
    --train_epochs 10 \
    --d_model 768 \
    --n_heads 4 \
    --d_ff 768 \
    --dropout 1 \
    --enc_in 7 \
    --c_out 7 \
    --freq 0 \
    --patch_size 16 \
    --stride 8 \
    --percent $percent \
    --gpt_layer $gpt_layer \
    --itr 1 \
    --model $model \
    --cos 1 \
    --tmax 20 \
    --pretrain 1 \
    --is_gpt 1

done
done
import numpy as np


def RSE(pred, true):
    """Root relative squared error of `pred` against `true`."""
    num = np.sqrt(np.sum((true - pred) ** 2))
    den = np.sqrt(np.sum((true - true.mean()) ** 2))
    return num / den


def CORR(pred, true):
    """Mean per-series correlation statistic (reduced over the last axis).

    NOTE(review): the denominator is 0 for constant series, which yields
    nan — confirm callers never pass constant columns.
    """
    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)
    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))
    return (u / d).mean(-1)


def MAE(pred, true):
    """Mean absolute error."""
    return np.abs(pred - true).mean()


def MSE(pred, true):
    """Mean squared error."""
    return ((pred - true) ** 2).mean()


def RMSE(pred, true):
    """Root mean squared error."""
    return np.sqrt(MSE(pred, true))


def MAPE(pred, true):
    """Mean absolute percentage error; undefined where true == 0."""
    return np.abs((pred - true) / true).mean()


def MSPE(pred, true):
    """Mean squared percentage error; undefined where true == 0."""
    return np.square((pred - true) / true).mean()


def metric(pred, true):
    """Return the tuple (mae, mse, rmse, mape, mspe) for the given arrays."""
    return (
        MAE(pred, true),
        MSE(pred, true),
        RMSE(pred, true),
        MAPE(pred, true),
        MSPE(pred, true),
    )

export CUDA_VISIBLE_DEVICES=0

seq_len=336
model=GPT4TS
# Fix: $gpt_layer was interpolated into --model_id but never defined.
gpt_layer=6

for percent in 5 10
do
for pred_len in 96 192 336 720
do
for lr in 0.001 0.000005
do

python main.py \
    --root_path ./datasets/ETT-small/ \
    --data_path ETTh1.csv \
    --model_id ETTh1_$model'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$percent \
    --data ett_h \
    --seq_len $seq_len \
    --label_len 168 \
    --pred_len $pred_len \
    --batch_size 256 \
    --lradj type4 \
    --learning_rate $lr \
    --train_epochs 10 \
    --decay_fac 0.5 \
    --d_model 768 \
    --n_heads 4 \
    --d_ff 768 \
    --dropout 0.3 \
    --enc_in 7 \
    --c_out 7 \
    --freq 0 \
    --patch_size 16 \
    --stride 8 \
    --percent $percent \
    --gpt_layer $gpt_layer \
    --itr 3 \
    --model $model \
    --tmax 20 \
    --cos 1 \
    --is_gpt 1

done
done
done
import os
import torch
import numpy as np


class Exp_Basic(object):
    """Base class for experiments.

    On construction it selects a device from `args`, builds the model via
    the subclass-provided `_build_model`, and moves it to that device.
    Subclasses override `_get_data`, `vali`, `train`, and `test`.
    """

    def __init__(self, args):
        self.args = args
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        # Subclasses must construct and return the model to train.
        # (Removed the unreachable `return None` that followed the raise.)
        raise NotImplementedError

    def _acquire_device(self):
        """Return the torch device selected by `args`; pins
        CUDA_VISIBLE_DEVICES as a side effect when a GPU is requested."""
        if self.args.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(
                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        # Hook: subclasses return their dataset/dataloader.
        pass

    def vali(self):
        # Hook: subclasses implement validation.
        pass

    def train(self):
        # Hook: subclasses implement training.
        pass

    def test(self):
        # Hook: subclasses implement evaluation.
        pass
import numpy as np


def RSE(pred, true):
    """Root relative squared error of `pred` against `true`."""
    return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(
        np.sum((true - true.mean()) ** 2))


def CORR(pred, true):
    """Mean per-series correlation statistic (reduced over the last axis).

    NOTE(review): denominator is 0 for constant series — yields nan.
    """
    u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)
    d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0))
    return (u / d).mean(-1)


def MAE(pred, true):
    """Mean absolute error."""
    return np.mean(np.abs(pred - true))


def MSE(pred, true):
    """Mean squared error."""
    return np.mean((pred - true) ** 2)


def RMSE(pred, true):
    """Root mean squared error."""
    return np.sqrt(MSE(pred, true))


def MAPE(pred, true):
    """Mean absolute percentage error; undefined where true == 0."""
    return np.mean(np.abs((pred - true) / true))


def MSPE(pred, true):
    """Mean squared percentage error; undefined where true == 0."""
    return np.mean(np.square((pred - true) / true))


def metric(pred, true):
    """Return the tuple (mae, mse, rmse, mape, mspe)."""
    results = (MAE(pred, true), MSE(pred, true), RMSE(pred, true),
               MAPE(pred, true), MSPE(pred, true))
    return results
/zero-shot/NeurIPS2023-One-Fits-All/Short-term_Forecasting/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return np.mean(np.abs((pred - true) / true)) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / true)) 32 | 33 | 34 | def metric(pred, true): 35 | mae = MAE(pred, true) 36 | mse = MSE(pred, true) 37 | rmse = RMSE(pred, true) 38 | mape = MAPE(pred, true) 39 | mspe = MSPE(pred, true) 40 | 41 | return mae, mse, rmse, mape, mspe 42 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/etth1-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTh1.csv 14 | model_id_name=etth1 15 | data_name=ETTh1 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 768 1536 3072 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --pred_len $pred_len \ 34 | --enc_in 7 \ 35 | --des 'Exp' \ 36 | --itr 1 --batch_size 32 --learning_rate 0.005 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 37 | done -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 sb-ai-lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
export CUDA_VISIBLE_DEVICES=0
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=DLinear

root_path_name=../../datasets/ETT/
data_path_name=ETTh1.csv
model_id_name=etth1
data_name=ETTh1

seq_len=336
pred_len=96
period=12

for j in 24 48 96 192 384 768 1536 3072
do
    train_budget=$(($j))
    # Fix: --model_id previously expanded the undefined variable
    # $model_id_name_ (shell took the trailing underscore as part of the
    # name), producing IDs like "336'_'96". Quote the separator as the
    # horizon-6 variants of this script do.
    python -u run_longExp.py \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features M \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 7 \
      --des 'Exp' \
      --itr 1 --batch_size 32 --learning_rate 0.005 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log
done
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTh2.csv 14 | model_id_name=etth2 15 | data_name=ETTh2 16 | 17 | seq_len=336 18 | pred_len=96 19 | period=12 20 | 21 | for j in 24 48 96 192 384 768 1536 3072 22 | do 23 | train_budget=$(($j)) 24 | python -u run_longExp.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --des 'Exp' \ 38 | --itr 1 --batch_size 32 --learning_rate 0.05 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 39 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/etth2-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
export CUDA_VISIBLE_DEVICES=0
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=DLinear

root_path_name=../../datasets/ETT/
data_path_name=ETTm1.csv
model_id_name=ETTm1
data_name=ETTm1

seq_len=336
pred_len=96
period=24

for j in 96 192 384 768 1536 3072 6144 12288
do
    train_budget=$(($j))
    # Fix: --model_id previously expanded the undefined $model_id_name_
    # (underscore swallowed into the variable name); separate it as the
    # horizon-6 variants of this script do.
    python -u run_longExp.py \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features M \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 7 \
      --des 'Exp' \
      --itr 1 --batch_size 8 --learning_rate 0.0001 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log
done
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTm2.csv 14 | model_id_name=ETTm2 15 | data_name=ETTm2 16 | 17 | seq_len=336 18 | pred_len=96 19 | period=24 20 | 21 | for j in 96 192 384 768 1536 3072 6144 12288 22 | do 23 | train_budget=$(($j)) 24 | python -u run_longExp.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --des 'Exp' \ 38 | --itr 1 --batch_size 32 --learning_rate 0.001 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 39 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/ettm1-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTm1.csv 14 | model_id_name=ETTm1 15 | data_name=ETTm1 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 96 192 384 768 1536 3072 6144 12288 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --pred_len $pred_len \ 34 | --enc_in 7 \ 35 | --des 'Exp' \ 36 | --itr 1 --batch_size 8 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 37 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/ettm2-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTm2.csv 14 | model_id_name=ETTm2 15 | data_name=ETTm2 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 96 192 384 768 1536 3072 6144 12288 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --pred_len $pred_len \ 34 | --enc_in 7 \ 35 | --des 'Exp' \ 36 | --itr 1 --batch_size 32 --learning_rate 0.001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 37 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Short-term_Forecasting/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Anomaly Detection 6 | 7 | ![image](../pic/short_term_result.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 13 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by: 14 | 15 | ```bash 16 | bash ./scripts/M4.sh 17 | ``` 18 | 19 | ## Citation 20 | 21 | If you find this repo useful, please cite our paper. 
22 | 23 | ``` 24 | @inproceedings{zhou2023onefitsall, 25 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 26 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 27 | booktitle={NeurIPS}, 28 | year={2023} 29 | } 30 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/illness-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! -d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=24 19 | period=52 20 | 21 | for i in 1 2 3 4 5 6 7 8 22 | do 23 | train_budget=$(($period*$i)) 24 | python -u run_longExp.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --des 'Exp' \ 38 | --itr 1 --batch_size 16 --learning_rate 0.01 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 39 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/traffic-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=traffic.csv 14 | model_id_name=traffic 15 | data_name=traffic 16 | 17 | seq_len=336 18 | pred_len=96 19 | period=168 20 | 21 | for j in 336 672 1344 2688 5376 10752 21504 43008 22 | do 23 | train_budget=$(($j)) 24 | python -u run_longExp.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 862 \ 37 | --des 'Exp' \ 38 | --itr 1 --batch_size 16 --learning_rate 0.05 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 39 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Classification/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Classification 6 | 7 | ![image](../pic/classification_result.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[mvts](https://github.com/gzerveas/mvts_transformer)]. 13 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. 
You can reproduce the experiment results by: 14 | 15 | ```bash 16 | bash ./scripts/EthanolConcentration.sh 17 | ``` 18 | 19 | ## Citation 20 | 21 | If you find this repo useful, please cite our paper. 22 | 23 | ``` 24 | @inproceedings{zhou2023onefitsall, 25 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 26 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 27 | booktitle={NeurIPS}, 28 | year={2023} 29 | } 30 | ``` -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Imputation/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Imputation 6 | 7 | ![image](../pic/imputation_result.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 13 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by: 14 | 15 | ```bash 16 | bash ./scripts/ETTh1.sh 17 | bash ./scripts/ETTh2.sh 18 | ``` 19 | 20 | ## Citation 21 | 22 | If you find this repo useful, please cite our paper. 
23 | 24 | ``` 25 | @inproceedings{zhou2023onefitsall, 26 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 27 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 28 | booktitle={NeurIPS}, 29 | year={2023} 30 | } 31 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/traffic-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! -d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/traffic/ 13 | data_path_name=traffic.csv 14 | model_id_name=traffic 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 336 672 1344 2688 5376 10752 21504 43008 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --pred_len $pred_len \ 34 | --enc_in 862 \ 35 | --des 'Exp' \ 36 | --itr 1 --batch_size 16 --learning_rate 0.05 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 37 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/electricity-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/electricity/ 13 | data_path_name=electricity.csv 14 | model_id_name=electricity 15 | data_name=custom 16 | 17 | seq_len=336 18 | pred_len=96 19 | period=24 20 | 21 | for j in 48 96 192 384 768 1536 3072 6144 22 | do 23 | train_budget=$(($j)) 24 | python -u run_longExp.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 321 \ 37 | --des 'Exp' \ 38 | --itr 1 --batch_size 16 --learning_rate 0.001 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 39 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Anomaly_Detection/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Anomaly Detection 6 | 7 | ![image](../pic/anomaly_detection.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 13 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. 
You can reproduce the experiment results by: 14 | 15 | ```bash 16 | bash ./scripts/MSL.sh 17 | bash ./scripts/PSM.sh 18 | ``` 19 | 20 | ## Citation 21 | 22 | If you find this repo useful, please cite our paper. 23 | 24 | ``` 25 | @inproceedings{zhou2023onefitsall, 26 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 27 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 28 | booktitle={NeurIPS}, 29 | year={2023} 30 | } 31 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/electricity-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! -d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/electricity/ 13 | data_path_name=electricity.csv 14 | model_id_name=electricity 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 48 96 192 384 768 1536 3072 6144 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --pred_len $pred_len \ 34 | --enc_in 321 \ 35 | --des 'Exp' \ 36 | --itr 1 --batch_size 16 --learning_rate 0.001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 37 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/README.md: -------------------------------------------------------------------------------- 1 | # One 
Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Zero-shot Learning 6 | 7 | ![image](../pic/zero_shot_result.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[Monash](https://github.com/rakshitha123/TSForecasting)]. 13 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by: 14 | 15 | ```bash 16 | bash ./scripts/m3_m4.sh 17 | bash ./scripts/m4_m3.sh 18 | ``` 19 | 20 | ## Citation 21 | 22 | If you find this repo useful, please cite our paper. 23 | 24 | ``` 25 | @inproceedings{zhou2023onefitsall, 26 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 27 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 28 | booktitle={NeurIPS}, 29 | year={2023} 30 | } 31 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/README.md: -------------------------------------------------------------------------------- 1 | # A Time Series is Worth 64 Words: Long-term Forecasting with Transformers - official implementation. 2 | 3 | Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam, "A Time Series is Worth 64 Words: Long-term Forecasting with Transformers", ICLR, 2023. [[paper](https://arxiv.org/abs/2211.14730)] 4 | 5 | 6 | ## Get Start 7 | 8 | - Install environment from yml file ```conda env create -f conda_baseline.yml```. 9 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 10 | - You can find scripts to reproduce results for few-shot learning in ```scripts/few-shot``` for different horizons. 
11 | - You can find scripts to reproduce results for few-shot learning in ```scripts/few-shot-horizon-6``` for different train budgets (Table 15 in Appendix). 12 | 13 | ## Citation 14 | 15 | ``` 16 | @inproceedings{nie2022time, 17 | title={A Time Series is Worth 64 Words: Long-term Forecasting with Transformers}, 18 | author={Nie, Yuqi and Nguyen, Nam H and Sinthong, Phanwadee and Kalagnanam, Jayant}, 19 | booktitle={The Eleventh International Conference on Learning Representations}, 20 | year={2022} 21 | } 22 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/illness-dlinear.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! -d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=DLinear 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=6 19 | 20 | for train_budget in 52 104 156 208 260 312 364 416 21 | do 22 | python -u run_longExp.py \ 23 | --is_training 1 \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --model $model_name \ 28 | --data $data_name \ 29 | --features M \ 30 | --train_budget $train_budget \ 31 | --drop_last_test 0 \ 32 | --seq_len $seq_len \ 33 | --label_len 18 \ 34 | --pred_len $pred_len \ 35 | --enc_in 7 \ 36 | --des 'Exp' \ 37 | --itr 1 --batch_size 32 --learning_rate 0.01 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 38 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/etth1_iTransformer.sh: 
-------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! -d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/ETT/ 13 | data_path_name=ETTh1.csv 14 | model_id_name=etth1 15 | data_name=ETTh1 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=12 20 | 21 | for j in 24 48 96 192 384 768 1536 3072 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 2 \ 37 | --enc_in 7 \ 38 | --dec_in 7 \ 39 | --c_out 7 \ 40 | --des 'Exp' \ 41 | --d_model 256 \ 42 | --d_ff 256 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/etth2_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/ETT/ 13 | data_path_name=ETTh2.csv 14 | model_id_name=etth2 15 | data_name=ETTh2 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=12 20 | 21 | for j in 24 48 96 192 384 768 1536 3072 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 2 \ 37 | --enc_in 7 \ 38 | --dec_in 7 \ 39 | --c_out 7 \ 40 | --des 'Exp' \ 41 | --d_model 128 \ 42 | --d_ff 128 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/ettm1_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/ETT/ 13 | data_path_name=ETTm1.csv 14 | model_id_name=ettm1 15 | data_name=ETTm1 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=48 20 | 21 | for j in 96 192 384 768 1536 3072 6144 12288 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 2 \ 37 | --enc_in 7 \ 38 | --dec_in 7 \ 39 | --c_out 7 \ 40 | --des 'Exp' \ 41 | --d_model 128 \ 42 | --d_ff 128 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/ettm2_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/ETT/ 13 | data_path_name=ETTm2.csv 14 | model_id_name=ettm2 15 | data_name=ETTm2 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=48 20 | 21 | for j in 96 192 384 768 1536 3072 6144 12288 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 2 \ 37 | --enc_in 7 \ 38 | --dec_in 7 \ 39 | --c_out 7 \ 40 | --des 'Exp' \ 41 | --d_model 128 \ 42 | --d_ff 128 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/weather_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/weather/ 13 | data_path_name=weather.csv 14 | model_id_name=weather 15 | data_name=custom 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=24 20 | 21 | for j in 48 96 192 384 768 1536 3072 6144 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 3 \ 37 | --enc_in 21 \ 38 | --dec_in 21 \ 39 | --c_out 21 \ 40 | --des 'Exp' \ 41 | --d_model 512 \ 42 | --d_ff 512 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/illness_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | seq_len=96 18 | pred_len=24 19 | period=52 20 | 21 | for i in 1 2 3 4 5 6 7 8 22 | do 23 | train_budget=$(($period*$i)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 2 \ 37 | --enc_in 7 \ 38 | --dec_in 7 \ 39 | --c_out 7 \ 40 | --des 'Exp' \ 41 | --d_model 128 \ 42 | --d_ff 128 \ 43 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 44 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/scripts/inference_scale_check.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | for model in \ 3 | GPT4TS_M3_Yearly GPT4TS_M3_Quarterly GPT4TS_M3_Monthly GPT4TS_M3_Other \ 4 | GPT4TS_M4_Yearly GPT4TS_M4_Quarterly GPT4TS_M4_Monthly GPT4TS_M4_Weekly GPT4TS_M4_Daily GPT4TS_M4_Hourly \ 5 | DLinear_M3_Yearly DLinear_M3_Quarterly DLinear_M3_Monthly DLinear_M3_Other \ 6 | DLinear_M4_Yearly DLinear_M4_Quarterly DLinear_M4_Monthly DLinear_M4_Weekly DLinear_M4_Daily DLinear_M4_Hourly \ 7 | PatchTST_M3_Yearly PatchTST_M3_Quarterly PatchTST_M3_Monthly PatchTST_M3_Other \ 8 | PatchTST_M4_Yearly PatchTST_M4_Quarterly PatchTST_M4_Monthly PatchTST_M4_Weekly PatchTST_M4_Daily PatchTST_M4_Hourly 9 | do 10 | for target_data in Weather ECL ILI Traffic ETT ETTh1 ETTh2 ETTm1 ETTm2 11 | do 12 | for source_scaler in 
standard_scaler min_max_scaler quantile_transformer False 13 | do 14 | python inference.py \ 15 | --model $model \ 16 | --target_data $target_data \ 17 | --checkpoints ./checkpoints/ \ 18 | --res_path ./results/scale_check_on_val/$model'_'$target_data'_'$source_scaler'.csv' \ 19 | --source_scaling $source_scaler >> ./logs/scaling_check/$model.txt \ 20 | --test_on_val 0 21 | done 22 | done 23 | done 24 | 25 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/etth1.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! -d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=ETTh1.csv 13 | model_id_name=ETTh1 14 | data_name=ETTh1 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 7 \ 31 | --e_layers 3 \ 32 | --n_heads 4 \ 33 | --d_model 16 \ 34 | --d_ff 128 \ 35 | --dropout 0.3\ 36 | --fc_dropout 0.3\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 43 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/etth2.sh: -------------------------------------------------------------------------------- 1 | if [ ! 
-d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! -d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=ETTh2.csv 13 | model_id_name=ETTh2 14 | data_name=ETTh2 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 7 \ 31 | --e_layers 3 \ 32 | --n_heads 4 \ 33 | --d_model 16 \ 34 | --d_ff 128 \ 35 | --dropout 0.3\ 36 | --fc_dropout 0.3\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 43 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/traffic_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/traffic/ 13 | data_path_name=traffic.csv 14 | model_id_name=traffic 15 | data_name=custom 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=168 20 | 21 | for j in 336 672 1344 2688 5376 10752 21504 43008 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 4 \ 37 | --enc_in 862 \ 38 | --dec_in 862 \ 39 | --c_out 862 \ 40 | --des 'Exp' \ 41 | --d_model 512 \ 42 | --d_ff 512 \ 43 | --batch_size 16 \ 44 | --learning_rate 0.001 \ 45 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 46 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/weather.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! 
-d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=weather.csv 13 | model_id_name=weather 14 | data_name=custom 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 21 \ 31 | --e_layers 3 \ 32 | --n_heads 16 \ 33 | --d_model 128 \ 34 | --d_ff 256 \ 35 | --dropout 0.2\ 36 | --fc_dropout 0.2\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --patience 20\ 43 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 44 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/few_shot/electricity_iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=4 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=iTransformer 11 | 12 | root_path_name=../datasets/electricity/ 13 | data_path_name=electricity.csv 14 | model_id_name=electricity 15 | data_name=custom 16 | 17 | seq_len=96 18 | pred_len=96 19 | period=48 20 | 21 | for j in 48 96 192 384 768 1536 3072 6144 22 | do 23 | train_budget=$(($j)) 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name_$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --e_layers 3 \ 37 | --enc_in 321 \ 38 | --dec_in 321 \ 39 | --c_out 321 \ 40 | --des 'Exp' \ 41 | --d_model 512 \ 42 | --d_ff 512 \ 43 | --batch_size 16 \ 44 | --learning_rate 0.0005 \ 45 | --itr 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 46 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/illness.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! 
-d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=104 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=national_illness.csv 13 | model_id_name=national_illness 14 | data_name=custom 15 | 16 | random_seed=2021 17 | for pred_len in 24 36 48 60 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 7 \ 31 | --e_layers 3 \ 32 | --n_heads 4 \ 33 | --d_model 16 \ 34 | --d_ff 128 \ 35 | --dropout 0.3\ 36 | --fc_dropout 0.3\ 37 | --head_dropout 0\ 38 | --patch_len 24\ 39 | --stride 2\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --lradj 'constant'\ 43 | --itr 1 --batch_size 16 --learning_rate 0.0025 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 44 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Few-shot Learning 6 | 7 | ![image](../pic/few_shot_result.png) 8 | 9 | ## Get Start 10 | 11 | - Install Python>=3.8, PyTorch 1.8.1. 12 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 13 | - For electricity and traffic datasets with a batch size of 2048, we utilize 4 V100 GPUs, while for other datasets, we use a single V100 GPU. 
14 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by: 15 | 16 | ```bash 17 | bash ./scripts/ETTh1.sh 18 | bash ./scripts/ETTh2.sh 19 | ``` 20 | 21 | ## Citation 22 | 23 | If you find this repo useful, please cite our paper. 24 | 25 | ``` 26 | @inproceedings{zhou2023onefitsall, 27 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 28 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 29 | booktitle={NeurIPS}, 30 | year={2023} 31 | } 32 | ``` -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | d += 1e-12 12 | return 0.01*(u / d).mean(-1) 13 | 14 | 15 | def MAE(pred, true): 16 | return np.mean(np.abs(pred - true)) 17 | 18 | 19 | def MSE(pred, true): 20 | return np.mean((pred - true) ** 2) 21 | 22 | 23 | def RMSE(pred, true): 24 | return np.sqrt(MSE(pred, true)) 25 | 26 | 27 | def MAPE(pred, true): 28 | return np.mean(np.abs((pred - true) / true)) 29 | 30 | def SMAPE(pred, true): 31 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 32 | 33 | 34 | def MSPE(pred, true): 35 | return np.mean(np.square((pred - true) / true)) 36 | 37 | 38 | def metric(pred, true): 39 | mae = MAE(pred, true) 40 | mse = MSE(pred, true) 41 | rmse = RMSE(pred, true) 42 | mape = MAPE(pred, true) 43 | smape = SMAPE(pred, true) 44 | mspe = MSPE(pred, true) 45 | rse = RSE(pred, true) 46 | corr = CORR(pred, true) 47 | 48 | return mae, mse, 
rmse, mape, smape, mspe, rse, corr 49 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/ettm1.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! -d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=ETTm1.csv 13 | model_id_name=ETTm1 14 | data_name=ETTm1 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 7 \ 31 | --e_layers 3 \ 32 | --n_heads 16 \ 33 | --d_model 128 \ 34 | --d_ff 256 \ 35 | --dropout 0.2\ 36 | --fc_dropout 0.2\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --patience 20\ 43 | --lradj 'TST'\ 44 | --pct_start 0.4\ 45 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 46 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/ettm2.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! 
-d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=ETTm2.csv 13 | model_id_name=ETTm2 14 | data_name=ETTm2 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 7 \ 31 | --e_layers 3 \ 32 | --n_heads 16 \ 33 | --d_model 128 \ 34 | --d_ff 256 \ 35 | --dropout 0.2\ 36 | --fc_dropout 0.2\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --patience 20\ 43 | --lradj 'TST'\ 44 | --pct_start 0.4 \ 45 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 46 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/traffic.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! 
-d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=traffic.csv 13 | model_id_name=traffic 14 | data_name=custom 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 862 \ 31 | --e_layers 3 \ 32 | --n_heads 16 \ 33 | --d_model 128 \ 34 | --d_ff 256 \ 35 | --dropout 0.2\ 36 | --fc_dropout 0.2\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --patience 10\ 43 | --lradj 'TST'\ 44 | --pct_start 0.2\ 45 | --itr 1 --batch_size 24 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 46 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/electricity.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! 
-d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | seq_len=336 9 | model_name=PatchTST 10 | 11 | root_path_name=./dataset/ 12 | data_path_name=electricity.csv 13 | model_id_name=Electricity 14 | data_name=custom 15 | 16 | random_seed=2021 17 | for pred_len in 96 192 336 720 18 | do 19 | python -u run_longExp.py \ 20 | --random_seed $random_seed \ 21 | --is_training 1 \ 22 | --root_path $root_path_name \ 23 | --data_path $data_path_name \ 24 | --model_id $model_id_name_$seq_len'_'$pred_len \ 25 | --model $model_name \ 26 | --data $data_name \ 27 | --features M \ 28 | --seq_len $seq_len \ 29 | --pred_len $pred_len \ 30 | --enc_in 321 \ 31 | --e_layers 3 \ 32 | --n_heads 16 \ 33 | --d_model 128 \ 34 | --d_ff 256 \ 35 | --dropout 0.2\ 36 | --fc_dropout 0.2\ 37 | --head_dropout 0\ 38 | --patch_len 16\ 39 | --stride 8\ 40 | --des 'Exp' \ 41 | --train_epochs 100\ 42 | --patience 10\ 43 | --lradj 'TST'\ 44 | --pct_start 0.2\ 45 | --itr 1 --batch_size 32 --learning_rate 0.0001 >logs/LongForecasting/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len.log 46 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/README.md: -------------------------------------------------------------------------------- 1 | # One Fits All: Power General Time Series Analysis by Pretrained LM (NeurIPS 2023 Spotlight) 2 | 3 | Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin, "One Fits All: Power General Time Series Analysis by Pretrained LM,", NeurIPS, 2023. [[paper](https://arxiv.org/abs/2302.11939)] 4 | 5 | ## Long-term Learning 6 | 7 | ![image](../pic/long_term_result.png) 8 | 9 | ## Get Start 10 | 11 | - The code is the same as few-shot leanring with 100 percent. 12 | - Install Python>=3.8, PyTorch 1.8.1. 13 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. 
14 | - For electricity and traffic datasets with a batch size of 2048, we utilize 4 V100 GPUs, while for other datasets, we use a single V100 GPU. 15 | - Train the model. We provide the experiment scripts of all benchmarks under the folder `./scripts`. You can reproduce the experiment results by: 16 | 17 | ```bash 18 | bash ./scripts/ETTh1.sh 19 | bash ./scripts/ETTh2.sh 20 | ``` 21 | 22 | ## Citation 23 | 24 | If you find this repo useful, please cite our paper. 25 | 26 | ``` 27 | @inproceedings{zhou2023onefitsall, 28 | title={{One Fits All}: Power General Time Series Analysis by Pretrained LM}, 29 | author={Tian Zhou, Peisong Niu, Xue Wang, Liang Sun, Rong Jin}, 30 | booktitle={NeurIPS}, 31 | year={2023} 32 | } 33 | ``` -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ready/illness_gpt0.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=24 19 | period=52 20 | 21 | for i in 1 2 3 4 5 6 7 8 22 | do 23 | train_budget=$(($period*$i)) 24 | python -u main.py \ 25 | --root_path $root_path_name \ 26 | --data_path $data_path_name \ 27 | --model_id $model_id_name_$seq_len'_'$pred_len \ 28 | --data $data_name \ 29 | --train_budget $train_budget \ 30 | --drop_last_test 0 \ 31 | --seq_len $seq_len \ 32 | --label_len 18 \ 33 | --pred_len $pred_len \ 34 | --batch_size 16 \ 35 | --learning_rate 0.0001 \ 36 | --train_epochs 10 \ 37 | --decay_fac 0.75 \ 38 | --d_model 768 \ 39 | --n_heads 4 \ 40 | --d_ff 768 \ 41 | --freq 0 \ 42 | --patch_size 24 \ 43 | --stride 2 \ 44 | --percent 100 \ 45 | --gpt_layer 0 \ 46 | --itr 1 \ 47 | --model $model_name \ 48 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log 49 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ready/illness_gpt6.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=24 19 | period=52 20 | 21 | for i in 1 2 3 4 5 6 7 8 22 | do 23 | train_budget=$(($period*$i)) 24 | python -u main.py \ 25 | --root_path $root_path_name \ 26 | --data_path $data_path_name \ 27 | --model_id $model_id_name_$seq_len'_'$pred_len \ 28 | --data $data_name \ 29 | --train_budget $train_budget \ 30 | --drop_last_test 0 \ 31 | --seq_len $seq_len \ 32 | --label_len 18 \ 33 | --pred_len $pred_len \ 34 | --batch_size 16 \ 35 | --learning_rate 0.0001 \ 36 | --train_epochs 10 \ 37 | --decay_fac 0.75 \ 38 | --d_model 768 \ 39 | --n_heads 4 \ 40 | --d_ff 768 \ 41 | --freq 0 \ 42 | --patch_size 24 \ 43 | --stride 2 \ 44 | --percent 100 \ 45 | --gpt_layer 6 \ 46 | --itr 1 \ 47 | --model $model_name \ 48 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_6_'$seq_len'_'$pred_len'_'$train_budget.log 49 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/traffic_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/traffic/ 13 | data_path_name=traffic.csv 14 | model_id_name=traffic 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=6 19 | gpt_layer=6 20 | 21 | for train_budget in 336 672 1344 2688 5376 10752 21504 43008 22 | do 23 | python -u main.py \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --data $data_name \ 28 | --train_budget $train_budget \ 29 | --drop_last_test 0 \ 30 | --seq_len $seq_len \ 31 | --label_len 18 \ 32 | --pred_len $pred_len \ 33 | --batch_size 16 \ 34 | --learning_rate 0.0001 \ 35 | --train_epochs 10 \ 36 | --decay_fac 0.75 \ 37 | --d_model 768 \ 38 | --n_heads 4 \ 39 | --d_ff 768 \ 40 | --freq 0 \ 41 | --patch_size 24 \ 42 | --stride 2 \ 43 | --percent 100 \ 44 | --gpt_layer $gpt_layer \ 45 | --itr 1 \ 46 | --model $model_name \ 47 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log 48 | done 49 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/univariate/etth1.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! -d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | 9 | if [ ! 
-d "./logs/LongForecasting/univariate" ]; then 10 | mkdir ./logs/LongForecasting/univariate 11 | fi 12 | 13 | seq_len=336 14 | model_name=PatchTST 15 | 16 | root_path_name=./dataset/ 17 | data_path_name=ETTh1.csv 18 | model_id_name=ETTh1 19 | data_name=ETTh1 20 | 21 | random_seed=2021 22 | for pred_len in 96 192 336 720 23 | do 24 | python -u run_longExp.py \ 25 | --random_seed $random_seed \ 26 | --is_training 1 \ 27 | --root_path $root_path_name \ 28 | --data_path $data_path_name \ 29 | --model_id $model_id_name_$seq_len'_'$pred_len \ 30 | --model $model_name \ 31 | --data $data_name \ 32 | --features S \ 33 | --seq_len $seq_len \ 34 | --pred_len $pred_len \ 35 | --enc_in 1 \ 36 | --e_layers 3 \ 37 | --n_heads 4 \ 38 | --d_model 16 \ 39 | --d_ff 128 \ 40 | --dropout 0.3\ 41 | --fc_dropout 0.3\ 42 | --head_dropout 0\ 43 | --patch_len 16\ 44 | --stride 8\ 45 | --des 'Exp' \ 46 | --train_epochs 100\ 47 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/univariate/$model_name'_fS_'$model_id_name'_'$seq_len'_'$pred_len.log 48 | done 49 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/PatchTST/univariate/etth2.sh: -------------------------------------------------------------------------------- 1 | if [ ! -d "./logs" ]; then 2 | mkdir ./logs 3 | fi 4 | 5 | if [ ! -d "./logs/LongForecasting" ]; then 6 | mkdir ./logs/LongForecasting 7 | fi 8 | 9 | if [ ! 
-d "./logs/LongForecasting/univariate" ]; then 10 | mkdir ./logs/LongForecasting/univariate 11 | fi 12 | 13 | seq_len=336 14 | model_name=PatchTST 15 | 16 | root_path_name=./dataset/ 17 | data_path_name=ETTh2.csv 18 | model_id_name=ETTh2 19 | data_name=ETTh2 20 | 21 | random_seed=2021 22 | for pred_len in 96 192 336 720 23 | do 24 | python -u run_longExp.py \ 25 | --random_seed $random_seed \ 26 | --is_training 1 \ 27 | --root_path $root_path_name \ 28 | --data_path $data_path_name \ 29 | --model_id $model_id_name_$seq_len'_'$pred_len \ 30 | --model $model_name \ 31 | --data $data_name \ 32 | --features S \ 33 | --seq_len $seq_len \ 34 | --pred_len $pred_len \ 35 | --enc_in 1 \ 36 | --e_layers 3 \ 37 | --n_heads 4 \ 38 | --d_model 16 \ 39 | --d_ff 128 \ 40 | --dropout 0.3\ 41 | --fc_dropout 0.3\ 42 | --head_dropout 0\ 43 | --patch_len 16\ 44 | --stride 8\ 45 | --des 'Exp' \ 46 | --train_epochs 100\ 47 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/univariate/$model_name'_fS_'$model_id_name'_'$seq_len'_'$pred_len.log 48 | done 49 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/illness-patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 364 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --e_layers 3 \ 38 | --n_heads 4 \ 39 | --d_model 16 \ 40 | --d_ff 128 \ 41 | --dropout 0.3\ 42 | --fc_dropout 0.3\ 43 | --head_dropout 0\ 44 | --patch_len 24\ 45 | --stride 2\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --lradj 'constant'\ 49 | --itr 1 --batch_size 16 --learning_rate 0.0025 >>logs/few-shot-horizon-6/ili_patchtst.log 50 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ili_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/ili/ 13 | data_path_name=ili.csv 14 | model_id_name=ili 15 | data_name=custom 16 | 17 | seq_len=104 18 | pred_len=6 19 | gpt_layer=6 20 | 21 | for train_budget in 52 104 156 208 260 312 364 416 22 | do 23 | python -u main.py \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --data $data_name \ 28 | --train_budget $train_budget \ 29 | --drop_last_test 0 \ 30 | --seq_len $seq_len \ 31 | --label_len 18 \ 32 | --pred_len $pred_len \ 33 | --batch_size 16 \ 34 | --learning_rate 0.0001 \ 35 | --train_epochs 10 \ 36 | --decay_fac 0.75 \ 37 | --d_model 768 \ 38 | --n_heads 4 \ 39 | --d_ff 768 \ 40 | --dropout 0.3 \ 41 | --enc_in 7 \ 42 | --c_out 7 \ 43 | --freq 0 \ 44 | --patch_size 24 \ 45 | --stride 2 \ 46 | --percent 100 \ 47 | --gpt_layer $gpt_layer \ 48 | --itr 1 \ 49 | --model $model_name \ 50 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log 51 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/illness-patchtst_2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/illness/ 13 | data_path_name=national_illness.csv 14 | model_id_name=national_illness 15 | data_name=custom 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 416 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --e_layers 3 \ 38 | --n_heads 4 \ 39 | --d_model 16 \ 40 | --d_ff 128 \ 41 | --dropout 0.3\ 42 | --fc_dropout 0.3\ 43 | --head_dropout 0\ 44 | --patch_len 24\ 45 | --stride 2\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --lradj 'constant'\ 49 | --itr 1 --batch_size 16 --learning_rate 0.0025 >>logs/few-shot-horizon-6/ili_patchtst_2.log 50 | done -------------------------------------------------------------------------------- /zero-shot/ForecastPFN/README.md: -------------------------------------------------------------------------------- 1 | # ForecastPFN 2 | 3 | This is the code repository for the paper [_ForecastPFN: Synthetically-Trained Zero-Shot Forecasting_](https://arxiv.org/abs/2311.01933). 4 | 5 | 14 | 15 | ## Get Start 16 | 17 | - Install environment via ```conda env create -f fpfn.yml```. This repository uses Python 3.9, CUDA 11.2 and cuDNN 8.1. 18 | - Download data. You can obtain all the benchmarks from [[TimesNet](https://github.com/thuml/Time-Series-Library)]. Make sure to put it in the folder `./academic_data/`. 19 | - Finally, the ForecastPFN model weights should be downloaded [here](https://drive.google.com/file/d/1acp5thS7I4g_6Gw40wNFGnU1Sx14z0cU/view?usp=sharing). 
Make sure to put it in the folder `./saved_weights/`. 20 | - To reproduce zero-shot resuls for benchmarks, use ```run_benchmarks.py```. Please specify data location in ```PATH```. 21 | - To reproduce zero-shot results M4 dataset, use ```run_M4.py```. Please specify data location in ```PATH```. -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Few-shot_Learning/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return np.mean(np.abs(100 * (pred - true) / (true +1e-8))) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / (true + 1e-8))) 32 | 33 | def SMAPE(pred, true): 34 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 35 | # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8)) 36 | 37 | def ND(pred, true): 38 | return np.mean(np.abs(true - pred)) / np.mean(np.abs(true)) 39 | 40 | def metric(pred, true): 41 | mae = MAE(pred, true) 42 | mse = MSE(pred, true) 43 | rmse = RMSE(pred, true) 44 | mape = MAPE(pred, true) 45 | mspe = MSPE(pred, true) 46 | smape = SMAPE(pred, true) 47 | nd = ND(pred, true) 48 | 49 | return mae, mse, rmse, mape, mspe, smape, nd 50 | -------------------------------------------------------------------------------- 
/few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return np.mean(np.abs(100 * (pred - true) / (true +1e-8))) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / (true + 1e-8))) 32 | 33 | def SMAPE(pred, true): 34 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 35 | # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8)) 36 | 37 | def ND(pred, true): 38 | return np.mean(np.abs(true - pred)) / np.mean(np.abs(true)) 39 | 40 | def metric(pred, true): 41 | mae = MAE(pred, true) 42 | mse = MSE(pred, true) 43 | rmse = RMSE(pred, true) 44 | mape = MAPE(pred, true) 45 | mspe = MSPE(pred, true) 46 | smape = SMAPE(pred, true) 47 | nd = ND(pred, true) 48 | 49 | return mae, mse, rmse, mape, mspe, smape, nd 50 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Long-term_Forecasting/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 
10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return np.mean(np.abs(100 * (pred - true) / (true +1e-8))) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / (true + 1e-8))) 32 | 33 | def SMAPE(pred, true): 34 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 35 | # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8)) 36 | 37 | def ND(pred, true): 38 | return np.mean(np.abs(true - pred)) / np.mean(np.abs(true)) 39 | 40 | def metric(pred, true): 41 | mae = MAE(pred, true) 42 | mse = MSE(pred, true) 43 | rmse = RMSE(pred, true) 44 | mape = MAPE(pred, true) 45 | mspe = MSPE(pred, true) 46 | smape = SMAPE(pred, true) 47 | nd = ND(pred, true) 48 | 49 | return mae, mse, rmse, mape, mspe, smape, nd 50 | -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return 
np.mean(np.abs(100 * (pred - true) / (true +1e-8))) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / (true + 1e-8))) 32 | 33 | def SMAPE(pred, true): 34 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 35 | # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8)) 36 | 37 | def ND(pred, true): 38 | return np.mean(np.abs(true - pred)) / np.mean(np.abs(true)) 39 | 40 | def metric(pred, true): 41 | mae = MAE(pred, true) 42 | mse = MSE(pred, true) 43 | rmse = RMSE(pred, true) 44 | mape = MAPE(pred, true) 45 | mspe = MSPE(pred, true) 46 | smape = SMAPE(pred, true) 47 | nd = ND(pred, true) 48 | 49 | return mae, mse, rmse, mape, mspe, smape, nd 50 | -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ettm1_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTm1.csv 14 | model_id_name=ETTm1 15 | data_name=ett_m 16 | 17 | seq_len=104 18 | pred_len=6 19 | gpt_layer=6 20 | 21 | for train_budget in 96 192 384 768 1536 3072 6144 12288 22 | do 23 | python -u main.py \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --data $data_name \ 28 | --train_budget $train_budget \ 29 | --drop_last_test 0 \ 30 | --seq_len $seq_len \ 31 | --label_len 48 \ 32 | --pred_len $pred_len \ 33 | --batch_size 256 \ 34 | --learning_rate 0.001 \ 35 | --train_epochs 10 \ 36 | --decay_fac 0.75 \ 37 | --d_model 768 \ 38 | --n_heads 4 \ 39 | --d_ff 768 \ 40 | --dropout 0.3 \ 41 | --enc_in 7 \ 42 | --c_out 7 \ 43 | --freq 0 \ 44 | --patch_size 16 \ 45 | --stride 16 \ 46 | --percent 100 \ 47 | --gpt_layer $gpt_layer \ 48 | --itr 1 \ 49 | --model $model_name \ 50 | --cos 1 \ 51 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log 52 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/ettm2_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTm2.csv 14 | model_id_name=ETTm2 15 | data_name=ett_m 16 | 17 | seq_len=104 18 | pred_len=6 19 | gpt_layer=6 20 | 21 | for train_budget in 96 192 384 768 1536 3072 6144 12288 22 | do 23 | python -u main.py \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --data $data_name \ 28 | --train_budget $train_budget \ 29 | --drop_last_test 0 \ 30 | --seq_len $seq_len \ 31 | --label_len 48 \ 32 | --pred_len $pred_len \ 33 | --batch_size 256 \ 34 | --learning_rate 0.002 \ 35 | --train_epochs 10 \ 36 | --decay_fac 0.75 \ 37 | --d_model 768 \ 38 | --n_heads 4 \ 39 | --d_ff 768 \ 40 | --dropout 0.3 \ 41 | --enc_in 7 \ 42 | --c_out 7 \ 43 | --freq 0 \ 44 | --patch_size 16 \ 45 | --stride 16 \ 46 | --percent 100 \ 47 | --gpt_layer $gpt_layer \ 48 | --itr 1 \ 49 | --model $model_name \ 50 | --cos 1 \ 51 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log 52 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/weather_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
#!/bin/sh
# Few-shot PatchTST on ETTh1: sweep the training budget and write one log per run.
export CUDA_VISIBLE_DEVICES=1

# Create log directories on first use.
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=PatchTST

root_path_name=../../datasets/ETT/
data_path_name=ETTh1.csv
model_id_name=etth1
data_name=ETTh1

random_seed=2024
seq_len=512
pred_len=96
period=12

for j in 24 48 96 192 384 768 1536 3072
do
    train_budget=$(($j))
    # BUG FIX: the original passed --model_id $model_id_name_$seq_len..., which
    # the shell parses as the undefined variable "model_id_name_" (underscore is
    # a valid name character), so the dataset name was silently dropped from the
    # model id. Delimit the underscore explicitly, as the other scripts do.
    python -u run_longExp.py \
      --random_seed $random_seed \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features M \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 7 \
      --e_layers 3 \
      --n_heads 4 \
      --d_model 16 \
      --d_ff 128 \
      --dropout 0.3 \
      --fc_dropout 0.3 \
      --head_dropout 0 \
      --patch_len 16 \
      --stride 8 \
      --des 'Exp' \
      --train_epochs 100 \
      --lradj 'constant' \
      --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log
done
#!/bin/sh
# Few-shot PatchTST on ETTm1: sweep the training budget and write one log per run.
export CUDA_VISIBLE_DEVICES=1

# Create log directories on first use.
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=PatchTST

root_path_name=../../datasets/ETT/
data_path_name=ETTm1.csv
model_id_name=ettm1
data_name=ETTm1

random_seed=2024
seq_len=512
pred_len=96
period=12

for j in 24 48 96 192 384 768 1536 3072
do
    train_budget=$(($j))
    # BUG FIX: $model_id_name_$seq_len expanded the undefined variable
    # "model_id_name_" (underscore is a valid name character), so --model_id
    # lost the dataset prefix. Delimit the underscore explicitly.
    python -u run_longExp.py \
      --random_seed $random_seed \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features M \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 7 \
      --e_layers 3 \
      --n_heads 4 \
      --d_model 16 \
      --d_ff 128 \
      --dropout 0.3 \
      --fc_dropout 0.3 \
      --head_dropout 0 \
      --patch_len 16 \
      --stride 8 \
      --des 'Exp' \
      --train_epochs 100 \
      --lradj 'constant' \
      --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log
done
#!/bin/sh
# Few-shot GPT4TS (6 GPT layers) on ETTh2: sweep the training budget.
export CUDA_VISIBLE_DEVICES=2

# Create log directories on first use.
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=GPT4TS

root_path_name=../../datasets/ETT/
data_path_name=ETTh2.csv
model_id_name=etth2
data_name=ett_h

seq_len=336
pred_len=96
period=12

for j in 24 48 96 192 384 768 1536 3072
do
    train_budget=$(($j))
    # BUG FIX: $model_id_name_$seq_len expanded the undefined variable
    # "model_id_name_", dropping the dataset prefix from --model_id.
    # NOTE(review): --dropout 1 looks unusually high for a dropout rate —
    # kept as-is, but confirm it is intended.
    python -u main.py \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --data $data_name \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --label_len 168 \
      --pred_len $pred_len \
      --batch_size 256 \
      --decay_fac 0.5 \
      --learning_rate 0.001 \
      --train_epochs 10 \
      --d_model 768 \
      --n_heads 4 \
      --d_ff 768 \
      --dropout 1 \
      --enc_in 7 \
      --c_out 7 \
      --freq 0 \
      --patch_size 16 \
      --stride 8 \
      --percent 100 \
      --gpt_layer 6 \
      --itr 1 \
      --model $model_name \
      --cos 1 \
      --tmax 20 \
      --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_6_'$seq_len'_'$pred_len'_'$train_budget.log
done
#!/bin/sh
# Univariate PatchTST long-term forecasting on ETTm2: sweep prediction horizons.

# Create log directories on first use.
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/LongForecasting" ]; then
    mkdir ./logs/LongForecasting
fi

if [ ! -d "./logs/LongForecasting/univariate" ]; then
    mkdir ./logs/LongForecasting/univariate
fi

seq_len=336
model_name=PatchTST

root_path_name=./dataset/
data_path_name=ETTm2.csv
model_id_name=ETTm2
data_name=ETTm2

random_seed=2021
for pred_len in 96 192 336 720
do
    # BUG FIX: $model_id_name_$seq_len expanded the undefined variable
    # "model_id_name_", dropping the dataset prefix from --model_id.
    python -u run_longExp.py \
      --random_seed $random_seed \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features S \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 1 \
      --e_layers 3 \
      --n_heads 16 \
      --d_model 128 \
      --d_ff 256 \
      --dropout 0.2 \
      --fc_dropout 0.2 \
      --head_dropout 0 \
      --patch_len 16 \
      --stride 8 \
      --des 'Exp' \
      --train_epochs 100 \
      --patience 20 \
      --lradj 'TST' \
      --pct_start 0.4 \
      --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/LongForecasting/univariate/$model_name'_fS_'$model_id_name'_'$seq_len'_'$pred_len.log
done
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTh1.csv 14 | model_id_name=etth1 15 | data_name=ETTh1 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 24 48 96 192 384 768 1536 3072 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --e_layers 3 \ 38 | --n_heads 4 \ 39 | --d_model 16 \ 40 | --d_ff 128 \ 41 | --dropout 0.3\ 42 | --fc_dropout 0.3\ 43 | --head_dropout 0\ 44 | --patch_len 16\ 45 | --stride 8\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --lradj 'constant'\ 49 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 50 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/etth2-patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTh2.csv 14 | model_id_name=etth2 15 | data_name=ETTh2 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 24 48 96 192 384 768 1536 3072 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 7 \ 37 | --e_layers 3 \ 38 | --n_heads 4 \ 39 | --d_model 16 \ 40 | --d_ff 128 \ 41 | --dropout 0.3\ 42 | --fc_dropout 0.3\ 43 | --head_dropout 0\ 44 | --patch_len 16\ 45 | --stride 8\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --lradj 'constant'\ 49 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 50 | done -------------------------------------------------------------------------------- /zero-shot/NeurIPS2023-One-Fits-All/Zero-shot_Learning/data_provider/temp_for_debug/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def RSE(pred, true): 5 | return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) 6 | 7 | 8 | def CORR(pred, true): 9 | u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) 10 | d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) 11 | return (u / d).mean(-1) 12 | 13 | 14 | def MAE(pred, true): 15 | return np.mean(np.abs(pred - true)) 16 | 17 | 18 | def MSE(pred, true): 19 | return np.mean((pred - true) ** 2) 20 | 21 | 22 | def RMSE(pred, 
true): 23 | return np.sqrt(MSE(pred, true)) 24 | 25 | 26 | def MAPE(pred, true): 27 | return np.mean(np.abs(100 * (pred - true) / (true +1e-8))) 28 | 29 | 30 | def MSPE(pred, true): 31 | return np.mean(np.square((pred - true) / (true + 1e-8))) 32 | 33 | def SMAPE(pred, true): 34 | return np.mean(200 * np.abs(pred - true) / (np.abs(pred) + np.abs(true) + 1e-8)) 35 | # return np.mean(200 * np.abs(pred - true) / (pred + true + 1e-8)) 36 | 37 | def ND(pred, true): 38 | return np.mean(np.abs(true - pred)) / np.mean(np.abs(true)) 39 | 40 | def metric(pred, true): 41 | mae = MAE(pred, true) 42 | mse = MSE(pred, true) 43 | rmse = RMSE(pred, true) 44 | mape = MAPE(pred, true) 45 | mspe = MSPE(pred, true) 46 | smape = SMAPE(pred, true) 47 | nd = ND(pred, true) 48 | 49 | return mae, mse, rmse, mape, mspe, smape, nd 50 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot/illness-patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
#!/bin/sh
# Few-shot GPT4TS on ETTh1: sweep the training budget and write one log per run.
export CUDA_VISIBLE_DEVICES=1

# Create log directories on first use.
[ -d "./logs" ] || mkdir ./logs
[ -d "./logs/few-shot" ] || mkdir ./logs/few-shot

model_name=GPT4TS

root_path_name=../../datasets/ETT/
data_path_name=ETTh1.csv
model_id_name=etth1
data_name=ett_h

seq_len=104
pred_len=6
gpt_layer=6

for train_budget in 24 48 96 192 384 768 1536 3072
do
    python -u main.py \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --data $data_name \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --label_len 48 \
      --pred_len $pred_len \
      --batch_size 256 \
      --learning_rate 0.001 \
      --train_epochs 10 \
      --decay_fac 0.5 \
      --d_model 768 \
      --n_heads 4 \
      --d_ff 768 \
      --dropout 0.3 \
      --enc_in 7 \
      --c_out 7 \
      --freq 0 \
      --lradj type4 \
      --patch_size 16 \
      --stride 8 \
      --percent 100 \
      --gpt_layer $gpt_layer \
      --itr 1 \
      --model $model_name \
      --tmax 20 \
      --cos 1 \
      --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log
done
-d "./logs/few-shot" ]; then 7 | mkdir ./logs/few-shot 8 | fi 9 | 10 | model_name=GPT4TS 11 | 12 | root_path_name=../../datasets/ETT/ 13 | data_path_name=ETTh2.csv 14 | model_id_name=ETTh2 15 | data_name=ett_h 16 | 17 | seq_len=104 18 | pred_len=6 19 | gpt_layer=6 20 | 21 | for train_budget in 24 48 96 192 384 768 1536 3072 22 | do 23 | python -u main.py \ 24 | --root_path $root_path_name \ 25 | --data_path $data_path_name \ 26 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 27 | --data $data_name \ 28 | --train_budget $train_budget \ 29 | --drop_last_test 0 \ 30 | --seq_len $seq_len \ 31 | --label_len 48 \ 32 | --pred_len $pred_len \ 33 | --batch_size 256 \ 34 | --learning_rate 0.001 \ 35 | --train_epochs 10 \ 36 | --decay_fac 0.5 \ 37 | --d_model 768 \ 38 | --n_heads 4 \ 39 | --d_ff 768 \ 40 | --dropout 0.3 \ 41 | --enc_in 7 \ 42 | --c_out 7 \ 43 | --freq 0 \ 44 | --lradj type4 \ 45 | --patch_size 16 \ 46 | --stride 8 \ 47 | --percent 100 \ 48 | --gpt_layer $gpt_layer \ 49 | --itr 1 \ 50 | --model $model_name \ 51 | --tmax 20 \ 52 | --cos 1 \ 53 | --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_'$gpt_layer'_'$seq_len'_'$pred_len'_'$train_budget.log 54 | done -------------------------------------------------------------------------------- /few-shot/NeurIPS2023-One-Fits-All_old/Few-shot_Learning/scripts/few-shot/electricity_gpt.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
#!/bin/sh
# Few-shot GPT4TS with 0 GPT layers (ablation) on ETTh1: sweep the training budget.
export CUDA_VISIBLE_DEVICES=2

# Create log directories on first use.
if [ ! -d "./logs" ]; then
    mkdir ./logs
fi

if [ ! -d "./logs/few-shot" ]; then
    mkdir ./logs/few-shot
fi

model_name=GPT4TS

root_path_name=../../datasets/ETT/
data_path_name=ETTh1.csv
model_id_name=etth1
data_name=ett_h

seq_len=336
pred_len=96
period=12

for j in 24 48 96 192 384 768 1536 3072
do
    train_budget=$(($j))
    # BUG FIX: $model_id_name_$seq_len expanded the undefined variable
    # "model_id_name_", dropping the dataset prefix from --model_id.
    python -u main.py \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --data $data_name \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --label_len 168 \
      --pred_len $pred_len \
      --batch_size 256 \
      --lradj type4 \
      --decay_fac 0.5 \
      --learning_rate 0.001 \
      --train_epochs 10 \
      --d_model 768 \
      --n_heads 4 \
      --d_ff 768 \
      --dropout 0.3 \
      --enc_in 7 \
      --c_out 7 \
      --freq 0 \
      --patch_size 16 \
      --stride 8 \
      --percent 100 \
      --gpt_layer 0 \
      --itr 1 \
      --model $model_name \
      --cos 1 \
      --tmax 20 \
      --is_gpt 1 >logs/few-shot/$model_name'_'$model_id_name'_0_'$seq_len'_'$pred_len'_'$train_budget.log
done
#!/bin/sh
# Few-shot PatchTST on ETTm1 with a fixed horizon of 6: sweep the training budget.
export CUDA_VISIBLE_DEVICES=1

# Create log directories on first use.
[ -d "./logs" ] || mkdir ./logs
[ -d "./logs/few-shot-horizon-6" ] || mkdir ./logs/few-shot-horizon-6

model_name=PatchTST

root_path_name=../../datasets/ETT/
data_path_name=ETTm1.csv
model_id_name=ETTm1
data_name=ETTm1

random_seed=2024
seq_len=148
pred_len=6

for train_budget in 96 192 384 768 1536 3072 6144 12288
do
    python -u run_longExp.py \
      --random_seed $random_seed \
      --is_training 1 \
      --root_path $root_path_name \
      --data_path $data_path_name \
      --model_id $model_id_name'_'$seq_len'_'$pred_len \
      --model $model_name \
      --data $data_name \
      --features M \
      --train_budget $train_budget \
      --drop_last_test 0 \
      --seq_len $seq_len \
      --pred_len $pred_len \
      --enc_in 7 \
      --e_layers 3 \
      --n_heads 16 \
      --d_model 128 \
      --d_ff 256 \
      --dropout 0.2 \
      --fc_dropout 0.2 \
      --head_dropout 0 \
      --patch_len 16 \
      --stride 8 \
      --des 'Exp' \
      --train_epochs 100 \
      --lradj 'TST' \
      --pct_start 0.4 \
      --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log
done
#!/bin/sh
# Zero-shot cross-dataset inference: run every pretrained source model against
# every target dataset, appending per-model logs and writing per-pair CSVs.
export CUDA_VISIBLE_DEVICES=1

# ROBUSTNESS FIX: the redirections below fail if these directories do not
# exist; sibling scripts create their log directories before use, so do the
# same here.
mkdir -p ./logs/other_dfs_inference
mkdir -p ./results/other_dfs_inference

# Models trained with standard scaling on the source data.
# NOTE(review): GPT4TS_M3_Yearly is absent from this list — confirm whether
# that omission is intentional.
for model in \
    GPT4TS_M3_Quarterly GPT4TS_M3_Monthly GPT4TS_M3_Other \
    GPT4TS_M4_Yearly GPT4TS_M4_Quarterly GPT4TS_M4_Monthly GPT4TS_M4_Weekly GPT4TS_M4_Daily GPT4TS_M4_Hourly \
    DLinear_M4_Yearly DLinear_M4_Quarterly DLinear_M4_Monthly DLinear_M4_Weekly DLinear_M4_Daily DLinear_M4_Hourly \
    PatchTST_M3_Yearly PatchTST_M3_Quarterly PatchTST_M3_Monthly PatchTST_M3_Other \
    PatchTST_M4_Yearly PatchTST_M4_Quarterly PatchTST_M4_Monthly PatchTST_M4_Weekly PatchTST_M4_Daily PatchTST_M4_Hourly
do
    for target_data in Weather ECL ILI Traffic ETTh1 ETTh2 ETTm1 ETTm2
    do
        python inference.py \
          --model $model \
          --target_data $target_data \
          --checkpoints ./checkpoints/ \
          --test_on_val 0 \
          --res_path ./results/other_dfs_inference/$model'_'$target_data'.csv' \
          --source_scaling standard_scaler >> ./logs/other_dfs_inference/$model.txt
    done
done

# DLinear M3 models were trained without source scaling.
# NOTE(review): this target list also contains a bare "ETT" entry absent from
# the first loop — confirm whether it is a stray or a real dataset key.
for model in \
    DLinear_M3_Yearly DLinear_M3_Quarterly DLinear_M3_Monthly DLinear_M3_Other
do
    for target_data in Weather ECL ILI Traffic ETT ETTh1 ETTh2 ETTm1 ETTm2
    do
        python inference.py \
          --model $model \
          --target_data $target_data \
          --checkpoints ./checkpoints/ \
          --test_on_val 0 \
          --res_path ./results/other_dfs_inference/$model'_'$target_data'.csv' \
          --source_scaling False >> ./logs/other_dfs_inference/$model.txt
    done
done
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/weather/ 13 | data_path_name=weather.csv 14 | model_id_name=weather 15 | data_name=custom 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 48 96 192 384 768 1536 3072 6144 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 21 \ 37 | --e_layers 3 \ 38 | --n_heads 16 \ 39 | --d_model 128 \ 40 | --d_ff 256 \ 41 | --dropout 0.2\ 42 | --fc_dropout 0.2\ 43 | --head_dropout 0\ 44 | --patch_len 16\ 45 | --stride 8\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --patience 20\ 49 | --lradj 'constant'\ 50 | --itr 1 --batch_size 128 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 51 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/traffic-patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/traffic/ 13 | data_path_name=traffic.csv 14 | model_id_name=traffic 15 | data_name=custom 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 336 672 1344 2688 5376 10752 21504 43008 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 862 \ 37 | --e_layers 3 \ 38 | --n_heads 16 \ 39 | --d_model 128 \ 40 | --d_ff 256 \ 41 | --dropout 0.2\ 42 | --fc_dropout 0.2\ 43 | --head_dropout 0\ 44 | --patch_len 16\ 45 | --stride 8\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --patience 10\ 49 | --lradj 'TST'\ 50 | --itr 1 --batch_size 24 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 51 | done -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/scripts/few-shot-horizon-6/electricity-patchtst.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | if [ ! -d "./logs" ]; then 3 | mkdir ./logs 4 | fi 5 | 6 | if [ ! 
-d "./logs/few-shot-horizon-6" ]; then 7 | mkdir ./logs/few-shot-horizon-6 8 | fi 9 | 10 | model_name=PatchTST 11 | 12 | root_path_name=../../datasets/electricity/ 13 | data_path_name=electricity.csv 14 | model_id_name=electricity 15 | data_name=custom 16 | 17 | random_seed=2024 18 | seq_len=148 19 | pred_len=6 20 | 21 | for train_budget in 48 96 192 384 768 1536 3072 6144 22 | do 23 | python -u run_longExp.py \ 24 | --random_seed $random_seed \ 25 | --is_training 1 \ 26 | --root_path $root_path_name \ 27 | --data_path $data_path_name \ 28 | --model_id $model_id_name'_'$seq_len'_'$pred_len \ 29 | --model $model_name \ 30 | --data $data_name \ 31 | --features M \ 32 | --train_budget $train_budget \ 33 | --drop_last_test 0 \ 34 | --seq_len $seq_len \ 35 | --pred_len $pred_len \ 36 | --enc_in 321 \ 37 | --e_layers 3 \ 38 | --n_heads 16 \ 39 | --d_model 128 \ 40 | --d_ff 256 \ 41 | --dropout 0.2\ 42 | --fc_dropout 0.2\ 43 | --head_dropout 0\ 44 | --patch_len 16\ 45 | --stride 8\ 46 | --des 'Exp' \ 47 | --train_epochs 100\ 48 | --lradj 'TST'\ 49 | --patience 10\ 50 | --pct_start 0.2\ 51 | --itr 1 --batch_size 32 --learning_rate 0.0001 >logs/few-shot-horizon-6/$model_name'_'$model_id_name'_'$seq_len'_'$pred_len'_'$train_budget.log 52 | done -------------------------------------------------------------------------------- /few-shot/iTransformer/experiments/exp_basic.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from model import Transformer, Informer, Reformer, Flowformer, Flashformer, \ 4 | iTransformer, iInformer, iReformer, iFlowformer, iFlashformer 5 | 6 | 7 | class Exp_Basic(object): 8 | def __init__(self, args): 9 | self.args = args 10 | self.model_dict = { 11 | 'Transformer': Transformer, 12 | 'Informer': Informer, 13 | 'Reformer': Reformer, 14 | 'Flowformer': Flowformer, 15 | 'Flashformer': Flashformer, 16 | 'iTransformer': iTransformer, 17 | 'iInformer': iInformer, 18 | 'iReformer': 
iReformer, 19 | 'iFlowformer': iFlowformer, 20 | 'iFlashformer': iFlashformer, 21 | } 22 | self.device = self._acquire_device() 23 | self.model = self._build_model().to(self.device) 24 | 25 | def _build_model(self): 26 | raise NotImplementedError 27 | return None 28 | 29 | def _acquire_device(self): 30 | if self.args.use_gpu: 31 | os.environ["CUDA_VISIBLE_DEVICES"] = str( 32 | self.args.gpu) if not self.args.use_multi_gpu else self.args.devices 33 | device = torch.device('cuda:{}'.format(self.args.gpu)) 34 | print('Use GPU: cuda:{}'.format(self.args.gpu)) 35 | else: 36 | device = torch.device('cpu') 37 | print('Use CPU') 38 | return device 39 | 40 | def _get_data(self): 41 | pass 42 | 43 | def vali(self): 44 | pass 45 | 46 | def train(self): 47 | pass 48 | 49 | def test(self): 50 | pass 51 | -------------------------------------------------------------------------------- /few-shot/PatchTST/PatchTST_supervised/data_provider/data_factory.py: -------------------------------------------------------------------------------- 1 | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred 2 | from torch.utils.data import DataLoader 3 | 4 | data_dict = { 5 | 'ETTh1': Dataset_ETT_hour, 6 | 'ETTh2': Dataset_ETT_hour, 7 | 'ETTm1': Dataset_ETT_minute, 8 | 'ETTm2': Dataset_ETT_minute, 9 | 'custom': Dataset_Custom, 10 | } 11 | 12 | 13 | def data_provider(args, flag, drop_last_test=0): 14 | Data = data_dict[args.data] 15 | timeenc = 0 if args.embed != 'timeF' else 1 16 | 17 | if flag == 'test': 18 | shuffle_flag = False 19 | drop_last = bool(drop_last_test) 20 | batch_size = args.batch_size 21 | freq = args.freq 22 | elif flag == 'pred': 23 | shuffle_flag = False 24 | drop_last = False 25 | batch_size = 1 26 | freq = args.freq 27 | Data = Dataset_Pred 28 | else: 29 | shuffle_flag = True 30 | drop_last = True 31 | batch_size = args.batch_size 32 | freq = args.freq 33 | 34 | data_set = Data( 35 | root_path=args.root_path, 36 | 
train_budget=args.train_budget, 37 | data_path=args.data_path, 38 | flag=flag, 39 | size=[args.seq_len, args.label_len, args.pred_len], 40 | features=args.features, 41 | target=args.target, 42 | timeenc=timeenc, 43 | freq=freq 44 | ) 45 | print(flag, len(data_set)) 46 | data_loader = DataLoader( 47 | data_set, 48 | batch_size=batch_size, 49 | shuffle=shuffle_flag, 50 | num_workers=args.num_workers, 51 | drop_last=drop_last) 52 | return data_set, data_loader 53 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/ECL/iInformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | model_name=iInformer 4 | # model_name=Informer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/electricity/ \ 9 | --data_path electricity.csv \ 10 | --model_id ECL_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 321 \ 18 | --dec_in 321 \ 19 | --c_out 321 \ 20 | --des 'Exp' \ 21 | --itr 1 22 | 23 | python -u run.py \ 24 | --is_training 1 \ 25 | --root_path ./dataset/electricity/ \ 26 | --data_path electricity.csv \ 27 | --model_id ECL_96_192 \ 28 | --model $model_name \ 29 | --data custom \ 30 | --features M \ 31 | --seq_len 96 \ 32 | --pred_len 192 \ 33 | --e_layers 2 \ 34 | --enc_in 321 \ 35 | --dec_in 321 \ 36 | --c_out 321 \ 37 | --des 'Exp' \ 38 | --itr 1 39 | 40 | python -u run.py \ 41 | --is_training 1 \ 42 | --root_path ./dataset/electricity/ \ 43 | --data_path electricity.csv \ 44 | --model_id ECL_96_336 \ 45 | --model $model_name \ 46 | --data custom \ 47 | --features M \ 48 | --seq_len 96 \ 49 | --pred_len 336 \ 50 | --e_layers 2 \ 51 | --enc_in 321 \ 52 | --dec_in 321 \ 53 | --c_out 321 \ 54 | --des 'Exp' \ 55 | --itr 1 56 | 57 | python -u run.py \ 58 | --is_training 1 \ 59 | --root_path 
./dataset/electricity/ \ 60 | --data_path electricity.csv \ 61 | --model_id ECL_96_720 \ 62 | --model $model_name \ 63 | --data custom \ 64 | --features M \ 65 | --seq_len 96 \ 66 | --pred_len 720 \ 67 | --e_layers 2 \ 68 | --enc_in 321 \ 69 | --dec_in 321 \ 70 | --c_out 321 \ 71 | --des 'Exp' \ 72 | --itr 1 73 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/ECL/iReformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | model_name=iReformer 4 | # model_name=Reformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/electricity/ \ 9 | --data_path electricity.csv \ 10 | --model_id ECL_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 321 \ 18 | --dec_in 321 \ 19 | --c_out 321 \ 20 | --des 'Exp' \ 21 | --itr 1 22 | 23 | python -u run.py \ 24 | --is_training 1 \ 25 | --root_path ./dataset/electricity/ \ 26 | --data_path electricity.csv \ 27 | --model_id ECL_96_192 \ 28 | --model $model_name \ 29 | --data custom \ 30 | --features M \ 31 | --seq_len 96 \ 32 | --pred_len 192 \ 33 | --e_layers 2 \ 34 | --enc_in 321 \ 35 | --dec_in 321 \ 36 | --c_out 321 \ 37 | --des 'Exp' \ 38 | --itr 1 39 | 40 | python -u run.py \ 41 | --is_training 1 \ 42 | --root_path ./dataset/electricity/ \ 43 | --data_path electricity.csv \ 44 | --model_id ECL_96_336 \ 45 | --model $model_name \ 46 | --data custom \ 47 | --features M \ 48 | --seq_len 96 \ 49 | --pred_len 336 \ 50 | --e_layers 2 \ 51 | --enc_in 321 \ 52 | --dec_in 321 \ 53 | --c_out 321 \ 54 | --des 'Exp' \ 55 | --itr 1 56 | 57 | python -u run.py \ 58 | --is_training 1 \ 59 | --root_path ./dataset/electricity/ \ 60 | --data_path electricity.csv \ 61 | --model_id ECL_96_720 \ 62 | --model $model_name \ 63 | --data custom \ 64 | --features M \ 65 | 
--seq_len 96 \ 66 | --pred_len 720 \ 67 | --e_layers 2 \ 68 | --enc_in 321 \ 69 | --dec_in 321 \ 70 | --c_out 321 \ 71 | --des 'Exp' \ 72 | --itr 1 73 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/ECL/iFlowformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | model_name=iFlowformer 4 | # model_name=Flowformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/electricity/ \ 9 | --data_path electricity.csv \ 10 | --model_id ECL_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 321 \ 18 | --dec_in 321 \ 19 | --c_out 321 \ 20 | --des 'Exp' \ 21 | --itr 1 22 | 23 | python -u run.py \ 24 | --is_training 1 \ 25 | --root_path ./dataset/electricity/ \ 26 | --data_path electricity.csv \ 27 | --model_id ECL_96_192 \ 28 | --model $model_name \ 29 | --data custom \ 30 | --features M \ 31 | --seq_len 96 \ 32 | --pred_len 192 \ 33 | --e_layers 2 \ 34 | --enc_in 321 \ 35 | --dec_in 321 \ 36 | --c_out 321 \ 37 | --des 'Exp' \ 38 | --itr 1 39 | 40 | python -u run.py \ 41 | --is_training 1 \ 42 | --root_path ./dataset/electricity/ \ 43 | --data_path electricity.csv \ 44 | --model_id ECL_96_336 \ 45 | --model $model_name \ 46 | --data custom \ 47 | --features M \ 48 | --seq_len 96 \ 49 | --pred_len 336 \ 50 | --e_layers 2 \ 51 | --enc_in 321 \ 52 | --dec_in 321 \ 53 | --c_out 321 \ 54 | --des 'Exp' \ 55 | --itr 1 56 | 57 | python -u run.py \ 58 | --is_training 1 \ 59 | --root_path ./dataset/electricity/ \ 60 | --data_path electricity.csv \ 61 | --model_id ECL_96_720 \ 62 | --model $model_name \ 63 | --data custom \ 64 | --features M \ 65 | --seq_len 96 \ 66 | --pred_len 720 \ 67 | --e_layers 2 \ 68 | --enc_in 321 \ 69 | --dec_in 321 \ 70 | --c_out 321 \ 71 | --des 'Exp' \ 72 | --itr 1 73 | 
-------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/ECL/iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | model_name=iTransformer 4 | # model_name=Transformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/electricity/ \ 9 | --data_path electricity.csv \ 10 | --model_id ECL_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 321 \ 18 | --dec_in 321 \ 19 | --c_out 321 \ 20 | --des 'Exp' \ 21 | --itr 1 22 | 23 | python -u run.py \ 24 | --is_training 1 \ 25 | --root_path ./dataset/electricity/ \ 26 | --data_path electricity.csv \ 27 | --model_id ECL_96_192 \ 28 | --model $model_name \ 29 | --data custom \ 30 | --features M \ 31 | --seq_len 96 \ 32 | --pred_len 192 \ 33 | --e_layers 2 \ 34 | --enc_in 321 \ 35 | --dec_in 321 \ 36 | --c_out 321 \ 37 | --des 'Exp' \ 38 | --itr 1 39 | 40 | python -u run.py \ 41 | --is_training 1 \ 42 | --root_path ./dataset/electricity/ \ 43 | --data_path electricity.csv \ 44 | --model_id ECL_96_336 \ 45 | --model $model_name \ 46 | --data custom \ 47 | --features M \ 48 | --seq_len 96 \ 49 | --pred_len 336 \ 50 | --e_layers 2 \ 51 | --enc_in 321 \ 52 | --dec_in 321 \ 53 | --c_out 321 \ 54 | --des 'Exp' \ 55 | --itr 1 56 | 57 | python -u run.py \ 58 | --is_training 1 \ 59 | --root_path ./dataset/electricity/ \ 60 | --data_path electricity.csv \ 61 | --model_id ECL_96_720 \ 62 | --model $model_name \ 63 | --data custom \ 64 | --features M \ 65 | --seq_len 96 \ 66 | --pred_len 720 \ 67 | --e_layers 2 \ 68 | --enc_in 321 \ 69 | --dec_in 321 \ 70 | --c_out 321 \ 71 | --des 'Exp' \ 72 | --itr 1 73 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Traffic/iInformer.sh: 
-------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | 3 | # model_name=iInformer 4 | model_name=Informer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/traffic/ \ 9 | --data_path traffic.csv \ 10 | --model_id traffic_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 862 \ 18 | --dec_in 862 \ 19 | --c_out 862 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 | --train_epochs 3 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/traffic/ \ 27 | --data_path traffic.csv \ 28 | --model_id traffic_96_192 \ 29 | --model $model_name \ 30 | --data custom \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 862 \ 36 | --dec_in 862 \ 37 | --c_out 862 \ 38 | --des 'Exp' \ 39 | --itr 1 \ 40 | --train_epochs 3 41 | 42 | python -u run.py \ 43 | --is_training 1 \ 44 | --root_path ./dataset/traffic/ \ 45 | --data_path traffic.csv \ 46 | --model_id traffic_96_336 \ 47 | --model $model_name \ 48 | --data custom \ 49 | --features M \ 50 | --seq_len 96 \ 51 | --pred_len 336 \ 52 | --e_layers 2 \ 53 | --enc_in 862 \ 54 | --dec_in 862 \ 55 | --c_out 862 \ 56 | --des 'Exp' \ 57 | --itr 1 \ 58 | --train_epochs 3 59 | 60 | python -u run.py \ 61 | --is_training 1 \ 62 | --root_path ./dataset/traffic/ \ 63 | --data_path traffic.csv \ 64 | --model_id traffic_96_720 \ 65 | --model $model_name \ 66 | --data custom \ 67 | --features M \ 68 | --seq_len 96 \ 69 | --pred_len 720 \ 70 | --e_layers 2 \ 71 | --enc_in 862 \ 72 | --dec_in 862 \ 73 | --c_out 862 \ 74 | --des 'Exp' \ 75 | --itr 1 \ 76 | --train_epochs 3 77 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Traffic/iReformer.sh: -------------------------------------------------------------------------------- 1 
| export CUDA_VISIBLE_DEVICES=1 2 | 3 | model_name=iReformer 4 | # model_name=Reformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/traffic/ \ 9 | --data_path traffic.csv \ 10 | --model_id traffic_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 862 \ 18 | --dec_in 862 \ 19 | --c_out 862 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 | --train_epochs 3 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/traffic/ \ 27 | --data_path traffic.csv \ 28 | --model_id traffic_96_192 \ 29 | --model $model_name \ 30 | --data custom \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 862 \ 36 | --dec_in 862 \ 37 | --c_out 862 \ 38 | --des 'Exp' \ 39 | --itr 1 \ 40 | --train_epochs 3 41 | 42 | python -u run.py \ 43 | --is_training 1 \ 44 | --root_path ./dataset/traffic/ \ 45 | --data_path traffic.csv \ 46 | --model_id traffic_96_336 \ 47 | --model $model_name \ 48 | --data custom \ 49 | --features M \ 50 | --seq_len 96 \ 51 | --pred_len 336 \ 52 | --e_layers 2 \ 53 | --enc_in 862 \ 54 | --dec_in 862 \ 55 | --c_out 862 \ 56 | --des 'Exp' \ 57 | --itr 1 \ 58 | --train_epochs 3 59 | 60 | python -u run.py \ 61 | --is_training 1 \ 62 | --root_path ./dataset/traffic/ \ 63 | --data_path traffic.csv \ 64 | --model_id traffic_96_720 \ 65 | --model $model_name \ 66 | --data custom \ 67 | --features M \ 68 | --seq_len 96 \ 69 | --pred_len 720 \ 70 | --e_layers 2 \ 71 | --enc_in 862 \ 72 | --dec_in 862 \ 73 | --c_out 862 \ 74 | --des 'Exp' \ 75 | --itr 1 \ 76 | --train_epochs 3 77 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Traffic/iFlowformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | 3 | model_name=iFlowformer 4 | # 
model_name=Flowformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/traffic/ \ 9 | --data_path traffic.csv \ 10 | --model_id traffic_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 862 \ 18 | --dec_in 862 \ 19 | --c_out 862 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 | --train_epochs 3 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/traffic/ \ 27 | --data_path traffic.csv \ 28 | --model_id traffic_96_192 \ 29 | --model $model_name \ 30 | --data custom \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 862 \ 36 | --dec_in 862 \ 37 | --c_out 862 \ 38 | --des 'Exp' \ 39 | --itr 1 \ 40 | --train_epochs 3 41 | 42 | python -u run.py \ 43 | --is_training 1 \ 44 | --root_path ./dataset/traffic/ \ 45 | --data_path traffic.csv \ 46 | --model_id traffic_96_336 \ 47 | --model $model_name \ 48 | --data custom \ 49 | --features M \ 50 | --seq_len 96 \ 51 | --pred_len 336 \ 52 | --e_layers 2 \ 53 | --enc_in 862 \ 54 | --dec_in 862 \ 55 | --c_out 862 \ 56 | --des 'Exp' \ 57 | --itr 1 \ 58 | --train_epochs 3 59 | 60 | python -u run.py \ 61 | --is_training 1 \ 62 | --root_path ./dataset/traffic/ \ 63 | --data_path traffic.csv \ 64 | --model_id traffic_96_720 \ 65 | --model $model_name \ 66 | --data custom \ 67 | --features M \ 68 | --seq_len 96 \ 69 | --pred_len 720 \ 70 | --e_layers 2 \ 71 | --enc_in 862 \ 72 | --dec_in 862 \ 73 | --c_out 862 \ 74 | --des 'Exp' \ 75 | --itr 1 \ 76 | --train_epochs 3 77 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Traffic/iTransformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | 3 | model_name=iTransformer 4 | #model_name=Transformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | 
--root_path ./dataset/traffic/ \ 9 | --data_path traffic.csv \ 10 | --model_id traffic_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 862 \ 18 | --dec_in 862 \ 19 | --c_out 862 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 | --train_epochs 3 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/traffic/ \ 27 | --data_path traffic.csv \ 28 | --model_id traffic_96_192 \ 29 | --model $model_name \ 30 | --data custom \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 862 \ 36 | --dec_in 862 \ 37 | --c_out 862 \ 38 | --des 'Exp' \ 39 | --itr 1 \ 40 | --train_epochs 3 41 | 42 | python -u run.py \ 43 | --is_training 1 \ 44 | --root_path ./dataset/traffic/ \ 45 | --data_path traffic.csv \ 46 | --model_id traffic_96_336 \ 47 | --model $model_name \ 48 | --data custom \ 49 | --features M \ 50 | --seq_len 96 \ 51 | --pred_len 336 \ 52 | --e_layers 2 \ 53 | --enc_in 862 \ 54 | --dec_in 862 \ 55 | --c_out 862 \ 56 | --des 'Exp' \ 57 | --itr 1 \ 58 | --train_epochs 3 59 | 60 | python -u run.py \ 61 | --is_training 1 \ 62 | --root_path ./dataset/traffic/ \ 63 | --data_path traffic.csv \ 64 | --model_id traffic_96_720 \ 65 | --model $model_name \ 66 | --data custom \ 67 | --features M \ 68 | --seq_len 96 \ 69 | --pred_len 720 \ 70 | --e_layers 2 \ 71 | --enc_in 862 \ 72 | --dec_in 862 \ 73 | --c_out 862 \ 74 | --des 'Exp' \ 75 | --itr 1 \ 76 | --train_epochs 3 77 | -------------------------------------------------------------------------------- /few-shot/iTransformer/data_provider/data_factory.py: -------------------------------------------------------------------------------- 1 | from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Solar, Dataset_PEMS, \ 2 | Dataset_Pred 3 | from torch.utils.data import DataLoader 4 | 5 | data_dict = { # maps the --data flag value to a Dataset class 6 | 'ETTh1': Dataset_ETT_hour, 7
| 'ETTh2': Dataset_ETT_hour, 8 | 'ETTm1': Dataset_ETT_minute, 9 | 'ETTm2': Dataset_ETT_minute, 10 | 'Solar': Dataset_Solar, 11 | 'PEMS': Dataset_PEMS, 12 | 'custom': Dataset_Custom, 13 | } 14 | 15 | 16 | def data_provider(args, flag, drop_last_test=0): # builds (dataset, dataloader) for one split; flag is 'test', 'pred', or train/valid 17 | Data = data_dict[args.data] 18 | timeenc = 0 if args.embed != 'timeF' else 1 19 | 20 | if flag == 'test': 21 | shuffle_flag = False 22 | drop_last = bool(drop_last_test) # caller opts in to dropping the ragged last test batch 23 | batch_size = 1 # bsz=1 for evaluation 24 | freq = args.freq 25 | elif flag == 'pred': 26 | shuffle_flag = False 27 | drop_last = False 28 | batch_size = 1 29 | freq = args.freq 30 | Data = Dataset_Pred # 'pred' overrides the registry with the prediction dataset class 31 | else: 32 | shuffle_flag = True 33 | drop_last = True 34 | batch_size = args.batch_size # bsz for train and valid 35 | freq = args.freq 36 | 37 | data_set = Data( 38 | root_path=args.root_path, 39 | train_budget=args.train_budget, # few-shot cap on training samples — presumably; confirm in the Dataset implementations 40 | data_path=args.data_path, 41 | flag=flag, 42 | size=[args.seq_len, args.label_len, args.pred_len], 43 | features=args.features, 44 | target=args.target, 45 | timeenc=timeenc, 46 | freq=freq, 47 | ) 48 | print(flag, len(data_set)) 49 | data_loader = DataLoader( 50 | data_set, 51 | batch_size=batch_size, 52 | shuffle=shuffle_flag, 53 | num_workers=args.num_workers, 54 | drop_last=drop_last) 55 | return data_set, data_loader 56 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Weather/iInformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | 3 | model_name=iInformer 4 | #model_name=Informer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/weather/ \ 9 | --data_path weather.csv \ 10 | --model_id weather_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 21 \ 18 | --dec_in 21 \ 19 | --c_out 21 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 |
--batch_size 128 \ 23 | --train_epochs 3 24 | 25 | python -u run.py \ 26 | --is_training 1 \ 27 | --root_path ./dataset/weather/ \ 28 | --data_path weather.csv \ 29 | --model_id weather_96_192 \ 30 | --model $model_name \ 31 | --data custom \ 32 | --features M \ 33 | --seq_len 96 \ 34 | --pred_len 192 \ 35 | --e_layers 2 \ 36 | --enc_in 21 \ 37 | --dec_in 21 \ 38 | --c_out 21 \ 39 | --des 'Exp' \ 40 | --batch_size 128 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/weather/ \ 46 | --data_path weather.csv \ 47 | --model_id weather_96_336 \ 48 | --model $model_name \ 49 | --data custom \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 21 \ 55 | --dec_in 21 \ 56 | --c_out 21 \ 57 | --des 'Exp' \ 58 | --batch_size 128 \ 59 | --itr 1 60 | 61 | python -u run.py \ 62 | --is_training 1 \ 63 | --root_path ./dataset/weather/ \ 64 | --data_path weather.csv \ 65 | --model_id weather_96_720 \ 66 | --model $model_name \ 67 | --data custom \ 68 | --features M \ 69 | --seq_len 96 \ 70 | --pred_len 720 \ 71 | --e_layers 2 \ 72 | --enc_in 21 \ 73 | --dec_in 21 \ 74 | --c_out 21 \ 75 | --des 'Exp' \ 76 | --batch_size 128 \ 77 | --itr 1 78 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/boost_performance/Weather/iReformer.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | 3 | model_name=iReformer 4 | #model_name=Reformer 5 | 6 | python -u run.py \ 7 | --is_training 1 \ 8 | --root_path ./dataset/weather/ \ 9 | --data_path weather.csv \ 10 | --model_id weather_96_96 \ 11 | --model $model_name \ 12 | --data custom \ 13 | --features M \ 14 | --seq_len 96 \ 15 | --pred_len 96 \ 16 | --e_layers 2 \ 17 | --enc_in 21 \ 18 | --dec_in 21 \ 19 | --c_out 21 \ 20 | --des 'Exp' \ 21 | --itr 1 \ 22 | --batch_size 128 \ 23 | --train_epochs 3 24 | 25 | python -u run.py \ 26 
| --is_training 1 \ 27 | --root_path ./dataset/weather/ \ 28 | --data_path weather.csv \ 29 | --model_id weather_96_192 \ 30 | --model $model_name \ 31 | --data custom \ 32 | --features M \ 33 | --seq_len 96 \ 34 | --pred_len 192 \ 35 | --e_layers 2 \ 36 | --enc_in 21 \ 37 | --dec_in 21 \ 38 | --c_out 21 \ 39 | --des 'Exp' \ 40 | --batch_size 128 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/weather/ \ 46 | --data_path weather.csv \ 47 | --model_id weather_96_336 \ 48 | --model $model_name \ 49 | --data custom \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 21 \ 55 | --dec_in 21 \ 56 | --c_out 21 \ 57 | --des 'Exp' \ 58 | --batch_size 128 \ 59 | --itr 1 60 | 61 | python -u run.py \ 62 | --is_training 1 \ 63 | --root_path ./dataset/weather/ \ 64 | --data_path weather.csv \ 65 | --model_id weather_96_720 \ 66 | --model $model_name \ 67 | --data custom \ 68 | --features M \ 69 | --seq_len 96 \ 70 | --pred_len 720 \ 71 | --e_layers 2 \ 72 | --enc_in 21 \ 73 | --dec_in 21 \ 74 | --c_out 21 \ 75 | --des 'Exp' \ 76 | --batch_size 128 \ 77 | --itr 1 78 | -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/multivariate_forecasting/ETT/iTransformer_ETTh1.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | 3 | model_name=iTransformer 4 | 5 | python -u run.py \ 6 | --is_training 1 \ 7 | --root_path ./dataset/ETT-small/ \ 8 | --data_path ETTh1.csv \ 9 | --model_id ETTh1_96_96 \ 10 | --model $model_name \ 11 | --data ETTh1 \ 12 | --features M \ 13 | --seq_len 96 \ 14 | --pred_len 96 \ 15 | --e_layers 2 \ 16 | --enc_in 7 \ 17 | --dec_in 7 \ 18 | --c_out 7 \ 19 | --des 'Exp' \ 20 | --d_model 256 \ 21 | --d_ff 256 \ 22 | --itr 1 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/ETT-small/ \ 27 | --data_path ETTh1.csv \ 28 | 
--model_id ETTh1_96_192 \ 29 | --model $model_name \ 30 | --data ETTh1 \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 7 \ 36 | --dec_in 7 \ 37 | --c_out 7 \ 38 | --des 'Exp' \ 39 | --d_model 256 \ 40 | --d_ff 256 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/ETT-small/ \ 46 | --data_path ETTh1.csv \ 47 | --model_id ETTh1_96_336 \ 48 | --model $model_name \ 49 | --data ETTh1 \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 7 \ 55 | --dec_in 7 \ 56 | --c_out 7 \ 57 | --des 'Exp' \ 58 | --d_model 512 \ 59 | --d_ff 512 \ 60 | --itr 1 61 | 62 | python -u run.py \ 63 | --is_training 1 \ 64 | --root_path ./dataset/ETT-small/ \ 65 | --data_path ETTh1.csv \ 66 | --model_id ETTh1_96_720 \ 67 | --model $model_name \ 68 | --data ETTh1 \ 69 | --features M \ 70 | --seq_len 96 \ 71 | --pred_len 720 \ 72 | --e_layers 2 \ 73 | --enc_in 7 \ 74 | --dec_in 7 \ 75 | --c_out 7 \ 76 | --des 'Exp' \ 77 | --d_model 512 \ 78 | --d_ff 512 \ 79 | --itr 1 -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/multivariate_forecasting/ETT/iTransformer_ETTh2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=1 2 | 3 | model_name=iTransformer 4 | 5 | python -u run.py \ 6 | --is_training 1 \ 7 | --root_path ./dataset/ETT-small/ \ 8 | --data_path ETTh2.csv \ 9 | --model_id ETTh2_96_96 \ 10 | --model $model_name \ 11 | --data ETTh2 \ 12 | --features M \ 13 | --seq_len 96 \ 14 | --pred_len 96 \ 15 | --e_layers 2 \ 16 | --enc_in 7 \ 17 | --dec_in 7 \ 18 | --c_out 7 \ 19 | --des 'Exp' \ 20 | --d_model 128 \ 21 | --d_ff 128 \ 22 | --itr 1 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/ETT-small/ \ 27 | --data_path ETTh2.csv \ 28 | --model_id ETTh2_96_192 \ 29 | --model $model_name \ 30 | --data ETTh2 \ 31 
| --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 7 \ 36 | --dec_in 7 \ 37 | --c_out 7 \ 38 | --des 'Exp' \ 39 | --d_model 128 \ 40 | --d_ff 128 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/ETT-small/ \ 46 | --data_path ETTh2.csv \ 47 | --model_id ETTh2_96_336 \ 48 | --model $model_name \ 49 | --data ETTh2 \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 7 \ 55 | --dec_in 7 \ 56 | --c_out 7 \ 57 | --des 'Exp' \ 58 | --d_model 128 \ 59 | --d_ff 128 \ 60 | --itr 1 61 | 62 | python -u run.py \ 63 | --is_training 1 \ 64 | --root_path ./dataset/ETT-small/ \ 65 | --data_path ETTh2.csv \ 66 | --model_id ETTh2_96_720 \ 67 | --model $model_name \ 68 | --data ETTh2 \ 69 | --features M \ 70 | --seq_len 96 \ 71 | --pred_len 720 \ 72 | --e_layers 2 \ 73 | --enc_in 7 \ 74 | --dec_in 7 \ 75 | --c_out 7 \ 76 | --des 'Exp' \ 77 | --d_model 128 \ 78 | --d_ff 128 \ 79 | --itr 1 -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/multivariate_forecasting/ETT/iTransformer_ETTm1.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=2 2 | 3 | model_name=iTransformer 4 | 5 | python -u run.py \ 6 | --is_training 1 \ 7 | --root_path ./dataset/ETT-small/ \ 8 | --data_path ETTm1.csv \ 9 | --model_id ETTm1_96_96 \ 10 | --model $model_name \ 11 | --data ETTm1 \ 12 | --features M \ 13 | --seq_len 96 \ 14 | --pred_len 96 \ 15 | --e_layers 2 \ 16 | --enc_in 7 \ 17 | --dec_in 7 \ 18 | --c_out 7 \ 19 | --des 'Exp' \ 20 | --d_model 128 \ 21 | --d_ff 128 \ 22 | --itr 1 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/ETT-small/ \ 27 | --data_path ETTm1.csv \ 28 | --model_id ETTm1_96_192 \ 29 | --model $model_name \ 30 | --data ETTm1 \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 
\ 35 | --enc_in 7 \ 36 | --dec_in 7 \ 37 | --c_out 7 \ 38 | --des 'Exp' \ 39 | --d_model 128 \ 40 | --d_ff 128 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/ETT-small/ \ 46 | --data_path ETTm1.csv \ 47 | --model_id ETTm1_96_336 \ 48 | --model $model_name \ 49 | --data ETTm1 \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 7 \ 55 | --dec_in 7 \ 56 | --c_out 7 \ 57 | --des 'Exp' \ 58 | --d_model 128 \ 59 | --d_ff 128 \ 60 | --itr 1 61 | 62 | python -u run.py \ 63 | --is_training 1 \ 64 | --root_path ./dataset/ETT-small/ \ 65 | --data_path ETTm1.csv \ 66 | --model_id ETTm1_96_720 \ 67 | --model $model_name \ 68 | --data ETTm1 \ 69 | --features M \ 70 | --seq_len 96 \ 71 | --pred_len 720 \ 72 | --e_layers 2 \ 73 | --enc_in 7 \ 74 | --dec_in 7 \ 75 | --c_out 7 \ 76 | --des 'Exp' \ 77 | --d_model 128 \ 78 | --d_ff 128 \ 79 | --itr 1 -------------------------------------------------------------------------------- /few-shot/iTransformer/scripts/multivariate_forecasting/ETT/iTransformer_ETTm2.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0 2 | 3 | model_name=iTransformer 4 | 5 | python -u run.py \ 6 | --is_training 1 \ 7 | --root_path ./dataset/ETT-small/ \ 8 | --data_path ETTm2.csv \ 9 | --model_id ETTm2_96_96 \ 10 | --model $model_name \ 11 | --data ETTm2 \ 12 | --features M \ 13 | --seq_len 96 \ 14 | --pred_len 96 \ 15 | --e_layers 2 \ 16 | --enc_in 7 \ 17 | --dec_in 7 \ 18 | --c_out 7 \ 19 | --des 'Exp' \ 20 | --d_model 128 \ 21 | --d_ff 128 \ 22 | --itr 1 23 | 24 | python -u run.py \ 25 | --is_training 1 \ 26 | --root_path ./dataset/ETT-small/ \ 27 | --data_path ETTm2.csv \ 28 | --model_id ETTm2_96_192 \ 29 | --model $model_name \ 30 | --data ETTm2 \ 31 | --features M \ 32 | --seq_len 96 \ 33 | --pred_len 192 \ 34 | --e_layers 2 \ 35 | --enc_in 7 \ 36 | --dec_in 7 \ 37 | --c_out 7 \ 38 | --des 'Exp' \ 39 
| --d_model 128 \ 40 | --d_ff 128 \ 41 | --itr 1 42 | 43 | python -u run.py \ 44 | --is_training 1 \ 45 | --root_path ./dataset/ETT-small/ \ 46 | --data_path ETTm2.csv \ 47 | --model_id ETTm2_96_336 \ 48 | --model $model_name \ 49 | --data ETTm2 \ 50 | --features M \ 51 | --seq_len 96 \ 52 | --pred_len 336 \ 53 | --e_layers 2 \ 54 | --enc_in 7 \ 55 | --dec_in 7 \ 56 | --c_out 7 \ 57 | --des 'Exp' \ 58 | --d_model 128 \ 59 | --d_ff 128 \ 60 | --itr 1 61 | 62 | python -u run.py \ 63 | --is_training 1 \ 64 | --root_path ./dataset/ETT-small/ \ 65 | --data_path ETTm2.csv \ 66 | --model_id ETTm2_96_720 \ 67 | --model $model_name \ 68 | --data ETTm2 \ 69 | --features M \ 70 | --seq_len 96 \ 71 | --pred_len 720 \ 72 | --e_layers 2 \ 73 | --enc_in 7 \ 74 | --dec_in 7 \ 75 | --c_out 7 \ 76 | --des 'Exp' \ 77 | --d_model 128 \ 78 | --d_ff 128 \ 79 | --itr 1 --------------------------------------------------------------------------------