├── .gitignore ├── README.md ├── dataset ├── __init__.py ├── ablation │ ├── adam_optimizer │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── config.json │ ├── diffusionstep_0010 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── diffusionstep_0100 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── diffusionstep_2000 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── layers_bn1013 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── layers_bn1415 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── layers_cv0000 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── layers_fc0000 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── noise_0000a00 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_0000a01 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_0001a00 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_x001 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_x01 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_x10 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── noise_x100 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── numberckpt_001 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── numberckpt_010 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── 
numberckpt_050 │ │ ├── finetune.py │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── numberckpt_200 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── numberckpt_300 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py │ ├── numberckpt_400 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── save_epoch1 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── save_epoch3 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── save_lr00003 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── save_lr03000 │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── sgd_optimizer │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ └── vae_sample │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── reselect.py │ │ ├── test.py │ │ └── train.py ├── full │ ├── cifar10_convnet │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── cifar10_convnext │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── cifar10_mlp │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── cifar10_resnet │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── cifar10_vit │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── config.json │ ├── mnist_cnnmedium │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── stl10_convnet │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── stl10_convnext │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── stl10_mlp │ │ ├── finetune.py │ 
│ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── stl10_resnet │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ ├── stl10_vit │ │ ├── finetune.py │ │ ├── log.txt │ │ ├── model.py │ │ ├── test.py │ │ └── train.py │ └── svhn_cnnmedium │ │ ├── finetune.py │ │ ├── model.py │ │ ├── test.py │ │ └── train.py └── main │ ├── cifar100_convnextbase │ ├── finetune.py │ ├── log.txt │ ├── performance.cache │ ├── test.py │ └── train.py │ ├── cifar100_convnexttiny │ ├── finetune.py │ ├── log.txt │ ├── performance.cache │ ├── test.py │ └── train.py │ ├── cifar100_resnet18 │ ├── finetune.py │ ├── log.txt │ ├── model.py │ ├── test.py │ └── train.py │ ├── cifar100_resnet50 │ ├── finetune.py │ ├── log.txt │ ├── performance.cache │ ├── test.py │ └── train.py │ ├── cifar100_vitbase │ ├── finetune.py │ ├── log.txt │ ├── performance.cache │ ├── test.py │ └── train.py │ ├── cifar100_vittiny │ ├── finetune.py │ ├── log.txt │ ├── performance.cache │ ├── test.py │ └── train.py │ ├── cifar10_convnextbase │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── cifar10_convnexttiny │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── cifar10_resnet18 │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── cifar10_resnet50 │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── cifar10_vitbase │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── cifar10_vittiny │ ├── finetune.py │ ├── test.py │ └── train.py │ ├── config.json │ ├── flowers_convnextbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── flowers_convnexttiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── flowers_resnet18 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── flowers_resnet50 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── flowers_vitbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── flowers_vittiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── 
food101_convnextbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── food101_convnexttiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── food101_resnet18 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── food101_resnet50 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── food101_vitbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── food101_vittiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── in1k_convnextbase │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── in1k_convnexttiny │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── in1k_resnet18 │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── in1k_resnet50 │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── in1k_vitbase │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── in1k_vittiny │ ├── finetune.py │ ├── log.txt │ └── test.py │ ├── pets_convnextbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── pets_convnexttiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── pets_resnet18 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── pets_resnet50 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── pets_vitbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── pets_vittiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── stl10_convnextbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── stl10_convnexttiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── stl10_resnet18 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── stl10_resnet50 │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ ├── stl10_vitbase │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py │ └── stl10_vittiny │ ├── finetune.py │ ├── log.txt │ ├── test.py │ └── train.py ├── figures ├── motivation.gif └── pipeline.png ├── model ├── __init__.py ├── denoiser.py ├── 
diffusion.py └── pdiff.py ├── requirements.txt └── workspace ├── ablation ├── adam_optimizer.py ├── diffusionstep_0010.py ├── diffusionstep_0100.py ├── diffusionstep_2000.py ├── layers_bn1013.py ├── layers_bn1415.py ├── layers_cv0000.py ├── layers_fc0000.py ├── noise_0000a00.py ├── noise_0000a01.py ├── noise_0001a00.py ├── noise_x001.py ├── noise_x01.py ├── noise_x10.py ├── noise_x100.py ├── numberckpt_001.py ├── numberckpt_010.py ├── numberckpt_050.py ├── numberckpt_200.py ├── numberckpt_300.py ├── numberckpt_400.py ├── save_epoch1.py ├── save_epoch3.py ├── save_lr00003.py ├── save_lr03000.py ├── sgd_optimizer.py └── vae_sample.py ├── ensemble.py ├── evaluate.py ├── full ├── cifar10_convnet.py ├── cifar10_convnext.py ├── cifar10_mlp.py ├── cifar10_resnet.py ├── cifar10_vit.py ├── mnist_cnnmedium.py ├── stl10_convnet.py ├── stl10_convnext.py ├── stl10_mlp.py ├── stl10_resnet.py ├── stl10_vit.py └── svhn_cnnmedium.py ├── generate.py ├── launch.sh ├── main ├── cifar100_convnextbase.py ├── cifar100_convnexttiny.py ├── cifar100_resnet18.py ├── cifar100_resnet50.py ├── cifar100_vitbase.py ├── cifar100_vittiny.py ├── cifar10_convnextbase.py ├── cifar10_convnexttiny.py ├── cifar10_resnet18.py ├── cifar10_resnet50.py ├── cifar10_vitbase.py ├── cifar10_vittiny.py ├── flowers_convnextbase.py ├── flowers_convnexttiny.py ├── flowers_resnet18.py ├── flowers_resnet50.py ├── flowers_vitbase.py ├── flowers_vittiny.py ├── food101_convnextbase.py ├── food101_convnexttiny.py ├── food101_resnet18.py ├── food101_resnet50.py ├── food101_vitbase.py ├── food101_vittiny.py ├── in1k_convnextbase.py ├── in1k_convnexttiny.py ├── in1k_resnet18.py ├── in1k_resnet50.py ├── in1k_vitbase.py ├── in1k_vittiny.py ├── pets_convnextbase.py ├── pets_convnexttiny.py ├── pets_resnet18.py ├── pets_resnet50.py ├── pets_vitbase.py ├── pets_vittiny.py ├── stl10_convnextbase.py ├── stl10_convnexttiny.py ├── stl10_resnet18.py ├── stl10_resnet50.py ├── stl10_vitbase.py └── stl10_vittiny.py ├── read_cache.py ├── 
run_all.sh └── tools ├── draw_diffusion_process.ipynb ├── draw_diffusion_process.pdf ├── draw_vary_numebr_ckpt.ipynb ├── draw_vary_numebr_ckpt.pdf ├── get_and_test_ensemble.py ├── save_diffusion_process.py ├── test_diffusion_process.py ├── test_gpu_memory_ae.py ├── test_gpu_memory_diff.py └── test_number_parameters.py /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | /.vscode 3 | /rubbish 4 | **/checkpoint*/ 5 | **/generated*/ 6 | **/process*/ 7 | **/__pycache__/ 8 | **/wandb/ 9 | **/pretrained.pth 10 | **/ensemble.pth 11 | *.cache 12 | *.pth 13 | *.xlsx 14 | *.png 15 | -------------------------------------------------------------------------------- /dataset/ablation/adam_optimizer/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: adam_optimizer 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 50x1 9 | original_acc_mean: 0.7948316666666667 10 | original_acc_max: 0.7965 11 | original_acc_min: 0.7927 12 | original_acc_std: 0.0007079763806480825 13 | original_acc_median: 0.7948500000000001 14 | generated_acc_mean: 0.794389552238806 15 | generated_acc_max: 0.7966 16 | generated_acc_min: 0.7889 17 | generated_acc_std: 0.001107073727138738 18 | generated_acc_median: 0.7945 19 | noise=0.0000_acc_mean: 0.794824 20 | 21 | origin-origin: 0.9366734854252348 22 | generated-generated: 0.9208876191707496 23 | origin-generated: 0.9253368187898923 24 | origin-generated(max): 0.9427621098934876 25 | 26 | noise_intensity=0 27 | noised-noised: 0.9358043198593178 28 | origin-noised: 0.9366206473684193 29 | origin-noised(max): 0.9999803921521512 30 | 31 | ==> start drawing.. 
32 | plot saved to plot_adam_optimizer.png 33 | -------------------------------------------------------------------------------- /dataset/ablation/adam_optimizer/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/adam_optimizer/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/ablation/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "dataset_root": "~/.cache/p-diff/datasets", 3 | "imagenet_root": "~/data/imagenet" 4 | } -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0010/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0010/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0010/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if 
os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0010/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0100/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | 
-------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0100/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_0100/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- 
/dataset/ablation/diffusionstep_0100/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_2000/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_2000/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_2000/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = 
sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/diffusionstep_2000/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/layers_bn1013/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return 
self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/layers_bn1013/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/layers_bn1013/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") 
-------------------------------------------------------------------------------- /dataset/ablation/layers_bn1415/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/layers_bn1415/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/ablation/layers_bn1415/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/layers_cv0000/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/layers_cv0000/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/layers_cv0000/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/layers_fc0000/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/layers_fc0000/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except 
IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/layers_fc0000/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a00/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_0000a00 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | 
original_acc_median: 0.7671 14 | generated_acc_mean: 0.765376119402985 15 | generated_acc_max: 0.7715 16 | generated_acc_min: 0.7564 17 | generated_acc_std: 0.0026795757564396596 18 | generated_acc_median: 0.7655 19 | noise=0.0100_acc_mean: 0.7666999999999999 20 | noise=0.0300_acc_mean: 0.76594 21 | noise=0.0500_acc_mean: 0.76544 22 | noise=0.1000_acc_mean: 0.76142 23 | 24 | origin-origin: 0.783744070950229 25 | generated-generated: 0.7601224892852749 26 | origin-generated: 0.7685989813940279 27 | origin-generated(max): 0.8008409242714543 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7804692673625698 31 | origin-noised: 0.7851900311258094 32 | origin-noised(max): 0.9829881999135296 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.770021096601627 36 | origin-noised: 0.7808839175829547 37 | origin-noised(max): 0.9494507777383816 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7603790618878715 41 | origin-noised: 0.775143834655307 42 | origin-noised(max): 0.9247160219513486 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7198123632444169 46 | origin-noised: 0.7485398828039533 47 | origin-noised(max): 0.8592503773146264 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_noise_0000a00.png 51 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a00/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a00/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a00/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == 
"__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a00/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a01/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_0000a01 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.772479104477612 15 | generated_acc_max: 0.7764 16 | generated_acc_min: 0.7691 17 | generated_acc_std: 0.001269018970815019 18 | generated_acc_median: 0.7725 19 | noise=0.0100_acc_mean: 0.7666999999999999 20 | noise=0.0300_acc_mean: 0.76594 21 | noise=0.0500_acc_mean: 0.76544 22 | noise=0.1000_acc_mean: 0.76142 23 | 24 | origin-origin: 
0.783744070950229 25 | generated-generated: 0.8800426684536831 26 | origin-generated: 0.8189711963834987 27 | origin-generated(max): 0.8478979386750336 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7804692673625698 31 | origin-noised: 0.7851900311258094 32 | origin-noised(max): 0.9829881999135296 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.770021096601627 36 | origin-noised: 0.7808839175829547 37 | origin-noised(max): 0.9494507777383816 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7603790618878715 41 | origin-noised: 0.775143834655307 42 | origin-noised(max): 0.9247160219513486 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7198123632444169 46 | origin-noised: 0.7485398828039533 47 | origin-noised(max): 0.8592503773146264 48 | 49 | ==> start drawing.. 50 | plot saved to plot_noise_0000a01.png 51 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a01/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a01/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, 
dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a01/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0000a01/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_0001a00/log.txt: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_0001a00 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7654019900497512 15 | generated_acc_max: 0.7721 16 | generated_acc_min: 0.7567 17 | generated_acc_std: 0.0026832252108683063 18 | generated_acc_median: 0.7654 19 | noise=0.0100_acc_mean: 0.7666999999999999 20 | noise=0.0300_acc_mean: 0.76594 21 | noise=0.0500_acc_mean: 0.76544 22 | noise=0.1000_acc_mean: 0.76142 23 | 24 | origin-origin: 0.783744070950229 25 | generated-generated: 0.7602788612530936 26 | origin-generated: 0.768686092449501 27 | origin-generated(max): 0.8008151805666096 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7804692673625698 31 | origin-noised: 0.7851900311258094 32 | origin-noised(max): 0.9829881999135296 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.770021096601627 36 | origin-noised: 0.7808839175829547 37 | origin-noised(max): 0.9494507777383816 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7603790618878715 41 | origin-noised: 0.775143834655307 42 | origin-noised(max): 0.9247160219513486 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7198123632444169 46 | origin-noised: 0.7485398828039533 47 | origin-noised(max): 0.8592503773146264 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_noise_0001a00.png 51 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0001a00/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0001a00/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_0001a00/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == 
"__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_0001a00/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_x001/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_x001 5 | 6 | num_checkpoint: 300 7 | num_generated: 200 8 | num_noised: 300x3 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.765402 15 | generated_acc_max: 0.7713 16 | generated_acc_min: 0.7559 17 | generated_acc_std: 0.0026990546493170517 18 | generated_acc_median: 0.76555 19 | noise=0.0010_acc_mean: 0.7672580000000001 20 | noise=0.0500_acc_mean: 0.765967 21 | noise=0.1500_acc_mean: 0.7549953333333334 22 | 23 | origin-origin: 0.783744070950229 24 | generated-generated: 
0.7601345963798548 25 | origin-generated: 0.7686759391444887 26 | origin-generated(max): 0.800752043702124 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7837630736203222 30 | origin-noised: 0.7844637686452585 31 | origin-noised(max): 0.9968632155789423 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.7647465713162575 35 | origin-noised: 0.7741849666545614 36 | origin-noised(max): 0.9228475875434022 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6748632877281713 40 | origin-noised: 0.7155138935177828 41 | origin-noised(max): 0.7914981357023568 42 | 43 | ==> start drawing.. 44 | plot saved to plot_noise_x001.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x001/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x001/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_x001/test.py: -------------------------------------------------------------------------------- 
1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x001/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_x01/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_x01 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | 
num_noised: 300x3 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7655935323383084 15 | generated_acc_max: 0.7716 16 | generated_acc_min: 0.7554 17 | generated_acc_std: 0.0026473506817266956 18 | generated_acc_median: 0.7657 19 | noise=0.0010_acc_mean: 0.7672583333333333 20 | noise=0.0500_acc_mean: 0.7659653333333334 21 | noise=0.1500_acc_mean: 0.7548929999999999 22 | 23 | origin-origin: 0.783744070950229 24 | generated-generated: 0.7619779563831668 25 | origin-generated: 0.7697420856825437 26 | origin-generated(max): 0.8017596237964788 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7837735298211486 30 | origin-noised: 0.7844687620288421 31 | origin-noised(max): 0.9967755670030416 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.765488650808208 35 | origin-noised: 0.774528071506807 36 | origin-noised(max): 0.9228897948343431 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6748511686024631 40 | origin-noised: 0.7155596601813675 41 | origin-noised(max): 0.7921127087283428 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_noise_x01.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x01/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x01/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_x01/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | 
config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x01/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_x10/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_x10 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7730666666666667 15 | generated_acc_max: 0.7757 16 | generated_acc_min: 0.7682 17 | generated_acc_std: 0.0011701653297030885 18 | generated_acc_median: 0.7733 19 | noise=0.0010_acc_mean: 0.7672583333333333 20 | noise=0.0500_acc_mean: 0.7659653333333334 21 | noise=0.1500_acc_mean: 0.7548929999999999 22 | 23 | origin-origin: 0.783744070950229 24 | generated-generated: 
0.9165000285488855 25 | origin-generated: 0.826044086487165 26 | origin-generated(max): 0.8533879560218601 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7837735298211486 30 | origin-noised: 0.7844687620288421 31 | origin-noised(max): 0.9967755670030416 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.765488650808208 35 | origin-noised: 0.774528071506807 36 | origin-noised(max): 0.9228897948343431 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6748511686024631 40 | origin-noised: 0.7155596601813675 41 | origin-noised(max): 0.7921127087283428 42 | 43 | ==> start drawing.. 44 | plot saved to plot_noise_x10.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x10/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x10/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_x10/test.py: -------------------------------------------------------------------------------- 1 | 
import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x10/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/noise_x100/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: noise_x100 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | 
num_noised: 300x3 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7684004975124379 15 | generated_acc_max: 0.771 16 | generated_acc_min: 0.7631 17 | generated_acc_std: 0.0013419187833613835 18 | generated_acc_median: 0.7685 19 | noise=0.0010_acc_mean: 0.7672583333333333 20 | noise=0.0500_acc_mean: 0.7659653333333334 21 | noise=0.1500_acc_mean: 0.7548929999999999 22 | 23 | origin-origin: 0.783744070950229 24 | generated-generated: 0.9197119192410966 25 | origin-generated: 0.7929798755131225 26 | origin-generated(max): 0.8238750345927515 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7837735298211486 30 | origin-noised: 0.7844687620288421 31 | origin-noised(max): 0.9967755670030416 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.765488650808208 35 | origin-noised: 0.774528071506807 36 | origin-noised(max): 0.9228897948343431 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6748511686024631 40 | origin-noised: 0.7155596601813675 41 | origin-noised(max): 0.7921127087283428 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_noise_x100.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x100/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x100/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = "./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) -------------------------------------------------------------------------------- /dataset/ablation/noise_x100/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | 
config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/noise_x100/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_001/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_001/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | 
import os
import shutil

CKPT_DIR = "../../main/cifar100_resnet18/checkpoint"
DST = "./checkpoint"

# Single-checkpoint ablation: take the alphabetically-first checkpoint and
# store it under both names the pipeline expects (origin.pth / repeat.pth).
# The original iterated the same one-element list twice; one loop copying to
# both destinations is equivalent. The unused `random` import is dropped.
src = sorted(os.path.join(CKPT_DIR, name) for name in os.listdir(CKPT_DIR))[:1]
os.makedirs(DST, exist_ok=True)
for path in src:
    shutil.copy(path, os.path.join(DST, "origin.pth"))
    shutil.copy(path, os.path.join(DST, "repeat.pth"))
import timm
import torch.nn as nn


class resnet18(nn.Module):
    """Thin wrapper around timm's ImageNet-pretrained ResNet-18 backbone."""

    def __init__(self, num_classes=100):
        super().__init__()
        backbone = timm.create_model('resnet18', pretrained=True, num_classes=num_classes)
        self.model = backbone

    def forward(self, x):
        """Forward the batch through the wrapped backbone unchanged."""
        return self.model(x)


def create_model(num_classes=100):
    """Factory used by the train/finetune scripts of this ablation."""
    return resnet18(num_classes)
import os

# Checkpoint shared by every ablation directory.
_PRETRAINED = "../../main/cifar100_resnet18/pretrained.pth"

if __name__ == "__main__":
    if not os.path.exists(_PRETRAINED):
        # Intentionally indexes the env var directly: a missing
        # CUDA_VISIBLE_DEVICES should fail loudly, as in the original.
        gpus = os.environ['CUDA_VISIBLE_DEVICES']
        command = f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={gpus} python train.py"
        os.system(command)
    os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./")
import os
import shutil

CKPT_DIR = "../../main/cifar100_resnet18/checkpoint"
DST = "./checkpoint"

# Copy the first 50 checkpoints (sorted by filename — chronological for
# zero-padded names) into the local checkpoint pool for this ablation.
# Drops the unused `random` import from the original.
src = sorted(os.path.join(CKPT_DIR, name) for name in os.listdir(CKPT_DIR))
os.makedirs(DST, exist_ok=True)
for path in src[:50]:
    shutil.copy(path, DST)
-------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_200/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: numberckpt_200 5 | 6 | num_checkpoint: 200 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7674340000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.001642602812611743 13 | original_acc_median: 0.7674 14 | generated_acc_mean: 0.7724412935323383 15 | generated_acc_max: 0.7751 16 | generated_acc_min: 0.769 17 | generated_acc_std: 0.0012590908503996722 18 | generated_acc_median: 0.7725 19 | noise=0.0100_acc_mean: 0.76796 20 | noise=0.0300_acc_mean: 0.76662 21 | noise=0.0500_acc_mean: 0.76502 22 | noise=0.1000_acc_mean: 0.76034 23 | 24 | origin-origin: 0.7888303794211262 25 | generated-generated: 0.8789544183450024 26 | origin-generated: 0.8214708136496364 27 | origin-generated(max): 0.8491354161825764 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7711305098036936 31 | origin-noised: 0.7849320530130974 32 | origin-noised(max): 0.98215411870317 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.767304511284648 36 | origin-noised: 0.7846187284478015 37 | origin-noised(max): 0.9515122945274813 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7607430376055746 41 | origin-noised: 0.7774503052519959 42 | origin-noised(max): 0.9257728171303308 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7099475504865784 46 | origin-noised: 
import os
import shutil
import random


# Build the full, sorted list of source checkpoints, then keep the first 200.
checkpoint_dir = "../../main/cifar100_resnet18/checkpoint"
selected = sorted(
    os.path.join(checkpoint_dir, name)
    for name in os.listdir(checkpoint_dir)
)[:200]

destination = "./checkpoint"
os.makedirs(destination, exist_ok=True)

for checkpoint in selected:
    shutil.copy(checkpoint, destination)
elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_200/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_300/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: numberckpt_300 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7724044776119403 15 | generated_acc_max: 0.7758 16 | generated_acc_min: 0.769 17 | generated_acc_std: 0.0012339032638031837 18 | generated_acc_median: 0.7724 19 | noise=0.0100_acc_mean: 0.7666999999999999 20 | noise=0.0300_acc_mean: 0.76594 21 | 
noise=0.0500_acc_mean: 0.76544 22 | noise=0.1000_acc_mean: 0.76142 23 | 24 | origin-origin: 0.783744070950229 25 | generated-generated: 0.8798565924168715 26 | origin-generated: 0.8189223322705257 27 | origin-generated(max): 0.8478697695884533 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7804692673625698 31 | origin-noised: 0.7851900311258094 32 | origin-noised(max): 0.9829881999135296 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.770021096601627 36 | origin-noised: 0.7808839175829547 37 | origin-noised(max): 0.9494507777383816 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7603790618878715 41 | origin-noised: 0.775143834655307 42 | origin-noised(max): 0.9247160219513486 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7198123632444169 46 | origin-noised: 0.7485398828039533 47 | origin-noised(max): 0.8592503773146264 48 | 49 | ==> start drawing.. 50 | plot saved to plot_numberckpt_300.png 51 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_300/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_300/reselect.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | 6 | src = [os.path.join("../../main/cifar100_resnet18/checkpoint", i) 7 | for i in os.listdir("../../main/cifar100_resnet18/checkpoint")] 8 | os.makedirs("./checkpoint", exist_ok=True) 9 | dst = 
"./checkpoint" 10 | 11 | 12 | src.sort() 13 | src = src[:300] 14 | for i in src: 15 | shutil.copy(i, dst) 16 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_300/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_300/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") 
-------------------------------------------------------------------------------- /dataset/ablation/numberckpt_400/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: numberckpt_400 5 | 6 | num_checkpoint: 400 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.76772575 10 | original_acc_max: 0.7757 11 | original_acc_min: 0.7635 12 | original_acc_std: 0.002046646510147761 13 | original_acc_median: 0.7676 14 | generated_acc_mean: 0.7731597014925373 15 | generated_acc_max: 0.7769 16 | generated_acc_min: 0.7694 17 | generated_acc_std: 0.0012994518210658716 18 | generated_acc_median: 0.7732 19 | noise=0.0100_acc_mean: 0.76956 20 | noise=0.0300_acc_mean: 0.7680999999999999 21 | noise=0.0500_acc_mean: 0.7664800000000002 22 | noise=0.1000_acc_mean: 0.7639 23 | 24 | origin-origin: 0.7838728373055499 25 | generated-generated: 0.8803063305293586 26 | origin-generated: 0.8193568909787945 27 | origin-generated(max): 0.8569226831998842 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7690863396477337 31 | origin-noised: 0.7832481038467187 32 | origin-noised(max): 0.9839995490144325 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7858795214991756 36 | origin-noised: 0.7831965011808195 37 | origin-noised(max): 0.9472119929994939 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7570424415533129 41 | origin-noised: 0.7736690597561782 42 | origin-noised(max): 0.9226506767419828 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.726624521898206 46 | origin-noised: 0.7501923767215506 47 | origin-noised(max): 0.846094997130797 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_numberckpt_400.png 51 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_400/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/numberckpt_400/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/ablation/numberckpt_400/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | if __name__ == "__main__": 5 | if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"): 6 | os.system(f"cd ../../main/cifar100_resnet18 && CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} python train.py") 7 | os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./") -------------------------------------------------------------------------------- /dataset/ablation/save_epoch1/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: save_epoch1 5 | 6 | num_checkpoint: 301 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.5490870431893687 10 | original_acc_max: 0.5604 11 | original_acc_min: 0.5377 12 | original_acc_std: 0.004548417806920054 13 | original_acc_median: 0.5488 14 | generated_acc_mean: 0.555276119402985 15 | generated_acc_max: 0.5612 16 | generated_acc_min: 0.5469 17 | generated_acc_std: 0.002326128221563865 18 | generated_acc_median: 0.5552 19 | noise=0.0010_acc_mean: 0.5490843333333334 20 | noise=0.0500_acc_mean: 0.547574 21 | noise=0.1500_acc_mean: 0.5359983333333334 22 | 23 | origin-origin: 0.8371961440227118 24 | generated-generated: 0.8993010136724104 25 | origin-generated: 0.8593237153774894 26 | origin-generated(max): 0.8861725456461587 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.8371230017932498 30 | origin-noised: 0.8376929792330506 31 | origin-noised(max): 0.9976185553764767 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.8255543450475331 35 | origin-noised: 0.8313731914403734 36 | origin-noised(max): 0.9498151556455787 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.7656982305688101 40 | origin-noised: 0.7934674535669812 41 | origin-noised(max): 
0.8588880160803154 42 | 43 | ==> start drawing.. 44 | plot saved to plot_save_epoch1.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/save_epoch1/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/save_epoch1/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = 
{loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/ablation/save_epoch3/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: save_epoch3 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.6943186666666666 10 | original_acc_max: 0.7025 11 | original_acc_min: 0.6872 12 | original_acc_std: 0.0035952818464698396 13 | original_acc_median: 0.6939 14 | generated_acc_mean: 0.7009577114427862 15 | generated_acc_max: 0.7043 16 | generated_acc_min: 0.6965 17 | generated_acc_std: 0.0016899329482174847 18 | generated_acc_median: 0.701 19 | noise=0.0010_acc_mean: 0.6943256666666667 20 | noise=0.0500_acc_mean: 0.6928706666666666 21 | noise=0.1500_acc_mean: 0.6815203333333334 22 | 23 | origin-origin: 0.7936632419492747 24 | generated-generated: 0.8932966121473511 25 | origin-generated: 0.827296815964664 26 | origin-generated(max): 0.8577475618506314 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7935713767761008 30 | origin-noised: 0.7942954955951033 31 | origin-noised(max): 0.9970258879088415 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.7799622962365208 35 | origin-noised: 0.7868667343284447 36 | origin-noised(max): 0.9355101357318978 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.7078743212577527 40 | origin-noised: 0.7413196502757021 41 | origin-noised(max): 0.8234059204269378 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_save_epoch3.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/save_epoch3/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/save_epoch3/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/ablation/save_lr00003/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: save_lr00003 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.7749373333333334 10 | original_acc_max: 0.7756 11 | original_acc_min: 0.7744 12 | original_acc_std: 0.00023707851488952018 13 | original_acc_median: 0.7749 14 | generated_acc_mean: 0.7740054726368158 15 | generated_acc_max: 0.7764 16 | generated_acc_min: 0.771 17 | generated_acc_std: 0.0012168059896365735 18 | generated_acc_median: 0.7741 19 | noise=0.0010_acc_mean: 0.7749853333333333 20 | noise=0.0500_acc_mean: 0.773789 21 | noise=0.1500_acc_mean: 0.7605559999999999 22 | 23 | origin-origin: 0.9928175303116078 24 | generated-generated: 0.8786253572591595 25 | origin-generated: 0.9121490682709359 26 | origin-generated(max): 0.9150136569891711 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.9924285634583813 30 | origin-noised: 0.9926032033998621 31 | origin-noised(max): 0.9981359219476985 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.8777641141739734 35 | origin-noised: 0.9112288342582473 36 | origin-noised(max): 0.9141682188874912 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6975741699920849 40 | origin-noised: 0.7633615816536693 41 | origin-noised(max): 0.7658394113511617 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_save_lr00003.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/save_lr00003/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/save_lr00003/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/ablation/save_lr03000/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: save_lr03000 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.6842626666666667 10 | original_acc_max: 0.7033 11 | original_acc_min: 0.5956 12 | original_acc_std: 0.0150538834266186 13 | original_acc_median: 0.6872 14 | generated_acc_mean: 0.7118323383084578 15 | generated_acc_max: 0.7263 16 | generated_acc_min: 0.6932 17 | generated_acc_std: 0.005478796669002578 18 | generated_acc_median: 0.7124 19 | noise=0.0010_acc_mean: 0.6842403333333333 20 | noise=0.0500_acc_mean: 0.683202 21 | noise=0.1500_acc_mean: 0.6763809999999999 22 | 23 | origin-origin: 0.6477034144222347 24 | generated-generated: 0.7789657034174426 25 | origin-generated: 0.6867510684959912 26 | origin-generated(max): 0.7435577882228703 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.6477078003567298 30 | origin-noised: 0.6488725888975981 31 | origin-noised(max): 0.9977937367646519 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.6449774485477004 35 | origin-noised: 0.6473129033612476 36 | origin-noised(max): 0.9510731581593874 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6249510087735021 40 | origin-noised: 0.6358451545753178 41 | origin-noised(max): 0.8622535746368339 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_save_lr03000.png 45 | -------------------------------------------------------------------------------- /dataset/ablation/save_lr03000/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/ablation/save_lr03000/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
import timm
import torch.nn as nn


class resnet18(nn.Module):
    """Thin wrapper around timm's pretrained ResNet-18 backbone."""

    def __init__(self, num_classes=100):
        super().__init__()
        # timm swaps in a fresh classifier head sized to `num_classes`.
        self.model = timm.create_model(
            'resnet18', pretrained=True, num_classes=num_classes
        )

    def forward(self, x):
        # Delegate entirely to the wrapped backbone.
        return self.model(x)


def create_model(num_classes=100):
    """Factory used by the train/finetune scripts in this experiment."""
    return resnet18(num_classes)
-------------------------------------------------------------------------------- /dataset/ablation/vae_sample/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: vae_sample 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7640875621890548 15 | generated_acc_max: 0.7706 16 | generated_acc_min: 0.7566 17 | generated_acc_std: 0.002771118732967275 18 | generated_acc_median: 0.7642 19 | noise=0.0100_acc_mean: 0.7666999999999999 20 | noise=0.0300_acc_mean: 0.76594 21 | noise=0.0500_acc_mean: 0.76544 22 | noise=0.1000_acc_mean: 0.76142 23 | 24 | origin-origin: 0.783744070950229 25 | generated-generated: 0.7430466423489621 26 | origin-generated: 0.7589358555563068 27 | origin-generated(max): 0.7900726451407695 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7804692673625698 31 | origin-noised: 0.7851900311258094 32 | origin-noised(max): 0.9829881999135296 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.770021096601627 36 | origin-noised: 0.7808839175829547 37 | origin-noised(max): 0.9494507777383816 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7603790618878715 41 | origin-noised: 0.775143834655307 42 | origin-noised(max): 0.9247160219513486 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7198123632444169 46 | origin-noised: 0.7485398828039533 47 | origin-noised(max): 0.8592503773146264 48 | 49 | ==> start drawing.. 
# Copy the first 300 checkpoints (sorted by filename) from the shared
# cifar100_resnet18 run into this experiment's local checkpoint folder.
import os
import shutil
import random


src_dir = "../../main/cifar100_resnet18/checkpoint"
src = sorted(
    os.path.join(src_dir, name) for name in os.listdir(src_dir)
)[:300]

dst = "./checkpoint"
os.makedirs(dst, exist_ok=True)

for path in src:
    shutil.copy(path, dst)
import os


if __name__ == "__main__":
    # Pretrain the shared cifar100_resnet18 backbone once, then copy it here.
    if not os.path.exists("../../main/cifar100_resnet18/pretrained.pth"):
        # Fix: os.environ['CUDA_VISIBLE_DEVICES'] raises KeyError when the
        # variable is unset.  Only forward it when present — exporting an
        # empty value would hide every GPU from the child process, which is
        # different from leaving it unset.
        if "CUDA_VISIBLE_DEVICES" in os.environ:
            prefix = f"CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']} "
        else:
            prefix = ""
        os.system(f"cd ../../main/cifar100_resnet18 && {prefix}python train.py")
    os.system("cp ../../main/cifar100_resnet18/pretrained.pth ./")
import torch
from torch import nn


class Model(nn.Module):
    """Small three-stage ConvNet: (3, 64, 64) RGB input -> 10 logits.

    Each stage is conv -> LayerNorm over (C, H, W) -> LeakyReLU -> 2x2 max-pool.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 8, 7, 2, 3),
            nn.LayerNorm(normalized_shape=[8, 32, 32]),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
        )  # (3, 64, 64) -> (8, 16, 16)
        self.layer2 = nn.Sequential(
            nn.Conv2d(8, 8, 3, 1, 1),
            nn.LayerNorm(normalized_shape=[8, 16, 16]),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
        )  # (8, 16, 16) -> (8, 8, 8)
        self.layer3 = nn.Sequential(
            nn.Conv2d(8, 4, 3, 1, 1),
            nn.LayerNorm(normalized_shape=[4, 8, 8]),
            nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),
        )  # (8, 8, 8) -> (4, 4, 4)
        self.head = nn.Sequential(
            nn.Flatten(start_dim=1),
            nn.Linear(64, 16),
            nn.LeakyReLU(),
            nn.Linear(16, 10),
        )  # (4, 4, 4) -> (10,)

    def forward(self, x):
        # Run the stages in order; attribute names match the saved checkpoints.
        for stage in (self.layer1, self.layer2, self.layer3, self.head):
            x = stage(x)
        return x


if __name__ == "__main__":
    model = Model()
    print(model)
    num_param = sum(p.numel() for p in model.parameters())
    print("num_param:", num_param)
0.7147616666666667 10 | original_acc_max: 0.7192 11 | original_acc_min: 0.7074 12 | original_acc_std: 0.0025258194489885632 13 | original_acc_median: 0.7152 14 | generated_acc_mean: 0.716728855721393 15 | generated_acc_max: 0.7202 16 | generated_acc_min: 0.6796 17 | generated_acc_std: 0.0036111440537859524 18 | generated_acc_median: 0.7174 19 | noise=0.0100_acc_mean: 0.7135400000000001 20 | noise=0.0300_acc_mean: 0.71374 21 | noise=0.0500_acc_mean: 0.70812 22 | noise=0.1000_acc_mean: 0.6926 23 | 24 | origin-origin: 0.8522752339355212 25 | generated-generated: 0.9377152177146878 26 | origin-generated: 0.8821502131285284 27 | origin-generated(max): 0.9143786452837976 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8308341135825703 31 | origin-noised: 0.8453666126698615 32 | origin-noised(max): 0.964105043453716 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8090362196630647 36 | origin-noised: 0.8307271294185856 37 | origin-noised(max): 0.9122504597876091 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7512679622039198 41 | origin-noised: 0.7959350791180901 42 | origin-noised(max): 0.8493855696424237 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6302405623125653 46 | origin-noised: 0.6995768856289607 47 | origin-noised(max): 0.7304503072697628 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_cifar10_convnext.png 51 | -------------------------------------------------------------------------------- /dataset/full/cifar10_convnext/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/cifar10_mlp/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar10_mlp 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.4093503333333333 10 | original_acc_max: 0.4188 11 | original_acc_min: 0.4015 12 | original_acc_std: 0.003615121006119836 13 | original_acc_median: 0.4083 14 | generated_acc_mean: 0.41667810945273626 15 | generated_acc_max: 0.42 16 | 
generated_acc_min: 0.4133 17 | generated_acc_std: 0.0013359247483648662 18 | generated_acc_median: 0.4166 19 | noise=0.0100_acc_mean: 0.40904000000000007 20 | noise=0.0300_acc_mean: 0.40708 21 | noise=0.0500_acc_mean: 0.40884 22 | noise=0.1000_acc_mean: 0.40086000000000005 23 | 24 | origin-origin: 0.8302427457883415 25 | generated-generated: 0.9353079406579787 26 | origin-generated: 0.8642499561797228 27 | origin-generated(max): 0.8966991870338501 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8375481111756236 31 | origin-noised: 0.831643972644288 32 | origin-noised(max): 0.9827884012861816 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.819862879585895 36 | origin-noised: 0.8241227415993428 37 | origin-noised(max): 0.9531572835495068 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8132420486761129 41 | origin-noised: 0.8231783229873113 42 | origin-noised(max): 0.9270106672084175 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7487304118980767 46 | origin-noised: 0.7824643883572001 47 | origin-noised(max): 0.8508655676899091 48 | 49 | ==> start drawing.. 
import torch
from torch import nn
from torch.nn import functional as F


class Model(nn.Module):
    """MLP over a downsampled, channel-averaged view of a 64x64 image.

    The forward pass reduces (N, C, 64, 64) to 32*32 = 1024 features
    before three linear stages producing 10 logits.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(1024, 64),
            nn.LayerNorm(normalized_shape=[64]),
            nn.LeakyReLU(),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(64, 16),
            nn.LayerNorm(normalized_shape=[16]),
            nn.LeakyReLU(),
        )
        self.head = nn.Sequential(
            nn.Linear(16, 10),
        )

    def forward(self, x):
        # 2x2 average pool halves H and W, then average across channels
        # (dim -3) and flatten to the 1024-feature vector layer1 expects.
        x = F.avg_pool2d(x, 2, 2)
        x = x.mean(dim=-3, keepdim=True)
        x = torch.flatten(x, start_dim=1)
        return self.head(self.layer2(self.layer1(x)))


if __name__ == "__main__":
    model = Model()
    print(model)
    num_param = sum(p.numel() for p in model.parameters())
    print("num_param:", num_param)
get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/cifar10_resnet/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar10_resnet 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.5796760000000001 10 | original_acc_max: 0.586 11 | original_acc_min: 0.572 12 | original_acc_std: 0.002720886620203054 13 | original_acc_median: 0.58 14 | generated_acc_mean: 0.5827139303482587 15 | generated_acc_max: 0.5864 16 | generated_acc_min: 0.5689 17 | generated_acc_std: 0.0019329314151943359 18 | generated_acc_median: 0.583 19 | noise=0.0100_acc_mean: 0.5800400000000001 20 | noise=0.0300_acc_mean: 0.57436 21 | noise=0.0500_acc_mean: 0.56458 22 | noise=0.1000_acc_mean: 0.5376 23 | 24 | origin-origin: 0.8667350875515929 25 | generated-generated: 0.9261737483095076 26 | origin-generated: 0.8893198331428271 27 | origin-generated(max): 0.9231824554134457 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8561211816978311 31 | origin-noised: 0.8645386640227765 32 | origin-noised(max): 0.9624041612102119 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7982610613310406 36 | origin-noised: 0.8285380000703261 37 | origin-noised(max): 0.8947780826032969 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7533781849109135 41 | origin-noised: 0.7946265881152242 42 | 
origin-noised(max): 0.8314451211095829 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6391642646308215 46 | origin-noised: 0.6987809060084866 47 | origin-noised(max): 0.7303123861020593 48 | 49 | ==> start drawing.. 50 | plot saved to plot_cifar10_resnet.png 51 | -------------------------------------------------------------------------------- /dataset/full/cifar10_resnet/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/cifar10_vit/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar10_vit 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7194413333333334 
10 | original_acc_max: 0.7262 11 | original_acc_min: 0.7077 12 | original_acc_std: 0.0034161906009797295 13 | original_acc_median: 0.7202 14 | generated_acc_mean: 0.72856815920398 15 | generated_acc_max: 0.7315 16 | generated_acc_min: 0.72 17 | generated_acc_std: 0.0012078026158001578 18 | generated_acc_median: 0.7287 19 | noise=0.0100_acc_mean: 0.7203600000000001 20 | noise=0.0300_acc_mean: 0.71916 21 | noise=0.0500_acc_mean: 0.7143200000000001 22 | noise=0.1000_acc_mean: 0.6967599999999999 23 | 24 | origin-origin: 0.7630983479554685 25 | generated-generated: 0.9455185767516194 26 | origin-generated: 0.8155491626810608 27 | origin-generated(max): 0.8546121898488279 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7831340449136228 31 | origin-noised: 0.7712386381301437 32 | origin-noised(max): 0.9711696802503804 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7516418532352175 36 | origin-noised: 0.7601881755572939 37 | origin-noised(max): 0.9113388835527247 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7316475031978111 41 | origin-noised: 0.7423066472174237 42 | origin-noised(max): 0.8582531331959455 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6274590089383555 46 | origin-noised: 0.6761983896558454 47 | origin-noised(max): 0.7296708213050869 48 | 49 | ==> start drawing.. 
import torch
from torch import nn


class CNNMedium(nn.Module):
    """Three-conv CNN for 32x32 single-channel inputs.

    The input is reshaped to (N, 1, 32, 32) and tiled to 3 channels so the
    first convolution can use a 3-channel kernel.
    """

    def __init__(self):
        super().__init__()
        self.module = nn.Sequential(
            nn.Conv2d(3, 16, 3),
            nn.MaxPool2d(2, 2),
            nn.LeakyReLU(),
            nn.Conv2d(16, 32, 3),
            nn.MaxPool2d(2, 2),
            nn.LeakyReLU(),
            nn.Conv2d(32, 15, 3),
            nn.MaxPool2d(2, 2),
            nn.LeakyReLU(),
            nn.Flatten(start_dim=1),
        )  # yields a 15*2*2 = 60-dim feature vector
        self.head = nn.Sequential(
            nn.Linear(60, 20),
            nn.LeakyReLU(),
            nn.Linear(20, 10),
        )

    def forward(self, x):
        # Normalize any input layout to (N, 1, 32, 32), then tile to 3 channels.
        x = x.view(x.size(0), 1, 32, 32).repeat(1, 3, 1, 1)
        return self.head(self.module(x))


def Model():
    """Factory kept for interface parity with the other dataset models."""
    return CNNMedium()


if __name__ == "__main__":
    model = Model()
    print(model)
    num_param = sum(p.numel() for p in model.parameters())
    print("num_param:", num_param)
strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/stl10_convnet/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_convnet 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.4634108333333333 10 | original_acc_max: 0.467125 11 | original_acc_min: 0.459 12 | original_acc_std: 0.0016319298817317163 13 | original_acc_median: 0.4635 14 | generated_acc_mean: 0.46368656716417916 15 | generated_acc_max: 0.466875 16 | generated_acc_min: 0.459125 17 | generated_acc_std: 0.0014694293259617608 18 | generated_acc_median: 0.463625 19 | noise=0.0100_acc_mean: 0.46282500000000004 20 | noise=0.0300_acc_mean: 0.460625 21 | noise=0.0500_acc_mean: 0.45404999999999995 22 | noise=0.1000_acc_mean: 0.442075 23 | 24 | origin-origin: 0.953621662133354 25 | generated-generated: 0.964064325314463 26 | origin-generated: 0.9575352516690476 27 | origin-generated(max): 0.9752069518585044 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.9402709432433308 31 | origin-noised: 0.9477930927374637 32 | origin-noised(max): 0.9799068955009119 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.893302919411985 36 | origin-noised: 0.9161372713761248 37 | origin-noised(max): 0.9353573169706211 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8296207611032376 41 | origin-noised: 0.8688809321918058 42 | origin-noised(max): 0.8900590173489238 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7589276038634771 46 | origin-noised: 0.7983401869011156 47 | origin-noised(max): 0.8135998681030465 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_stl10_convnet.png 51 | -------------------------------------------------------------------------------- /dataset/full/stl10_convnet/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class Model(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | self.layer1 = nn.Sequential( 9 | nn.Conv2d(3, 8, 7, 2, 3), 10 | nn.LayerNorm(normalized_shape=[8, 32, 32]), 11 | nn.LeakyReLU(), 12 | nn.MaxPool2d(2, 2), 13 | ) # in (3, 64, 64) out (8, 16, 16) 14 | # layer2 15 | self.layer2 = nn.Sequential( 16 | nn.Conv2d(8, 8, 3, 1, 1), 17 | nn.LayerNorm(normalized_shape=[8, 16, 16]), 18 | nn.LeakyReLU(), 19 | nn.MaxPool2d(2, 2), 20 | ) # in (8, 16, 16) out (8, 8, 8) 21 | self.layer3 = nn.Sequential( 22 | nn.Conv2d(8, 4, 3, 1, 1), 23 | nn.LayerNorm(normalized_shape=[4, 8, 8]), 24 | nn.LeakyReLU(), 25 | nn.MaxPool2d(2, 2), 26 | ) # in (8, 8, 8) out (4, 4, 4) 27 | self.head = nn.Sequential( 28 | nn.Flatten(start_dim=1), 29 | nn.Linear(64, 16), 30 | nn.LeakyReLU(), 31 | nn.Linear(16, 10), 32 | ) # in (4, 4, 4) out (4, 10) 33 | 34 | def forward(self, x): 35 | x = self.layer1(x) 36 | x = self.layer2(x) 37 | x = self.layer3(x) 38 | x = self.head(x) 39 | return x 40 | 41 | 42 | if __name__ == "__main__": 43 | model = Model() 44 | print(model) 45 | num_param = 0 46 | for param in model.parameters(): 47 | num_param += param.numel() 48 | print("num_param:", num_param) 49 | -------------------------------------------------------------------------------- /dataset/full/stl10_convnet/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | 
if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/stl10_convnext/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_convnext 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.46937458333333326 10 | original_acc_max: 0.47275 11 | original_acc_min: 0.466 12 | original_acc_std: 0.0013382396059957067 13 | original_acc_median: 0.4695 14 | generated_acc_mean: 0.4694657960199004 15 | generated_acc_max: 0.473625 16 | generated_acc_min: 0.461125 17 | generated_acc_std: 0.001940060101439661 18 | generated_acc_median: 0.469625 19 | noise=0.0100_acc_mean: 0.46935000000000004 20 | noise=0.0300_acc_mean: 0.46875 21 | noise=0.0500_acc_mean: 0.46769999999999995 22 | noise=0.1000_acc_mean: 0.461275 23 | 24 | origin-origin: 0.955506810362726 25 | generated-generated: 0.9431487499275931 26 | origin-generated: 0.9253463536900253 27 | origin-generated(max): 0.946694375059236 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.9601935581254958 31 | origin-noised: 
0.9565478941059519 32 | origin-noised(max): 0.989555385699831 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.9430592314091829 36 | origin-noised: 0.9490522757558261 37 | origin-noised(max): 0.9693725336649676 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.9115482781575756 41 | origin-noised: 0.9299273617746925 42 | origin-noised(max): 0.9510694766428631 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8375196788332939 46 | origin-noised: 0.8808368316690494 47 | origin-noised(max): 0.9026483298310041 48 | 49 | ==> start drawing.. 50 | plot saved to plot_stl10_convnext.png 51 | -------------------------------------------------------------------------------- /dataset/full/stl10_convnext/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/full/stl10_mlp/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_mlp 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.33604416666666664 10 | original_acc_max: 0.34925 11 | original_acc_min: 0.3155 12 | original_acc_std: 0.00526892692796382 13 | original_acc_median: 0.336625 14 | generated_acc_mean: 0.3431865671641791 15 | generated_acc_max: 0.35425 16 | generated_acc_min: 0.325 17 | generated_acc_std: 0.004432223834164484 18 | generated_acc_median: 0.3435 19 | noise=0.0100_acc_mean: 0.334375 20 | noise=0.0300_acc_mean: 0.333775 21 | noise=0.0500_acc_mean: 0.334725 22 | noise=0.1000_acc_mean: 0.33602499999999996 23 | 24 | origin-origin: 0.8189517142459777 25 | generated-generated: 0.8604906648862729 26 | origin-generated: 0.8337474381218589 27 | origin-generated(max): 0.8729022145540754 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8185477876455426 31 | origin-noised: 0.8228625035748491 32 | origin-noised(max): 0.9879703463721515 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8230223965274984 36 | origin-noised: 0.8176165406920203 37 | origin-noised(max): 0.9705165022976108 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8173981621141433 41 | origin-noised: 0.8184130184303264 42 | origin-noised(max): 0.9489569419263798 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.813159071171091 46 | origin-noised: 0.8125254336361222 47 | origin-noised(max): 0.9053870538793252 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_stl10_mlp.png 51 | -------------------------------------------------------------------------------- /dataset/full/stl10_mlp/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | 6 | class Model(nn.Module): 7 | def __init__(self): 8 | super().__init__() 9 | self.layer1 = nn.Sequential( 10 | nn.Linear(1024, 64), 11 | nn.LayerNorm(normalized_shape=[64]), 12 | nn.LeakyReLU(), 13 | ) 14 | # layer2 15 | self.layer2 = nn.Sequential( 16 | nn.Linear(64, 16), 17 | nn.LayerNorm(normalized_shape=[16]), 18 | nn.LeakyReLU(), 19 | ) 20 | # head 21 | self.head = nn.Sequential( 22 | nn.Linear(16, 10), 23 | ) 24 | 25 | def forward(self, x): 26 | x = F.avg_pool2d(x, 2, 2) 27 | x = x.mean(dim=-3, keepdim=True) 28 | x = torch.flatten(x, start_dim=1) 29 | x = self.layer1(x) 30 | x = self.layer2(x) 31 | x = self.head(x) 32 | return x 33 | 34 | 35 | if __name__ == "__main__": 36 | model = Model() 37 | print(model) 38 | num_param = 0 39 | for param in model.parameters(): 40 | num_param += param.numel() 41 | print("num_param:", num_param) 42 | -------------------------------------------------------------------------------- /dataset/full/stl10_mlp/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 
29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/stl10_resnet/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_resnet 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.50776875 10 | original_acc_max: 0.521875 11 | original_acc_min: 0.492375 12 | original_acc_std: 0.005775902932659102 13 | original_acc_median: 0.508 14 | generated_acc_mean: 0.5138781094527364 15 | generated_acc_max: 0.523375 16 | generated_acc_min: 0.445625 17 | generated_acc_std: 0.007942466840371958 18 | generated_acc_median: 0.515375 19 | noise=0.0100_acc_mean: 0.5065000000000001 20 | noise=0.0300_acc_mean: 0.506025 21 | noise=0.0500_acc_mean: 0.50465 22 | noise=0.1000_acc_mean: 0.48795 23 | 24 | origin-origin: 0.7898999959263586 25 | generated-generated: 0.846251281414528 26 | origin-generated: 0.8109375005794324 27 | origin-generated(max): 0.8681861098523042 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7713175743634808 31 | origin-noised: 0.7800571584549967 32 | origin-noised(max): 0.968893357887822 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7759628219840881 36 | origin-noised: 0.783952215783543 37 | origin-noised(max): 0.9194544413933391 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7372201827305938 41 | origin-noised: 0.7641399067355099 42 | 
origin-noised(max): 0.8774007488805937 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.676716976987483 46 | origin-noised: 0.7240556134202758 47 | origin-noised(max): 0.806333320689759 48 | 49 | ==> start drawing.. 50 | plot saved to plot_stl10_resnet.png 51 | -------------------------------------------------------------------------------- /dataset/full/stl10_resnet/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/stl10_vit/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_vit 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.5216895833333333 10 | 
original_acc_max: 0.5265 11 | original_acc_min: 0.515125 12 | original_acc_std: 0.002022798657731962 13 | original_acc_median: 0.52175 14 | generated_acc_mean: 0.5240621890547263 15 | generated_acc_max: 0.528125 16 | generated_acc_min: 0.492875 17 | generated_acc_std: 0.0029340312264681434 18 | generated_acc_median: 0.524375 19 | noise=0.0100_acc_mean: 0.5206250000000001 20 | noise=0.0300_acc_mean: 0.5227250000000001 21 | noise=0.0500_acc_mean: 0.5197749999999999 22 | noise=0.1000_acc_mean: 0.516175 23 | 24 | origin-origin: 0.9140567935769377 25 | generated-generated: 0.9381340166585554 26 | origin-generated: 0.9126040181128068 27 | origin-generated(max): 0.932995113966968 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.9056913702656759 31 | origin-noised: 0.9113626636483584 32 | origin-noised(max): 0.9888055737500053 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.9055068980907433 36 | origin-noised: 0.9116464954528183 37 | origin-noised(max): 0.9698299238035858 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8911945277656732 41 | origin-noised: 0.9056117373168868 42 | origin-noised(max): 0.9524227750543908 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8305363245621995 46 | origin-noised: 0.8640297660366567 47 | origin-noised(max): 0.9025097687540203 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_stl10_vit.png 51 | -------------------------------------------------------------------------------- /dataset/full/stl10_vit/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/full/svhn_cnnmedium/model.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | class CNNMedium(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | self.module = nn.Sequential( 8 | nn.Conv2d(3, 16, 3), 9 | nn.MaxPool2d(2, 2), 10 | nn.LeakyReLU(), 11 | nn.Conv2d(16, 32, 3), 12 | nn.MaxPool2d(2, 2), 13 | nn.LeakyReLU(), 14 | nn.Conv2d(32, 15, 3), 15 | nn.MaxPool2d(2, 2), 16 | nn.LeakyReLU(), 17 | nn.Flatten(start_dim=1), 18 | ) 19 | self.head = 
nn.Sequential( 20 | nn.Linear(60, 20), 21 | nn.LeakyReLU(), 22 | nn.Linear(20, 10), 23 | ) 24 | 25 | def forward(self, x): 26 | x = self.module(x) 27 | x = self.head(x) 28 | return x 29 | 30 | 31 | def Model(): 32 | model = CNNMedium() 33 | return model 34 | 35 | 36 | if __name__ == "__main__": 37 | model = Model() 38 | print(model) 39 | num_param = 0 40 | for param in model.parameters(): 41 | num_param += param.numel() 42 | print("num_param:", num_param) 43 | -------------------------------------------------------------------------------- /dataset/full/svhn_cnnmedium/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar100_convnextbase/log.txt: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar100_convnextbase 5 | 6 | num_checkpoint: 301 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.9295192691029901 10 | original_acc_max: 0.9323 11 | original_acc_min: 0.9267 12 | original_acc_std: 0.0009741650129388167 13 | original_acc_median: 0.9296 14 | generated_acc_mean: 0.9315517412935322 15 | generated_acc_max: 0.9329 16 | generated_acc_min: 0.9297 17 | generated_acc_std: 0.0005608357771147083 18 | generated_acc_median: 0.9316 19 | noise=0.0100_acc_mean: 0.92942 20 | noise=0.0300_acc_mean: 0.9291600000000001 21 | noise=0.0500_acc_mean: 0.92964 22 | noise=0.1000_acc_mean: 0.93012 23 | 24 | origin-origin: 0.7503282873314403 25 | generated-generated: 0.9286410015738924 26 | origin-generated: 0.79652781923223 27 | origin-generated(max): 0.8386033427130863 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7595811166996388 31 | origin-noised: 0.7570556061435638 32 | origin-noised(max): 0.99294883884466 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7420760641846613 36 | origin-noised: 0.7517426711231399 37 | origin-noised(max): 0.9882459374287997 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7431530576481219 41 | origin-noised: 0.7521745938907514 42 | origin-noised(max): 0.9778588006420946 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7206284596919204 46 | origin-noised: 0.7415116565157877 47 | origin-noised(max): 0.9629801737411728 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_cifar100_convnextbase.png 51 | -------------------------------------------------------------------------------- /dataset/main/cifar100_convnextbase/performance.cache: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NUS-HPC-AI-Lab/Neural-Network-Diffusion/0ecca89fe6cdf4faaecad7d563c5e70d7c2a3d36/dataset/main/cifar100_convnextbase/performance.cache -------------------------------------------------------------------------------- /dataset/main/cifar100_convnextbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar100_convnexttiny/performance.cache: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NUS-HPC-AI-Lab/Neural-Network-Diffusion/0ecca89fe6cdf4faaecad7d563c5e70d7c2a3d36/dataset/main/cifar100_convnexttiny/performance.cache -------------------------------------------------------------------------------- /dataset/main/cifar100_convnexttiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar100_resnet18/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar100_resnet18 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 300x3 9 | original_acc_mean: 0.7672460000000001 10 | original_acc_max: 0.7715 11 | original_acc_min: 0.7633 12 | 
original_acc_std: 0.0016230888248439583 13 | original_acc_median: 0.7671 14 | generated_acc_mean: 0.7724422885572139 15 | generated_acc_max: 0.7755 16 | generated_acc_min: 0.7681 17 | generated_acc_std: 0.001281189099537825 18 | generated_acc_median: 0.7725 19 | noise=0.0010_acc_mean: 0.7672583333333333 20 | noise=0.0500_acc_mean: 0.7659653333333334 21 | noise=0.1500_acc_mean: 0.7548929999999999 22 | 23 | origin-origin: 0.783744070950229 24 | generated-generated: 0.8799036684646423 25 | origin-generated: 0.8189136019035009 26 | origin-generated(max): 0.8479096892975388 27 | 28 | noise_intensity=0.001 29 | noised-noised: 0.7837735298211486 30 | origin-noised: 0.7844687620288421 31 | origin-noised(max): 0.9967755670030416 32 | 33 | noise_intensity=0.05 34 | noised-noised: 0.765488650808208 35 | origin-noised: 0.774528071506807 36 | origin-noised(max): 0.9228897948343431 37 | 38 | noise_intensity=0.15 39 | noised-noised: 0.6748511686024631 40 | origin-noised: 0.7155596601813675 41 | origin-noised(max): 0.7921127087283428 42 | 43 | ==> start drawing.. 
44 | plot saved to plot_cifar100_resnet18.png 45 | -------------------------------------------------------------------------------- /dataset/main/cifar100_resnet18/model.py: -------------------------------------------------------------------------------- 1 | import timm 2 | import torch.nn as nn 3 | 4 | 5 | class resnet18(nn.Module): 6 | def __init__(self, num_classes=100): 7 | super(resnet18, self).__init__() 8 | self.model = timm.create_model('resnet18', pretrained=True, num_classes=num_classes) 9 | 10 | def forward(self, x): 11 | return self.model(x) 12 | 13 | 14 | def create_model(num_classes=100): 15 | return resnet18(num_classes) 16 | -------------------------------------------------------------------------------- /dataset/main/cifar100_resnet18/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | 
-------------------------------------------------------------------------------- /dataset/main/cifar100_resnet50/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: cifar100_resnet50 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.7759426666666666 10 | original_acc_max: 0.7846 11 | original_acc_min: 0.7696 12 | original_acc_std: 0.002998885274379274 13 | original_acc_median: 0.7755 14 | generated_acc_mean: 0.783723383084577 15 | generated_acc_max: 0.7861 16 | generated_acc_min: 0.7816 17 | generated_acc_std: 0.0007043577911987102 18 | generated_acc_median: 0.7838 19 | noise=0.0100_acc_mean: 0.7784000000000001 20 | noise=0.0300_acc_mean: 0.7765599999999999 21 | noise=0.0500_acc_mean: 0.7724 22 | noise=0.1000_acc_mean: 0.774 23 | 24 | origin-origin: 0.7635843535691847 25 | generated-generated: 0.9441066106014256 26 | origin-generated: 0.8139090120850814 27 | origin-generated(max): 0.8549495505352493 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7739454451001873 31 | origin-noised: 0.7658242020556592 32 | origin-noised(max): 0.9924562652609643 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7826319529459715 36 | origin-noised: 0.7714949374397474 37 | origin-noised(max): 0.9762808603196002 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8509432260368079 41 | origin-noised: 0.7672504597891342 42 | origin-noised(max): 0.9636814955871523 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7345961935231802 46 | origin-noised: 0.7565532714582889 47 | origin-noised(max): 0.924659499263649 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_cifar100_resnet50.png 51 | -------------------------------------------------------------------------------- /dataset/main/cifar100_resnet50/performance.cache: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NUS-HPC-AI-Lab/Neural-Network-Diffusion/0ecca89fe6cdf4faaecad7d563c5e70d7c2a3d36/dataset/main/cifar100_resnet50/performance.cache -------------------------------------------------------------------------------- /dataset/main/cifar100_resnet50/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar100_vitbase/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | 
=============================================== 4 | Summary: cifar100_vitbase 5 | 6 | num_checkpoint: 301 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.9122920265780733 10 | original_acc_max: 0.9145 11 | original_acc_min: 0.9101 12 | original_acc_std: 0.0009212530535430408 13 | original_acc_median: 0.9123 14 | generated_acc_mean: 0.9132517412935324 15 | generated_acc_max: 0.9146 16 | generated_acc_min: 0.9117 17 | generated_acc_std: 0.0004786064480091511 18 | generated_acc_median: 0.9132 19 | noise=0.0100_acc_mean: 0.91152 20 | noise=0.0300_acc_mean: 0.9125 21 | noise=0.0500_acc_mean: 0.91272 22 | noise=0.1000_acc_mean: 0.9121 23 | 24 | origin-origin: 0.8474740275065582 25 | generated-generated: 0.9473055150924526 26 | origin-generated: 0.880337810559087 27 | origin-generated(max): 0.9115880062667687 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8414037855347681 31 | origin-noised: 0.8445987609680663 32 | origin-noised(max): 0.9954912059898042 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.875809693623719 36 | origin-noised: 0.8472413884517076 37 | origin-noised(max): 0.9931538109667315 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8532214560833944 41 | origin-noised: 0.8489509934369072 42 | origin-noised(max): 0.9894991365572642 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8477616838555957 46 | origin-noised: 0.8490866325706529 47 | origin-noised(max): 0.9846291916397079 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_cifar100_vitbase.png 51 | -------------------------------------------------------------------------------- /dataset/main/cifar100_vitbase/performance.cache: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NUS-HPC-AI-Lab/Neural-Network-Diffusion/0ecca89fe6cdf4faaecad7d563c5e70d7c2a3d36/dataset/main/cifar100_vitbase/performance.cache -------------------------------------------------------------------------------- /dataset/main/cifar100_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar100_vittiny/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | 
=============================================== 4 | Summary: cifar100_vittiny 5 | 6 | num_checkpoint: 301 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.863936877076412 10 | original_acc_max: 0.8657 11 | original_acc_min: 0.8609 12 | original_acc_std: 0.0010166073871333866 13 | original_acc_median: 0.8641 14 | generated_acc_mean: 0.8650034825870647 15 | generated_acc_max: 0.8665 16 | generated_acc_min: 0.8627 17 | generated_acc_std: 0.0006488702857326276 18 | generated_acc_median: 0.865 19 | noise=0.0100_acc_mean: 0.8629 20 | noise=0.0300_acc_mean: 0.8628 21 | noise=0.0500_acc_mean: 0.8635400000000001 22 | noise=0.1000_acc_mean: 0.86312 23 | 24 | origin-origin: 0.8657764196362602 25 | generated-generated: 0.933175390830182 26 | origin-generated: 0.885349599175035 27 | origin-generated(max): 0.9137560769935503 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8653860017187217 31 | origin-noised: 0.8659624943559093 32 | origin-noised(max): 0.9941801215656134 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8785371234621113 36 | origin-noised: 0.8670882425899135 37 | origin-noised(max): 0.992606893327927 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8629172775375779 41 | origin-noised: 0.8661905892499554 42 | origin-noised(max): 0.9848824982326368 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8617980098737688 46 | origin-noised: 0.864353813229194 47 | origin-noised(max): 0.9740182262311521 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_cifar100_vittiny.png 51 | -------------------------------------------------------------------------------- /dataset/main/cifar100_vittiny/performance.cache: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NUS-HPC-AI-Lab/Neural-Network-Diffusion/0ecca89fe6cdf4faaecad7d563c5e70d7c2a3d36/dataset/main/cifar100_vittiny/performance.cache -------------------------------------------------------------------------------- /dataset/main/cifar100_vittiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_convnextbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 
| import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_convnexttiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else 
"cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_resnet18/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_resnet50/test.py: -------------------------------------------------------------------------------- 1 | import 
os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/cifar10_vittiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/config.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "dataset_root": "~/.cache/p-diff/datasets", 3 | "imagenet_root": "~/data/imagenet" 4 | } -------------------------------------------------------------------------------- /dataset/main/flowers_convnextbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/flowers_convnexttiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 
15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/flowers_resnet18/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: 
v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/flowers_resnet50/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/flowers_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item 
= "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/flowers_vittiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | 
model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_convnextbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_convnexttiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert 
__name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_resnet18/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, 
weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_resnet50/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 
13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/food101_vittiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, 
map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/in1k_convnextbase/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: in1k_convnextbase 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.8485940666666666 10 | original_acc_max: 0.85134 11 | original_acc_min: 0.84572 12 | original_acc_std: 0.0008558368977530435 13 | original_acc_median: 0.8486499999999999 14 | generated_acc_mean: 0.8516204975124378 15 | generated_acc_max: 0.85296 16 | generated_acc_min: 0.8487 17 | generated_acc_std: 0.0006967856200027145 18 | generated_acc_median: 0.85172 19 | noise=0.0100_acc_mean: 0.8484879999999999 20 | noise=0.0300_acc_mean: 0.84848 21 | noise=0.0500_acc_mean: 0.848624 22 | noise=0.1000_acc_mean: 0.847724 23 | 24 | origin-origin: 0.7798870323224116 25 | generated-generated: 0.8918929378640985 26 | origin-generated: 0.8198739878983871 27 | origin-generated(max): 0.8405154272236401 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7764443787364576 31 | origin-noised: 0.7797807284933803 32 | origin-noised(max): 0.9932380011687858 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7916555545303389 36 | origin-noised: 0.7827113643072868 37 | origin-noised(max): 0.9864964494817239 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7816142462771796 41 | origin-noised: 0.7822520379939576 42 | origin-noised(max): 0.9806140383297111 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.766153632547524 46 | origin-noised: 0.7740362959069211 47 | origin-noised(max): 0.9553474738982353 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_in1k_convnextbase.png 51 | -------------------------------------------------------------------------------- /dataset/main/in1k_convnextbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/in1k_convnexttiny/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: in1k_convnexttiny 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.8320767333333333 10 | original_acc_max: 0.8344 11 | original_acc_min: 0.82946 12 | original_acc_std: 0.0008454024656274038 13 | original_acc_median: 0.83204 14 | generated_acc_mean: 0.8359853731343284 15 | 
generated_acc_max: 0.83734 16 | generated_acc_min: 0.83346 17 | generated_acc_std: 0.0007667927552001838 18 | generated_acc_median: 0.8361 19 | noise=0.0100_acc_mean: 0.831672 20 | noise=0.0300_acc_mean: 0.832272 21 | noise=0.0500_acc_mean: 0.8324720000000001 22 | noise=0.1000_acc_mean: 0.8308 23 | 24 | origin-origin: 0.7804360385614553 25 | generated-generated: 0.8851416144081644 26 | origin-generated: 0.818866964529673 27 | origin-generated(max): 0.8388200275759264 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7799547008548245 31 | origin-noised: 0.7813623801288297 32 | origin-noised(max): 0.9913591432635563 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7773342935191394 36 | origin-noised: 0.7804247547530103 37 | origin-noised(max): 0.9815539357424259 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7756421628800638 41 | origin-noised: 0.779719055811603 42 | origin-noised(max): 0.9699890089388035 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7637140939931873 46 | origin-noised: 0.7736470597293881 47 | origin-noised(max): 0.9431080238114593 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
0.70396 16 | generated_acc_min: 0.68542 17 | generated_acc_std: 0.002896085384051893 18 | generated_acc_median: 0.69606 19 | noise=0.0100_acc_mean: 0.687612 20 | noise=0.0300_acc_mean: 0.68544 21 | noise=0.0500_acc_mean: 0.6857520000000001 22 | noise=0.1000_acc_mean: 0.680012 23 | 24 | origin-origin: 0.7709396233280508 25 | generated-generated: 0.8665289419633503 26 | origin-generated: 0.8035780953009304 27 | origin-generated(max): 0.8224302489683856 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.7777621709786688 31 | origin-noised: 0.7755401622834146 32 | origin-noised(max): 0.9879849836697991 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7744726742613544 36 | origin-noised: 0.7720579637566043 37 | origin-noised(max): 0.9658109773104904 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.768657243688849 41 | origin-noised: 0.7697135114772664 42 | origin-noised(max): 0.9437273272861482 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.741847657951686 46 | origin-noised: 0.7545180711550666 47 | origin-noised(max): 0.8913848085237811 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
| generated_acc_min: 0.77908 17 | generated_acc_std: 0.0008162237877878229 18 | generated_acc_median: 0.78208 19 | noise=0.0100_acc_mean: 0.7756160000000001 20 | noise=0.0300_acc_mean: 0.7735399999999999 21 | noise=0.0500_acc_mean: 0.7720520000000001 22 | noise=0.1000_acc_mean: 0.770364 23 | 24 | origin-origin: 0.8053253239180657 25 | generated-generated: 0.9274865554477919 26 | origin-generated: 0.844108648884506 27 | origin-generated(max): 0.861550736555514 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.805563810266254 31 | origin-noised: 0.8084969646275837 32 | origin-noised(max): 0.986892854693114 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8191410182553437 36 | origin-noised: 0.8025072210755458 37 | origin-noised(max): 0.9569006830500859 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.782033467924775 41 | origin-noised: 0.7966290866325827 42 | origin-noised(max): 0.9312826973788317 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.756297501098947 46 | origin-noised: 0.7797891724938711 47 | origin-noised(max): 0.8702588104954454 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
generated_acc_min: 0.84662 17 | generated_acc_std: 0.0006486203044150892 18 | generated_acc_median: 0.84952 19 | noise=0.0100_acc_mean: 0.848208 20 | noise=0.0300_acc_mean: 0.8482520000000001 21 | noise=0.0500_acc_mean: 0.8474280000000001 22 | noise=0.1000_acc_mean: 0.847472 23 | 24 | origin-origin: 0.8271305849339674 25 | generated-generated: 0.9012162478805515 26 | origin-generated: 0.8544788382587705 27 | origin-generated(max): 0.8758206979101743 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8259620420967957 31 | origin-noised: 0.8266762222239319 32 | origin-noised(max): 0.9929347493950011 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8302800377617677 36 | origin-noised: 0.8297174118861432 37 | origin-noised(max): 0.9904812403259475 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8225386297818229 41 | origin-noised: 0.8258465932572167 42 | origin-noised(max): 0.9825183831653103 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8262681192353067 46 | origin-noised: 0.8269878213817348 47 | origin-noised(max): 0.9639417184860501 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
0.75376 16 | generated_acc_min: 0.75164 17 | generated_acc_std: 0.00042979279137594185 18 | generated_acc_median: 0.7527 19 | noise=0.0100_acc_mean: 0.749512 20 | noise=0.0300_acc_mean: 0.749204 21 | noise=0.0500_acc_mean: 0.74902 22 | noise=0.1000_acc_mean: 0.749348 23 | 24 | origin-origin: 0.8557354150061532 25 | generated-generated: 0.9449517814926156 26 | origin-generated: 0.8876014333679159 27 | origin-generated(max): 0.9038900063300858 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.8546371769729916 31 | origin-noised: 0.8566624589726385 32 | origin-noised(max): 0.9934288102268679 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8534447151676121 36 | origin-noised: 0.8564797452282233 37 | origin-noised(max): 0.9872619229796179 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.8509455819159978 41 | origin-noised: 0.8548923055406743 42 | origin-noised(max): 0.9822671091397387 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.8483481131495278 46 | origin-noised: 0.8538368419003671 47 | origin-noised(max): 0.967171838867858 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
= os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/pets_convnexttiny/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/pets_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
generated_acc_max: 0.996625 16 | generated_acc_min: 0.99575 17 | generated_acc_std: 0.0001801057728217799 18 | generated_acc_median: 0.99625 19 | noise=0.0100_acc_mean: 0.9959250000000001 20 | noise=0.0300_acc_mean: 0.996025 21 | noise=0.0500_acc_mean: 0.995275 22 | noise=0.1000_acc_mean: 0.995425 23 | 24 | origin-origin: 0.5621754847655185 25 | generated-generated: 0.8737619087714468 26 | origin-generated: 0.633886641161869 27 | origin-generated(max): 0.8575576526396431 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.6194341265813808 31 | origin-noised: 0.6009439106834752 32 | origin-noised(max): 1.0 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.6272664918445787 36 | origin-noised: 0.5740566468099576 37 | origin-noised(max): 0.99375 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.5159999739881833 41 | origin-noised: 0.5512837774912045 42 | origin-noised(max): 0.9912757973733584 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6183976635588024 46 | origin-noised: 0.5639186980380372 47 | origin-noised(max): 0.9796515679442509 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
generated_acc_max: 0.991625 16 | generated_acc_min: 0.990625 17 | generated_acc_std: 0.00018690268337863736 18 | generated_acc_median: 0.99125 19 | noise=0.0100_acc_mean: 0.9905250000000001 20 | noise=0.0300_acc_mean: 0.9899999999999999 21 | noise=0.0500_acc_mean: 0.9903000000000001 22 | noise=0.1000_acc_mean: 0.9901249999999999 23 | 24 | origin-origin: 0.6696033470969721 25 | generated-generated: 0.9324355226199335 26 | origin-generated: 0.7489091957588933 27 | origin-generated(max): 0.9340619031473409 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.6348862552918927 31 | origin-noised: 0.6557293774913151 32 | origin-noised(max): 0.9921109093909886 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.6393386082838152 36 | origin-noised: 0.6525607074112196 37 | origin-noised(max): 0.9785820845820845 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.6672502661453198 41 | origin-noised: 0.6682563744614735 42 | origin-noised(max): 0.9849303963125102 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6829055216101707 46 | origin-noised: 0.673086110477329 47 | origin-noised(max): 0.9627336529775554 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
0.952375 16 | generated_acc_min: 0.9435 17 | generated_acc_std: 0.0017484534301889952 18 | generated_acc_median: 0.949125 19 | noise=0.0100_acc_mean: 0.9424250000000001 20 | noise=0.0300_acc_mean: 0.9395749999999999 21 | noise=0.0500_acc_mean: 0.94025 22 | noise=0.1000_acc_mean: 0.9400999999999999 23 | 24 | origin-origin: 0.6148950738563927 25 | generated-generated: 0.8446581403696337 26 | origin-generated: 0.6829694026998347 27 | origin-generated(max): 0.8484169848511975 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.6143879756711199 31 | origin-noised: 0.6201640110942424 32 | origin-noised(max): 0.9868972642845268 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.6057291407499736 36 | origin-noised: 0.6049193403931872 37 | origin-noised(max): 0.9619986337878188 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.5808710723064738 41 | origin-noised: 0.5876499206935195 42 | origin-noised(max): 0.930468903743435 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.5492072259458809 46 | origin-noised: 0.5760497999822226 47 | origin-noised(max): 0.8554125165735249 48 | 49 | ==> start drawing.. 
"""Checkpoint evaluation script: prints test loss/accuracy for each .pth file."""
import os
import sys
import torch

# Pull in get_config, get_data_loaders, test, and the shared `model` object.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path comes from argv; the default only makes sense when run as a script.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"

# A directory yields all contained .pth files; a plain file yields itself.
if os.path.isdir(test_item):
    test_items = [
        os.path.join(test_item, name)
        for name in os.listdir(test_item)
        if name.endswith('.pth')
    ]
elif os.path.isfile(test_item):
    test_items = [test_item]
else:
    test_items = []


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        state = torch.load(item, map_location=device, weights_only=True)
        weights = {k: v.to(torch.float32).to(device) for k, v in state.items()}
        model.load_state_dict(weights, strict=False)
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
16 | generated_acc_min: 0.966 17 | generated_acc_std: 0.0005667637639088252 18 | generated_acc_median: 0.96825 19 | noise=0.0100_acc_mean: 0.9654 20 | noise=0.0300_acc_mean: 0.9639749999999999 21 | noise=0.0500_acc_mean: 0.966325 22 | noise=0.1000_acc_mean: 0.9654999999999999 23 | 24 | origin-origin: 0.6622239012104947 25 | generated-generated: 0.9456147231764751 26 | origin-generated: 0.7371950488177419 27 | origin-generated(max): 0.9014617142620602 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.6457870738237415 31 | origin-noised: 0.6519382232425237 32 | origin-noised(max): 0.9929980989435565 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.6368151766014826 36 | origin-noised: 0.6449700933824374 37 | origin-noised(max): 0.9743831378175241 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.6775078931595697 41 | origin-noised: 0.6748154437322408 42 | origin-noised(max): 0.960045197835035 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.6772155965189051 46 | origin-noised: 0.6605656872555316 47 | origin-noised(max): 0.9125132093965437 48 | 49 | ==> start drawing.. 
import os
import sys
import torch

# Reuse the fine-tuning setup (model, get_config, get_data_loaders, test)
# whether this file is run as a script or imported as a package module.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path: first CLI argument, or the default ./checkpoint directory
# when run directly with no argument.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"


# Expand the path into the list of .pth checkpoints to evaluate.  A directory
# is scanned (sorted, so the evaluation order is deterministic — os.listdir
# returns entries in arbitrary order); a single file is used as-is.  Warn on a
# nonexistent path instead of silently doing nothing.
test_items = []
if os.path.isdir(test_item):
    for item in sorted(os.listdir(test_item)):
        if item.endswith('.pth'):
            test_items.append(os.path.join(test_item, item))
elif os.path.isfile(test_item):
    test_items.append(test_item)
else:
    print(f"Warning: {test_item!r} is neither a file nor a directory; "
          "nothing to test.", file=sys.stderr)


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        # weights_only=True avoids unpickling arbitrary objects from the
        # checkpoint; cast to float32 in case weights were saved in a
        # reduced precision (e.g. bf16).
        state = torch.load(item, map_location=device, weights_only=True)
        model.load_state_dict(
            {k: v.to(torch.float32).to(device) for k, v in state.items()},
            strict=False,
        )
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
0.991375 16 | generated_acc_min: 0.990125 17 | generated_acc_std: 0.000226089503739981 18 | generated_acc_median: 0.990875 19 | noise=0.0100_acc_mean: 0.9907999999999999 20 | noise=0.0300_acc_mean: 0.990475 21 | noise=0.0500_acc_mean: 0.9902749999999999 22 | noise=0.1000_acc_mean: 0.990375 23 | 24 | origin-origin: 0.6830605686233363 25 | generated-generated: 0.9092959811714547 26 | origin-generated: 0.7501410861205543 27 | origin-generated(max): 0.8907312485903788 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.6838254649604986 31 | origin-noised: 0.6899788579755605 32 | origin-noised(max): 0.9973333333333333 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.7251950833581458 36 | origin-noised: 0.6749432054866453 37 | origin-noised(max): 0.9950617283950617 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.6873026842807057 41 | origin-noised: 0.6904456352426965 42 | origin-noised(max): 0.9847977207977209 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7714355855560455 46 | origin-noised: 0.6807874033090525 47 | origin-noised(max): 0.9844804318488529 48 | 49 | ==> start drawing.. 
50 | plot saved to plot_stl10_vitbase.png 51 | -------------------------------------------------------------------------------- /dataset/main/stl10_vitbase/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import torch 4 | 5 | try: 6 | from finetune import * 7 | except ImportError: 8 | from .finetune import * 9 | 10 | try: 11 | test_item = sys.argv[1] 12 | except IndexError: 13 | assert __name__ == "__main__" 14 | test_item = "./checkpoint" 15 | 16 | 17 | test_items = [] 18 | if os.path.isdir(test_item): 19 | for item in os.listdir(test_item): 20 | if item.endswith('.pth'): 21 | item = os.path.join(test_item, item) 22 | test_items.append(item) 23 | elif os.path.isfile(test_item): 24 | test_items.append(test_item) 25 | 26 | 27 | if __name__ == "__main__": 28 | config = get_config() 29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 30 | _, test_loader = get_data_loaders(config) 31 | 32 | for item in test_items: 33 | print(f"Test model: {os.path.basename(item)}") 34 | state = torch.load(item, map_location=device, weights_only=True) 35 | model.load_state_dict({k: v.to(torch.float32).to(device) for k, v in state.items()}, strict=False) 36 | loss, acc, all_targets, all_predicts = test(model, test_loader, device) 37 | print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n") 38 | -------------------------------------------------------------------------------- /dataset/main/stl10_vittiny/log.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | =============================================== 4 | Summary: stl10_vittiny 5 | 6 | num_checkpoint: 300 7 | num_generated: 201 8 | num_noised: 5x4 9 | original_acc_mean: 0.9754258333333333 10 | original_acc_max: 0.977 11 | original_acc_min: 0.9735 12 | original_acc_std: 0.0006919321779545259 13 | original_acc_median: 0.9755 14 | generated_acc_mean: 0.9755963930348259 15 | generated_acc_max: 0.976625 16 | 
generated_acc_min: 0.974625 17 | generated_acc_std: 0.00043080544690824786 18 | generated_acc_median: 0.975625 19 | noise=0.0100_acc_mean: 0.975375 20 | noise=0.0300_acc_mean: 0.975625 21 | noise=0.0500_acc_mean: 0.9751249999999999 22 | noise=0.1000_acc_mean: 0.9756750000000001 23 | 24 | origin-origin: 0.7652916695254511 25 | generated-generated: 0.9209960051527973 26 | origin-generated: 0.8168831103090519 27 | origin-generated(max): 0.921698611696385 28 | 29 | noise_intensity=0.01 30 | noised-noised: 0.793727415529035 31 | origin-noised: 0.780836017129803 32 | origin-noised(max): 0.9969523435454308 33 | 34 | noise_intensity=0.03 35 | noised-noised: 0.8024682320970598 36 | origin-noised: 0.7883941201106837 37 | origin-noised(max): 0.9918103554153882 38 | 39 | noise_intensity=0.05 40 | noised-noised: 0.7678862730072804 41 | origin-noised: 0.7640952880347095 42 | origin-noised(max): 0.9900539493502141 43 | 44 | noise_intensity=0.1 45 | noised-noised: 0.7755813088245898 46 | origin-noised: 0.7610573018296022 47 | origin-noised(max): 0.9774952975571715 48 | 49 | ==> start drawing.. 
import os
import sys
import torch

# Reuse the fine-tuning setup (model, get_config, get_data_loaders, test)
# whether this file is run as a script or imported as a package module.
try:
    from finetune import *
except ImportError:
    from .finetune import *

# Checkpoint path: first CLI argument, or the default ./checkpoint directory
# when run directly with no argument.
try:
    test_item = sys.argv[1]
except IndexError:
    assert __name__ == "__main__"
    test_item = "./checkpoint"


# Expand the path into the list of .pth checkpoints to evaluate.  A directory
# is scanned (sorted, so the evaluation order is deterministic — os.listdir
# returns entries in arbitrary order); a single file is used as-is.  Warn on a
# nonexistent path instead of silently doing nothing.
test_items = []
if os.path.isdir(test_item):
    for item in sorted(os.listdir(test_item)):
        if item.endswith('.pth'):
            test_items.append(os.path.join(test_item, item))
elif os.path.isfile(test_item):
    test_items.append(test_item)
else:
    print(f"Warning: {test_item!r} is neither a file nor a directory; "
          "nothing to test.", file=sys.stderr)


if __name__ == "__main__":
    config = get_config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, test_loader = get_data_loaders(config)

    for item in test_items:
        print(f"Test model: {os.path.basename(item)}")
        # weights_only=True avoids unpickling arbitrary objects from the
        # checkpoint; cast to float32 in case weights were saved in a
        # reduced precision (e.g. bf16).
        state = torch.load(item, map_location=device, weights_only=True)
        model.load_state_dict(
            {k: v.to(torch.float32).to(device) for k, v in state.items()},
            strict=False,
        )
        loss, acc, all_targets, all_predicts = test(model, test_loader, device)
        print(f"Loss = {loss:.4f}, Acc = {acc:.4f}\n")
#!/bin/bash

# Launch ./<cls>/<exec_file>.py through `accelerate` on the requested GPUs.
# Arguments: $1 = class/group dir, $2 = python file (no extension),
#            $3 = comma-separated GPU ids.
cls="$1"
exec_file="$2"
gpu_ids="$3"


# The number of processes equals the number of comma-separated GPU ids.
IFS=',' read -r -a gpus <<< "$gpu_ids"
num_gpus=${#gpus[@]}

# Pick a random high port nobody is listening on: the /dev/tcp connect
# attempt fails exactly when the port is free.
find_open_port() {
    local candidate
    while true; do
        candidate=$(( (RANDOM % 32768) + 32767 ))
        if ! (echo >/dev/tcp/localhost/$candidate) &>/dev/null; then
            echo $candidate
            return
        fi
    done
}
main_process_port=$(find_open_port)
echo "Using main_process_port=$main_process_port"

# Assemble the accelerate invocation; --multi_gpu is only valid with >1 GPU.
command="accelerate launch --main_process_port=$main_process_port --num_processes=$num_gpus --gpu_ids=$gpu_ids"
command+=" --num_machines=1 --mixed_precision=bf16 --dynamo_backend=no"
if [ $num_gpus -ge 2 ]; then
    command+=" --multi_gpu"
fi
command+=" ./$cls/$exec_file.py"

# Run it.
eval $command
import sys

import torch


def count_parameters(state_dict):
    """Return the total number of scalar elements across all tensors in
    *state_dict*, i.e. the parameter count of a checkpoint."""
    return sum(v.numel() for v in state_dict.values())


if __name__ == "__main__":
    # Usage: python test_number_parameters.py <checkpoint.pth>
    # weights_only=True restricts unpickling to plain tensors/containers.
    diction = torch.load(sys.argv[1], map_location="cpu", weights_only=True)
    print(count_parameters(diction))