├── cub
│   ├── show.png
│   ├── Output_RS.txt
│   ├── Readme.md
│   ├── Output678.txt
│   ├── prepare.py
│   ├── prepare_attack.py
│   ├── experiment_67.py
│   ├── experiment.py
│   ├── experiment_randomstart.py
│   ├── Output.txt
│   ├── random_erasing.py
│   ├── random_erasing_plus.py
│   ├── draw_result.py
│   ├── evaluate_gpu.py
│   ├── utils.py
│   ├── demo.py
│   ├── losses.py
│   ├── test.py
│   └── test_query.py
├── method.png
├── Cuisine-retrieval
│   ├── show.png
│   ├── Output_67.txt
│   ├── Output_RS.txt
│   ├── .gitignore
│   ├── experiment_RS.py
│   ├── experiment_67.py
│   ├── experiment.py
│   ├── Output.txt
│   ├── random_erasing.py
│   ├── random_erasing_plus.py
│   ├── prepare.py
│   ├── draw_result.py
│   ├── evaluate_gpu.py
│   ├── utils.py
│   ├── demo.py
│   ├── losses.py
│   ├── test_query.py
│   └── test.py
├── mnist
│   ├── README.md
│   ├── demo.sh
│   ├── model.py
│   ├── show_mnist.py
│   ├── main.py
│   └── attack_mnist.py
├── cifar
│   ├── demo2.sh
│   ├── Readme.md
│   ├── experiment.py
│   ├── Output.txt
│   ├── attack_result.py
│   ├── test.py
│   └── resnet2.py
├── Output_67.txt
├── Output_RS.txt
├── visualize
│   ├── README.md
│   ├── vis_noise.py
│   ├── vis_ranking.py
│   └── statistic_pred.py
├── Output_dense.txt
├── .gitignore
├── market.txt
├── LICENSE
├── experiment_67.py
├── experiment_randomstart.py
├── experiment_PCB.py
├── experiment.py
├── prepare_attack.py
├── experiment_adv.py
├── experiment_dense.py
├── Output_PCB.txt
├── Output_adv.txt
├── Output.txt
├── market.py
├── README.md
├── draw_PCB.py
├── draw_result.py
├── show-tsne-example.py
├── draw_adv.py
├── caffe
│   └── test_res.py
├── evaluate_gpu.py
├── demo.py
├── test_only_query_PCB.py
├── test_only_query.py
├── test_normal.py
└── model.py
/cub/show.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/layumi/U_turn/HEAD/cub/show.png
--------------------------------------------------------------------------------
/method.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/layumi/U_turn/HEAD/method.png
--------------------------------------------------------------------------------
/Cuisine-retrieval/show.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/layumi/U_turn/HEAD/Cuisine-retrieval/show.png
--------------------------------------------------------------------------------
/mnist/README.md:
--------------------------------------------------------------------------------
1 | # pytorch-mnist
2 | Draw the 2-dim feature distribution of MNIST (see show_mnist.py).
3 | 
4 |
--------------------------------------------------------------------------------
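A plausible end-to-end run, inferred from the file names in `mnist/` (the exact flags of `main.py` are not shown in this dump, so treat this as a sketch):

```bash
python main.py            # train the LeNet-style model defined in model.py (assumed entry point)
bash demo.sh              # run attack_mnist.py at rates 0/8/16/32
python show_mnist.py      # plot the 2-dim features of the test set to train.jpg
```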
/mnist/demo.sh:
--------------------------------------------------------------------------------
1 | python attack_mnist.py --rate 0
2 | python attack_mnist.py --rate 8
3 | python attack_mnist.py --rate 16
4 | python attack_mnist.py --rate 32
5 |
6 |
--------------------------------------------------------------------------------
/cifar/demo2.sh:
--------------------------------------------------------------------------------
1 | python attack2.py --rate 0 --method_id 5
2 | python attack2.py --rate 4 --method_id 5
3 | python attack2.py --rate 8 --method_id 5
4 | python attack2.py --rate 16 --method_id 5
5 |
--------------------------------------------------------------------------------
/cifar/Readme.md:
--------------------------------------------------------------------------------
1 | ### Attack CIFAR
2 |
3 | `attack.py` generates the adversarial queries and evaluates the classification result.
4 |
5 | `attack2.py` is for the figure in the paper. It attacks the 2-dim feature and draws the image.
6 |
--------------------------------------------------------------------------------
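For reference, single invocations mirroring `demo2.sh` and the sweep in `/cifar/experiment.py` (only the flags used there are shown; other options are assumptions):

```bash
python attack.py --method_id 1 --rate 8     # generate adversarial queries, report classification accuracy
python attack2.py --method_id 5 --rate 16   # 2-dim feature attack used for the paper figure
```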
/Cuisine-retrieval/Output_67.txt:
--------------------------------------------------------------------------------
1 | # 100
2 | |16 | 6 | 27.93 | 55.47 | 66.02 | 16.07 |
3 | |16 | 7 | 13.87 | 32.23 | 42.58 | 8.47 |
4 | # 100 lr=1e-4
5 | |16 | 6 | 61.91 | 85.94 | 93.95 | 30.99 |
6 | |16 | 7 | 62.70 | 86.33 | 93.16 | 31.01 |
7 |
--------------------------------------------------------------------------------
/Output_67.txt:
--------------------------------------------------------------------------------
1 | # 100
2 | |16 | 6 | 0.163302 | 0.300772 | 0.381235 | 0.114772 |
3 | |16 | 7 | 0.022565 | 0.054335 | 0.083729 | 0.019038 |
4 | # 100 lr=1e-4
5 | |16 | 6 | 0.865796 | 0.944774 | 0.966746 | 0.695038 |
6 | |16 | 7 | 0.868171 | 0.947447 | 0.965558 | 0.694802 |
7 |
--------------------------------------------------------------------------------
/cub/Output_RS.txt:
--------------------------------------------------------------------------------
1 | |16 | 1 | 12.47 | 27.92 | 36.73 | 7.07 |
2 | |16 | 2 | 14.11 | 31.26 | 42.05 | 8.69 |
3 | |16 | 3 | 19.29 | 39.13 | 49.53 | 10.50 |
4 | |16 | 5 | 7.63 | 20.24 | 27.99 | 5.15 |
5 | |16 | 6 | 31.74 | 55.27 | 65.11 | 16.64 |
6 | |16 | 7 | 30.52 | 54.68 | 64.60 | 16.41 |
7 |
--------------------------------------------------------------------------------
/cub/Readme.md:
--------------------------------------------------------------------------------
1 | ### Attack CUBird
2 |
3 | Run the following commands to prepare the dataset (classes 001-100 go to training, 101-200 to testing).
4 | ```bash
5 | mkdir train_all
6 | mkdir test
7 | mv 0* train_all
8 | mv ./100.Brown_Pelican train_all
9 | mv 1* test
10 | mv ./200.Common_Yellowthroat/ test
11 | ```
12 |
13 | Train a model.
14 |
--------------------------------------------------------------------------------
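After the moves above, `/cub/prepare.py` splits one validation image per class out of `train_all`; a minimal run (it assumes the `./CUB_200_2011/images` layout hard-coded in the script):

```bash
cd cub
python prepare.py   # creates CUB_200_2011/images/train and .../val
```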
/Cuisine-retrieval/Output_RS.txt:
--------------------------------------------------------------------------------
1 | |16 | 1 | 25.39 | 51.76 | 64.06 | 13.57 |
2 | |16 | 2 | 17.77 | 42.19 | 56.05 | 11.68 |
3 | |16 | 3 | 10.55 | 25.00 | 34.57 | 7.70 |
4 | |16 | 5 | 6.64 | 17.58 | 28.91 | 4.89 |
5 | |16 | 6 | 35.55 | 63.87 | 73.83 | 19.46 |
6 | |16 | 7 | 35.16 | 63.09 | 77.15 | 18.91 |
7 |
--------------------------------------------------------------------------------
/Output_RS.txt:
--------------------------------------------------------------------------------
1 | |16 | 1 | 0.129454 | 0.238124 | 0.292458 | 0.093279 |
2 | |16 | 2 | 0.096793 | 0.195368 | 0.272862 | 0.069118 |
3 | |16 | 3 | 0.040380 | 0.086401 | 0.119062 | 0.032450 |
4 | |16 | 5 | 0.002375 | 0.004454 | 0.006532 | 0.002273 |
5 | |16 | 6 | 0.545428 | 0.719418 | 0.779097 | 0.406221 |
6 | |16 | 7 | 0.540380 | 0.710808 | 0.764549 | 0.396639 |
7 |
--------------------------------------------------------------------------------
/visualize/README.md:
--------------------------------------------------------------------------------
1 | ## Visualize
2 |
3 | ### 1. vis_ranking.py
4 | Save the top-15 ranked images of the present query into a folder.
5 |
6 | ### 2. statistic_pred.py (Not used in the paper)
7 | Check whether the attacked feature has moved to the opposite side.
8 |
9 | ### 3. vis_noise.py (Not used in the paper)
10 | See the difference between the original and adversarial queries.
11 |
12 |
--------------------------------------------------------------------------------
/cub/Output678.txt:
--------------------------------------------------------------------------------
1 | # 100
2 | |16 | 6 | 15.34 | 34.15 | 44.73 | 8.71 |
3 | |16 | 7 | 20.17 | 40.85 | 51.76 | 10.90 |
4 | |16 | 8 | 21.69 | 42.30 | 53.41 | 11.55 |
5 | #20
6 | |16 | 6 | 37.22 | 62.58 | 72.54 | 19.18 |
7 | |16 | 7 | 36.88 | 61.68 | 71.74 | 18.92 |
8 | # 100 + lr 1e-4
9 | |16 | 6 | 49.16 | 74.34 | 83.07 | 25.16 |
10 | |16 | 7 | 49.34 | 74.44 | 83.12 | 25.15 |
11 |
--------------------------------------------------------------------------------
/cifar/experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | test_rate = (2,4,8,12,16)
4 | for i in range(5):
5 |     rate = test_rate[i]
6 |     for j in range(5):
7 |         method_id = j+1
8 |         print('------Rate %d Method:%d------' % (rate, method_id))
9 |         with open("Output.txt", "a") as text_file:
10 |             text_file.write("|%d | %d | " % (rate, method_id))
11 |         os.system('python attack.py --method_id %d --rate %d >> Output.txt' % (method_id, rate))
12 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/.gitignore:
--------------------------------------------------------------------------------
1 | # Don't track content of these folders
2 | outputs/
3 | model/
4 | logs/
5 | __pycache__/
6 | core
7 |
8 | # Compiled source #
9 | ###################
10 | *.com
11 | *.class
12 | *.dll
13 | *.exe
14 | *.o
15 | *.so
16 | *.pyc
17 |
18 | # Packages #
19 | ############
20 | # it's better to unpack these files and commit the raw source
21 | # git has its own built in compression methods
22 | *.7z
23 | *.dmg
24 | *.gz
25 | *.iso
26 | *.jar
27 | *.rar
28 | *.tar
29 | *.zip
30 | *.mat
31 | *.jpg
32 | *.npy
33 | *.pt
34 | *.pth
35 |
--------------------------------------------------------------------------------
/Output_dense.txt:
--------------------------------------------------------------------------------
1 | |16 | 1 | 0.331948 | 0.496734 | 0.570071 | 0.236373 |
2 | |16 | 2 | 0.698040 | 0.852138 | 0.902910 | 0.530079 |
3 | |16 | 3 | 0.693290 | 0.840261 | 0.891627 | 0.521233 |
4 | |16 | 5 | 0.528800 | 0.717340 | 0.783848 | 0.377660 |
5 | |16 | 6 | 0.861342 | 0.935570 | 0.962886 | 0.699404 |
6 | |16 | 7 | 0.858373 | 0.938242 | 0.960808 | 0.697487 |
7 | |16 | 8 | 0.812055 | 0.913005 | 0.945368 | 0.648264 |
8 | |16 | 9 | 0.620843 | 0.791271 | 0.844121 | 0.450959 |
9 | # 100
10 | |16 | 6 | 0.710808 | 0.857779 | 0.903800 | 0.540351 |
11 | |16 | 7 | 0.682601 | 0.831651 | 0.882126 | 0.510608 |
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Don't track content of these folders
2 | __pycache__/
3 | attack_query/
4 | model/
5 | data/
6 | configs/cifs9a31
7 | core
8 | cub/images
9 | cub/CUB_200_2011/
10 | Food/
11 | UPA_weight/
12 | sma_defence/
13 | Cuisine-retrieval/Food-cropped/
14 | # Compiled source #
15 | ###################
16 | *.com
17 | *.class
18 | *.dll
19 | *.exe
20 | *.o
21 | *.so
22 | *.pyc
23 |
24 | # Packages #
25 | ############
26 | # it's better to unpack these files and commit the raw source
27 | # git has its own built in compression methods
28 | *.7z
29 | *.th
30 | *.dmg
31 | *.gz
32 | *.iso
33 | *.jar
34 | *.rar
35 | *.tar
36 | *.zip
37 | *.mat
38 | *.jpg
39 | *.jpeg
40 | *.npy
41 | *.pt
42 | *.pth
43 | *.json
44 |
--------------------------------------------------------------------------------
/visualize/vis_noise.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import numpy
3 |
4 | image_path = "0001/0001_c1s1_001051_00.jpg"
5 |
6 | original_im = Image.open("/home/zzd/Market/pytorch/query/" + image_path)
7 | original_im = original_im.resize((128,256))
8 |
9 | attack_im = Image.open("../attack_query/pytorch/query/" + image_path)
10 |
11 | diff = numpy.array(original_im, dtype=float) - numpy.array(attack_im, dtype=float)
12 |
13 | # shift the signed difference to mid-gray (128) for display
14 | diff += 128
15 | diff = Image.fromarray( numpy.uint8(diff))
16 |
17 | im_save = Image.new('RGB',(128*3, 256))
18 | im_save.paste( original_im, (0,0))
19 | im_save.paste( diff, (128,0))
20 | im_save.paste( attack_im, (256,0))
21 | im_save.save('vis_noise.jpg')
22 |
--------------------------------------------------------------------------------
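The perturbation is nearly invisible at full scale, which is why the `+128` shift above maps a zero difference to mid-gray. A quick way to inspect the actual perturbation magnitude (a sketch; paths and file name as in the script above):

```python
import numpy
from PIL import Image

orig = numpy.array(Image.open("/home/zzd/Market/pytorch/query/0001/0001_c1s1_001051_00.jpg").resize((128, 256)), dtype=float)
adv = numpy.array(Image.open("../attack_query/pytorch/query/0001/0001_c1s1_001051_00.jpg"), dtype=float)
# for an L-inf bounded attack this is roughly bounded by the rate (epsilon), up to JPEG rounding
print("max |noise|:", numpy.abs(orig - adv).max())
```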
/cifar/Output.txt:
--------------------------------------------------------------------------------
1 | |2 | 1 | 0.3160 | 0.8958
2 | |2 | 2 | 0.1550 | 0.8193
3 | |2 | 3 | 0.4387 | 0.9273
4 | |2 | 4 | 0.3945 | 0.9454
5 | |2 | 5 | 0.5700 | 0.8807
6 | |4 | 1 | 0.2394 | 0.8165
7 | |4 | 2 | 0.0498 | 0.5214
8 | |4 | 3 | 0.0841 | 0.8020
9 | |4 | 4 | 0.3327 | 0.9312
10 | |4 | 5 | 0.2758 | 0.5355
11 | |8 | 1 | 0.2016 | 0.7518
12 | |8 | 2 | 0.0474 | 0.2900
13 | |8 | 3 | 0.0013 | 0.5724
14 | |8 | 4 | 0.2666 | 0.8884
15 | |8 | 5 | 0.0181 | 0.1262
16 | |12 | 1 | 0.1709 | 0.7133
17 | |12 | 2 | 0.0474 | 0.2380
18 | |12 | 3 | 0.0003 | 0.5085
19 | |12 | 4 | 0.2291 | 0.8455
20 | |12 | 5 | 0.0019 | 0.0201
21 | |16 | 1 | 0.1495 | 0.6755
22 | |16 | 2 | 0.0474 | 0.2147
23 | |16 | 3 | 0.0003 | 0.4558
24 | |16 | 4 | 0.2086 | 0.8113
25 | |16 | 5 | 0.0006 | 0.0076
26 |
--------------------------------------------------------------------------------
/market.txt:
--------------------------------------------------------------------------------
1 | |2 | 1 | 81.83 | 92.07 | 95.01 | 62.33 |
2 | |2 | 2 | 81.06 | 91.63 | 94.80 | 61.76 |
3 | |2 | 3 | 83.94 | 93.08 | 95.49 | 63.93 |
4 | |2 | 4 | 86.19 | 94.24 | 96.14 | 67.00 |
5 | |2 | 5 | 82.54 | 93.02 | 95.40 | 63.93 |
6 | |4 | 1 | 66.98 | 82.66 | 87.50 | 47.26 |
7 | |4 | 2 | 61.25 | 77.88 | 84.29 | 43.03 |
8 | |4 | 3 | 69.63 | 84.26 | 88.90 | 49.17 |
9 | |4 | 4 | 83.49 | 93.82 | 95.64 | 63.69 |
10 | |4 | 5 | 53.56 | 71.64 | 78.36 | 38.24 |
11 | |8 | 1 | 38.75 | 57.24 | 64.85 | 25.16 |
12 | |8 | 2 | 28.06 | 45.87 | 54.42 | 18.60 |
13 | |8 | 3 | 29.90 | 45.78 | 53.86 | 17.93 |
14 | |8 | 4 | 77.49 | 90.47 | 93.82 | 55.22 |
15 | |8 | 5 | 11.02 | 20.78 | 27.58 | 7.84 |
16 | |12 | 1 | 19.39 | 33.94 | 40.91 | 12.53 |
17 | |12 | 2 | 16.24 | 30.88 | 38.57 | 11.08 |
18 | |12 | 3 | 11.61 | 21.11 | 27.76 | 6.84 |
19 | |12 | 4 | 71.97 | 86.85 | 90.56 | 48.11 |
20 | |12 | 5 | 2.64 | 5.88 | 8.28 | 2.03 |
21 | |16 | 1 | 8.49 | 17.67 | 22.39 | 5.74 |
22 | |16 | 2 | 10.93 | 23.96 | 31.53 | 7.79 |
23 | |16 | 3 | 4.93 | 10.51 | 14.31 | 3.11 |
24 | |16 | 4 | 66.03 | 83.46 | 88.27 | 42.56 |
25 | |16 | 5 | 0.62 | 1.90 | 2.88 | 0.72 |
26 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Zhedong Zheng
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/cub/prepare.py:
--------------------------------------------------------------------------------
1 | # mkdir train_all
2 | # mkdir test
3 | # mv 0* train_all
4 | # mv ./100.Brown_Pelican train_all
5 | # mv 1* test
6 | # mv ./200.Common_Yellowthroat/ test
7 | #
8 |
9 | #---------------------------------------
10 | #train_val
11 | import os
12 | from shutil import copyfile
13 |
14 | download_path = './CUB_200_2011'
15 | train_path = download_path + '/images/train_all'
16 | train_save_path = download_path + '/images/train'
17 | val_save_path = download_path + '/images/val'
18 | if not os.path.isdir(train_save_path):
19 |     os.mkdir(train_save_path)
20 |     os.mkdir(val_save_path)
21 |
22 | for r, subdirs, f in os.walk(train_path, topdown=True):
23 |     for sub in subdirs:
24 |         for root, dirs, files in os.walk(train_path + '/' + sub, topdown=True):
25 |             for name in files:
26 |                 if not name[-3:] == 'jpg':
27 |                     continue
28 |                 src_path = train_path + '/' + sub + '/' + name
29 |                 dst_path = train_save_path + '/' + sub
30 |                 if not os.path.isdir(dst_path):
31 |                     os.mkdir(dst_path)
32 |                     dst_path = val_save_path + '/' + sub  # the first image of each class is used as the val image
33 |                     os.mkdir(dst_path)
34 |                 copyfile(src_path, dst_path + '/' + name)
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/cub/prepare_attack.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from shutil import copyfile
4 |
5 | parser = argparse.ArgumentParser(description='Prepare')
6 | parser.add_argument('--method_id', default=3, type=int, help='1.fast || 2.least likely || 3.label smooth')
7 | parser.add_argument('--rate', default=2, type=int, help='attack rate')
8 |
9 | opt = parser.parse_args()
10 |
11 | # You only need to change this line to your dataset download path
12 | download_path = './attack_query'
13 |
14 | if not os.path.isdir(download_path):
15 |     print('please change the download_path')
16 |
17 | save_path = download_path + '/pytorch'
18 | if not os.path.isdir(save_path):
19 |     os.mkdir(save_path)
20 | #-----------------------------------------
21 | # query
22 | query_path = download_path + '/ft_ResNet50-' + str(opt.method_id) + '/' + str(opt.rate)
23 | query_save_path = download_path + '/pytorch/query'
24 | if not os.path.isdir(query_save_path):
25 |     os.mkdir(query_save_path)
26 |
27 | for root, dirs, files in os.walk(query_path, topdown=True):
28 |     for name in files:
29 |         if not name[-3:] == 'jpg':
30 |             continue
31 |         ID = name.split('_')
32 |         src_path = query_path + '/' + name
33 |         dst_path = query_save_path + '/' + ID[0]
34 |         if not os.path.isdir(dst_path):
35 |             os.mkdir(dst_path)
36 |         copyfile(src_path, dst_path + '/' + name)
37 |
38 |
--------------------------------------------------------------------------------
/experiment_67.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 |
5 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
6 |
7 | #test_rate = (2,4,8,12,16)
8 | test_rate = (16,12,8,4,2)
9 | for i in range(1):
10 |     rate = test_rate[i]
11 |     #for j in range(7):
12 |     for j in [5, 6]:
13 |         method_id = j+1
14 |         if method_id == 4:
15 |             continue
16 |         print('------Rate %d Method:%d------' % (rate, method_id))
17 |         time_start = time.time()
18 |         os.system('python3 generate_attack_query.py --lr 1e-4 --iter 100 --method_id %d --rate %d' % (method_id, rate))
19 |         time_end = time.time()
20 |         print('time cost', time_end-time_start, 's')
21 |         os.system('python3 prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
22 |         if (i==0) and (j==0):
23 |             os.system('python3 test_only_query.py --name baseline --test_all --test_dir ./attack_query/pytorch/')
24 |         else:
25 |             output_path = './attack_query/baseline-' + str(method_id) + '/' + str(rate) + '/'
26 |             os.system('python3 test_only_query.py --name baseline --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
27 |         result = evaluate_gpu.main()
28 |         with open("Output_67.txt", "a") as text_file:
29 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
30 |                 result[0], result[1], result[2], result[3]))
31 |
--------------------------------------------------------------------------------
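One iteration of the sweep above, unrolled for clarity (the same four stages drive every `experiment_*.py` in this repo: generate the attacked queries, regroup them by ID, extract features, evaluate; values 6/16 are one method/rate pair from the script):

```bash
python3 generate_attack_query.py --lr 1e-4 --iter 100 --method_id 6 --rate 16
python3 prepare_attack.py --method_id 6 --rate 16
python3 test_only_query.py --name baseline --test_dir ./attack_query/pytorch/ \
        --output_path ./attack_query/baseline-6/16/
python3 -c "import evaluate_gpu; print(evaluate_gpu.main())"
```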
/experiment_randomstart.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 |
5 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
6 |
7 | #test_rate = (2,4,8,12,16)
8 | test_rate = (16,12,8,4,2)
9 | for i in range(1):
10 |     rate = test_rate[i]
11 |     #for j in range(8):
12 |     for j in range(7):
13 |         method_id = j+1
14 |         if method_id == 4:
15 |             continue
16 |         print('------Rate %d Method:%d------' % (rate, method_id))
17 |         time_start = time.time()
18 |         os.system('python3 generate_attack_query.py --randomstart --method_id %d --rate %d' % (method_id, rate))
19 |         time_end = time.time()
20 |         print('time cost', time_end-time_start, 's')
21 |         os.system('python3 prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
22 |         if (i==0) and (j==0):
23 |             os.system('python3 test_only_query.py --name baseline --test_all --test_dir ./attack_query/pytorch/')
24 |         else:
25 |             output_path = './attack_query/baseline-' + str(method_id) + '/' + str(rate) + '/'
26 |             os.system('python3 test_only_query.py --name baseline --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
27 |         result = evaluate_gpu.main()
28 |         with open("Output_RS.txt", "a") as text_file:
29 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
30 |                 result[0], result[1], result[2], result[3]))
31 |
--------------------------------------------------------------------------------
/experiment_PCB.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 |
5 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
6 |
7 | test_rate = (2,4,8,12,16)
8 | #test_rate = (16,12,8,4,2)
9 | for i in range(5):
10 | #for i in [-1]:
11 |     rate = test_rate[i]
12 |     for j in [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]:
13 |     #for j in [4]:
14 |         method_id = j+1
15 |         if method_id == 4:
16 |             continue
17 |         print('------Rate %d Method:%d------' % (rate, method_id))
18 |         time_start = time.time()
19 |         os.system('python3 generate_attack_query_PCB.py --method_id %d --rate %d' % (method_id, rate))
20 |         time_end = time.time()
21 |         print('time cost', time_end-time_start, 's')
22 |         os.system('python3 prepare_attack.py --name PCB --method_id %d --rate %d' % (method_id, rate))
23 |         if (i==0) and (j==0):
24 |             os.system('python3 test_only_query_PCB.py --name PCB --test_all --test_dir ./attack_query/pytorch/')
25 |         else:
26 |             output_path = './attack_query/PCB-' + str(method_id) + '/' + str(rate) + '/'
27 |             os.system('python3 test_only_query_PCB.py --name PCB --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
28 |         result = evaluate_gpu.main()
29 |         with open("Output_PCB.txt", "a") as text_file:
30 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
31 |                 result[0], result[1], result[2], result[3]))
32 |
--------------------------------------------------------------------------------
/experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 |
5 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
6 |
7 | test_rate = (2,4,8,12,16)
8 | #test_rate = (16,12,8,4,2)
9 | #for i in range(5):
10 | for i in [-1]:
11 |     rate = test_rate[i]
12 |     #for j in range(7):
13 |     for j in [2, 4, 8]:
14 |         method_id = j+1
15 |         if method_id == 4:
16 |             continue
17 |         print('------Rate %d Method:%d------' % (rate, method_id))
18 |         time_start = time.time()
19 |         os.system('python3 generate_attack_query.py --iter 100 --method_id %d --rate %d' % (method_id, rate))
20 |         time_end = time.time()
21 |         print('time cost', time_end-time_start, 's')
22 |         os.system('python3 prepare_attack.py --name baseline --method_id %d --rate %d' % (method_id, rate))
23 |         if (i==0) and (j==0):
24 |             os.system('python3 test_only_query.py --name baseline --test_all --test_dir ./attack_query/pytorch/')
25 |         else:
26 |             output_path = './attack_query/baseline-' + str(method_id) + '/' + str(rate) + '/'
27 |             os.system('python3 test_only_query.py --name baseline --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
28 |         result = evaluate_gpu.main()
29 |         continue  # note: this 'continue' skips the result-writing below
30 |         with open("Output.txt", "a") as text_file:
31 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
32 |                 result[0], result[1], result[2], result[3]))
33 |
--------------------------------------------------------------------------------
/prepare_attack.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from shutil import copyfile
4 |
5 | parser = argparse.ArgumentParser(description='Prepare')
6 | parser.add_argument('--method_id', default=3, type=int, help='1.fast || 2.least likely || 3.label smooth')
7 | parser.add_argument('--rate', default=2, type=int, help='attack rate')
8 | parser.add_argument('--name', default='baseline', type=str, help='save model path')
9 |
10 | opt = parser.parse_args()
11 |
12 | # You only need to change this line to your dataset download path
13 | download_path = './attack_query'
14 |
15 | if not os.path.isdir(download_path):
16 |     print('please change the download_path')
17 |
18 | save_path = download_path + '/pytorch'
19 | if not os.path.isdir(save_path):
20 |     os.mkdir(save_path)
21 | #-----------------------------------------
22 | # query
23 | query_path = download_path + '/%s-' % opt.name + str(opt.method_id) + '/' + str(opt.rate)
24 | query_save_path = download_path + '/pytorch/query'
25 | if not os.path.isdir(query_save_path):
26 |     os.mkdir(query_save_path)
27 |
28 | for root, dirs, files in os.walk(query_path, topdown=True):
29 |     for name in files:
30 |         if not name[-3:] == 'jpg':
31 |             continue
32 |         ID = name.split('_')
33 |         src_path = query_path + '/' + name
34 |         dst_path = query_save_path + '/' + ID[0]
35 |         if not os.path.isdir(dst_path):
36 |             os.mkdir(dst_path)
37 |         copyfile(src_path, dst_path + '/' + name)
38 |
39 |
--------------------------------------------------------------------------------
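For reference, a sketch of the layout `prepare_attack.py` consumes and produces (the file name is illustrative, borrowed from `visualize/vis_noise.py`):

```
attack_query/
├── baseline-6/16/0001_c1s1_001051_00.jpg        # raw attacked queries: <name>-<method_id>/<rate>/
└── pytorch/query/0001/0001_c1s1_001051_00.jpg   # regrouped by ID for the test scripts
```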
/experiment_adv.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 |
5 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
6 |
7 | test_rate = (2,4,8,12,16)
8 | #test_rate = (16,12,8,4,2)
9 | for i in range(5):
10 | #for i in [-1]:
11 |     rate = test_rate[i]
12 |     #for j in range(9):
13 |     for j in [5, 6, 8]:
14 |         method_id = j+1
15 |         if method_id == 4:
16 |             continue
17 |         print('------Rate %d Method:%d------' % (rate, method_id))
18 |         time_start = time.time()
19 |         os.system('python3 generate_attack_query.py --name adv0.1_40_w10 --method_id %d --rate %d' % (method_id, rate))
20 |         time_end = time.time()
21 |         print('time cost', time_end-time_start, 's')
22 |         os.system('python3 prepare_attack.py --name adv0.1_40_w10 --method_id %d --rate %d' % (method_id, rate))
23 |         if (i==0) and (j==0):
24 |             os.system('python3 test_only_query.py --name adv0.1_40_w10 --test_all --test_dir ./attack_query/pytorch/')
25 |         else:
26 |             output_path = './attack_query/baseline-' + str(method_id) + '/' + str(rate) + '/'
27 |             os.system('python3 test_only_query.py --name adv0.1_40_w10 --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
28 |         result = evaluate_gpu.main()
29 |         with open("Output_adv.txt", "a") as text_file:
30 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
31 |                 result[0], result[1], result[2], result[3]))
32 |
--------------------------------------------------------------------------------
/experiment_dense.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 | import time
4 | #test_rate = (2,4,8,12,16)
5 | test_rate = (16,12,8,4,2)
6 | for i in range(1):
7 |     rate = test_rate[i]
8 |     #for j in range(9):
9 |     for j in [4, 5, 6]:
10 |         method_id = j+1
11 |         if method_id == 4:
12 |             continue
13 |         print('------Rate %d Method:%d------' % (rate, method_id))
14 |         #time_start = time.time()
15 |         #os.system('python3 generate_attack_query.py --iter 100 --method_id %d --rate %d' % (method_id, rate))
16 |         #time_end = time.time()
17 |         #print('time cost', time_end-time_start, 's')
18 |
19 |         # We still use the ResNet-50 queries to attack the DenseNet model (i.e., a transfer setting).
20 |         os.system('python3 prepare_attack.py --name baseline --method_id %d --rate %d' % (method_id, rate))
21 |         if (i==0) and (j==0):
22 |             os.system('python3 test_only_query.py --name Dense --use_dense --test_all --test_dir ./attack_query/pytorch/')
23 |         else:
24 |             output_path = './attack_query/baseline-' + str(method_id) + '/' + str(rate) + '/'
25 |             os.system('python3 test_only_query.py --name Dense --use_dense --test_dir ./attack_query/pytorch/ --output_path %s' % output_path)
26 |         result = evaluate_gpu.main()
27 |         with open("Output_dense.txt", "a") as text_file:
28 |             text_file.write("|%d | %d | %f | %f | %f | %f |\n" % (rate, method_id,
29 |                 result[0], result[1], result[2], result[3]))
30 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/experiment_RS.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 | #test_rate = (16, 12, 8, 4, 2)
6 | test_rate = (2,4,8,12,16)
7 |
8 | query_save_path = 'attack_query'
9 | if not os.path.isdir(query_save_path):
10 |     os.mkdir(query_save_path)
11 |
12 | for i in [-1]:
13 |     rate = test_rate[i]
14 |     for j in range(7):
15 |         method_id = 1+j  #6+j #j+1
16 |         if method_id == 4:
17 |             continue
18 |         print('------Rate %d Method:%d------' % (rate, method_id))
19 |         os.system('python generate_attack_query.py --randomstart --method_id %d --rate %d --gpu_ids 0' % (method_id, rate))
20 |         os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
21 |         if (i==0) and (j==0):
22 |             # we need the gallery feature
23 |             os.system('python test_query.py --name ft_ResNet50_all --test_all --test_dir ./attack_query/pytorch/')
24 |         else:
25 |             output_path = './attack_query/ft_ResNet50_all-' + str(method_id) + '/' + str(rate) + '/'
26 |             os.system('python test_query.py --name ft_ResNet50_all --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
27 |         result = evaluate_gpu.main(output_path)
28 |         with open("Output_RS.txt", "a") as text_file:
29 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
30 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
31 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/experiment_67.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 | #test_rate = (16, 12, 8, 4, 2)
6 | test_rate = (2,4,8,12,16)
7 |
8 | query_save_path = 'attack_query'
9 | if not os.path.isdir(query_save_path):
10 |     os.mkdir(query_save_path)
11 |
12 | for i in [-1]:
13 |     rate = test_rate[i]
14 |     for j in [5, 6]:
15 |         method_id = 1+j  #6+j #j+1
16 |         if method_id == 4:
17 |             continue
18 |         print('------Rate %d Method:%d------' % (rate, method_id))
19 |         os.system('python generate_attack_query.py --lr 1e-4 --iter 100 --method_id %d --rate %d --gpu_ids 0' % (method_id, rate))
20 |         os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
21 |         if (i==0) and (j==0):
22 |             # we need the gallery feature
23 |             os.system('python test_query.py --name ft_ResNet50_all --test_all --test_dir ./attack_query/pytorch/')
24 |         else:
25 |             output_path = './attack_query/ft_ResNet50_all-' + str(method_id) + '/' + str(rate) + '/'
26 |             os.system('python test_query.py --name ft_ResNet50_all --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
27 |         result = evaluate_gpu.main(output_path)
28 |         with open("Output_67.txt", "a") as text_file:
29 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
30 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
31 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 | #test_rate = (16, 12, 8, 4, 2)
6 | test_rate = (2,4,8,12,16)
7 |
8 | query_save_path = 'attack_query'
9 | if not os.path.isdir(query_save_path):
10 |     os.mkdir(query_save_path)
11 |
12 | #for i in range(5):
13 | for i in [-1]:
14 |     rate = test_rate[i]
15 |     #for j in range(8):
16 |     for j in [8]:
17 |         method_id = 1+j  #6+j #j+1
18 |         if method_id == 4:
19 |             continue
20 |         print('------Rate %d Method:%d------' % (rate, method_id))
21 |         os.system('python generate_attack_query.py --method_id %d --rate %d --gpu_ids 0' % (method_id, rate))
22 |         os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
23 |         #if (i==0) and (j==0):
24 |         #    # we need the gallery feature
25 |         #    os.system('python test_query.py --name ft_ResNet50_all --test_all --test_dir ./attack_query/pytorch/')
26 |         #else:
27 |         output_path = './attack_query/ft_ResNet50_all-' + str(method_id) + '/' + str(rate) + '/'
28 |         os.system('python test_query.py --name ft_ResNet50_all --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
29 |         result = evaluate_gpu.main(output_path)
30 |         with open("Output.txt", "a") as text_file:
31 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
32 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
33 |
--------------------------------------------------------------------------------
/cub/experiment_67.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 |
6 | test_rate = (16,12,8,4,2)
7 |
8 | query_save_path = 'attack_query'
9 | if not os.path.isdir(query_save_path):
10 |     os.mkdir(query_save_path)
11 |
12 | for i in [0]:
13 |     rate = test_rate[i]
14 |     for j in [5, 6]:
15 |         method_id = 1+j
16 |         #method_id = j+1
17 |         if method_id == 4:
18 |             continue
19 |         print('------Rate %d Method:%d------' % (rate, method_id))
20 |         os.system('python generate_attack_query.py --lr 1e-4 --iter 100 --name rerun_lr4 --method_id %d --rate %d --gpu_ids 0' % (method_id, rate))
21 |         # The CUB query set doubles as the gallery, so no query preparation is needed.
22 |         #os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
23 |         output_path = './attack_query/rerun_lr4-' + str(method_id) + '/' + str(rate) + '/'
24 |         #if (i==0) and (j==0):
25 |         #    # we need the gallery feature
26 |         #    os.system('python test_query.py --name rerun_lr4 --test_all --test_dir ./attack_query/pytorch/')
27 |         #else:
28 |         os.system('python test_query.py --name rerun_lr4 --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
29 |         result = evaluate_gpu.main(output_path)
30 |         with open("Output678.txt", "a") as text_file:
31 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
32 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
33 |
--------------------------------------------------------------------------------
/cub/experiment.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 |
6 | test_rate = (16,12,8,4,2)
7 | #test_rate = (2,4,8,12,16)
8 |
9 | query_save_path = 'attack_query'
10 | if not os.path.isdir(query_save_path):
11 |     os.mkdir(query_save_path)
12 |
13 | #for i in range(5):
14 | for i in [0]:
15 |     rate = test_rate[i]
16 |     for j in [8]:
17 |     #for j in range(5):
18 |         method_id = 1+j
19 |         #method_id = j+1
20 |         if method_id == 4:
21 |             continue
22 |         print('------Rate %d Method:%d------' % (rate, method_id))
23 |         os.system('python generate_attack_query.py --name rerun_lr4 --method_id %d --rate %d --gpu_ids 0' % (method_id, rate))
24 |         # The CUB query set doubles as the gallery, so no query preparation is needed.
25 |         #os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
26 |         output_path = './attack_query/rerun_lr4-' + str(method_id) + '/' + str(rate) + '/'
27 |         #if (i==0) and (j==0):
28 |         #    # we need the gallery feature
29 |         #    os.system('python test_query.py --name rerun_lr4 --test_all --test_dir ./attack_query/pytorch/')
30 |         #else:
31 |         os.system('python test_query.py --name rerun_lr4 --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
32 |         result = evaluate_gpu.main(output_path)
33 |         with open("Output.txt", "a") as text_file:
34 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
35 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
36 |
--------------------------------------------------------------------------------
/cub/experiment_randomstart.py:
--------------------------------------------------------------------------------
1 | import os
2 | import evaluate_gpu
3 |
4 | os.environ['MKL_THREADING_LAYER'] = 'GNU'
5 |
6 | test_rate = (16,12,8,4,2)
7 | #test_rate = (2,4,8,12,16)
8 |
9 | query_save_path = 'attack_query'
10 | if not os.path.isdir(query_save_path):
11 |     os.mkdir(query_save_path)
12 |
13 | for i in range(1):
14 |     rate = test_rate[i]
15 |     #for j in range(2):
16 |     for j in [4]:
17 |     #for j in range(7):
18 |         method_id = 1+j
19 |         #method_id = j+1
20 |         if method_id == 4:
21 |             continue
22 |         print('------Rate %d Method:%d------' % (rate, method_id))
23 |         os.system('python generate_attack_query.py --name rerun_lr4 --method_id %d --rate %d --gpu_ids 0 --randomstart' % (method_id, rate))
24 |         # The CUB query set doubles as the gallery, so no query preparation is needed.
25 |         #os.system('python prepare_attack.py --method_id %d --rate %d' % (method_id, rate))
26 |         output_path = './attack_query/rerun_lr4-' + str(method_id) + '/' + str(rate) + '/'
27 |         #if (i==0) and (j==0):
28 |         #    # we need the gallery feature
29 |         #    os.system('python test_query.py --name rerun_lr4 --test_all --test_dir ./attack_query/pytorch/')
30 |         #else:
31 |         os.system('python test_query.py --name rerun_lr4 --test_dir %s --output_path %s --gpu_ids 0' % (output_path, output_path))
32 |         result = evaluate_gpu.main(output_path)
33 |         with open("Output_RS.txt", "a") as text_file:
34 |             text_file.write("|%d | %d | %.2f | %.2f | %.2f | %.2f |\n" % (rate, method_id,
35 |                 result[0]*100, result[1]*100, result[2]*100, result[3]*100))
36 |
--------------------------------------------------------------------------------
/mnist/model.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch.nn.functional as F
3 | from torch.nn import init
4 |
5 | def weights_init_kaiming(m):
6 |     classname = m.__class__.__name__
7 |     # print(classname)
8 |     if classname.find('Conv') != -1:
9 |         init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
10 |     elif classname.find('Linear') != -1:
11 |         init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
12 |         init.constant_(m.bias.data, 0.0)
13 |     elif classname.find('BatchNorm1d') != -1:
14 |         init.normal_(m.weight.data, 1.0, 0.02)
15 |         init.constant_(m.bias.data, 0.0)
16 |
17 | def weights_init_classifier(m):
18 |     classname = m.__class__.__name__
19 |     if classname.find('Linear') != -1:
20 |         init.normal_(m.weight.data, std=0.001)
21 |         #init.constant_(m.bias.data, 0.0)
22 |
23 | # LeNet-style network. I follow the practice in
24 | # https://github.com/vlfeat/matconvnet/blob/34742d978151d8809f7ab2d18bb48db23fb9bb47/examples/mnist/cnn_mnist_init.m
25 | class Net(nn.Module):
26 |     def __init__(self):
27 |         super(Net, self).__init__()
28 |         self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
29 |         self.conv1.apply(weights_init_kaiming)
30 |         self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
31 |         self.conv2.apply(weights_init_kaiming)
32 |         self.conv3 = nn.Conv2d(50, 500, kernel_size=4)
33 |         self.conv3.apply(weights_init_kaiming)
34 |
35 |         self.fc1 = nn.Linear(500, 2)   # 2-dim bottleneck feature used for visualization
36 |         self.fc2 = nn.Linear(2, 10, bias=False)
37 |         self.fc2.apply(weights_init_classifier)
38 |
39 |     def forward(self, x):
40 |         x = F.max_pool2d(self.conv1(x), 2)
41 |         x = F.max_pool2d(self.conv2(x), 2)
42 |         x = F.relu(self.conv3(x))
43 |         x = x.view(-1, 500)
44 |         x = self.fc1(x)
45 |         x = self.fc2(x)
46 |         return x
47 |
--------------------------------------------------------------------------------
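A quick shape check for `Net` (a sketch; assumes standard 1×28×28 MNIST inputs):

```python
import torch
from model import Net

net = Net()
x = torch.randn(2, 1, 28, 28)
# conv1 (5x5): 28->24, pool: ->12; conv2 (5x5): 12->8, pool: ->4; conv3 (4x4): 4->1x1x500
print(net(x).shape)  # torch.Size([2, 10])
```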
/cifar/attack_result.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top5 = {}
14 |
15 | for i in range(6):
16 |     top1[i] = []
17 |     top5[i] = []
18 |
19 | top1[0] = [93.14, 93.14, 93.14, 93.14, 93.14]
20 | top5[0] = [99.76, 99.76, 99.76, 99.76, 99.76]
21 |
22 | with open("./Output.txt", "r") as f:
23 |     for line in f:
24 |         score = line.split('|')
25 |         method_id = int(score[2])
26 |         top1_acc, top5_acc = float(score[3]), float(score[4])
27 |         top1[method_id].append(top1_acc*100)
28 |         top5[method_id].append(top5_acc*100)
29 |
30 |
31 | fig = plt.figure(figsize=(10,4),dpi=180)
32 | ax0 = fig.add_subplot(121, ylabel="Top-1(%)", xlabel='epsilon')
33 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
34 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
35 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
36 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
37 | #ax0.plot(x_epoch, top1[4], 'mo-', label='Label-smooth')
38 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
39 | ax0.grid(True)
40 | ax0.legend()
41 | plt.ylim(0,100)
42 | plt.xlim(1,17)
43 |
44 | ax0 = fig.add_subplot(122, ylabel="Top-5(%)", xlabel='epsilon')
45 | ax0.plot(x_epoch, top5[0], 'k-', label='Clean')
46 | ax0.plot(x_epoch, top5[1], 'b^-', label='Fast')
47 | ax0.plot(x_epoch, top5[2], 'rs-', label='Basic')
48 | ax0.plot(x_epoch, top5[3], 'gv-', label='Least-likely')
49 | #ax0.plot(x_epoch, top5[4], 'mo-', label='Label-smooth')
50 | ax0.plot(x_epoch, top5[5], 'yo-', label='Ours')
51 | ax0.grid(True)
52 | ax0.legend()
53 | plt.ylim(0,100)
54 | plt.xlim(1,17)
55 |
56 | fig.savefig( 'Cifar.jpg')
57 |
--------------------------------------------------------------------------------
/cub/Output.txt:
--------------------------------------------------------------------------------
1 | # |Epsilon | Method ID | Recall@1 | Recall@5 | Recall@10 | mAP |
2 | # Method 1: Fast
3 | # Method 2: Basic
4 | # Method 3: Least-Likely
5 | # Method 5: Ours (ODFA)
6 | # Method 6: PIRE
7 | # Method 7: TMA
8 | # Method 8: UPA
9 | # Method 9: SMA
10 | |2 | 1 | 48.46 | 74.56 | 83.61 | 24.55 |
11 | |2 | 2 | 47.89 | 74.17 | 83.36 | 24.33 |
12 | |2 | 3 | 52.55 | 77.55 | 85.65 | 26.68 |
13 | |2 | 5 | 50.03 | 74.92 | 83.90 | 25.49 |
14 | |4 | 1 | 40.88 | 66.02 | 75.30 | 19.90 |
15 | |4 | 2 | 38.42 | 63.28 | 73.68 | 19.41 |
16 | |4 | 3 | 47.65 | 73.90 | 82.48 | 24.15 |
17 | |4 | 5 | 39.23 | 65.94 | 75.47 | 20.55 |
18 | |8 | 1 | 29.14 | 53.31 | 63.59 | 14.40 |
19 | |8 | 2 | 23.73 | 45.51 | 56.47 | 12.91 |
20 | |8 | 3 | 33.10 | 57.88 | 68.79 | 17.03 |
21 | |8 | 5 | 22.77 | 44.35 | 55.32 | 12.04 |
22 | |12 | 1 | 21.64 | 42.25 | 52.95 | 11.06 |
23 | |12 | 2 | 18.52 | 38.81 | 49.58 | 10.56 |
24 | |12 | 3 | 25.69 | 48.95 | 59.74 | 13.46 |
25 | |12 | 5 | 14.74 | 33.51 | 43.89 | 8.40 |
26 | |16 | 1 | 15.80 | 33.04 | 42.76 | 8.50 |
27 | |16 | 2 | 15.63 | 33.96 | 44.83 | 9.27 |
28 | |16 | 3 | 21.40 | 42.44 | 53.75 | 11.31 |
29 | |16 | 5 | 10.28 | 25.34 | 34.25 | 6.34 |
30 | |16 | 8 | 52.94 | 78.00 | 86.19 | 27.07 |
31 | |2 | 6 | 52.68 | 78.11 | 85.82 | 26.97 |
32 | |2 | 7 | 52.80 | 78.33 | 85.92 | 26.97 |
33 | |4 | 6 | 51.69 | 77.62 | 85.97 | 26.63 |
34 | |4 | 7 | 52.18 | 77.58 | 85.62 | 26.61 |
35 | |8 | 6 | 49.29 | 74.73 | 83.49 | 24.95 |
36 | |8 | 7 | 49.59 | 75.08 | 83.17 | 25.07 |
37 | |12 | 6 | 43.65 | 69.80 | 79.17 | 22.42 |
38 | |12 | 7 | 44.33 | 69.94 | 78.95 | 22.43 |
39 | |16 | 6 | 37.24 | 62.51 | 72.32 | 19.15 |
40 | |16 | 7 | 36.53 | 61.63 | 71.37 | 18.86 |
41 | |2 | 9 | 46.78 | 72.87 | 81.40 | 24.11 |
42 | |4 | 9 | 37.19 | 63.08 | 73.11 | 19.09 |
43 | |8 | 9 | 23.11 | 46.15 | 57.68 | 12.35 |
44 | |12 | 9 | 15.82 | 35.43 | 46.07 | 8.96 |
45 | |16 | 9 | 11.24 | 27.89 | 37.98 | 7.03 |
46 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/Output.txt:
--------------------------------------------------------------------------------
1 | # |Epsilon | Method ID | Recall@1 | Recall@5 | Recall@10 | mAP |
2 | # Method 1: Fast
3 | # Method 2: Basic
4 | # Method 3: Least-Likely
5 | # Method 5: Ours (ODFA)
6 | # Method 6: PIRE
7 | # Method 7: TMA
8 | # Method 8: UPA
9 | # Method 9: SMA
10 | |2 | 1 | 52.54 | 81.05 | 88.67 | 26.70 |
11 | |2 | 2 | 53.12 | 78.71 | 86.52 | 26.52 |
12 | |2 | 3 | 60.16 | 87.11 | 93.95 | 30.53 |
13 | |2 | 5 | 52.93 | 81.64 | 89.45 | 27.48 |
14 | |4 | 1 | 44.92 | 70.51 | 80.27 | 21.83 |
15 | |4 | 2 | 37.89 | 65.82 | 76.17 | 20.74 |
16 | |4 | 3 | 48.44 | 77.93 | 87.11 | 24.42 |
17 | |4 | 5 | 39.65 | 66.41 | 77.34 | 19.52 |
18 | |8 | 1 | 36.52 | 64.84 | 75.00 | 18.39 |
19 | |8 | 2 | 26.95 | 54.49 | 67.19 | 15.68 |
20 | |8 | 3 | 22.27 | 43.95 | 58.59 | 12.51 |
21 | |8 | 5 | 16.60 | 40.23 | 52.15 | 8.98 |
22 | |12 | 1 | 33.98 | 61.13 | 72.07 | 16.86 |
23 | |12 | 2 | 24.02 | 49.41 | 62.30 | 14.24 |
24 | |12 | 3 | 14.65 | 33.40 | 44.14 | 9.57 |
25 | |12 | 5 | 8.98 | 25.39 | 36.72 | 5.89 |
26 | |16 | 1 | 30.66 | 56.84 | 67.58 | 15.41 |
27 | |16 | 2 | 23.63 | 47.07 | 60.94 | 13.43 |
28 | |16 | 3 | 10.74 | 27.15 | 38.87 | 8.12 |
29 | |16 | 5 | 6.64 | 18.75 | 26.95 | 4.65 |
30 | |16 | 8 | 53.32 | 81.05 | 87.70 | 26.38 |
31 | |2 | 6 | 64.45 | 88.48 | 94.34 | 32.63 |
32 | |2 | 7 | 65.82 | 88.67 | 94.34 | 32.61 |
33 | |4 | 6 | 64.26 | 88.09 | 94.14 | 31.86 |
34 | |4 | 7 | 65.43 | 87.70 | 94.14 | 32.12 |
35 | |8 | 6 | 59.18 | 83.40 | 91.21 | 29.07 |
36 | |8 | 7 | 56.64 | 83.01 | 89.65 | 28.92 |
37 | |12 | 6 | 49.22 | 74.80 | 85.35 | 25.50 |
38 | |12 | 7 | 52.15 | 76.17 | 85.74 | 24.86 |
39 | |16 | 6 | 40.23 | 71.29 | 82.23 | 21.34 |
40 | |16 | 7 | 41.80 | 69.14 | 80.27 | 20.73 |
41 | |2 | 9 | 53.52 | 78.32 | 87.50 | 26.19 |
42 | |4 | 9 | 36.33 | 68.95 | 77.93 | 18.41 |
43 | |8 | 9 | 20.51 | 48.83 | 61.72 | 12.31 |
44 | |12 | 9 | 16.60 | 37.89 | 53.91 | 10.81 |
45 | |16 | 9 | 13.28 | 33.79 | 46.48 | 9.21 |
46 |
--------------------------------------------------------------------------------
/mnist/show_mnist.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import torch.optim as optim
6 | from torchvision import datasets, transforms
7 | from model import Net
8 | import matplotlib
9 | matplotlib.use('agg')
10 | import matplotlib.pyplot as plt
11 | import matplotlib.cm as cm
12 | import numpy as np
13 |
14 |
15 | colors = cm.rainbow(np.linspace(0, 1, 10))
16 | print(colors)
17 | ######################################################################
18 | # Load model
19 | #---------------------------
20 | def load_network(network):
21 |     save_path = os.path.join('./model/best.pth')
22 |     network.load_state_dict(torch.load(save_path))
23 |     return network
24 |
25 |
26 | def test(model, test_loader):
27 |     # scatter the 2-dim feature of every test image, one color per digit
28 |     is_appear = np.zeros(10)  # whether class l already has a legend entry
29 |     with torch.no_grad():
30 |         for data, target in test_loader:
31 |             data = data.cuda()
32 |             output = model(data)
33 |             location = output.data.cpu()
34 |             for i in range(data.size(0)):
35 |                 l = target[i].data.numpy()
36 |                 if is_appear[l] == 0:
37 |                     is_appear[l] = 1
38 |                     ax.scatter(location[i, 0], location[i, 1], c=colors[l], s=10, label=l,
39 |                                alpha=0.7, edgecolors='none')
40 |                 else:
41 |                     ax.scatter(location[i, 0], location[i, 1], c=colors[l], s=10,
42 |                                alpha=0.7, edgecolors='none')
43 |
44 |
45 | test_loader = torch.utils.data.DataLoader(
46 |     datasets.MNIST('../data', train=False, transform=transforms.Compose([
47 |         transforms.ToTensor(),
48 |         transforms.Normalize((0.1307,), (0.3081,))
49 |     ])),
50 |     batch_size=100, shuffle=False)
51 |
52 | model = Net()
53 | model = load_network(model)
54 | model.fc2 = nn.Sequential()
55 | model = model.eval()
56 | model = model.cuda()
57 |
58 | fig, ax = plt.subplots()
59 | test(model, test_loader)
60 | ax.grid(True)
61 | ax.legend(loc='best')
62 | fig.savefig('train.jpg')
63 |
64 |
--------------------------------------------------------------------------------
/cub/random_erasing.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from torchvision.transforms import *
4 |
5 | #from PIL import Image
6 | import random
7 | import math
8 | #import numpy as np
9 | #import torch
10 |
11 | class RandomErasing(object):
12 |     """ Randomly selects a rectangle region in an image and erases its pixels.
13 |         'Random Erasing Data Augmentation' by Zhong et al.
14 |         See https://arxiv.org/pdf/1708.04896.pdf
15 |     Args:
16 |         probability: The probability that the Random Erasing operation will be performed.
17 |         sl: Minimum proportion of erased area against input image.
18 |         sh: Maximum proportion of erased area against input image.
19 |         r1: Minimum aspect ratio of erased area.
20 |         mean: Erasing value.
21 |     """
22 |
23 |     def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
24 |         self.probability = probability
25 |         self.mean = mean
26 |         self.sl = sl
27 |         self.sh = sh
28 |         self.r1 = r1
29 |
30 |     def __call__(self, img):
31 |
32 |         if random.uniform(0, 1) > self.probability:
33 |             return img
34 |
35 |         for attempt in range(100):
36 |             area = img.size()[1] * img.size()[2]
37 |
38 |             target_area = random.uniform(self.sl, self.sh) * area
39 |             aspect_ratio = random.uniform(self.r1, 1/self.r1)
40 |
41 |             h = int(round(math.sqrt(target_area * aspect_ratio)))
42 |             w = int(round(math.sqrt(target_area / aspect_ratio)))
43 |
44 |             if w < img.size()[2] and h < img.size()[1]:
45 |                 x1 = random.randint(0, img.size()[1] - h)
46 |                 y1 = random.randint(0, img.size()[2] - w)
47 |                 if img.size()[0] == 3:
48 |                     img[0, x1:x1+h, y1:y1+w] = self.mean[0]
49 |                     img[1, x1:x1+h, y1:y1+w] = self.mean[1]
50 |                     img[2, x1:x1+h, y1:y1+w] = self.mean[2]
51 |                 else:
52 |                     img[0, x1:x1+h, y1:y1+w] = self.mean[0]
53 |                 return img
54 |
55 |         return img
56 |
--------------------------------------------------------------------------------
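A typical way to plug `RandomErasing` into a training pipeline (a sketch; the resize size and the other transforms are illustrative, not taken from this repo):

```python
from torchvision import transforms
from random_erasing import RandomErasing

train_transform = transforms.Compose([
    transforms.Resize((256, 128)),
    transforms.ToTensor(),            # RandomErasing expects a CxHxW tensor
    RandomErasing(probability=0.5),
])
```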
/Cuisine-retrieval/random_erasing.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from torchvision.transforms import *
4 |
5 | #from PIL import Image
6 | import random
7 | import math
8 | #import numpy as np
9 | #import torch
10 |
11 | class RandomErasing(object):
12 |     """ Randomly selects a rectangle region in an image and erases its pixels.
13 |         'Random Erasing Data Augmentation' by Zhong et al.
14 |         See https://arxiv.org/pdf/1708.04896.pdf
15 |     Args:
16 |         probability: The probability that the Random Erasing operation will be performed.
17 |         sl: Minimum proportion of erased area against input image.
18 |         sh: Maximum proportion of erased area against input image.
19 |         r1: Minimum aspect ratio of erased area.
20 |         mean: Erasing value.
21 |     """
22 |
23 |     def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
24 |         self.probability = probability
25 |         self.mean = mean
26 |         self.sl = sl
27 |         self.sh = sh
28 |         self.r1 = r1
29 |
30 |     def __call__(self, img):
31 |
32 |         if random.uniform(0, 1) > self.probability:
33 |             return img
34 |
35 |         for attempt in range(100):
36 |             area = img.size()[1] * img.size()[2]
37 |
38 |             target_area = random.uniform(self.sl, self.sh) * area
39 |             aspect_ratio = random.uniform(self.r1, 1/self.r1)
40 |
41 |             h = int(round(math.sqrt(target_area * aspect_ratio)))
42 |             w = int(round(math.sqrt(target_area / aspect_ratio)))
43 |
44 |             if w < img.size()[2] and h < img.size()[1]:
45 |                 x1 = random.randint(0, img.size()[1] - h)
46 |                 y1 = random.randint(0, img.size()[2] - w)
47 |                 if img.size()[0] == 3:
48 |                     img[0, x1:x1+h, y1:y1+w] = self.mean[0]
49 |                     img[1, x1:x1+h, y1:y1+w] = self.mean[1]
50 |                     img[2, x1:x1+h, y1:y1+w] = self.mean[2]
51 |                 else:
52 |                     img[0, x1:x1+h, y1:y1+w] = self.mean[0]
53 |                 return img
54 |
55 |         return img
56 |
--------------------------------------------------------------------------------
/Output_PCB.txt:
--------------------------------------------------------------------------------
1 | |2 | 1 | 0.894596 | 0.955760 | 0.970309 | 0.728997 |
2 | |2 | 2 | 0.892518 | 0.956651 | 0.970903 | 0.727571 |
3 | |2 | 3 | 0.915974 | 0.968230 | 0.977435 | 0.756809 |
4 | |2 | 5 | 0.904394 | 0.961995 | 0.975356 | 0.739829 |
5 | |2 | 6 | 0.923694 | 0.973872 | 0.983076 | 0.775714 |
6 | |2 | 7 | 0.925772 | 0.973575 | 0.982482 | 0.775888 |
7 | |2 | 8 | 0.925772 | 0.973575 | 0.982482 | 0.775888 |
8 | |2 | 9 | 0.871734 | 0.947743 | 0.968230 | 0.696440 |
9 | |4 | 1 | 0.838183 | 0.920428 | 0.944774 | 0.648226 |
10 | |4 | 2 | 0.815915 | 0.914192 | 0.942399 | 0.632878 |
11 | |4 | 3 | 0.868468 | 0.946853 | 0.964074 | 0.696932 |
12 | |4 | 5 | 0.726841 | 0.863124 | 0.905582 | 0.543279 |
13 | |4 | 6 | 0.923397 | 0.970606 | 0.981295 | 0.770800 |
14 | |4 | 7 | 0.920724 | 0.971200 | 0.980701 | 0.770377 |
15 | |4 | 8 | 0.920724 | 0.971200 | 0.980701 | 0.770377 |
16 | |4 | 9 | 0.720903 | 0.852138 | 0.898159 | 0.523028 |
17 | |8 | 1 | 0.716746 | 0.860154 | 0.899941 | 0.524257 |
18 | |8 | 2 | 0.648456 | 0.804632 | 0.855404 | 0.461468 |
19 | |8 | 3 | 0.671318 | 0.803444 | 0.848872 | 0.483529 |
20 | |8 | 5 | 0.197447 | 0.335511 | 0.404691 | 0.129179 |
21 | |8 | 6 | 0.901128 | 0.960214 | 0.973278 | 0.739736 |
22 | |8 | 7 | 0.903504 | 0.962589 | 0.974762 | 0.738996 |
23 | |8 | 8 | 0.903504 | 0.962589 | 0.974762 | 0.738996 |
24 | |8 | 9 | 0.418052 | 0.616093 | 0.703682 | 0.286958 |
25 | |12 | 1 | 0.586698 | 0.749109 | 0.812945 | 0.409697 |
26 | |12 | 2 | 0.559085 | 0.728622 | 0.791271 | 0.380815 |
27 | |12 | 3 | 0.459323 | 0.616093 | 0.686758 | 0.311468 |
28 | |12 | 5 | 0.046615 | 0.101544 | 0.144299 | 0.032762 |
29 | |12 | 6 | 0.865796 | 0.944774 | 0.964371 | 0.684781 |
30 | |12 | 7 | 0.858373 | 0.943587 | 0.963777 | 0.678520 |
31 | |12 | 8 | 0.858373 | 0.943587 | 0.963777 | 0.678520 |
32 | |12 | 9 | 0.285036 | 0.468230 | 0.562945 | 0.190001 |
33 | |16 | 1 | 0.421912 | 0.585808 | 0.663302 | 0.280624 |
34 | |16 | 2 | 0.495546 | 0.676366 | 0.743171 | 0.328371 |
35 | |16 | 3 | 0.294537 | 0.439133 | 0.510392 | 0.196915 |
36 | |16 | 5 | 0.013361 | 0.033254 | 0.054335 | 0.011060 |
37 | |16 | 6 | 0.791568 | 0.906176 | 0.930523 | 0.605099 |
38 | |16 | 7 | 0.774347 | 0.888361 | 0.920724 | 0.589096 |
39 | |16 | 8 | 0.774347 | 0.888361 | 0.920724 | 0.589095 |
40 | |16 | 9 | 0.200713 | 0.359264 | 0.451010 | 0.135304 |
41 |
--------------------------------------------------------------------------------
/Output_adv.txt:
--------------------------------------------------------------------------------
1 | |2 | 1 | 0.838777 | 0.931710 | 0.954276 | 0.651332 |
2 | |2 | 2 | 0.833135 | 0.927553 | 0.953088 | 0.646605 |
3 | |2 | 3 | 0.870249 | 0.941211 | 0.962886 | 0.677646 |
4 | |2 | 5 | 0.845309 | 0.932601 | 0.957245 | 0.654169 |
5 | |2 | 8 | 0.882126 | 0.950416 | 0.969121 | 0.696366 |
6 | |4 | 1 | 0.738124 | 0.864608 | 0.906176 | 0.552088 |
7 | |4 | 2 | 0.675178 | 0.818290 | 0.868468 | 0.496638 |
8 | |4 | 3 | 0.791865 | 0.908254 | 0.941805 | 0.606824 |
9 | |4 | 5 | 0.644596 | 0.795428 | 0.852138 | 0.469663 |
10 | |4 | 8 | 0.881532 | 0.951603 | 0.969418 | 0.695019 |
11 | |8 | 1 | 0.556116 | 0.721793 | 0.783848 | 0.389164 |
12 | |8 | 2 | 0.344418 | 0.514846 | 0.603622 | 0.239376 |
13 | |8 | 3 | 0.504157 | 0.670131 | 0.746734 | 0.370818 |
14 | |8 | 5 | 0.183195 | 0.307601 | 0.375891 | 0.137708 |
15 | |8 | 8 | 0.878266 | 0.950416 | 0.967340 | 0.689338 |
16 | |12 | 1 | 0.407066 | 0.581057 | 0.656473 | 0.280693 |
17 | |12 | 2 | 0.193884 | 0.343527 | 0.423694 | 0.134703 |
18 | |12 | 3 | 0.259501 | 0.404691 | 0.475950 | 0.193152 |
19 | |12 | 5 | 0.033848 | 0.076603 | 0.109561 | 0.031662 |
20 | |12 | 8 | 0.869656 | 0.945962 | 0.964667 | 0.678367 |
21 | |16 | 1 | 0.282957 | 0.449822 | 0.537411 | 0.198810 |
22 | |16 | 2 | 0.125594 | 0.243171 | 0.319477 | 0.088529 |
23 | |16 | 3 | 0.119359 | 0.210214 | 0.262470 | 0.092143 |
24 | |16 | 5 | 0.005048 | 0.014252 | 0.025534 | 0.007343 |
25 | |16 | 8 | 0.855107 | 0.937648 | 0.961698 | 0.663618 |
26 | |2 | 6 | 0.880938 | 0.949228 | 0.968527 | 0.695956 |
27 | |2 | 7 | 0.881235 | 0.948634 | 0.969121 | 0.695815 |
28 | |2 | 9 | 0.808195 | 0.921318 | 0.946853 | 0.626600 |
29 | |4 | 6 | 0.879454 | 0.950416 | 0.968824 | 0.693022 |
30 | |4 | 7 | 0.883610 | 0.949228 | 0.967637 | 0.693278 |
31 | |4 | 9 | 0.613124 | 0.760392 | 0.821259 | 0.440069 |
32 | |8 | 6 | 0.861342 | 0.943587 | 0.961698 | 0.671660 |
33 | |8 | 7 | 0.862233 | 0.944774 | 0.964371 | 0.674681 |
34 | |8 | 9 | 0.220309 | 0.375594 | 0.459026 | 0.161302 |
35 | |12 | 6 | 0.825713 | 0.926366 | 0.951603 | 0.640444 |
36 | |12 | 7 | 0.826306 | 0.927257 | 0.951010 | 0.637332 |
37 | |12 | 9 | 0.080166 | 0.174287 | 0.233373 | 0.066659 |
38 | |16 | 6 | 0.778800 | 0.898159 | 0.932601 | 0.589108 |
39 | |16 | 7 | 0.774347 | 0.894893 | 0.929335 | 0.589405 |
40 | |16 | 9 | 0.035629 | 0.084323 | 0.120249 | 0.030606 |
41 |
--------------------------------------------------------------------------------
/Output.txt:
--------------------------------------------------------------------------------
1 | # |Epsilon | Method ID | Recall@1 | Recall@5 | Recall@10 | mAP |
2 | # Method 1: Fast
3 | # Method 2: Basic
4 | # Method 3: Least-Likely
5 | # Method 5: Ours (ODFA)
6 | # Method 6: PIRE
7 | # Method 7: TMA
8 | # Method 8: UPA
9 | # Method 9: SMA
10 | |2 | 1 | 0.809382 | 0.910629 | 0.945071 | 0.626798 |
11 | |2 | 2 | 0.801960 | 0.907957 | 0.943290 | 0.618614 |
12 | |2 | 3 | 0.852138 | 0.940321 | 0.959917 | 0.679451 |
13 | |2 | 5 | 0.819774 | 0.917755 | 0.945962 | 0.639764 |
14 | |4 | 1 | 0.654691 | 0.801366 | 0.863420 | 0.474824 |
15 | |4 | 2 | 0.578979 | 0.757126 | 0.822743 | 0.415919 |
16 | |4 | 3 | 0.729810 | 0.866686 | 0.901425 | 0.556186 |
17 | |4 | 5 | 0.489014 | 0.665974 | 0.733373 | 0.356251 |
18 | |8 | 1 | 0.434976 | 0.607185 | 0.681710 | 0.292631 |
19 | |8 | 2 | 0.237233 | 0.421615 | 0.516330 | 0.164664 |
20 | |8 | 3 | 0.295724 | 0.452197 | 0.521675 | 0.215465 |
21 | |8 | 5 | 0.058195 | 0.113717 | 0.163005 | 0.040746 |
22 | |12 | 1 | 0.270487 | 0.433195 | 0.514252 | 0.183044 |
23 | |12 | 2 | 0.143112 | 0.286223 | 0.376781 | 0.098762 |
24 | |12 | 3 | 0.115499 | 0.199822 | 0.252672 | 0.084144 |
25 | |12 | 5 | 0.006532 | 0.017815 | 0.030285 | 0.006695 |
26 | |16 | 1 | 0.153504 | 0.258610 | 0.324525 | 0.105091 |
27 | |16 | 2 | 0.098872 | 0.214074 | 0.295428 | 0.070550 |
28 | |16 | 3 | 0.041568 | 0.087886 | 0.117577 | 0.034564 |
29 | |16 | 5 | 0.001485 | 0.004751 | 0.009798 | 0.002168 |
30 | |16 | 8 | 0.699822 | 0.841746 | 0.878860 | 0.539010 |
31 | |2 | 6 | 0.881235 | 0.954869 | 0.969418 | 0.709923 |
32 | |2 | 7 | 0.882126 | 0.953088 | 0.968824 | 0.708975 |
33 | |4 | 6 | 0.876188 | 0.949822 | 0.966746 | 0.702975 |
34 | |4 | 7 | 0.873812 | 0.952197 | 0.967043 | 0.703992 |
35 | |8 | 6 | 0.836698 | 0.935273 | 0.956354 | 0.666503 |
36 | |8 | 7 | 0.840855 | 0.931413 | 0.956948 | 0.666135 |
37 | |12 | 6 | 0.763955 | 0.892221 | 0.926069 | 0.590722 |
38 | |12 | 7 | 0.769893 | 0.884204 | 0.919240 | 0.589691 |
39 | |16 | 6 | 0.653800 | 0.803147 | 0.857482 | 0.486963 |
40 | |16 | 7 | 0.648159 | 0.801366 | 0.850950 | 0.479632 |
41 | |2 | 9 | 0.758017 | 0.883610 | 0.921021 | 0.582969 |
42 | |4 | 9 | 0.455463 | 0.648456 | 0.719715 | 0.318517 |
43 | |8 | 9 | 0.126485 | 0.251485 | 0.326010 | 0.086431 |
44 | |12 | 9 | 0.056413 | 0.128266 | 0.180819 | 0.039148 |
45 | |16 | 9 | 0.028207 | 0.070962 | 0.098575 | 0.020516 |
46 |
--------------------------------------------------------------------------------
/market.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(6):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [92.70, 92.70, 92.70, 92.70, 92.70]
22 | top10[0] =[97.98, 97.98, 97.98, 97.98, 97.98]
23 | mAP[0] = [77.14, 77.14, 77.14, 77.14, 77.14]
24 |
25 | with open("./Output.txt", "r") as f:
26 | for line in f:
27 | if line[0] == '#': continue # skip the header comments in Output.txt
28 | score = line.split('|')
29 | method_id, top1_acc, top10_acc, mAP_acc = int(score[2]), float(score[3]), float(score[5]), float(score[6])
30 | top1[method_id].append(top1_acc*100)
31 | top10[method_id].append(top10_acc*100)
32 | mAP[method_id].append(mAP_acc*100)
33 |
34 | fig = plt.figure(figsize=(15,4), dpi=180)
35 | ax0 = fig.add_subplot(131, ylabel="Rank-1(%)",xlabel='epsilon')
36 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
37 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
38 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
39 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
40 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
41 | ax0.grid(True)
42 | ax0.legend()
43 | plt.ylim(0.0,100.0)
44 | plt.xlim(1,17)
45 |
46 | ax0 = fig.add_subplot(132, ylabel="Rank-10(%)",xlabel='epsilon')
47 | ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
48 | ax0.plot(x_epoch, top10[1], 'b^-', label='Fast')
49 | ax0.plot(x_epoch, top10[2], 'rs-', label='Basic')
50 | ax0.plot(x_epoch, top10[3], 'gv-', label='Least-likely')
51 | ax0.plot(x_epoch, top10[5], 'yo-', label='Ours')
52 | ax0.grid(True)
53 | ax0.legend()
54 | plt.ylim(0,100)
55 | plt.xlim(1,17)
56 |
57 | ax0 = fig.add_subplot(133, ylabel="mAP(%)", xlabel='epsilon')
58 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
59 | ax0.plot(x_epoch, mAP[1], 'b^-', label='Fast')
60 | ax0.plot(x_epoch, mAP[2], 'rs-', label='Basic')
61 | ax0.plot(x_epoch, mAP[3], 'gv-', label='Least-likely')
62 | ax0.plot(x_epoch, mAP[5], 'yo-', label='Ours')
63 | ax0.grid(True)
64 | ax0.legend()
65 | plt.ylim(0,100)
66 | plt.xlim(1,17)
67 |
68 | fig.savefig('market.jpg')
69 |
--------------------------------------------------------------------------------
/cub/random_erasing_plus.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from torchvision.transforms import *
4 |
5 | #from PIL import Image
6 | import random
7 | import math
8 | #import numpy as np
9 | #import torch
10 |
11 | def erasing(img, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
12 | for attempt in range(100):
13 | area = img.size()[1] * img.size()[2]
14 |
15 | target_area = random.uniform(sl, sh) * area
16 | aspect_ratio = random.uniform(r1, 1/r1)
17 |
18 | h = int(round(math.sqrt(target_area * aspect_ratio)))
19 | w = int(round(math.sqrt(target_area / aspect_ratio)))
20 |
21 | if w < img.size()[2] and h < img.size()[1]:
22 | x1 = random.randint(0, img.size()[1] - h)
23 | y1 = random.randint(0, img.size()[2] - w)
24 | if img.size()[0] == 3:
25 | img[0, x1:x1+h, y1:y1+w] = mean[0]
26 | img[1, x1:x1+h, y1:y1+w] = mean[1]
27 | img[2, x1:x1+h, y1:y1+w] = mean[2]
28 | else:
29 | img[0, x1:x1+h, y1:y1+w] = mean[0]
30 | return img
31 | return img
32 |
33 | class RandomErasingPlus(object):
34 | """ Randomly selects a rectangle region in an image and erases its pixels.
35 | 'Random Erasing Data Augmentation' by Zhong et al.
36 | See https://arxiv.org/pdf/1708.04896.pdf
37 | Args:
38 | probability: The probability that the Random Erasing operation will be performed.
39 | sl: Minimum proportion of erased area against input image.
40 | sh: Maximum proportion of erased area against input image.
41 | r1: Minimum aspect ratio of erased area.
42 | mean: Erasing value.
43 | """
44 |
45 | def __init__(self, probability = 0.5, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
46 | self.probability = probability
47 | self.mean = mean
48 | self.sl = sl
49 | self.sh = sh
50 | self.r1 = r1
51 |
52 | def __call__(self, img):
53 |
54 | if random.uniform(0, 1) > self.probability:
55 | return img
56 |
57 | for i in range(5):
58 | img = erasing(img, self.sl, self.sh, self.r1, self.mean) # pass the configured parameters instead of the defaults
59 | if random.uniform(0, 1) > self.probability:
60 | return img
61 |
62 | return img
63 |
64 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/random_erasing_plus.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from torchvision.transforms import *
4 |
5 | #from PIL import Image
6 | import random
7 | import math
8 | #import numpy as np
9 | #import torch
10 |
11 | def erasing(img, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
12 | for attempt in range(100):
13 | area = img.size()[1] * img.size()[2]
14 |
15 | target_area = random.uniform(sl, sh) * area
16 | aspect_ratio = random.uniform(r1, 1/r1)
17 |
18 | h = int(round(math.sqrt(target_area * aspect_ratio)))
19 | w = int(round(math.sqrt(target_area / aspect_ratio)))
20 |
21 | if w < img.size()[2] and h < img.size()[1]:
22 | x1 = random.randint(0, img.size()[1] - h)
23 | y1 = random.randint(0, img.size()[2] - w)
24 | if img.size()[0] == 3:
25 | img[0, x1:x1+h, y1:y1+w] = mean[0]
26 | img[1, x1:x1+h, y1:y1+w] = mean[1]
27 | img[2, x1:x1+h, y1:y1+w] = mean[2]
28 | else:
29 | img[0, x1:x1+h, y1:y1+w] = mean[0]
30 | return img
31 | return img
32 |
33 | class RandomErasingPlus(object):
34 | """ Randomly selects a rectangle region in an image and erases its pixels.
35 | 'Random Erasing Data Augmentation' by Zhong et al.
36 | See https://arxiv.org/pdf/1708.04896.pdf
37 | Args:
38 | probability: The probability that the Random Erasing operation will be performed.
39 | sl: Minimum proportion of erased area against input image.
40 | sh: Maximum proportion of erased area against input image.
41 | r1: Minimum aspect ratio of erased area.
42 | mean: Erasing value.
43 | """
44 |
45 | def __init__(self, probability = 0.5, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
46 | self.probability = probability
47 | self.mean = mean
48 | self.sl = sl
49 | self.sh = sh
50 | self.r1 = r1
51 |
52 | def __call__(self, img):
53 |
54 | if random.uniform(0, 1) > self.probability:
55 | return img
56 |
57 | for i in range(5):
58 | img = erasing(img, self.sl, self.sh, self.r1, self.mean) # pass the configured parameters instead of the defaults
59 | if random.uniform(0, 1) > self.probability:
60 | return img
61 |
62 | return img
63 |
64 |
--------------------------------------------------------------------------------
/visualize/vis_ranking.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from shutil import copyfile
18 |
19 | opt=2
20 |
21 | if opt==1:
22 | data_dir = '/home/zzd/Market/pytorch'
23 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x)) for x in ['gallery','query']}
24 |
25 | query_path = image_datasets['query'].imgs
26 | gallery_path = image_datasets['gallery'].imgs
27 |
28 | save_path = './original'
29 | if not os.path.isdir(save_path):
30 | os.mkdir(save_path)
31 |
32 | with open("../original_list.txt","r") as rank:
33 | for i in range(3368):
34 | result = rank.readline()
35 | rank15 = result.split(',')
36 | query = query_path[i]
37 | os.mkdir(save_path + '/%d'%i)
38 | copyfile(query[0], save_path + '/%d/'%i + 'query.jpg')
39 | for j in range(15):
40 | img_name = gallery_path[int(rank15[j])]
41 | copyfile(img_name[0], save_path + '/%d/'%i + '%d.jpg'%j)
42 |
43 |
44 | #############################
45 | # adv
46 | if opt==2:
47 | query_path = datasets.ImageFolder('../attack_query/pytorch/query').imgs
48 | gallery_path = datasets.ImageFolder('/home/zzd/Market/pytorch/gallery').imgs
49 |
50 | save_path = './adv'
51 | if not os.path.isdir(save_path):
52 | os.mkdir(save_path)
53 |
54 | with open("../adv_list.txt","r") as rank:
55 | for i in range(3368):
56 | result = rank.readline()
57 | rank15 = result.split(',')
58 | query = query_path[i]
59 | os.mkdir(save_path + '/%d'%i)
60 | copyfile(query[0], save_path + '/%d/'%i + 'query.jpg')
61 | for j in range(15):
62 | img_name = gallery_path[int(rank15[j])]
63 | img_name = img_name[0]
64 | #print(img_name[38:])
65 | copyfile(img_name, save_path + '/%d/'%i + '%d.jpg'%j)
66 | #copyfile(img_name, save_path + '/%d/'%i + img_name[38:])
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/prepare.py:
--------------------------------------------------------------------------------
1 | import os
2 | from shutil import copyfile
3 | from PIL import Image
4 |
5 | # You only need to change this line to your dataset download path
6 |
7 | download_path = '../Food'
8 | dst_path = '../Food-cropped/'
9 | train_path = dst_path+'/pytorch/train_all/'
10 | gallery_path = dst_path+'/pytorch/gallery/'
11 | query_path = dst_path + '/pytorch/query'
12 | if not os.path.isdir(download_path):
13 | print('please change the download_path')
14 |
15 | if not os.path.isdir(dst_path):
16 | os.mkdir(dst_path)
17 |
18 | for root, dirs, files in os.walk(download_path, topdown=True):
19 | for name in files:
20 | if name == 'bb_info.txt':
21 | with open(root+'/'+name) as fp:
22 | for i, line in enumerate(fp):
23 | if i==0:
24 | continue
25 | img_name, left, upper, right, lower = line.split(' ')
26 | left, upper, right, lower = int(left), int(upper), int(right), int(lower)
27 | if right-left <10 or lower-upper<10:
28 | continue
29 | im = Image.open(root + '/' + img_name + '.jpg')
30 | im = im.crop(box= (left-1, upper-1, right-1, lower-1) )
31 | root_dst = root.replace('Food', 'Food-cropped')
32 | if not os.path.isdir(root_dst):
33 | os.mkdir(root_dst)
34 | im.save(root_dst + '/' + img_name + '.jpg')
35 |
36 | if not os.path.isdir(dst_path+'/pytorch'):
37 | os.mkdir(dst_path+'/pytorch')
38 | os.mkdir(dst_path+'/pytorch/train_all')
39 | os.mkdir(dst_path+'/pytorch/gallery')
40 | os.mkdir(query_path)
41 |
42 |
43 | # Split
44 | for i in range(224):
45 | print(dst_path+str(i+1))
46 | os.system('mv %s %s'% ( dst_path+str(i+1), train_path) )
47 |
48 | for i in range(32):
49 | os.system('mv %s %s'%( dst_path+str(i+225), gallery_path) )
50 |
51 | # Query
52 | for root, dirs, files in os.walk(gallery_path, topdown=True):
53 | count = 0
54 | for name in files:
55 | root_dst = root.replace('gallery', 'query')
56 | if not os.path.isdir(root_dst):
57 | os.mkdir(root_dst)
58 | if name[-3:] == 'jpg':
59 | os.system('mv %s %s' %(root+'/'+name, root_dst+'/'+name))
60 | count +=1
61 | if count == 16:
62 | break
63 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # :see_no_evil: U-Turn :hear_no_evil:
2 | Attack your retrieval model via the query! It is not as robust as you expected!
3 |
4 | [](https://opensource.org/licenses/MIT)
5 |
6 | One simple piece of code to fool your retrieval model by **Modifying the Query ONLY** (based on [pytorch](https://pytorch.org)), accepted by IJCV.
7 | Pre-print version is at https://arxiv.org/abs/1809.02681.
8 |
9 | The main idea underpinning our method is simple yet effective: make the query feature conduct a U-turn :arrow_right_hook:.
10 |
11 | 
12 |
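In spirit, the attack extracts the query feature `f`, sets the target to the opposite-direction feature `-f`, and then iteratively perturbs the query pixels with signed gradients until its feature flips direction (this is what `caffe/test_res.py` does with `fc7_adv = -fc7`). Below is a minimal PyTorch sketch of this idea; `u_turn_attack` and its defaults are illustrative (assuming `model` maps an image batch to an (N, D) feature matrix), not the exact API of this repo.

```python
import torch
import torch.nn.functional as F

def u_turn_attack(model, query, epsilon=16/255., num_iter=20):
    # A minimal sketch of the U-turn idea; not the exact repo implementation.
    model.eval()
    with torch.no_grad():
        target = -F.normalize(model(query), dim=1)  # opposite-direction feature -f
    adv = query.clone()
    for _ in range(num_iter):
        adv = adv.detach().requires_grad_(True)
        f_adv = F.normalize(model(adv), dim=1)
        loss = -(f_adv * target).sum(dim=1).mean()  # maximize cosine similarity to -f
        loss.backward()
        with torch.no_grad():
            adv = adv - (epsilon / num_iter) * adv.grad.sign()         # signed-gradient step
            adv = query + torch.clamp(adv - query, -epsilon, epsilon)  # stay in the epsilon-ball
            adv = torch.clamp(adv, 0, 1)                               # keep a valid image
    return adv.detach()
```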
13 | ## Table of contents
14 | * [Re-ID Attacking](#re-id-attacking)
15 | * [Image Retrieval Attacking](#image-retrieval-attacking)
16 | * [Cifar Attacking](#cifar-attacking)
17 |
18 |
19 | ## Re-ID Attacking
20 | ### 1.1 Preparing your reID models.
21 | Please check the step-by-step tutorial in https://github.com/layumi/Person_reID_baseline_pytorch
22 |
23 | ### 1.2 Attacking Market-1501
24 | Try four attack methods with one line (a sketch of the underlying sweep follows the command). Please change the path before running it.
25 | ```bash
26 | python experiment.py
27 | ```
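For reference, the sweep behind `Output.txt` runs each attack method over epsilon in {2, 4, 8, 12, 16} and appends one result row per setting, which is the format that `draw_result.py` parses. A hypothetical sketch of that loop, where `attack_and_eval` is an illustrative stand-in rather than a function from this repo:

```python
# Hypothetical sketch of the sweep that fills Output.txt; attack_and_eval is illustrative.
for epsilon in [2, 4, 8, 12, 16]:
    for method_id in [1, 2, 3, 5]:  # Fast, Basic, Least-Likely, Ours (ODFA)
        rank1, rank5, rank10, mAP = attack_and_eval(epsilon, method_id)
        with open('Output.txt', 'a') as f:
            f.write('|%d | %d | %f | %f | %f | %f |\n'
                    % (epsilon, method_id, rank1, rank5, rank10, mAP))
```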
28 |
29 | ## Image Retrieval Attacking
30 | ### 2.1 Download the pre-trained model on Oxford and Paris
31 | We attach the training code, which is based on the excellent code in TPAMI 2018.
32 | https://github.com/layumi/Oxford-Paris-Attack
33 |
34 | ### 2.2 Attacking the Oxford and Paris Dataset
35 | We set out to fool the TPAMI model. Yes. We succeeded.
36 | https://github.com/layumi/Oxford-Paris-Attack
37 |
38 | ### 2.3 Attacking Food-256 and CUB-200-2011
39 | Please check subfolders.
40 |
41 | Food: https://github.com/layumi/U_turn/tree/master/Food
42 |
43 | CUB: https://github.com/layumi/U_turn/tree/master/cub
44 |
45 | ## Cifar Attacking
46 | ### 3.1 Cifar (ResNet-Wide)
47 | We attach the training code, which is borrowed from Wide ResNet (with Random Erasing).
48 |
49 | ### 3.2 Attacking Cifar
50 | https://github.com/layumi/A_reID/tree/master/cifar
51 |
52 |
53 | 
54 |
55 |
56 | ### Citation
57 | ```
58 | @article{zheng2022query,
59 | title={U-turn: Crafting Adversarial Queries with Opposite-direction Features},
60 | author={Zheng, Zhedong and Zheng, Liang and Yang, Yi and Wu, Fei},
61 | journal={IJCV},
62 | year={2022}
63 | }
64 | ```
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/draw_PCB.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(11):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [92.70] *5
22 | top10[0] = [98.43] *5
23 | mAP[0] = [77.78] *5
24 |
25 | with open("./Output_PCB.txt", "r") as f:
26 | for line in f:
27 | if line[0] == '#': continue # skip header comments, if any
28 | score = line.split('|')
29 | method_id, top1_acc, top10_acc, mAP_acc = int(score[2]), float(score[3]), float(score[5]), float(score[6])
30 | top1[method_id].append(top1_acc*100)
31 | top10[method_id].append(top10_acc*100)
32 | mAP[method_id].append(mAP_acc*100)
33 |
34 | fig = plt.figure(figsize=(10,4), dpi=90)
35 | ax0 = fig.add_subplot(121, ylabel="Recall@1(%)",xlabel='epsilon')
36 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
37 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
38 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
39 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
40 | ax0.plot(x_epoch, top1[6], 'c<-', label='PIRE')
41 | ax0.plot(x_epoch, top1[7], 'm>-', label='TMA')
42 | ax0.plot(x_epoch, top1[9], 'P-', label='SMA')
43 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
44 | ax0.grid(True)
45 | ax0.legend()
46 | plt.ylim(0.0,100.0)
47 | plt.xlim(1,17)
48 |
49 | #ax0 = fig.add_subplot(132, ylabel="Rank-10(%)",xlabel='epsilon')
50 | #ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
51 | #ax0.plot(x_epoch, top10[1], 'b^-', label='Fast')
52 | #ax0.plot(x_epoch, top10[2], 'rs-', label='Basic')
53 | #ax0.plot(x_epoch, top10[3], 'gv-', label='Least-likely')
54 | #ax0.plot(x_epoch, top10[6], 'c<-', label='PIRE')
55 | #ax0.plot(x_epoch, top10[7], 'm>-', label='TMA')
56 | #ax0.plot(x_epoch, top10[9], 'P-', label='SMA')
57 | #ax0.plot(x_epoch, top10[5], 'yo-', label='Ours')
58 | #ax0.grid(True)
59 | #ax0.legend()
60 | #plt.ylim(0,100)
61 | #plt.xlim(1,17)
62 |
63 | ax0 = fig.add_subplot(122, ylabel="mAP(%)", xlabel='epsilon')
64 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
65 | ax0.plot(x_epoch, mAP[1], 'b^-', label='Fast')
66 | ax0.plot(x_epoch, mAP[2], 'rs-', label='Basic')
67 | ax0.plot(x_epoch, mAP[3], 'gv-', label='Least-likely')
68 | ax0.plot(x_epoch, mAP[6], 'c<-', label='PIRE')
69 | ax0.plot(x_epoch, mAP[7], 'm>-', label='TMA')
70 | ax0.plot(x_epoch, mAP[9], 'P-', label='SMA')
71 | ax0.plot(x_epoch, mAP[5], 'yo-', label='Ours')
72 | ax0.grid(True)
73 | ax0.legend()
74 | plt.ylim(0,100)
75 | plt.xlim(1,17)
76 |
77 | fig.savefig('PCB.jpg')
78 |
--------------------------------------------------------------------------------
/cub/draw_result.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(10):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [55.47] *5
22 | top10[0] = [87.49] *5
23 | mAP[0] = [28.18] *5
24 |
25 | with open("./Output.txt", "r") as f:
26 | for line in f:
27 | if line[0] == '#': continue
28 | score = line.split('|')
29 | method_id = int(score[2])
30 | top1_acc, top10_acc, mAP_acc = float(score[3]), float(score[5]), float(score[6])
31 | top1[method_id].append(top1_acc)
32 | top10[method_id].append(top10_acc)
33 | mAP[method_id].append(mAP_acc)
34 |
35 | fig = plt.figure(figsize=(15,4), dpi=90)
36 | ax0 = fig.add_subplot(131, ylabel="Recall@1(%)",xlabel='epsilon')
37 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
38 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
39 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
40 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
41 | ax0.plot(x_epoch, top1[6], 'c<-', label='PIRE')
42 | ax0.plot(x_epoch, top1[7], 'm>-', label='TMA')
43 | ax0.plot(x_epoch, top1[9], 'P-', label='SMA')
44 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
45 | ax0.grid(True)
46 | ax0.legend()
47 | plt.ylim(0.0,100.0)
48 | plt.xlim(1,17)
49 |
50 | ax0 = fig.add_subplot(132, ylabel="Recall@10(%)",xlabel='epsilon')
51 | ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
52 | ax0.plot(x_epoch, top10[1], 'b^-', label='Fast')
53 | ax0.plot(x_epoch, top10[2], 'rs-', label='Basic')
54 | ax0.plot(x_epoch, top10[3], 'gv-', label='Least-likely')
55 | ax0.plot(x_epoch, top10[6], 'c<-', label='PIRE')
56 | ax0.plot(x_epoch, top10[7], 'm>-', label='TMA')
57 | ax0.plot(x_epoch, top10[9], 'P-', label='SMA')
58 | ax0.plot(x_epoch, top10[5], 'yo-', label='Ours')
59 | ax0.grid(True)
60 | ax0.legend()
61 | plt.ylim(0,100)
62 | plt.xlim(1,17)
63 |
64 | ax0 = fig.add_subplot(133, ylabel="mAP(%)", xlabel='epsilon')
65 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
66 | ax0.plot(x_epoch, mAP[1], 'b^-', label='Fast')
67 | ax0.plot(x_epoch, mAP[2], 'rs-', label='Basic')
68 | ax0.plot(x_epoch, mAP[3], 'gv-', label='Least-likely')
69 | ax0.plot(x_epoch, mAP[6], 'c<-', label='PIRE')
70 | ax0.plot(x_epoch, mAP[7], 'm>-', label='TMA')
71 | ax0.plot(x_epoch, mAP[9], 'P-', label='SMA')
72 | ax0.plot(x_epoch, mAP[5], 'yo-', label='Ours')
73 | ax0.grid(True)
74 | ax0.legend()
75 | plt.ylim(0,100)
76 | plt.xlim(1,17)
77 |
78 | fig.savefig('CUB.jpg')
79 |
--------------------------------------------------------------------------------
/draw_result.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(10):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [88.54] *5
22 | top10[0] = [96.85] *5
23 | mAP[0] = [71.08] *5
24 |
25 | with open("./Output.txt", "r") as f:
26 | for line in f:
27 | if line[0] =='#': continue
28 | score = line.split('|')
29 | method_id = int(score[2])
30 | top1_acc, top10_acc, mAP_acc = float(score[3]), float(score[5]), float(score[6])
31 | top1[method_id].append(top1_acc*100)
32 | top10[method_id].append(top10_acc*100)
33 | mAP[method_id].append(mAP_acc*100)
34 |
35 | fig = plt.figure(figsize=(15,4), dpi=90)
36 | ax0 = fig.add_subplot(131, ylabel="Recall@1(%)",xlabel='epsilon')
37 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
38 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
39 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
40 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
41 | ax0.plot(x_epoch, top1[6], 'c<-', label='PIRE')
42 | ax0.plot(x_epoch, top1[7], 'm>-', label='TMA')
43 | ax0.plot(x_epoch, top1[9], 'P-', label='SMA')
44 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
45 | ax0.grid(True)
46 | ax0.legend()
47 | plt.ylim(0.0,100.0)
48 | plt.xlim(1,17)
49 |
50 | ax0 = fig.add_subplot(132, ylabel="Recall@10(%)",xlabel='epsilon')
51 | ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
52 | ax0.plot(x_epoch, top10[1], 'b^-', label='Fast')
53 | ax0.plot(x_epoch, top10[2], 'rs-', label='Basic')
54 | ax0.plot(x_epoch, top10[3], 'gv-', label='Least-likely')
55 | ax0.plot(x_epoch, top10[6], 'c<-', label='PIRE')
56 | ax0.plot(x_epoch, top10[7], 'm>-', label='TMA')
57 | ax0.plot(x_epoch, top10[9], 'P-', label='SMA')
58 | ax0.plot(x_epoch, top10[5], 'yo-', label='Ours')
59 | ax0.grid(True)
60 | ax0.legend()
61 | plt.ylim(0,100)
62 | plt.xlim(1,17)
63 |
64 | ax0 = fig.add_subplot(133, ylabel="mAP(%)", xlabel='epsilon')
65 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
66 | ax0.plot(x_epoch, mAP[1], 'b^-', label='Fast')
67 | ax0.plot(x_epoch, mAP[2], 'rs-', label='Basic')
68 | ax0.plot(x_epoch, mAP[3], 'gv-', label='Least-likely')
69 | ax0.plot(x_epoch, mAP[6], 'c<-', label='PIRE')
70 | ax0.plot(x_epoch, mAP[7], 'm>-', label='TMA')
71 | ax0.plot(x_epoch, mAP[9], 'P-', label='SMA')
72 | ax0.plot(x_epoch, mAP[5], 'yo-', label='Ours')
73 | ax0.grid(True)
74 | ax0.legend()
75 | plt.ylim(0,100)
76 | plt.xlim(1,17)
77 |
78 | fig.savefig('market.jpg')
79 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/draw_result.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(10):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [66.02] * 5
22 | top10[0] = [95.12] * 5
23 | mAP[0] = [32.95] * 5
24 |
25 | with open("./Output.txt", "r") as f:
26 | for line in f:
27 | if line[0] == '#': continue
28 | score = line.split('|')
29 | method_id = int(score[2])
30 | top1_acc, top10_acc, mAP_acc = float(score[3]), float(score[5]), float(score[6])
31 | top1[method_id].append(top1_acc)
32 | top10[method_id].append(top10_acc)
33 | mAP[method_id].append(mAP_acc)
34 | fig = plt.figure(figsize=(15,4), dpi=90)
35 | ax0 = fig.add_subplot(131, ylabel="Recall@1(%)",xlabel='epsilon')
36 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
37 | ax0.plot(x_epoch, top1[1], 'b^-', label='Fast')
38 | ax0.plot(x_epoch, top1[2], 'rs-', label='Basic')
39 | ax0.plot(x_epoch, top1[3], 'gv-', label='Least-likely')
40 | ax0.plot(x_epoch, top1[6], 'c<-', label='PIRE')
41 | ax0.plot(x_epoch, top1[7], 'm>-', label='TMA')
42 | ax0.plot(x_epoch, top1[9], 'P-', label='SMA')
43 | ax0.plot(x_epoch, top1[5], 'yo-', label='Ours')
44 | ax0.grid(True)
45 | ax0.legend()
46 | plt.ylim(0.0,100.0)
47 | plt.xlim(1,17)
48 |
49 | ax0 = fig.add_subplot(132, ylabel="Recall@10(%)",xlabel='epsilon')
50 | ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
51 | ax0.plot(x_epoch, top10[1], 'b^-', label='Fast')
52 | ax0.plot(x_epoch, top10[2], 'rs-', label='Basic')
53 | ax0.plot(x_epoch, top10[3], 'gv-', label='Least-likely')
54 | ax0.plot(x_epoch, top10[6], 'c<-', label='PIRE')
55 | ax0.plot(x_epoch, top10[7], 'm>-', label='TMA')
56 | ax0.plot(x_epoch, top10[9], 'P-', label='SMA')
57 | ax0.plot(x_epoch, top10[5], 'yo-', label='Ours')
58 | ax0.grid(True)
59 | ax0.legend()
60 | plt.ylim(0,100)
61 | plt.xlim(1,17)
62 |
63 | ax0 = fig.add_subplot(133, ylabel="mAP(%)", xlabel='epsilon')
64 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
65 | ax0.plot(x_epoch, mAP[1], 'b^-', label='Fast')
66 | ax0.plot(x_epoch, mAP[2], 'rs-', label='Basic')
67 | ax0.plot(x_epoch, mAP[3], 'gv-', label='Least-likely')
68 | ax0.plot(x_epoch, mAP[6], 'c<-', label='PIRE')
69 | ax0.plot(x_epoch, mAP[7], 'm>-', label='TMA')
70 | ax0.plot(x_epoch, mAP[9], 'P-', label='SMA')
71 | ax0.plot(x_epoch, mAP[5], 'yo-', label='Ours')
72 | ax0.grid(True)
73 | ax0.legend()
74 | plt.ylim(0,100)
75 | plt.xlim(1,17)
76 |
77 | fig.savefig('Food.jpg')
78 |
--------------------------------------------------------------------------------
/show-tsne-example.py:
--------------------------------------------------------------------------------
1 | from sklearn.datasets import load_digits
2 | from MulticoreTSNE import MulticoreTSNE as TSNE
3 | import matplotlib
4 | matplotlib.use('agg')
5 | from matplotlib import pyplot as plt
6 | import scipy
7 | import torch
8 | import numpy as np
9 | #digits = load_digits()
10 |
11 | query_path = '.'
12 | result_n = scipy.io.loadmat(query_path+'/query_result_normal.mat')
13 | query_n = torch.FloatTensor(result_n['query_f'])
14 | label_n = result_n['query_label'][0]
15 |
16 | result_q = scipy.io.loadmat(query_path+'/query_result.mat')
17 | query_q = torch.FloatTensor(result_q['query_f'])
18 | label_q = result_q['query_label'][0]
19 |
20 | data = torch.cat( (query_n, query_q), 0)
21 |
22 | flag = -1
23 | label_t1 = torch.zeros(label_n.shape)
24 | for index, xx in enumerate(label_n):
25 | if index == 0:
26 | flag = xx
27 | continue
28 | if xx !=flag:
29 | flag = xx
30 | label_t1[index] = label_t1[index-1] +1
31 | else:
32 | label_t1[index] = label_t1[index-1]
33 |
34 | flag = -1
35 | label_t2 = torch.zeros(label_q.shape)
36 | for index, xx in enumerate(label_q):
37 | if index == 0:
38 | flag = xx
39 | continue
40 | if xx !=flag:
41 | flag = xx
42 | label_t2[index] = label_t2[index-1] +1
43 | else:
44 | label_t2[index] = label_t2[index-1]
45 |
46 | label = np.concatenate( (label_t1, label_t2), 0)
47 | print(label)
48 | #label = torch.cat( (torch.zeros(label_n.shape), torch.ones(label_q.shape)), 0)
49 |
50 | print(data.shape, label.shape)
51 | embeddings = TSNE(n_jobs=16).fit_transform(data)
52 |
53 | fig = plt.figure(dpi=1200)
54 |
55 |
56 | top = 10
57 | vis_x = [] #embeddings[0:first20, 0]
58 | vis_y = [] #embeddings[0:first20, 1]
59 | label_t = []
60 | for i in range(500):
61 | if label_t1[i] == top:
62 | break
63 | if i==0 or label_t1[i] != label_t1[i-1]:
64 | vis_x.append(embeddings[i, 0])
65 | vis_y.append(embeddings[i, 1])
66 | label_t.append(label_t1[i])
67 | print(label_t)
68 | plt.scatter(vis_x, vis_y, c=label_t, cmap=plt.cm.get_cmap("jet", top), marker='.')
69 |
70 | start = len(label_t1)
71 | vis_x = [] #embeddings[0:first20, 0]
72 | vis_y = [] #embeddings[0:first20, 1]
73 | label_t = []
74 | for i in range(500):
75 | if label_t2[i] == top:
76 | break
77 | if i==0 or label_t2[i] != label_t2[i-1]:
78 | vis_x.append(embeddings[start+i, 0])
79 | vis_y.append(embeddings[start+i, 1])
80 | label_t.append(label_t2[i])
81 | print(label_t)
82 | plt.scatter(vis_x, vis_y, c=label_t, cmap=plt.cm.get_cmap("jet", top), marker='*')
83 | plt.grid(True)
84 | plt.colorbar(ticks=range(top))
85 | plt.clim(-0.5, top-0.5)
86 | plt.show()
87 | fig.savefig( 'tsne.jpg')
88 |
--------------------------------------------------------------------------------
/draw_adv.py:
--------------------------------------------------------------------------------
1 | import os
2 | import matplotlib
3 | matplotlib.use('agg')
4 | import matplotlib.pyplot as plt
5 | from PIL import Image
6 |
7 | ######################################################################
8 | # Draw Curve
9 | #---------------------------
10 | x_epoch = [2,4,8,12,16]
11 |
12 | top1 = {}
13 | top10 = {}
14 | mAP = {}
15 |
16 | for i in range(10):
17 | top1[i] = []
18 | top10[i] = []
19 | mAP[i] = []
20 |
21 | top1[0] = [88.24] *5
22 | top10[0] = [96.88] *5
23 | mAP[0] = [69.70] *5
24 |
25 | with open("./Output_adv.txt", "r") as f:
26 | for line in f:
27 | if line[0] =='#': continue
28 | score = line.split('|')
29 | method_id = int(score[2])
30 | top1_acc, top10_acc, mAP_acc = float(score[3]), float(score[5]), float(score[6])
31 | top1[method_id].append(top1_acc*100)
32 | top10[method_id].append(top10_acc*100)
33 | mAP[method_id].append(mAP_acc*100)
34 |
35 | fig = plt.figure(figsize=(15,4), dpi=90)
36 | ax0 = fig.add_subplot(131, ylabel="Recall@1(%)",xlabel='epsilon')
37 | ax0.plot(x_epoch, top1[0], 'k-', label='Clean')
38 | ax0.plot(x_epoch, top1[1], 'b^--', label='Fast')
39 | ax0.plot(x_epoch, top1[2], 'rs--', label='Basic')
40 | ax0.plot(x_epoch, top1[3], 'gv--', label='Least-likely')
41 | ax0.plot(x_epoch, top1[6], 'c<--', label='PIRE')
42 | ax0.plot(x_epoch, top1[7], 'm>--', label='TMA')
43 | ax0.plot(x_epoch, top1[9], 'P--', label='SMA')
44 | ax0.plot(x_epoch, top1[5], 'yo--', label='Ours')
45 | ax0.grid(True)
46 | ax0.legend()
47 | plt.ylim(0.0,100.0)
48 | plt.xlim(1,17)
49 |
50 | ax0 = fig.add_subplot(132, ylabel="Recall@10(%)",xlabel='epsilon')
51 | ax0.plot(x_epoch, top10[0], 'k-', label='Clean')
52 | ax0.plot(x_epoch, top10[1], 'b^--', label='Fast')
53 | ax0.plot(x_epoch, top10[2], 'rs--', label='Basic')
54 | ax0.plot(x_epoch, top10[3], 'gv--', label='Least-likely')
55 | ax0.plot(x_epoch, top10[6], 'c<--', label='PIRE')
56 | ax0.plot(x_epoch, top10[7], 'm>--', label='TMA')
57 | ax0.plot(x_epoch, top10[9], 'P--', label='SMA')
58 | ax0.plot(x_epoch, top10[5], 'yo--', label='Ours')
59 | ax0.grid(True)
60 | ax0.legend()
61 | plt.ylim(0,100)
62 | plt.xlim(1,17)
63 |
64 | ax0 = fig.add_subplot(133, ylabel="mAP(%)", xlabel='epsilon')
65 | ax0.plot(x_epoch, mAP[0], 'k-', label='Clean')
66 | ax0.plot(x_epoch, mAP[1], 'b^--', label='Fast')
67 | ax0.plot(x_epoch, mAP[2], 'rs--', label='Basic')
68 | ax0.plot(x_epoch, mAP[3], 'gv--', label='Least-likely')
69 | ax0.plot(x_epoch, mAP[6], 'c<--', label='PIRE')
70 | ax0.plot(x_epoch, mAP[7], 'm>--', label='TMA')
71 | ax0.plot(x_epoch, mAP[9], 'P--', label='SMA')
72 | ax0.plot(x_epoch, mAP[5], 'yo--', label='Ours')
73 | ax0.grid(True)
74 | ax0.legend()
75 | plt.ylim(0,100)
76 | plt.xlim(1,17)
77 |
78 | fig.savefig('adv.jpg')
79 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/evaluate_gpu.py:
--------------------------------------------------------------------------------
1 | import scipy.io
2 | import torch
3 | import numpy as np
4 | #import time
5 | import os
6 |
7 | #######################################################################
8 | # Evaluate
9 | def evaluate(qf,ql,gf,gl):
10 | query = qf.view(-1,1)
11 | # print(query.shape)
12 | score = torch.mm(gf,query)
13 | score = score.squeeze(1).cpu()
14 | score = score.numpy()
15 | # predict index
16 | index = np.argsort(score) #from small to large
17 | index = index[::-1]
18 | # index = index[0:2000]
19 | # good index
20 | query_index = np.argwhere(gl==ql)
21 |
22 | good_index = query_index
23 | junk_index = np.argwhere(gl==-1)
24 | CMC_tmp = compute_mAP(index, good_index, junk_index)
25 |
26 | return CMC_tmp
27 |
28 |
29 | def compute_mAP(index, good_index, junk_index):
30 | ap = 0
31 | cmc = torch.IntTensor(len(index)).zero_()
32 | if good_index.size==0: # if empty
33 | cmc[0] = -1
34 | return ap,cmc
35 |
36 | # remove junk_index
37 | mask = np.in1d(index, junk_index, invert=True)
38 | index = index[mask]
39 |
40 | # find good_index index
41 | ngood = len(good_index)
42 | mask = np.in1d(index, good_index)
43 | rows_good = np.argwhere(mask==True)
44 | rows_good = rows_good.flatten()
45 |
46 | cmc[rows_good[0]:] = 1
47 | for i in range(ngood):
48 | d_recall = 1.0/ngood
49 | precision = (i+1)*1.0/(rows_good[i]+1)
50 | if rows_good[i]!=0:
51 | old_precision = i*1.0/rows_good[i]
52 | else:
53 | old_precision=1.0
54 | ap = ap + d_recall*(old_precision + precision)/2
55 |
56 | return ap, cmc
57 |
58 | ######################################################################
59 | def main( query_path):
60 |
61 | result = scipy.io.loadmat(query_path+'/query.mat')
62 | query_feature = torch.FloatTensor(result['query_f'])
63 | query_label = result['query_label'][0]
64 |
65 | result = scipy.io.loadmat('pytorch_result.mat')
66 | gallery_feature = torch.FloatTensor(result['gallery_f'])
67 | gallery_label = result['gallery_label'][0]
68 |
69 | query_feature = query_feature.cuda()
70 | gallery_feature = gallery_feature.cuda()
71 |
72 | print(query_feature.shape)
73 | CMC = torch.IntTensor(len(gallery_label)).zero_()
74 | ap = 0.0
75 | fail_index = []
76 | #print(query_label)
77 | for i in range(len(query_label)):
78 | ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],gallery_feature,gallery_label)
79 | if CMC_tmp[0]==-1:
80 | continue
81 | if CMC_tmp[0]==1: fail_index.append(i) # attack failed: this query is still ranked correctly at top-1
82 | CMC = CMC + CMC_tmp
83 | ap += ap_tmp
84 |
85 | print(len(fail_index), fail_index)
86 | CMC = CMC.float()
87 | CMC = CMC/len(query_label) #average CMC
88 | print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
89 | save_result = (CMC[0],CMC[4],CMC[9],ap/len(query_label))
90 | return save_result
91 |
92 |
93 | if __name__=='__main__':
94 | #since = time.time()
95 | query_path = '.'
96 | #query_path = './attack_query/ft_ResNet50_all-1/2/'
97 | main(query_path)
98 | #print(time.time()-since)
99 |
100 |
--------------------------------------------------------------------------------
/caffe/test_res.py:
--------------------------------------------------------------------------------
1 | import caffe
2 | import os
3 | import numpy as np
4 | from PIL import Image
5 |
6 | def clip(input, zeros, ones):
7 | low_mask = input < zeros
8 | up_mask = input > ones
9 | input[low_mask] = zeros[low_mask]
10 | input[up_mask] = ones[up_mask]
11 | return input
12 |
13 |
14 | def attack(img_pth, transformer):
15 | # load data
16 | data = caffe.io.load_image(img_pth)
17 | data = data * 255
18 | data = caffe.io.resize_image( data,(256,128))
19 | data = transformer.preprocess('data', data)
20 | net.blobs['data'].reshape(1,3,256,128)
21 | net.blobs['data'].data[...] = data
22 |
23 | # calculate the up bound and low bound of the input
24 | zeros = np.zeros((256,128,3),dtype=np.float32)
25 | ones = np.ones((256,128,3),dtype=np.float32)*255
26 | zeros = transformer.preprocess('data', zeros)
27 | ones = transformer.preprocess('data', ones)
28 |
29 | # As in my paper, I set the rate = 16
30 | rate = 16
31 | for i in range(int(min(1.25*rate, rate+4))):
32 | net.forward()
33 | net.backward()
34 | loss = net.blobs['loss'].data
35 | grad = net.blobs['data'].diff
36 | # In the first round, set the target feature to -f (more detail in the paper)
37 | if i==0:
38 | fc7_adv = -net.blobs['fc7'].data
39 | fc7_adv = fc7_adv.reshape(1,512,1,1)
40 | print 'make adv'
41 | #test = fc7.flatten()
42 | #print sum(test*test)
43 | net.blobs['fc7-adv'].data[...] = fc7_adv
44 |
45 | # accumulate the sign of the gradient (just a small trick)
46 | if i==0:
47 | acc_grad = np.sign(grad)
48 | else:
49 | acc_grad = acc_grad + np.sign(grad)
50 |
51 | acc_grad = acc_grad.reshape(3,256,128)
52 | mask_diff = np.abs(acc_grad) > rate
53 | acc_grad[mask_diff] = rate * np.sign(acc_grad[mask_diff])
54 |
55 | net.blobs['data'].data[...] = clip(data - acc_grad, zeros, ones)
56 |
57 | print i, loss
58 |
59 | data = net.blobs['data'].data
60 | data = transformer.deprocess('data',data)
61 | return data
62 |
63 |
64 | ##########################################
65 | # Set data path
66 | ##########################################
67 | query_pth = './query/'
68 | query_save_pth = './query-adv-res-mvn/'
69 | # Set model path
70 | # Remember to remove the ImageData layer in the deploy prototxt !!
71 | caffe.set_device(0)
72 | caffe.set_mode_gpu()
73 | net = caffe.Net('resnet.prototxt', 'resnet_340000.caffemodel', caffe.TEST)
74 |
75 | ##########################################
76 | # Prepare
77 | ##########################################
78 | # define transformer
79 | transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
80 | transformer.set_transpose('data', (2,0,1)) # 256*128*3 -> 3*256*128
81 | transformer.set_mean('data', np.asarray([107.72, 103.77, 109.23]) ) # mean pixel
82 | transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
83 |
84 | #########################################
85 | #Attack->save data
86 | #########################################
87 | for root,dirs,files in os.walk(query_pth):
88 | for name in files:
89 | src_pth = query_pth + name #load path
90 | print src_pth
91 | dst_pth = query_save_pth + name #save path
92 | im = attack(src_pth, transformer) # attack
93 | im = Image.fromarray(im.astype('uint8'))
94 | im.save(dst_pth) # save image
95 |
--------------------------------------------------------------------------------
/cub/evaluate_gpu.py:
--------------------------------------------------------------------------------
1 | import scipy.io
2 | import torch
3 | import numpy as np
4 | import time
5 |
6 | #######################################################################
7 | # Evaluate
8 | def evaluate(qf,ql,gf,gl,junk_index):
9 | query = qf.view(-1,1)
10 | # print(query.shape)
11 | score = torch.mm(gf,query)
12 | score = score.squeeze(1).cpu()
13 | score = score.numpy()
14 | # predict index
15 | index = np.argsort(score) #from small to large
16 | index = index[::-1]
17 | # index = index[0:2000]
18 | # good index
19 | query_index = np.argwhere(gl==ql)
20 | good_index = np.setdiff1d(query_index, junk_index, assume_unique=True)
21 |
22 | CMC_tmp = compute_mAP(index, good_index, junk_index)
23 | return CMC_tmp
24 |
25 | def save_index(index):
26 | # save the top 15 id
27 | with open('adv_list.txt','a') as fp:
28 | for i in range(15):
29 | fp.write("%d," %index[i])
30 | fp.write(" \n")
31 |
32 | def compute_mAP(index, good_index, junk_index):
33 | ap = 0
34 | cmc = torch.IntTensor(len(index)).zero_()
35 | if good_index.size==0: # if empty
36 | cmc[0] = -1
37 | return ap,cmc
38 |
39 | # remove junk_index
40 | mask = np.in1d(index, junk_index, invert=True)
41 | index = index[mask]
42 |
43 | # save index
44 | #save_index(index)
45 |
46 | # find good_index index
47 | ngood = len(good_index)
48 | mask = np.in1d(index, good_index)
49 | rows_good = np.argwhere(mask==True)
50 | rows_good = rows_good.flatten()
51 |
52 | cmc[rows_good[0]:] = 1
53 | for i in range(ngood):
54 | d_recall = 1.0/ngood
55 | precision = (i+1)*1.0/(rows_good[i]+1)
56 | if rows_good[i]!=0:
57 | old_precision = i*1.0/rows_good[i]
58 | else:
59 | old_precision=1.0
60 | ap = ap + d_recall*(old_precision + precision)/2
61 |
62 | return ap, cmc
63 |
64 | ######################################################################
65 | # main function
66 | def main( query_path ):
67 | result_q = scipy.io.loadmat(query_path+'/query.mat')
68 | query_feature = torch.FloatTensor(result_q['img_f'])
69 | query_label = result_q['label'][0]
70 |
71 | result_g = scipy.io.loadmat('pytorch_result.mat')
72 | gallery_feature = torch.FloatTensor(result_g['img_f'])
73 | gallery_label = result_g['label'][0]
74 |
75 | query_feature = query_feature.cuda()
76 | gallery_feature = gallery_feature.cuda()
77 |
78 | CMC = torch.IntTensor(len(gallery_label)).zero_()
79 | ap = 0.0
80 | fail_index = []
81 | junk_index1 = np.argwhere(gallery_label==-1) #not well-detected
82 | #print(query_label)
83 | for i in range(len(query_label)):
84 | ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],gallery_feature,gallery_label, i) # the query itself (index i) is passed as the junk index
85 | if CMC_tmp[0]==-1:
86 | continue
87 | if CMC_tmp[0]==1: fail_index.append(i) # attack failed: this query is still ranked correctly at top-1
88 | CMC += CMC_tmp
89 | ap += ap_tmp
90 | #print(i, CMC_tmp[0])
91 | print(len(fail_index), fail_index)
92 | CMC = CMC.float()
93 | CMC = CMC/len(query_label) #average CMC
94 | print('top1:%f top5:%f top10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
95 | save_result = (CMC[0],CMC[4],CMC[9],ap/len(query_label))
96 | return save_result
97 |
98 | if __name__=='__main__':
99 | #since = time.time()
100 | query_path = '.'
101 | #query_path = './attack_query/rerun_lr4--1/16'
102 | #query_path = './attack_query/ft_ResNet50_all-1/2/'
103 | main(query_path)
104 | #print(time.time()-since)
105 |
--------------------------------------------------------------------------------
/evaluate_gpu.py:
--------------------------------------------------------------------------------
1 | import scipy.io
2 | import torch
3 | import numpy as np
4 | import time
5 |
6 | #######################################################################
7 | # Evaluate
8 | def evaluate(qf,ql,qc,gf,gl,gc,junk_index1):
9 | query = qf.view(-1,1)
10 | # print(query.shape)
11 | score = torch.mm(gf,query)
12 | score = score.squeeze(1).cpu()
13 | score = score.numpy()
14 | # predict index
15 | index = np.argsort(score) #from small to large
16 | index = index[::-1]
17 | # index = index[0:2000]
18 | # good index
19 | query_index = np.argwhere(gl==ql)
20 | camera_index = np.argwhere(gc==qc)
21 |
22 | good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
23 | #junk_index1 = np.argwhere(gl==-1)
24 | junk_index2 = np.intersect1d(query_index, camera_index)
25 | junk_index = np.append(junk_index2, junk_index1) #.flatten())
26 |
27 | CMC_tmp = compute_mAP(index, good_index, junk_index)
28 | return CMC_tmp
29 |
30 | def compute_mAP(index, good_index, junk_index):
31 | ap = 0
32 | cmc = torch.IntTensor(len(index)).zero_()
33 | if good_index.size==0: # if empty
34 | cmc[0] = -1
35 | return ap,cmc
36 |
37 | # remove junk_index
38 | mask = np.in1d(index, junk_index, invert=True)
39 | index = index[mask]
40 |
41 | # find good_index index
42 | ngood = len(good_index)
43 | mask = np.in1d(index, good_index)
44 | rows_good = np.argwhere(mask==True)
45 | rows_good = rows_good.flatten()
46 |
47 | cmc[rows_good[0]:] = 1
48 | for i in range(ngood):
49 | d_recall = 1.0/ngood
50 | precision = (i+1)*1.0/(rows_good[i]+1)
51 | if rows_good[i]!=0:
52 | old_precision = i*1.0/rows_good[i]
53 | else:
54 | old_precision=1.0
55 | ap = ap + d_recall*(old_precision + precision)/2
56 |
57 | return ap, cmc
58 |
59 | ######################################################################
60 | # main function
61 | def main( query_path = './' ):
62 | #result_q = scipy.io.loadmat(query_path+'/query_result_normal.mat')
63 | result_q = scipy.io.loadmat(query_path+'/query_result.mat')
64 | query_feature = torch.FloatTensor(result_q['query_f'])
65 | query_cam = result_q['query_cam'][0]
66 | query_label = result_q['query_label'][0]
67 |
68 | result_g = scipy.io.loadmat('gallery_result.mat')
69 | gallery_feature = torch.FloatTensor(result_g['gallery_f'])
70 | gallery_cam = result_g['gallery_cam'][0]
71 | gallery_label = result_g['gallery_label'][0]
72 |
73 | query_feature = query_feature.cuda()
74 | gallery_feature = gallery_feature.cuda()
75 |
76 | CMC = torch.IntTensor(len(gallery_label)).zero_()
77 | ap = 0.0
78 | fail_index = []
79 | junk_index1 = np.argwhere(gallery_label==-1) #not well-detected
80 | #print(query_label)
81 | for i in range(len(query_label)):
82 | ap_tmp, CMC_tmp = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam, junk_index1)
83 | if CMC_tmp[0]==-1:
84 | continue
85 | if CMC_tmp[0]==1: fail_index.append(i) # attack failed: this query is still ranked correctly at top-1
86 | CMC += CMC_tmp
87 | ap += ap_tmp
88 | #print(i, CMC_tmp[0])
89 | print(len(fail_index), fail_index)
90 | CMC = CMC.float()
91 | CMC = CMC/len(query_label) #average CMC
92 | print('top1:%f top5:%f top10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
93 | save_result = (CMC[0],CMC[4],CMC[9],ap/len(query_label))
94 | return save_result
95 |
96 | if __name__=='__main__':
97 | #since = time.time()
98 | #query_path = './attack_query/baseline-9/16'
99 | query_path = './'
100 | main(query_path)
101 | #print(time.time()-since)
102 |
--------------------------------------------------------------------------------
/cifar/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | import torch.utils.data as data
18 | import models.cifar as models
19 | ######################################################################
20 | # Options
21 | # --------
22 | parser = argparse.ArgumentParser(description='Training')
23 | parser.add_argument('--gpu_ids',default='1', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
24 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
25 | parser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')
26 | parser.add_argument('--name', default='resnet20', type=str, help='save model path')
27 | parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
28 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
29 | parser.add_argument('--depth', default=20, type=int, help='model depth')
30 |
31 | ###########################################
32 | # python test.py --name resnet20 --depth 20
33 | ##########################################
34 |
35 | opt = parser.parse_args()
36 |
37 | str_ids = opt.gpu_ids.split(',')
38 | #which_epoch = opt.which_epoch
39 | name = opt.name
40 | test_dir = opt.test_dir
41 | ######################################################################
42 | # Load Data
43 | # ---------
44 | #
45 | # We will use torchvision and torch.utils.data packages for loading the
46 | # data.
47 | #
48 | data_transforms = transforms.Compose([
49 | transforms.ToTensor(),
50 | transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
51 | ])
52 |
53 | dataloader = datasets.CIFAR10
54 | testset = dataloader(root='./data', train=False, download=False, transform=data_transforms)
55 | testloader = data.DataLoader(testset, batch_size=opt.batchsize, shuffle=False, num_workers=8)
56 |
57 | class_names = ('plane', 'car', 'bird', 'cat',
58 | 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
59 | use_gpu = torch.cuda.is_available()
60 |
61 | ######################################################################
62 | # Load model
63 | #---------------------------
64 | def load_network(network):
65 | save_path = os.path.join('./checkpoint',name,'model_best.pth.tar')
66 | checkpoint = torch.load(save_path)
67 | network.load_state_dict(checkpoint['state_dict'])
68 | return network
69 |
70 |
71 | ######################################################################
72 | # Extract feature
73 | # ----------------------
74 | #
75 | # Extract feature from a trained model.
76 | #
77 |
78 | def evaluate(model,dataloaders):
79 | count = 0
80 | score = 0.0
81 | score5 = 0.0
82 | for data in dataloaders:
83 | img, label = data
84 | n, c, h, w = img.size()
85 | count += n
86 | input_img = Variable(img.cuda(), volatile=True)
87 | outputs = model(input_img)
88 | outputs = outputs.data.cpu()
89 | _, preds = outputs.topk(5, dim=1)
90 | correct = preds.eq(label.view(n,1).expand_as(preds))
91 | score += torch.sum(correct[:,0])
92 | score5 += torch.sum(correct)
93 | print("top1: %.4f top5:%.4f"% (score/count, score5/count))
94 | return
95 |
96 | ######################################################################
97 | # Load Collected data Trained model
98 | #print('-------test-----------')
99 |
100 | model_structure = models.__dict__['resnet'](
101 | num_classes=10,
102 | depth=opt.depth,
103 | )
104 |
105 | model = torch.nn.DataParallel(model_structure).cuda()
106 | model = load_network(model)
107 |
108 | # Change to test mode
109 | model = model.eval()
110 |
111 | # test
112 | evaluate(model,testloader)
113 |
114 |
--------------------------------------------------------------------------------
/cub/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import yaml
4 | import torch.nn as nn
5 | import parser
6 | from model import ft_net, ft_net_dense, PCB
7 |
8 | def make_weights_for_balanced_classes(images, nclasses):
9 | count = [0] * nclasses
10 | for item in images:
11 | count[item[1]] += 1 # count the image number in every class
12 | weight_per_class = [0.] * nclasses
13 | N = float(sum(count))
14 | for i in range(nclasses):
15 | weight_per_class[i] = N/float(count[i])
16 | weight = [0] * len(images)
17 | for idx, val in enumerate(images):
18 | weight[idx] = weight_per_class[val[1]]
19 | return weight
20 |
21 | # Get model list for resume
22 | def get_model_list(dirname, key):
23 | if os.path.exists(dirname) is False:
24 | print('no dir: %s'%dirname)
25 | return None
26 | gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
27 | os.path.isfile(os.path.join(dirname, f)) and key in f and ".pth" in f]
28 | if gen_models is None:
29 | return None
30 | gen_models.sort()
31 | last_model_name = gen_models[-1]
32 | return last_model_name
33 |
34 | ######################################################################
35 | # Save model
36 | #---------------------------
37 | def save_network(network, dirname, epoch_label):
38 | if not os.path.isdir('./model/'+dirname):
39 | os.mkdir('./model/'+dirname)
40 | if isinstance(epoch_label, int):
41 | save_filename = 'net_%03d.pth'% epoch_label
42 | else:
43 | save_filename = 'net_%s.pth'% epoch_label
44 | save_path = os.path.join('./model',dirname,save_filename)
45 | torch.save(network.cpu().state_dict(), save_path)
46 | if torch.cuda.is_available():
47 | network.cuda()
48 |
49 |
50 | ######################################################################
51 | # Load model for resume
52 | #---------------------------
53 | def load_network(name, opt):
54 | # Load config
55 | dirname = os.path.join('./model',name)
56 | last_model_name = os.path.basename(get_model_list(dirname, 'net'))
57 | epoch = last_model_name.split('_')[1]
58 | epoch = epoch.split('.')[0]
59 | if not epoch=='last':
60 | epoch = int(epoch)
61 | config_path = os.path.join(dirname,'opts.yaml')
62 | with open(config_path, 'r') as stream:
63 | config = yaml.load(stream, Loader=yaml.FullLoader)
64 |
65 | opt.name = config['name']
66 | opt.data_dir = config['data_dir']
67 | opt.train_all = config['train_all']
68 | opt.droprate = config['droprate']
69 | opt.color_jitter = config['color_jitter']
70 | opt.batchsize = config['batchsize']
71 | opt.h = config['h']
72 | opt.w = config['w']
73 | opt.stride = config['stride']
74 | if 'pool' in config:
75 | opt.pool = config['pool']
76 | if 'h' in config:
77 | opt.h = config['h']
78 | opt.w = config['w']
79 | if 'gpu_ids' in config:
80 | opt.gpu_ids = config['gpu_ids']
81 | opt.erasing_p = config['erasing_p']
82 | opt.lr = config['lr']
83 | opt.nclasses = config['nclasses']
84 | opt.erasing_p = config['erasing_p']
85 | opt.use_dense = config['use_dense']
86 | opt.PCB = config['PCB']
87 | opt.fp16 = config['fp16']
88 |
89 | if opt.use_dense:
90 | model = ft_net_dense(opt.nclasses, opt.droprate, opt.stride, None, opt.pool)
91 | else:
92 | model = ft_net(opt.nclasses, opt.droprate, opt.stride, None, opt.pool)
93 | if opt.PCB:
94 | model = PCB(opt.nclasses)
95 |
96 | # load model
97 | if isinstance(epoch, int):
98 | save_filename = 'net_%03d.pth'% epoch
99 | else:
100 | save_filename = 'net_%s.pth'% epoch
101 |
102 | save_path = os.path.join('./model',name,save_filename)
103 | print('Load the model from %s'%save_path)
104 | network = model
105 | network.load_state_dict(torch.load(save_path))
106 | return network, opt, epoch
107 |
108 | def toogle_grad(model, requires_grad):
109 | for p in model.parameters():
110 | p.requires_grad_(requires_grad)
111 |
112 | def update_average(model_tgt, model_src, beta):
113 | toogle_grad(model_src, False)
114 | toogle_grad(model_tgt, False)
115 |
116 | param_dict_src = dict(model_src.named_parameters())
117 |
118 | for p_name, p_tgt in model_tgt.named_parameters():
119 | p_src = param_dict_src[p_name]
120 | assert(p_src is not p_tgt)
121 | p_tgt.copy_(beta*p_tgt + (1. - beta)*p_src)
122 |
123 | toogle_grad(model_src, True)
124 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import yaml
4 | import torch.nn as nn
5 | import parser
6 | from model import ft_net, ft_net_dense, PCB
7 |
8 | def make_weights_for_balanced_classes(images, nclasses):
9 | count = [0] * nclasses
10 | for item in images:
11 | count[item[1]] += 1 # count the image number in every class
12 | weight_per_class = [0.] * nclasses
13 | N = float(sum(count))
14 | for i in range(nclasses):
15 | weight_per_class[i] = N/float(count[i])
16 | weight = [0] * len(images)
17 | for idx, val in enumerate(images):
18 | weight[idx] = weight_per_class[val[1]]
19 | return weight
20 |
21 | # Get model list for resume
22 | def get_model_list(dirname, key):
23 | if os.path.exists(dirname) is False:
24 | print('no dir: %s'%dirname)
25 | return None
26 | gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
27 | os.path.isfile(os.path.join(dirname, f)) and key in f and ".pth" in f]
28 | if gen_models is None:
29 | return None
30 | gen_models.sort()
31 | last_model_name = gen_models[-1]
32 | return last_model_name
33 |
34 | ######################################################################
35 | # Save model
36 | #---------------------------
37 | def save_network(network, dirname, epoch_label):
38 | if not os.path.isdir('./model/'+dirname):
39 | os.mkdir('./model/'+dirname)
40 | if isinstance(epoch_label, int):
41 | save_filename = 'net_%03d.pth'% epoch_label
42 | else:
43 | save_filename = 'net_%s.pth'% epoch_label
44 | save_path = os.path.join('./model',dirname,save_filename)
45 | torch.save(network.cpu().state_dict(), save_path)
46 |     if torch.cuda.is_available():
47 | network.cuda()
48 |
49 |
50 | ######################################################################
51 | # Load model for resume
52 | #---------------------------
53 | def load_network(name, opt):
54 | # Load config
55 | dirname = os.path.join('./model',name)
56 | last_model_name = os.path.basename(get_model_list(dirname, 'net'))
57 | epoch = last_model_name.split('_')[1]
58 | epoch = epoch.split('.')[0]
59 | if not epoch=='last':
60 | epoch = int(epoch)
61 | config_path = os.path.join(dirname,'opts.yaml')
62 | with open(config_path, 'r') as stream:
63 |         config = yaml.load(stream, Loader=yaml.FullLoader) # PyYAML>=5.1 requires an explicit Loader
64 |
65 | opt.name = config['name']
66 | opt.data_dir = config['data_dir']
67 | opt.train_all = config['train_all']
68 | opt.droprate = config['droprate']
69 | opt.color_jitter = config['color_jitter']
70 | opt.batchsize = config['batchsize']
71 | opt.h = config['h']
72 | opt.w = config['w']
73 | opt.stride = config['stride']
74 | if 'pool' in config:
75 | opt.pool = config['pool']
76 | if 'h' in config:
77 | opt.h = config['h']
78 | opt.w = config['w']
79 | if 'gpu_ids' in config:
80 | opt.gpu_ids = config['gpu_ids']
81 | opt.erasing_p = config['erasing_p']
82 | opt.lr = config['lr']
83 | opt.nclasses = config['nclasses']
84 | opt.erasing_p = config['erasing_p']
85 | opt.use_dense = config['use_dense']
86 | opt.PCB = config['PCB']
87 | opt.fp16 = config['fp16']
88 |
89 | if opt.use_dense:
90 | model = ft_net_dense(opt.nclasses, opt.droprate, opt.stride, None, opt.pool)
91 | else:
92 | model = ft_net(opt.nclasses, opt.droprate, opt.stride, None, opt.pool)
93 | if opt.PCB:
94 | model = PCB(opt.nclasses)
95 |
96 | # load model
97 | if isinstance(epoch, int):
98 | save_filename = 'net_%03d.pth'% epoch
99 | else:
100 | save_filename = 'net_%s.pth'% epoch
101 |
102 | save_path = os.path.join('./model',name,save_filename)
103 | print('Load the model from %s'%save_path)
104 | network = model
105 | network.load_state_dict(torch.load(save_path))
106 | return network, opt, epoch
107 |
108 | def toogle_grad(model, requires_grad):
109 | for p in model.parameters():
110 | p.requires_grad_(requires_grad)
111 |
112 | def update_average(model_tgt, model_src, beta):
113 | toogle_grad(model_src, False)
114 | toogle_grad(model_tgt, False)
115 |
116 | param_dict_src = dict(model_src.named_parameters())
117 |
118 | for p_name, p_tgt in model_tgt.named_parameters():
119 | p_src = param_dict_src[p_name]
120 | assert(p_src is not p_tgt)
121 | p_tgt.copy_(beta*p_tgt + (1. - beta)*p_src)
122 |
123 | toogle_grad(model_src, True)
124 |
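125 | # make_weights_for_balanced_classes pairs with WeightedRandomSampler to
126 | # oversample rare classes; a sketch (dataset name is illustrative):
127 | #   weights = make_weights_for_balanced_classes(image_dataset.imgs, nclasses)
128 | #   sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
129 | #   loader = torch.utils.data.DataLoader(image_dataset, batch_size=32, sampler=sampler)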
--------------------------------------------------------------------------------
/mnist/main.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import argparse
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import torch.optim as optim
7 | from torch.optim import lr_scheduler
8 | from torchvision import datasets, transforms
9 | from model import Net
10 |
11 | criterion = nn.CrossEntropyLoss()
12 |
13 | def train(args, model, device, train_loader, optimizer, epoch):
14 | model.train()
15 | for batch_idx, (data, target) in enumerate(train_loader):
16 | data, target = data.to(device), target.to(device)
17 | optimizer.zero_grad()
18 | output = model(data)
19 | loss = criterion(output, target)
20 | loss.backward()
21 | optimizer.step()
22 | if batch_idx % args.log_interval == 0:
23 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
24 | epoch, batch_idx * len(data), len(train_loader.dataset),
25 | 100. * batch_idx / len(train_loader), loss.item()))
26 |
27 | def test(args, model, device, test_loader):
28 | model.eval()
29 | test_loss = 0
30 | correct = 0
31 | with torch.no_grad():
32 | for data, target in test_loader:
33 | data, target = data.to(device), target.to(device)
34 | output = model(data)
35 |             test_loss += criterion(output, target).item() * data.size(0) # criterion returns the batch mean, so scale back to a sum
36 | pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
37 | correct += pred.eq(target.view_as(pred)).sum().item()
38 |
39 | test_loss /= len(test_loader.dataset)
40 | print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
41 | test_loss, correct, len(test_loader.dataset),
42 | 100. * correct / len(test_loader.dataset)))
43 |
44 | def save_network(network):
45 |     torch.save(network.cpu().state_dict(), 'model/best.pth')
46 |     if torch.cuda.is_available(): network.cuda() # move back so the next epoch keeps training on GPU
47 | def main():
48 | # Training settings
49 | parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
50 |     parser.add_argument('--batch-size', type=int, default=128, metavar='N',
51 |                         help='input batch size for training (default: 128)')
52 | parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
53 | help='input batch size for testing (default: 1000)')
54 | parser.add_argument('--epochs', type=int, default=30, metavar='N',
55 | help='number of epochs to train (default: 30)')
56 |     parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
57 |                         help='learning rate (default: 0.05)')
58 | parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
59 | help='SGD momentum (default: 0.5)')
60 | parser.add_argument('--no-cuda', action='store_true', default=False,
61 | help='disables CUDA training')
62 | parser.add_argument('--seed', type=int, default=1, metavar='S',
63 | help='random seed (default: 1)')
64 | parser.add_argument('--log-interval', type=int, default=10, metavar='N',
65 | help='how many batches to wait before logging training status')
66 | args = parser.parse_args()
67 | use_cuda = not args.no_cuda and torch.cuda.is_available()
68 |
69 | torch.manual_seed(args.seed)
70 |
71 | device = torch.device("cuda" if use_cuda else "cpu")
72 |
73 | kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
74 | train_loader = torch.utils.data.DataLoader(
75 | datasets.MNIST('../data', train=True, download=True,
76 | transform=transforms.Compose([
77 | transforms.ToTensor(),
78 | transforms.Normalize((0.1307,), (0.3081,))
79 | ])),
80 | batch_size=args.batch_size, shuffle=True, **kwargs)
81 | test_loader = torch.utils.data.DataLoader(
82 | datasets.MNIST('../data', train=False, transform=transforms.Compose([
83 | transforms.ToTensor(),
84 | transforms.Normalize((0.1307,), (0.3081,))
85 | ])),
86 | batch_size=args.test_batch_size, shuffle=True, **kwargs)
87 |
88 |
89 | model = Net().to(device)
90 | optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
91 | exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
92 |
93 | for epoch in range(1, args.epochs + 1):
94 | train(args, model, device, train_loader, optimizer, epoch)
95 | exp_lr_scheduler.step()
96 | test(args, model, device, test_loader)
97 | save_network(model)
98 |
99 |
100 | if __name__ == '__main__':
101 | main()
102 |
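103 | # Example run with the defaults defined above:
104 | #   python main.py --batch-size 128 --lr 0.05 --epochs 30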
--------------------------------------------------------------------------------
/cub/demo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import scipy.io
3 | import torch
4 | import numpy as np
5 | import os
6 | from torchvision import datasets
7 | import matplotlib
8 | matplotlib.use('agg')
9 | import matplotlib.pyplot as plt
10 | from shutil import copyfile
11 | from PIL import Image
12 | #######################################################################
13 | # Evaluate
14 | parser = argparse.ArgumentParser(description='Demo')
15 | parser.add_argument('--query_index', default=240, type=int, help='test_image_index')
16 | parser.add_argument('--method', default=5, type=int, help='attack method id')
17 | parser.add_argument('--test_dir',default='./images',type=str, help='./test_data')
18 | parser.add_argument('--adv',action='store_true', help='use the adversarial query')
19 | opts = parser.parse_args()
20 |
21 | data_dir = opts.test_dir
22 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['test']}
23 |
24 | #####################################################################
25 | #Show result
26 | def imshow(path, title=None):
27 | """Imshow for Tensor."""
28 | im = plt.imread(path)
29 | plt.imshow(im)
30 | if title is not None:
31 | plt.title(title)
32 | plt.pause(0.001) # pause a bit so that plots are updated
33 |
34 | ######################################################################
35 |
36 | result = scipy.io.loadmat('pytorch_result.mat')
37 |
38 | if opts.adv:
39 | result = scipy.io.loadmat('attack_query/rerun_lr4-%d/16/query.mat'%opts.method)
40 |
41 | query_feature = torch.FloatTensor(result['img_f'])
42 | query_label = result['label'][0]
43 |
44 | result = scipy.io.loadmat('pytorch_result.mat')
45 | gallery_feature = torch.FloatTensor(result['img_f'])
46 | gallery_label = result['label'][0]
47 |
48 | multi = os.path.isfile('multi_query.mat')
49 |
50 | if multi:
51 | m_result = scipy.io.loadmat('multi_query.mat')
52 | mquery_feature = torch.FloatTensor(m_result['mquery_f'])
53 | mquery_cam = m_result['mquery_cam'][0]
54 | mquery_label = m_result['mquery_label'][0]
55 | mquery_feature = mquery_feature.cuda()
56 |
57 | query_feature = query_feature.cuda()
58 | gallery_feature = gallery_feature.cuda()
59 |
60 | #######################################################################
61 | # sort the images
62 | def sort_img(qf, ql, gf, gl):
63 | query = qf.view(-1,1)
64 | # print(query.shape)
65 | score = torch.mm(gf,query)
66 | score = score.squeeze(1).cpu()
67 | score = score.numpy()
68 | # predict index
69 | index = np.argsort(score) #from small to large
70 | index = index[::-1]
71 |
72 | #good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
73 | junk_index = np.argwhere(gl==-1)
74 |
75 | mask = np.in1d(index, junk_index, invert=True)
76 | index = index[mask]
77 | return index
78 |
79 | i = opts.query_index
80 | adv = ''
81 | if opts.adv:
82 | adv = 'a'
83 |
84 | if not os.path.isdir(str(opts.query_index)+adv+str(opts.method)):
85 | os.mkdir(str(opts.query_index)+adv+str(opts.method))
86 | index = sort_img(query_feature[i],query_label[i],gallery_feature,gallery_label)
87 |
88 | ########################################################################
89 | # Visualize the rank result
90 |
91 | query_path, _ = image_datasets['test'].imgs[i]
92 | query_label = query_label[i]
93 | if opts.adv:
94 | query_path = query_path.replace('images/test','attack_query/rerun_lr4-%d/16/'%opts.method)
95 | print(query_path)
96 | print('Top 10 images are as follows:')
97 | try: # Visualize Ranking Result
98 | # Graphical User Interface is needed
99 | fig = plt.figure(figsize=(12,4))
100 | ax = plt.subplot(1,11,1)
101 | ax.axis('off')
102 | imshow(query_path)
103 | query256 = Image.open(query_path)
104 | query256 = query256.resize((256,256))
105 | query256.save('./%d%s%d/query.jpg'%(opts.query_index,adv,opts.method) )
106 | #imshow(query_path,'query')
107 | for i in range(10):
108 | ax = plt.subplot(1,11,i+2)
109 | ax.axis('off')
110 | img_path, _ = image_datasets['test'].imgs[index[i+1]]
111 | label = gallery_label[index[i+1]]
112 | imshow(img_path)
113 | if label == query_label:
114 | ax.set_title('%d'%(i+1), color='green')
115 | else:
116 | ax.set_title('%d'%(i+1), color='red')
117 | print(img_path)
118 | img256 = Image.open(img_path)
119 | img256 = img256.resize((256,256))
120 | #copyfile(img_path, './%d%s/%d.jpg'%(opts.query_index,adv,i) )
121 | img256.save('./%d%s%d/%d.jpg'%(opts.query_index,adv,opts.method,i) )
122 | fig.savefig('result.jpg')
123 | except RuntimeError:
124 | for i in range(10):
125 |         img_path = image_datasets['test'].imgs[index[i+1]]
126 |         print(img_path[0])
127 |     print('To see the visualization of the ranking result, a graphical user interface is needed.')
128 |
129 |
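130 | # Example: compare the clean and the adversarial ranking for the same query,
131 | # assuming pytorch_result.mat and the attack_query/ folder have been generated:
132 | #   python demo.py --query_index 240
133 | #   python demo.py --query_index 240 --adv --method 5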
--------------------------------------------------------------------------------
/visualize/statistic_pred.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | import sys
18 | sys.path.append("..")
19 | from model import ft_net, ft_net_dense
20 |
21 | ######################################################################
22 | # Options
23 | # --------
24 | parser.add_argument = parser.add_argument if False else parser.add_argument
25 | parser.add_argument('--gpu_ids',default='1', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
26 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
27 | parser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')
28 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
29 | parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
31 |
32 | opt = parser.parse_args()
33 |
34 | str_ids = opt.gpu_ids.split(',')
35 | #which_epoch = opt.which_epoch
36 | name = opt.name
37 | test_dir = opt.test_dir
38 |
39 | gpu_ids = []
40 | for str_id in str_ids:
41 | id = int(str_id)
42 | if id >=0:
43 | gpu_ids.append(id)
44 |
45 | # set gpu ids
46 | if len(gpu_ids)>0:
47 | torch.cuda.set_device(gpu_ids[0])
48 |
49 | ######################################################################
50 | # Load Data
51 | # ---------
52 | #
53 | # We will use torchvision and torch.utils.data packages for loading the
54 | # data.
55 | #
56 | data_transforms = transforms.Compose([
57 | transforms.Resize((256,128), interpolation=3),
58 | transforms.ToTensor(),
59 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
60 | ])
61 |
62 |
63 | data_dir = test_dir
64 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
65 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
66 | shuffle=False, num_workers=4) for x in ['gallery','query']}
67 |
68 | class_names = image_datasets['query'].classes
69 | use_gpu = torch.cuda.is_available()
70 |
71 | ######################################################################
72 | # Load model
73 | #---------------------------
74 | def load_network(network):
75 | save_path = os.path.join('../model',name,'net_%s.pth'%opt.which_epoch)
76 | network.load_state_dict(torch.load(save_path))
77 | return network
78 |
79 |
80 | ######################################################################
81 | # Prediction confidence
82 | # ----------------------
83 | #
84 | # Run the trained model and record the maximum softmax probability per image.
85 | #
86 | def vis_feature(model,dataloaders):
87 | features = torch.FloatTensor()
88 | count = 0
89 | predict_max = []
90 | for data in dataloaders:
91 | img, label = data
92 | n, c, h, w = img.size()
93 | count += n
94 | input_img = Variable(img.cuda())
95 | outputs = model(input_img)
96 | sm = nn.Softmax(dim=1)
97 | outputs = sm(outputs)
98 | for i in range(n):
99 | max_value = torch.max(outputs[i])
100 | max_value = max_value.data.cpu()
101 | predict_max.append(max_value.numpy())
102 | print(np.mean(predict_max))
103 |
104 | def get_id(img_path):
105 | camera_id = []
106 | labels = []
107 | for path, v in img_path:
108 | filename = path.split('/')[-1]
109 | label = filename[0:4]
110 | camera = filename.split('c')[1]
111 | if label[0:2]=='-1':
112 | labels.append(-1)
113 | else:
114 | labels.append(int(label))
115 | camera_id.append(int(camera[0]))
116 | return camera_id, labels
117 |
118 | gallery_path = image_datasets['gallery'].imgs
119 | query_path = image_datasets['query'].imgs
120 |
121 | gallery_cam,gallery_label = get_id(gallery_path)
122 | query_cam,query_label = get_id(query_path)
123 |
124 | ######################################################################
125 | # Load Collected data Trained model
126 | #print('-------test-----------')
127 | if opt.use_dense:
128 | model_structure = ft_net_dense(751)
129 | else:
130 | model_structure = ft_net(751)
131 | model = load_network(model_structure)
132 |
133 | print(model)
134 | # Change to test mode
135 | model = model.eval()
136 | if use_gpu:
137 | model = model.cuda()
138 |
139 | # output generate
140 | output_folder = './%s' % opt.name
141 | if not os.path.exists(output_folder):
142 | os.makedirs(output_folder)
143 |
144 | # Compute prediction confidence
145 | vis_feature(model, dataloaders['query']) # prints the mean max softmax probability; no return value
146 |
147 |
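148 | # Example: report the mean maximum softmax probability on the query set:
149 | #   python statistic_pred.py --name ft_ResNet50 --which_epoch last --gpu_ids 0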
--------------------------------------------------------------------------------
/Cuisine-retrieval/demo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import scipy.io
3 | import torch
4 | import numpy as np
5 | import os
6 | from torchvision import datasets
7 | import matplotlib
8 | matplotlib.use('agg')
9 | import matplotlib.pyplot as plt
10 | from shutil import copyfile
11 | from PIL import Image
12 | #######################################################################
13 | # Evaluate
14 | parser = argparse.ArgumentParser(description='Demo')
15 | parser.add_argument('--query_index', default=300, type=int, help='test_image_index')
16 | parser.add_argument('--method', default=5, type=int, help='attack method id')
17 | parser.add_argument('--test_dir',default='./Food-cropped/pytorch',type=str, help='./test_data')
18 | parser.add_argument('--adv',action='store_true', help='use the adversarial query')
19 | opts = parser.parse_args()
20 |
21 | data_dir = opts.test_dir
22 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['gallery','query']}
23 |
24 | #####################################################################
25 | #Show result
26 | def imshow(path, title=None):
27 | """Imshow for Tensor."""
28 | im = plt.imread(path)
29 | plt.imshow(im)
30 | if title is not None:
31 | plt.title(title)
32 | plt.pause(0.001) # pause a bit so that plots are updated
33 |
34 | ######################################################################
35 | result = scipy.io.loadmat('pytorch_result.mat')
36 |
37 | if opts.adv:
38 | result = scipy.io.loadmat('attack_query/ft_ResNet50_all-%d/16/query.mat'%opts.method)
39 |
40 | query_feature = torch.FloatTensor(result['query_f'])
41 | query_label = result['query_label'][0]
42 |
43 | result = scipy.io.loadmat('pytorch_result.mat')
44 | gallery_feature = torch.FloatTensor(result['gallery_f'])
45 | gallery_label = result['gallery_label'][0]
46 |
47 | multi = os.path.isfile('multi_query.mat')
48 |
49 | if multi:
50 | m_result = scipy.io.loadmat('multi_query.mat')
51 | mquery_feature = torch.FloatTensor(m_result['mquery_f'])
52 | mquery_cam = m_result['mquery_cam'][0]
53 | mquery_label = m_result['mquery_label'][0]
54 | mquery_feature = mquery_feature.cuda()
55 |
56 | query_feature = query_feature.cuda()
57 | gallery_feature = gallery_feature.cuda()
58 |
59 | #######################################################################
60 | # sort the images
61 | def sort_img(qf, ql, gf, gl):
62 | query = qf.view(-1,1)
63 | # print(query.shape)
64 | score = torch.mm(gf,query)
65 | score = score.squeeze(1).cpu()
66 | score = score.numpy()
67 | # predict index
68 | index = np.argsort(score) #from small to large
69 | index = index[::-1]
70 |
71 | #good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
72 | junk_index = np.argwhere(gl==-1)
73 |
74 | mask = np.in1d(index, junk_index, invert=True)
75 | index = index[mask]
76 | return index
77 |
78 | i = opts.query_index
79 | adv = ''
80 | if opts.adv:
81 | adv = 'a'
82 |
83 | if not os.path.isdir(str(opts.query_index)+adv+str(opts.method)):
84 | os.mkdir(str(opts.query_index)+adv+str(opts.method))
85 | index = sort_img(query_feature[i],query_label[i],gallery_feature,gallery_label)
86 |
87 | ########################################################################
88 | # Visualize the rank result
89 |
90 | query_path, _ = image_datasets['query'].imgs[i]
91 | query_label = query_label[i]
92 | if opts.adv:
93 | query_path = query_path.replace('./Food-cropped/pytorch/query','attack_query/ft_ResNet50_all-%d/16/'%opts.method)
94 | print(query_path)
95 | print('Top 10 images are as follows:')
96 | try: # Visualize Ranking Result
97 | # Graphical User Interface is needed
98 | fig = plt.figure(figsize=(12,4))
99 | ax = plt.subplot(1,11,1)
100 | ax.axis('off')
101 | imshow(query_path)
102 | query256 = Image.open(query_path)
103 | query256 = query256.resize((256,256))
104 | query256.save('./%d%s%d/query.jpg'%(opts.query_index,adv,opts.method) )
105 | #copyfile(query_path, './%d%s/query.jpg'%(opts.query_index,adv) )
106 | #imshow(query_path,'query')
107 | for i in range(10):
108 | ax = plt.subplot(1,11,i+2)
109 | ax.axis('off')
110 | img_path, _ = image_datasets['gallery'].imgs[index[i+1]]
111 | label = gallery_label[index[i+1]]
112 | imshow(img_path)
113 | if label == query_label:
114 | ax.set_title('%d'%(i+1), color='green')
115 | else:
116 | ax.set_title('%d'%(i+1), color='red')
117 | print(img_path)
118 | img256 = Image.open(img_path)
119 | img256 = img256.resize((256,256))
120 | #copyfile(img_path, './%d%s/%d.jpg'%(opts.query_index,adv,i) )
121 | img256.save('./%d%s%d/%d.jpg'%(opts.query_index,adv,opts.method,i) )
122 | fig.savefig('result.jpg')
123 | except RuntimeError:
124 | for i in range(10):
125 |         img_path = image_datasets['gallery'].imgs[index[i+1]]
126 |         print(img_path[0])
127 |     print('To see the visualization of the ranking result, a graphical user interface is needed.')
128 |
129 |
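130 | # Note: sort_img ranks the gallery by inner product, which equals cosine
131 | # similarity here because the stored features are L2-normalized; gallery
132 | # entries labelled -1 are treated as junk and filtered out of the ranking.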
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import scipy.io
3 | import torch
4 | import numpy as np
5 | import os
6 | from torchvision import datasets
7 | import matplotlib
8 | matplotlib.use('agg')
9 | import matplotlib.pyplot as plt
10 | from shutil import copyfile
11 | from PIL import Image
12 | #######################################################################
13 | # Evaluate
14 | parser = argparse.ArgumentParser(description='Demo')
15 | parser.add_argument('--query_index', default=666, type=int, help='test_image_index')
16 | parser.add_argument('--method', default=0, type=int, help='attack method id')
17 | parser.add_argument('--test_dir',default='../Market/pytorch',type=str, help='./test_data')
18 | parser.add_argument('--adv',action='store_true', help='use the adversarial query')
19 | opts = parser.parse_args()
20 |
21 | data_dir = opts.test_dir
22 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['query','gallery']}
23 |
24 | #####################################################################
25 | #Show result
26 | def imshow(path, title=None):
27 | """Imshow for Tensor."""
28 | im = plt.imread(path)
29 | plt.imshow(im)
30 | if title is not None:
31 | plt.title(title)
32 | plt.pause(0.001) # pause a bit so that plots are updated
33 |
34 | ######################################################################
35 |
36 | result = scipy.io.loadmat('query_result.mat')
37 |
38 | if opts.adv:
39 | result = scipy.io.loadmat('attack_query/baseline-%d/16/query_result.mat'%opts.method)
40 |
41 | query_feature = torch.FloatTensor(result['query_f'])
42 | query_label = result['query_label'][0]
43 | query_cam = result['query_cam'][0]
44 | print(query_feature.shape)
45 |
46 | result = scipy.io.loadmat('gallery_result.mat')
47 | gallery_feature = torch.FloatTensor(result['gallery_f'])
48 | gallery_cam = result['gallery_cam'][0]
49 | gallery_label = result['gallery_label'][0]
50 |
51 | multi = os.path.isfile('multi_query.mat')
52 |
53 | if multi:
54 | m_result = scipy.io.loadmat('multi_query.mat')
55 | mquery_feature = torch.FloatTensor(m_result['mquery_f'])
56 | mquery_cam = m_result['mquery_cam'][0]
57 | mquery_label = m_result['mquery_label'][0]
58 | mquery_feature = mquery_feature.cuda()
59 |
60 | query_feature = query_feature.cuda()
61 | gallery_feature = gallery_feature.cuda()
62 |
63 | #######################################################################
64 | # sort the images
65 | def sort_img(qf, ql, qc, gf, gl, gc):
66 | query = qf.view(-1,1)
67 | # print(query.shape)
68 | score = torch.mm(gf,query)
69 | score = score.squeeze(1).cpu()
70 | score = score.numpy()
71 | # predict index
72 | index = np.argsort(score) #from small to large
73 | index = index[::-1]
74 |
75 | query_index = np.argwhere(gl==ql)
76 | camera_index = np.argwhere(gc==qc)
77 | #good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
78 | junk_index1 = np.argwhere(gl==-1)
79 | junk_index2 = np.intersect1d(query_index, camera_index)
80 |     junk_index = np.append(junk_index2, junk_index1)
81 |
82 | mask = np.in1d(index, junk_index, invert=True)
83 | index = index[mask]
84 | return index
85 |
86 | i = opts.query_index
87 | adv = ''
88 | if opts.adv:
89 | adv = 'a'
90 |
91 | if not os.path.isdir(str(opts.query_index)+adv+str(opts.method)):
92 | os.mkdir(str(opts.query_index)+adv+str(opts.method))
93 | index = sort_img(query_feature[i], query_label[i], query_cam[i], gallery_feature, gallery_label, gallery_cam)
94 |
95 | ########################################################################
96 | # Visualize the rank result
97 |
98 | query_path, _ = image_datasets['query'].imgs[i]
99 | query_label = query_label[i]
100 | if opts.adv:
101 | query_path = query_path.replace('../Market/pytorch/query/%04d'%query_label,'attack_query/baseline-%d/16/'%opts.method)
102 | print(query_path)
103 | print('Top 10 images are as follows:')
104 | try: # Visualize Ranking Result
105 | # Graphical User Interface is needed
106 | fig = plt.figure(figsize=(12,4))
107 | ax = plt.subplot(1,11,1)
108 | ax.axis('off')
109 | imshow(query_path)
110 | query256 = Image.open(query_path)
111 | #query256 = query256.resize((256,128))
112 | query256.save('./%d%s%d/query.jpg'%(opts.query_index,adv,opts.method) )
113 | #imshow(query_path,'query')
114 | for i in range(10):
115 | ax = plt.subplot(1,11,i+2)
116 | ax.axis('off')
117 | img_path, _ = image_datasets['gallery'].imgs[index[i+1]]
118 | label = gallery_label[index[i+1]]
119 | imshow(img_path)
120 | if label == query_label:
121 | ax.set_title('%d'%(i+1), color='green')
122 | else:
123 | ax.set_title('%d'%(i+1), color='red')
124 | print(img_path)
125 | img256 = Image.open(img_path)
126 | #img256 = img256.resize((256,256))
127 | #copyfile(img_path, './%d%s/%d.jpg'%(opts.query_index,adv,i) )
128 | img256.save('./%d%s%d/%d.jpg'%(opts.query_index,adv,opts.method,i) )
129 | fig.savefig('result.jpg')
130 | except RuntimeError:
131 | for i in range(10):
132 |         img_path = image_datasets['gallery'].imgs[index[i+1]]
133 |         print(img_path[0])
134 |     print('To see the visualization of the ranking result, a graphical user interface is needed.')
135 |
136 |
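137 | # The green/red subplot titles mark whether each retrieved image shares the
138 | # query label. Example (assumes attack_query/baseline-0/16/ exists):
139 | #   python demo.py --query_index 666 --adv --method 0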
--------------------------------------------------------------------------------
/cub/losses.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import yaml
4 | import torch.nn as nn
5 | from torch.autograd import Variable
6 | import torch.nn.functional as F
7 | from torch.nn import Parameter
8 | from torch.nn import init
9 | import math
10 |
11 | def L2Normalization(ff, dim = 1):
12 | # ff is B*N
13 | fnorm = torch.norm(ff, p=2, dim=dim, keepdim=True) + 1e-5
14 | ff = ff.div(fnorm.expand_as(ff))
15 | return ff
16 |
17 | def myphi(x,m): # Taylor expansion of cos(m*x)
18 |     x = x * m
19 |     return 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6) + \
20 |            x**8/math.factorial(8) - x**10/math.factorial(10)
21 |
22 | # I largely modified the AngleLinear Loss
23 | class AngleLinear(nn.Module):
24 | def __init__(self, in_features, out_features, m = 4, phiflag=True):
25 | super(AngleLinear, self).__init__()
26 | self.in_features = in_features
27 | self.out_features = out_features
28 | self.weight = Parameter(torch.Tensor(in_features,out_features))
29 | init.normal_(self.weight.data, std=0.001)
30 | self.phiflag = phiflag
31 | self.m = m
32 | self.mlambda = [
33 | lambda x: x**0,
34 | lambda x: x**1,
35 | lambda x: 2*x**2-1,
36 | lambda x: 4*x**3-3*x,
37 | lambda x: 8*x**4-8*x**2+1,
38 | lambda x: 16*x**5-20*x**3+5*x
39 | ]
40 |
41 | def forward(self, input):
42 | x = input # size=(B,F) F is feature len
43 | w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
44 |
45 | ww = w.renorm(2,1,1e-5).mul(1e5)
46 | xlen = x.pow(2).sum(1).pow(0.5) # size=B
47 | wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
48 |
49 | cos_theta = x.mm(ww) # size=(B,Classnum)
50 | cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
51 | cos_theta = cos_theta.clamp(-1,1)
52 |
53 | if self.phiflag:
54 | cos_m_theta = self.mlambda[self.m](cos_theta)
55 | theta = Variable(cos_theta.data.acos())
56 | k = (self.m*theta/3.14159265).floor()
57 | n_one = k*0.0 - 1
58 | phi_theta = (n_one**k) * cos_m_theta - 2*k
59 | else:
60 | theta = cos_theta.acos()
61 | phi_theta = myphi(theta,self.m)
62 | phi_theta = phi_theta.clamp(-1*self.m,1)
63 |
64 | cos_theta = cos_theta * xlen.view(-1,1)
65 | phi_theta = phi_theta * xlen.view(-1,1)
66 | output = (cos_theta,phi_theta)
67 |         return output # a tuple of two (B,Classnum) tensors: (cos_theta, phi_theta)
68 |
69 | #https://github.com/auroua/InsightFace_TF/blob/master/losses/face_losses.py#L80
70 | class ArcLinear(nn.Module):
71 | def __init__(self, in_features, out_features, s=64.0):
72 | super(ArcLinear, self).__init__()
73 | self.weight = Parameter(torch.Tensor(in_features,out_features))
74 | init.normal_(self.weight.data, std=0.001)
75 | self.loss_s = s
76 |
77 | def forward(self, input):
78 | embedding = input
79 | nembedding = L2Normalization(embedding, dim=1)*self.loss_s
80 | _weight = L2Normalization(self.weight, dim=0)
81 | fc7 = nembedding.mm(_weight)
82 | output = (fc7, _weight, nembedding)
83 | return output
84 |
85 | class ArcLoss(nn.Module):
86 | def __init__(self, m1=1.0, m2=0.5, m3 =0.0, s = 64.0):
87 | super(ArcLoss, self).__init__()
88 | self.loss_m1 = m1
89 | self.loss_m2 = m2
90 | self.loss_m3 = m3
91 | self.loss_s = s
92 |
93 | def forward(self, input, target):
94 | fc7, _weight, nembedding = input
95 |
96 | index = fc7.data * 0.0 #size=(B,Classnum)
97 | index.scatter_(1,target.data.view(-1,1),1)
98 | index = index.byte()
99 | index = Variable(index)
100 |
101 | zy = fc7[index]
102 | cos_t = zy/self.loss_s
103 | t = torch.acos(cos_t)
104 | t = t*self.loss_m1 + self.loss_m2
105 | body = torch.cos(t) - self.loss_m3
106 |
107 | new_zy = body*self.loss_s
108 | diff = new_zy - zy
109 | fc7[index] += diff
110 | loss = F.cross_entropy(fc7, target)
111 | return loss
112 |
113 | class AngleLoss(nn.Module):
114 | def __init__(self, gamma=0):
115 | super(AngleLoss, self).__init__()
116 | self.gamma = gamma
117 | self.it = 0
118 | self.LambdaMin = 5.0
119 | self.LambdaMax = 1500.0
120 | self.lamb = 1500.0
121 |
122 | def forward(self, input, target):
123 | self.it += 1
124 | cos_theta,phi_theta = input
125 | target = target.view(-1,1) #size=(B,1)
126 |
127 | index = cos_theta.data * 0.0 #size=(B,Classnum)
128 | index.scatter_(1,target.data.view(-1,1),1)
129 | index = index.byte()
130 | index = Variable(index)
131 |
132 | self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it ))
133 | output = cos_theta * 1.0 #size=(B,Classnum)
134 | output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb)
135 | output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb)
136 |
137 | logpt = F.log_softmax(output, dim=1)
138 | logpt = logpt.gather(1,target)
139 | logpt = logpt.view(-1)
140 | pt = Variable(logpt.data.exp())
141 |
142 | loss = -1 * (1-pt)**self.gamma * logpt
143 | loss = loss.mean()
144 |
145 | return loss
146 |
147 |
148 |
149 |
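150 | # How these modules plug together (dimensions below are illustrative):
151 | #   head = AngleLinear(512, 200)       # forward returns (cos_theta, phi_theta)
152 | #   criterion = AngleLoss()
153 | #   cos_phi = head(features)           # features: (B, 512) embeddings
154 | #   loss = criterion(cos_phi, labels)  # labels: (B,) LongTensor of class ids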
--------------------------------------------------------------------------------
/Cuisine-retrieval/losses.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import yaml
4 | import torch.nn as nn
5 | from torch.autograd import Variable
6 | import torch.nn.functional as F
7 | from torch.nn import Parameter
8 | from torch.nn import init
9 | import math
10 |
11 | def L2Normalization(ff, dim = 1):
12 | # ff is B*N
13 | fnorm = torch.norm(ff, p=2, dim=dim, keepdim=True) + 1e-5
14 | ff = ff.div(fnorm.expand_as(ff))
15 | return ff
16 |
17 | def myphi(x,m): # Taylor expansion of cos(m*x)
18 |     x = x * m
19 |     return 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6) + \
20 |            x**8/math.factorial(8) - x**10/math.factorial(10)
21 |
22 | # I largely modified the AngleLinear Loss
23 | class AngleLinear(nn.Module):
24 | def __init__(self, in_features, out_features, m = 4, phiflag=True):
25 | super(AngleLinear, self).__init__()
26 | self.in_features = in_features
27 | self.out_features = out_features
28 | self.weight = Parameter(torch.Tensor(in_features,out_features))
29 | init.normal_(self.weight.data, std=0.001)
30 | self.phiflag = phiflag
31 | self.m = m
32 | self.mlambda = [
33 | lambda x: x**0,
34 | lambda x: x**1,
35 | lambda x: 2*x**2-1,
36 | lambda x: 4*x**3-3*x,
37 | lambda x: 8*x**4-8*x**2+1,
38 | lambda x: 16*x**5-20*x**3+5*x
39 | ]
40 |
41 | def forward(self, input):
42 | x = input # size=(B,F) F is feature len
43 | w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
44 |
45 | ww = w.renorm(2,1,1e-5).mul(1e5)
46 | xlen = x.pow(2).sum(1).pow(0.5) # size=B
47 | wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
48 |
49 | cos_theta = x.mm(ww) # size=(B,Classnum)
50 | cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
51 | cos_theta = cos_theta.clamp(-1,1)
52 |
53 | if self.phiflag:
54 | cos_m_theta = self.mlambda[self.m](cos_theta)
55 | theta = Variable(cos_theta.data.acos())
56 | k = (self.m*theta/3.14159265).floor()
57 | n_one = k*0.0 - 1
58 | phi_theta = (n_one**k) * cos_m_theta - 2*k
59 | else:
60 | theta = cos_theta.acos()
61 | phi_theta = myphi(theta,self.m)
62 | phi_theta = phi_theta.clamp(-1*self.m,1)
63 |
64 | cos_theta = cos_theta * xlen.view(-1,1)
65 | phi_theta = phi_theta * xlen.view(-1,1)
66 | output = (cos_theta,phi_theta)
67 |         return output # a tuple of two (B,Classnum) tensors: (cos_theta, phi_theta)
68 |
69 | #https://github.com/auroua/InsightFace_TF/blob/master/losses/face_losses.py#L80
70 | class ArcLinear(nn.Module):
71 | def __init__(self, in_features, out_features, s=64.0):
72 | super(ArcLinear, self).__init__()
73 | self.weight = Parameter(torch.Tensor(in_features,out_features))
74 | init.normal_(self.weight.data, std=0.001)
75 | self.loss_s = s
76 |
77 | def forward(self, input):
78 | embedding = input
79 | nembedding = L2Normalization(embedding, dim=1)*self.loss_s
80 | _weight = L2Normalization(self.weight, dim=0)
81 | fc7 = nembedding.mm(_weight)
82 | output = (fc7, _weight, nembedding)
83 | return output
84 |
85 | class ArcLoss(nn.Module):
86 | def __init__(self, m1=1.0, m2=0.5, m3 =0.0, s = 64.0):
87 | super(ArcLoss, self).__init__()
88 | self.loss_m1 = m1
89 | self.loss_m2 = m2
90 | self.loss_m3 = m3
91 | self.loss_s = s
92 |
93 | def forward(self, input, target):
94 | fc7, _weight, nembedding = input
95 |
96 | index = fc7.data * 0.0 #size=(B,Classnum)
97 | index.scatter_(1,target.data.view(-1,1),1)
98 | index = index.byte()
99 | index = Variable(index)
100 |
101 | zy = fc7[index]
102 | cos_t = zy/self.loss_s
103 | t = torch.acos(cos_t)
104 | t = t*self.loss_m1 + self.loss_m2
105 | body = torch.cos(t) - self.loss_m3
106 |
107 | new_zy = body*self.loss_s
108 | diff = new_zy - zy
109 | fc7[index] += diff
110 | loss = F.cross_entropy(fc7, target)
111 | return loss
112 |
113 | class AngleLoss(nn.Module):
114 | def __init__(self, gamma=0):
115 | super(AngleLoss, self).__init__()
116 | self.gamma = gamma
117 | self.it = 0
118 | self.LambdaMin = 5.0
119 | self.LambdaMax = 1500.0
120 | self.lamb = 1500.0
121 |
122 | def forward(self, input, target):
123 | self.it += 1
124 | cos_theta,phi_theta = input
125 | target = target.view(-1,1) #size=(B,1)
126 |
127 | index = cos_theta.data * 0.0 #size=(B,Classnum)
128 | index.scatter_(1,target.data.view(-1,1),1)
129 | index = index.byte()
130 | index = Variable(index)
131 |
132 | self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it ))
133 | output = cos_theta * 1.0 #size=(B,Classnum)
134 | output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb)
135 | output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb)
136 |
137 | logpt = F.log_softmax(output, dim=1)
138 | logpt = logpt.gather(1,target)
139 | logpt = logpt.view(-1)
140 | pt = Variable(logpt.data.exp())
141 |
142 | loss = -1 * (1-pt)**self.gamma * logpt
143 | loss = loss.mean()
144 |
145 | return loss
146 |
147 |
148 |
149 |
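150 | # ArcLinear/ArcLoss follow the same pattern: ArcLinear returns
151 | # (fc7, weight, embedding) and ArcLoss rewrites the target logit as
152 | # s * (cos(m1*t + m2) - m3) before applying the usual cross-entropy.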
--------------------------------------------------------------------------------
/cifar/resnet2.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | '''Resnet for cifar dataset.
4 | Ported from
5 | https://github.com/facebook/fb.resnet.torch
6 | and
7 | https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
8 | (c) YANG, Wei
9 | '''
10 | import torch.nn as nn
11 | import math
12 |
13 |
14 | __all__ = ['resnet']
15 |
16 | def conv3x3(in_planes, out_planes, stride=1):
17 | "3x3 convolution with padding"
18 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
19 | padding=1, bias=False)
20 |
21 |
22 | class BasicBlock(nn.Module):
23 | expansion = 1
24 |
25 | def __init__(self, inplanes, planes, stride=1, downsample=None):
26 | super(BasicBlock, self).__init__()
27 | self.conv1 = conv3x3(inplanes, planes, stride)
28 | self.bn1 = nn.BatchNorm2d(planes)
29 | self.relu = nn.ReLU(inplace=True)
30 | self.conv2 = conv3x3(planes, planes)
31 | self.bn2 = nn.BatchNorm2d(planes)
32 | self.downsample = downsample
33 | self.stride = stride
34 |
35 | def forward(self, x):
36 | residual = x
37 |
38 | out = self.conv1(x)
39 | out = self.bn1(out)
40 | out = self.relu(out)
41 |
42 | out = self.conv2(out)
43 | out = self.bn2(out)
44 |
45 | if self.downsample is not None:
46 | residual = self.downsample(x)
47 |
48 | out += residual
49 | out = self.relu(out)
50 |
51 | return out
52 |
53 |
54 | class Bottleneck(nn.Module):
55 | expansion = 4
56 |
57 | def __init__(self, inplanes, planes, stride=1, downsample=None):
58 | super(Bottleneck, self).__init__()
59 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
60 | self.bn1 = nn.BatchNorm2d(planes)
61 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
62 | padding=1, bias=False)
63 | self.bn2 = nn.BatchNorm2d(planes)
64 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
65 | self.bn3 = nn.BatchNorm2d(planes * 4)
66 | self.relu = nn.ReLU(inplace=True)
67 | self.downsample = downsample
68 | self.stride = stride
69 |
70 | def forward(self, x):
71 | residual = x
72 |
73 | out = self.conv1(x)
74 | out = self.bn1(out)
75 | out = self.relu(out)
76 |
77 | out = self.conv2(out)
78 | out = self.bn2(out)
79 | out = self.relu(out)
80 |
81 | out = self.conv3(out)
82 | out = self.bn3(out)
83 |
84 | if self.downsample is not None:
85 | residual = self.downsample(x)
86 |
87 | out += residual
88 | out = self.relu(out)
89 |
90 | return out
91 |
92 |
93 | class ResNet(nn.Module):
94 |
95 | def __init__(self, depth, num_classes=1000):
96 | super(ResNet, self).__init__()
97 | # Model type specifies number of layers for CIFAR-10 model
98 | assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
99 |         n = (depth - 2) // 6 # number of residual blocks per stage
100 |
101 | block = Bottleneck if depth >=44 else BasicBlock
102 |
103 | self.inplanes = 16
104 | self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
105 | bias=False)
106 | self.bn1 = nn.BatchNorm2d(16)
107 | self.relu = nn.ReLU(inplace=True)
108 | self.layer1 = self._make_layer(block, 16, n)
109 | self.layer2 = self._make_layer(block, 32, n, stride=2)
110 | self.layer3 = self._make_layer(block, 64, n, stride=2)
111 | self.avgpool = nn.AvgPool2d(8)
112 |         self.fc = nn.Linear(64 * block.expansion, 2) # 2-dim bottleneck feature before the classifier
113 | self.fc2 = nn.Linear(2, num_classes, bias=False)
114 |
115 | for m in self.modules():
116 | if isinstance(m, nn.Conv2d):
117 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
118 | m.weight.data.normal_(0, math.sqrt(2. / n))
119 | elif isinstance(m, nn.BatchNorm2d):
120 | m.weight.data.fill_(1)
121 | m.bias.data.zero_()
122 |
123 | def _make_layer(self, block, planes, blocks, stride=1):
124 | downsample = None
125 | if stride != 1 or self.inplanes != planes * block.expansion:
126 | downsample = nn.Sequential(
127 | nn.Conv2d(self.inplanes, planes * block.expansion,
128 | kernel_size=1, stride=stride, bias=False),
129 | nn.BatchNorm2d(planes * block.expansion),
130 | )
131 |
132 | layers = []
133 | layers.append(block(self.inplanes, planes, stride, downsample))
134 | self.inplanes = planes * block.expansion
135 | for i in range(1, int(blocks)):
136 | layers.append(block(self.inplanes, planes))
137 |
138 | return nn.Sequential(*layers)
139 |
140 | def forward(self, x):
141 | x = self.conv1(x)
142 | x = self.bn1(x)
143 | x = self.relu(x) # 32x32
144 |
145 | x = self.layer1(x) # 32x32
146 | x = self.layer2(x) # 16x16
147 | x = self.layer3(x) # 8x8
148 |
149 | x = self.avgpool(x)
150 | x = x.view(x.size(0), -1)
151 | x = self.fc(x)
152 | x = self.fc2(x)
153 |
154 | return x
155 |
156 |
157 | def resnet(**kwargs):
158 | """
159 | Constructs a ResNet model.
160 | """
161 | return ResNet(**kwargs)
162 |
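163 | # The depth argument must be of the form 6n+2 (20, 32, 44, 56, 110, ...);
164 | # each of the three stages stacks n residual blocks, and features pass
165 | # through the 2-dim fc layer before the classifier. Example:
166 | #   model = resnet(depth=20, num_classes=10)  # CIFAR-10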
--------------------------------------------------------------------------------
/cub/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 |
19 | ######################################################################
20 | # Options
21 | # --------
22 | parser = argparse.ArgumentParser(description='Testing')
23 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
24 | parser.add_argument('--which_epoch',default='060', type=str, help='0,1,2,3...or last')
25 | parser.add_argument('--test_dir',default='./images',type=str, help='./test_data')
26 | parser.add_argument('--name', default='ft_ResNet50_all', type=str, help='save model path')
27 | parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
28 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
29 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
30 |
31 | opt = parser.parse_args()
32 |
33 | str_ids = opt.gpu_ids.split(',')
34 | #which_epoch = opt.which_epoch
35 | name = opt.name
36 | test_dir = opt.test_dir
37 |
38 | gpu_ids = []
39 | for str_id in str_ids:
40 | id = int(str_id)
41 | if id >=0:
42 | gpu_ids.append(id)
43 |
44 | # set gpu ids
45 | if len(gpu_ids)>0:
46 | torch.cuda.set_device(gpu_ids[0])
47 |
48 | ######################################################################
49 | # Load Data
50 | # ---------
51 | #
52 | # We will use torchvision and torch.utils.data packages for loading the
53 | # data.
54 | #
55 | data_transforms = transforms.Compose([
56 | transforms.Resize((256,256), interpolation=3),
57 | transforms.ToTensor(),
58 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
59 | ############### Ten Crop
60 | #transforms.TenCrop(224),
61 | #transforms.Lambda(lambda crops: torch.stack(
62 | # [transforms.ToTensor()(crop)
63 | # for crop in crops]
64 | # )),
65 | #transforms.Lambda(lambda crops: torch.stack(
66 | # [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop)
67 | # for crop in crops]
68 | # ))
69 | ])
70 |
71 | if opt.PCB:
72 | data_transforms = transforms.Compose([
73 | transforms.Resize((384,192), interpolation=3),
74 | transforms.ToTensor(),
75 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
76 | ])
77 |
78 |
79 | data_dir = test_dir
80 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['test']}
81 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
82 | shuffle=False, num_workers=16) for x in ['test']}
83 |
84 | class_names = image_datasets['test'].classes
85 | use_gpu = torch.cuda.is_available()
86 |
87 | ######################################################################
88 | # Load model
89 | #---------------------------
90 | def load_network(network):
91 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
92 | network.load_state_dict(torch.load(save_path))
93 | return network
94 |
95 |
96 | ######################################################################
97 | # Extract feature
98 | # ----------------------
99 | #
100 | # Extract feature from a trained model.
101 | #
102 | def fliplr(img):
103 | '''flip horizontal'''
104 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
105 | img_flip = img.index_select(3,inv_idx)
106 | return img_flip
107 |
108 | def extract_feature(model,dataloaders):
109 | features = torch.FloatTensor()
110 | labels = []
111 | count = 0
112 | for data in dataloaders:
113 | img, label = data
114 | n, c, h, w = img.size()
115 | count += n
116 | print(count)
117 | #if opt.use_dense:
118 | # ff = torch.FloatTensor(n,1024).zero_()
119 | #else:
120 | # ff = torch.FloatTensor(n,2048).zero_()
121 | #if opt.PCB:
122 |         #    ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts
123 | ff = torch.FloatTensor(n,512).zero_()
124 | for i in range(2):
125 | if(i==1):
126 | img = fliplr(img)
127 | input_img = Variable(img.cuda())
128 | outputs = model(input_img)
129 | f = outputs.data.cpu()
130 | ff = ff+f
131 | # norm feature
132 | if opt.PCB:
133 |             # feature size (n,2048,6)
134 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
135 | ff = ff.div(fnorm.expand_as(ff))
136 | ff = ff.view(ff.size(0), -1)
137 | else:
138 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
139 | ff = ff.div(fnorm.expand_as(ff))
140 |
141 | features = torch.cat((features,ff), 0)
142 | labels.extend(label.numpy())
143 | return features, labels
144 |
145 |
146 | ######################################################################
147 | # Load Collected data Trained model
148 | print('-------test-----------')
149 | if opt.use_dense:
150 | model_structure = ft_net_dense(100)
151 | else:
152 | model_structure = ft_net(100)
153 |
154 | if opt.PCB:
155 | model_structure = PCB(100)
156 |
157 | model = load_network(model_structure)
158 |
159 | # Remove the final fc layer and classifier layer
160 | #model.model.fc = nn.Sequential()
161 | model.classifier.classifier = nn.Sequential()
162 |
163 | # Change to test mode
164 | model = model.eval()
165 | if use_gpu:
166 | model = model.cuda()
167 |
168 | # Extract feature
169 | with torch.no_grad():
170 | test_feature,test_label = extract_feature(model,dataloaders['test'])
171 | result = {'img_f':test_feature.numpy(),'label':test_label}
172 | scipy.io.savemat('pytorch_result.mat',result)
173 | scipy.io.savemat('query.mat',result)
174 |
175 |
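176 | # extract_feature sums the features of the original and horizontally flipped
177 | # image and then L2-normalizes, so the retrieval score reduces to a plain
178 | # inner product between query and gallery features.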
--------------------------------------------------------------------------------
/cub/test_query.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 |
19 | ######################################################################
20 | # Options
21 | # --------
22 | parser = argparse.ArgumentParser(description='Testing')
23 | parser.add_argument('--gpu_ids',default='1', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
24 | parser.add_argument('--which_epoch',default='060', type=str, help='0,1,2,3...or last')
25 | parser.add_argument('--test_dir',default='./images',type=str, help='./test_data')
26 | parser.add_argument('--name', default='ft_ResNet50_all', type=str, help='save model path')
27 | parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
28 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
29 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
30 | parser.add_argument('--output_path',default='./',type=str, help='output path')
31 |
32 | opt = parser.parse_args()
33 |
34 | str_ids = opt.gpu_ids.split(',')
35 | #which_epoch = opt.which_epoch
36 | name = opt.name
37 | test_dir = opt.test_dir
38 |
39 | gpu_ids = []
40 | for str_id in str_ids:
41 | id = int(str_id)
42 | if id >=0:
43 | gpu_ids.append(id)
44 |
45 | # set gpu ids
46 | if len(gpu_ids)>0:
47 | torch.cuda.set_device(gpu_ids[0])
48 |
49 | ######################################################################
50 | # Load Data
51 | # ---------
52 | #
53 | # We will use torchvision and torch.utils.data packages for loading the
54 | # data.
55 | #
56 | data_transforms = transforms.Compose([
57 | transforms.Resize((256,256), interpolation=3),
58 | transforms.ToTensor(),
59 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
60 | ############### Ten Crop
61 | #transforms.TenCrop(224),
62 | #transforms.Lambda(lambda crops: torch.stack(
63 | # [transforms.ToTensor()(crop)
64 | # for crop in crops]
65 | # )),
66 | #transforms.Lambda(lambda crops: torch.stack(
67 | # [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop)
68 | # for crop in crops]
69 | # ))
70 | ])
71 |
72 | if opt.PCB:
73 | data_transforms = transforms.Compose([
74 | transforms.Resize((384,192), interpolation=3),
75 | transforms.ToTensor(),
76 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
77 | ])
78 |
79 |
80 | data_dir = test_dir
81 | image_datasets = {}
82 | image_datasets['test'] = datasets.ImageFolder( data_dir ,data_transforms)
83 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
84 | shuffle=False, num_workers=16) for x in ['test']}
85 |
86 | class_names = image_datasets['test'].classes
87 | use_gpu = torch.cuda.is_available()
88 |
89 | ######################################################################
90 | # Load model
91 | #---------------------------
92 | def load_network(network):
93 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
94 | network.load_state_dict(torch.load(save_path))
95 | return network
96 |
97 |
98 | ######################################################################
99 | # Extract feature
100 | # ----------------------
101 | #
102 | # Extract feature from a trained model.
103 | #
104 | def fliplr(img):
105 | '''flip horizontal'''
106 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
107 | img_flip = img.index_select(3,inv_idx)
108 | return img_flip
109 |
110 | def extract_feature(model,dataloaders):
111 | features = torch.FloatTensor()
112 | labels = []
113 | count = 0
114 | for data in dataloaders:
115 | img, label = data
116 | n, c, h, w = img.size()
117 | count += n
118 | #print(count)
119 | #if opt.use_dense:
120 | # ff = torch.FloatTensor(n,1024).zero_()
121 | #else:
122 | # ff = torch.FloatTensor(n,2048).zero_()
123 | ff = torch.FloatTensor(n,512).zero_()
124 | for i in range(2):
125 | if(i==1):
126 | img = fliplr(img)
127 | input_img = Variable(img.cuda())
128 | outputs = model(input_img)
129 | f = outputs.data.cpu()
130 | ff = ff+f
131 | # norm feature
132 | if opt.PCB:
133 |             # feature size (n,2048,6)
134 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
135 | ff = ff.div(fnorm.expand_as(ff))
136 | ff = ff.view(ff.size(0), -1)
137 | else:
138 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
139 | ff = ff.div(fnorm.expand_as(ff))
140 |
141 | features = torch.cat((features,ff), 0)
142 | labels.extend(label.numpy())
143 | return features, labels
144 |
145 |
146 | ######################################################################
147 | # Load Collected data Trained model
148 | #print('-------test-----------')
149 | if opt.use_dense:
150 | model_structure = ft_net_dense(100)
151 | else:
152 | model_structure = ft_net(100)
153 |
154 | if opt.PCB:
155 | model_structure = PCB(100)
156 |
157 | model = load_network(model_structure)
158 |
159 | # Remove the final fc layer and classifier layer
160 | #model.model.fc = nn.Sequential()
161 | model.classifier.classifier = nn.Sequential()
162 |
163 | # Change to test mode
164 | model = model.eval()
165 | if use_gpu:
166 | model = model.cuda()
167 |
168 | # Extract feature
169 | with torch.no_grad():
170 | test_feature,test_label = extract_feature(model,dataloaders['test'])
171 | #print(test_label)
172 | print(test_feature.shape)
173 | print(len(test_label))
174 | result = {'img_f':test_feature.numpy(),'label':test_label}
175 | scipy.io.savemat(opt.output_path+'/query.mat',result)
176 |
177 |
--------------------------------------------------------------------------------
/Cuisine-retrieval/test_query.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 |
19 | ######################################################################
20 | # Options
21 | # --------
22 | parser = argparse.ArgumentParser(description='Testing')
23 | parser.add_argument('--gpu_ids',default='1', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
24 | parser.add_argument('--which_epoch',default='060', type=str, help='0,1,2,3...or last')
25 | parser.add_argument('--test_dir',default='./Food-cropped/pytorch',type=str, help='./test_data')
26 | parser.add_argument('--name', default='ft_ResNet50_all', type=str, help='save model path')
27 | parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
28 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
29 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
30 | parser.add_argument('--output_path',default='./',type=str, help='output path')
31 |
32 | opt = parser.parse_args()
33 |
34 | str_ids = opt.gpu_ids.split(',')
35 | #which_epoch = opt.which_epoch
36 | name = opt.name
37 | test_dir = opt.test_dir
38 |
39 | gpu_ids = []
40 | for str_id in str_ids:
41 | id = int(str_id)
42 | if id >=0:
43 | gpu_ids.append(id)
44 |
45 | # set gpu ids
46 | if len(gpu_ids)>0:
47 | torch.cuda.set_device(gpu_ids[0])
48 |
49 | ######################################################################
50 | # Load Data
51 | # ---------
52 | #
53 | # We will use torchvision and torch.utils.data packages for loading the
54 | # data.
55 | #
56 | data_transforms = transforms.Compose([
57 | transforms.Resize((256,256), interpolation=3),
58 | transforms.ToTensor(),
59 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
60 | ############### Ten Crop
61 | #transforms.TenCrop(224),
62 | #transforms.Lambda(lambda crops: torch.stack(
63 | # [transforms.ToTensor()(crop)
64 | # for crop in crops]
65 | # )),
66 | #transforms.Lambda(lambda crops: torch.stack(
67 | # [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop)
68 | # for crop in crops]
69 | # ))
70 | ])
71 |
72 | if opt.PCB:
73 | data_transforms = transforms.Compose([
74 | transforms.Resize((384,192), interpolation=3),
75 | transforms.ToTensor(),
76 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
77 | ])
78 |
79 |
80 | data_dir = test_dir
81 | image_datasets = {}
82 | image_datasets['test'] = datasets.ImageFolder( data_dir ,data_transforms)
83 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
84 | shuffle=False, num_workers=16) for x in ['test']}
85 |
86 | class_names = image_datasets['test'].classes
87 | use_gpu = torch.cuda.is_available()
88 |
89 | ######################################################################
90 | # Load model
91 | #---------------------------
92 | def load_network(network):
93 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
94 | network.load_state_dict(torch.load(save_path))
95 | return network
96 |
97 |
98 | ######################################################################
99 | # Extract feature
100 | # ----------------------
101 | #
102 | # Extract feature from a trained model.
103 | #
104 | def fliplr(img):
105 | '''flip horizontal'''
106 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
107 | img_flip = img.index_select(3,inv_idx)
108 | return img_flip
109 |
110 | def extract_feature(model,dataloaders):
111 | features = torch.FloatTensor()
112 | labels = []
113 | count = 0
114 | for data in dataloaders:
115 | img, label = data
116 | n, c, h, w = img.size()
117 | count += n
118 | #print(count)
119 | #if opt.use_dense:
120 | # ff = torch.FloatTensor(n,1024).zero_()
121 | #else:
122 | # ff = torch.FloatTensor(n,2048).zero_()
123 | ff = torch.FloatTensor(n,512).zero_()
124 | for i in range(2):
125 | if(i==1):
126 | img = fliplr(img)
127 | input_img = Variable(img.cuda())
128 | outputs = model(input_img)
129 | f = outputs.data.cpu()
130 | ff = ff+f
131 | # norm feature
132 | if opt.PCB:
133 | # feature size (n,2048,4)
134 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
135 | ff = ff.div(fnorm.expand_as(ff))
136 | ff = ff.view(ff.size(0), -1)
137 | else:
138 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
139 | ff = ff.div(fnorm.expand_as(ff))
140 |
141 | features = torch.cat((features,ff), 0)
142 | labels.extend(label.numpy())
143 | return features, labels
144 |
145 |
146 | ######################################################################
147 | # Load the trained model
148 | #print('-------test-----------')
149 | if opt.use_dense:
150 | model_structure = ft_net_dense(224)
151 | else:
152 | model_structure = ft_net(224)
153 |
154 | if opt.PCB:
155 | model_structure = PCB(224)
156 |
157 | model = load_network(model_structure)
158 |
159 | # Remove the final fc layer and classifier layer
160 | #model.model.fc = nn.Sequential()
161 | model.classifier.classifier = nn.Sequential()
162 |
163 | # Change to test mode
164 | model = model.eval()
165 | if use_gpu:
166 | model = model.cuda()
167 |
168 | # Extract feature
169 | with torch.no_grad():
170 | test_feature,test_label = extract_feature(model,dataloaders['test'])
171 | #print(test_label)
172 | print(test_feature.shape)
173 | print(len(test_label))
174 | result = {'query_f':test_feature.numpy(),'query_label':test_label}
175 | scipy.io.savemat(opt.output_path+'/query.mat',result)
176 |
177 |
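
A note on the pattern above: extract_feature() sums the original and horizontally flipped views, then L2-normalizes, so the saved features can be ranked with a plain inner product (cosine similarity). A minimal sketch of the same idea, with a stand-in `net` in place of the repo's models:

# Minimal sketch of flip-averaged, L2-normalized feature extraction.
# `net` is a stand-in extractor, not a model from this repo.
import torch

def flip_avg_features(net, img):            # img: (n, c, h, w)
    flipped = torch.flip(img, dims=[3])     # reverse the width axis, like fliplr()
    ff = net(img) + net(flipped)            # sum the two views
    fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
    return ff.div(fnorm)                    # unit-norm rows: dot product == cosine

net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 512))
f = flip_avg_features(net, torch.randn(4, 3, 8, 8))
print(f.shape, torch.norm(f, dim=1))        # (4, 512), every norm ~= 1
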
--------------------------------------------------------------------------------
/test_only_query_PCB.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 |
19 | import torch.backends.cudnn as cudnn
20 | cudnn.benchmark = True
21 | ######################################################################
22 | # Options
23 | # --------
24 | parser = argparse.ArgumentParser(description='Training')
25 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
26 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
27 | parser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')
28 | parser.add_argument('--name', default='PCB', type=str, help='save model path')
29 | parser.add_argument('--batchsize', default=48, type=int, help='batchsize')
30 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
31 | parser.add_argument('--test_all', action='store_true', help='test gallery and query')
32 | parser.add_argument('--output_path',default='./',type=str, help='output path')
33 |
34 | opt = parser.parse_args()
35 |
36 | str_ids = opt.gpu_ids.split(',')
37 | #which_epoch = opt.which_epoch
38 | name = opt.name
39 | test_dir = opt.test_dir
40 |
41 | gpu_ids = []
42 | for str_id in str_ids:
43 | id = int(str_id)
44 | if id >=0:
45 | gpu_ids.append(id)
46 |
47 | # set gpu ids
48 | if len(gpu_ids)>0:
49 | torch.cuda.set_device(gpu_ids[0])
50 |
51 | ######################################################################
52 | # Load Data
53 | # ---------
54 | #
55 | # We will use torchvision and torch.utils.data packages for loading the
56 | # data.
57 | #
58 | data_transforms = transforms.Compose([
59 | transforms.Resize((384,192), interpolation=3),
60 | transforms.ToTensor(),
61 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
62 | ])
63 |
64 | data_dir = test_dir
65 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
66 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
67 | shuffle=False, num_workers=4) for x in ['gallery','query']}
68 |
69 | class_names = image_datasets['query'].classes
70 | use_gpu = torch.cuda.is_available()
71 |
72 | ######################################################################
73 | # Load model
74 | #---------------------------
75 | def load_network(network):
76 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
77 | network.load_state_dict(torch.load(save_path))
78 | return network
79 |
80 |
81 | ######################################################################
82 | # Extract feature
83 | # ----------------------
84 | #
85 | # Extract feature from a trained model.
86 | #
87 | def fliplr(img):
88 | '''flip horizontal'''
89 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
90 | img_flip = img.index_select(3,inv_idx)
91 | return img_flip
92 |
93 | def extract_feature(model,dataloaders):
94 | features = torch.FloatTensor()
95 | count = 0
96 |     opt.PCB = True # this script always extracts PCB-style part features
97 | for data in dataloaders:
98 | img, label = data
99 | n, c, h, w = img.size()
100 | count += n
101 | print(count)
102 | if opt.use_dense:
103 | ff = torch.FloatTensor(n,1024).zero_()
104 | else:
105 | ff = torch.FloatTensor(n,2048).zero_()
106 | if opt.PCB:
107 | ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts
108 |
109 | for i in range(2):
110 | if(i==1):
111 | img = fliplr(img)
112 | input_img = Variable(img.cuda())
113 | outputs = model(input_img)
114 | f = outputs.data.cpu()
115 | ff = ff+f
116 | # norm feature
117 | if opt.PCB:
118 | # feature size (n,2048,6)
119 | # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
120 | # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
121 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
122 | ff = ff.div(fnorm.expand_as(ff))
123 | ff = ff.view(ff.size(0), -1)
124 | else:
125 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
126 | ff = ff.div(fnorm.expand_as(ff))
127 |
128 | features = torch.cat((features,ff), 0)
129 | return features
130 |
131 | def get_id(img_path):
132 | camera_id = []
133 | labels = []
134 | for path, v in img_path:
135 | filename = path.split('/')[-1]
136 | label = filename[0:4]
137 | camera = filename.split('c')[1]
138 | if label[0:2]=='-1':
139 | labels.append(-1)
140 | else:
141 | labels.append(int(label))
142 | camera_id.append(int(camera[0]))
143 | return camera_id, labels
144 |
145 | gallery_path = image_datasets['gallery'].imgs
146 | query_path = image_datasets['query'].imgs
147 |
148 | gallery_cam,gallery_label = get_id(gallery_path)
149 | query_cam,query_label = get_id(query_path)
150 |
151 | ######################################################################
152 | # Load the trained model
153 | #print('-------test-----------')
154 |
155 | model_structure = PCB(751)
156 | model = load_network(model_structure)
157 | model = PCB_test(model)
158 |
159 | # Change to test mode
160 | model = model.eval()
161 | if use_gpu:
162 | model = model.cuda()
163 |
164 | model = torch.nn.DataParallel(model)
165 | # Extract feature
166 | if opt.test_all:
167 | gallery_feature = extract_feature(model,dataloaders['gallery'])
168 | result_g = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam}
169 | scipy.io.savemat('gallery_result.mat', result_g)
170 |
171 | query_feature = extract_feature(model,dataloaders['query'])
172 | result_q = {'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam}
173 | scipy.io.savemat('query_result.mat',result_q)
174 | scipy.io.savemat(opt.output_path+'/query_result.mat',result_q)
175 |
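
The sqrt(6) in the normalization comment above can be checked numerically: dividing each 2048-dim part by its own norm times sqrt(6) makes every part contribute 1/6 of the squared length, so the flattened 2048*6 feature ends up exactly unit-norm and cosine scores stay in [-1, 1]:

# Numeric check of the per-part sqrt(6) normalization used for PCB features.
import torch
import numpy as np

ff = torch.randn(4, 2048, 6)                                   # (n, 2048, 6) part features
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)  # per-part norm * sqrt(6)
ff = ff.div(fnorm.expand_as(ff)).view(ff.size(0), -1)          # flatten to (n, 2048*6)
print(torch.norm(ff, p=2, dim=1))                              # ~= 1.0 for every row
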
--------------------------------------------------------------------------------
/test_only_query.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 | import torch.backends.cudnn as cudnn
19 | cudnn.benchmark = True
20 | ######################################################################
21 | # Options
22 | # --------
23 | parser = argparse.ArgumentParser(description='Training')
24 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
25 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
26 | parser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')
27 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
28 | parser.add_argument('--batchsize', default=48, type=int, help='batchsize')
29 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
30 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
31 | parser.add_argument('--test_all', action='store_true', help='test gallery and query')
32 | parser.add_argument('--output_path',default='./',type=str, help='output path')
33 |
34 | opt = parser.parse_args()
35 |
36 | str_ids = opt.gpu_ids.split(',')
37 | #which_epoch = opt.which_epoch
38 | name = opt.name
39 | test_dir = opt.test_dir
40 |
41 | gpu_ids = []
42 | for str_id in str_ids:
43 | id = int(str_id)
44 | if id >=0:
45 | gpu_ids.append(id)
46 |
47 | # set gpu ids
48 | if len(gpu_ids)>0:
49 | torch.cuda.set_device(gpu_ids[0])
50 |
51 | ######################################################################
52 | # Load Data
53 | # ---------
54 | #
55 | # We will use torchvision and torch.utils.data packages for loading the
56 | # data.
57 | #
58 | data_transforms = transforms.Compose([
59 | transforms.Resize((256,128), interpolation=3),
60 | transforms.ToTensor(),
61 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
62 | ])
63 |
64 | data_dir = test_dir
65 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
66 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
67 | shuffle=False, num_workers=4) for x in ['gallery','query']}
68 |
69 | class_names = image_datasets['query'].classes
70 | use_gpu = torch.cuda.is_available()
71 |
72 | ######################################################################
73 | # Load model
74 | #---------------------------
75 | def load_network(network):
76 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
77 | network.load_state_dict(torch.load(save_path))
78 | return network
79 |
80 |
81 | ######################################################################
82 | # Extract feature
83 | # ----------------------
84 | #
85 | # Extract feature from a trained model.
86 | #
87 | def fliplr(img):
88 | '''flip horizontal'''
89 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
90 | img_flip = img.index_select(3,inv_idx)
91 | return img_flip
92 |
93 | def extract_feature(model,dataloaders):
94 | features = torch.FloatTensor()
95 | count = 0
96 | for data in dataloaders:
97 | img, label = data
98 | n, c, h, w = img.size()
99 | count += n
100 | #print(count)
101 |         # both the dense and ResNet heads end in the 512-dim bottleneck
102 |         # once classifier.classifier is removed, so one buffer size works:
103 |         ff = torch.FloatTensor(n,512).zero_()
104 |
105 | if opt.PCB:
106 | ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts
107 | for i in range(2):
108 | if(i==1):
109 | img = fliplr(img)
110 | input_img = Variable(img.cuda())
111 | outputs = model(input_img)
112 | f = outputs.data.cpu()
113 | ff = ff+f
114 | # norm feature
115 | if opt.PCB:
116 | # feature size (n,2048,6)
117 | # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
118 | # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
119 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
120 | ff = ff.div(fnorm.expand_as(ff))
121 | ff = ff.view(ff.size(0), -1)
122 | else:
123 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
124 | ff = ff.div(fnorm.expand_as(ff))
125 |
126 | features = torch.cat((features,ff), 0)
127 | return features
128 |
129 | def get_id(img_path):
130 | camera_id = []
131 | labels = []
132 | for path, v in img_path:
133 | filename = path.split('/')[-1]
134 | label = filename[0:4]
135 | camera = filename.split('c')[1]
136 | if label[0:2]=='-1':
137 | labels.append(-1)
138 | else:
139 | labels.append(int(label))
140 | camera_id.append(int(camera[0]))
141 | return camera_id, labels
142 |
143 | gallery_path = image_datasets['gallery'].imgs
144 | query_path = image_datasets['query'].imgs
145 |
146 | gallery_cam,gallery_label = get_id(gallery_path)
147 | query_cam,query_label = get_id(query_path)
148 |
149 | ######################################################################
150 | # Load the trained model
151 | #print('-------test-----------')
152 | if opt.use_dense:
153 | model_structure = ft_net_dense(751)
154 | else:
155 | model_structure = ft_net(751)
156 |
157 | #model_structure = PCB(751)
158 | model = load_network(model_structure)
159 | #model = PCB_test(model)
160 | model.classifier.classifier= nn.Sequential()
161 | # Change to test mode
162 | model = model.eval()
163 | if use_gpu:
164 | model = model.cuda()
165 |
166 | model = torch.nn.DataParallel(model)
167 | # Extract feature
168 | if opt.test_all:
169 | gallery_feature = extract_feature(model,dataloaders['gallery'])
170 | result_g = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam}
171 | scipy.io.savemat('gallery_result.mat', result_g)
172 |
173 | query_feature = extract_feature(model,dataloaders['query'])
174 | result_q = {'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam}
175 | scipy.io.savemat('query_result.mat',result_q)
176 | scipy.io.savemat(opt.output_path+'/query_result.mat',result_q)
177 |
178 |
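
get_id() above relies on the Market-1501 filename convention: the first four characters are the person ID (-1 marks distractor images) and the digit after 'c' is the camera index. A worked example on an illustrative Market-style filename:

# Worked example of the filename parsing done by get_id().
filename = '0001_c1s1_001051_00.jpg'   # illustrative Market-1501-style name
label = filename[0:4]                  # '0001' -> person ID
camera = filename.split('c')[1]        # '1s1_001051_00.jpg'
print(int(label), int(camera[0]))      # 1 1  -> person 1, camera 1
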
--------------------------------------------------------------------------------
/test_normal.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import numpy as np
12 | import torchvision
13 | from torchvision import datasets, models, transforms
14 | import time
15 | import os
16 | import scipy.io
17 | from model import ft_net, ft_net_dense, PCB, PCB_test
18 |
19 | import torch.backends.cudnn as cudnn
20 | cudnn.benchmark = True
21 |
22 | ######################################################################
23 | # Options
24 | # --------
25 | parser = argparse.ArgumentParser(description='Training')
26 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
27 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
28 | parser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')
29 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
30 | parser.add_argument('--batchsize', default=144, type=int, help='batchsize')
31 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
32 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
33 | parser.add_argument('--test_all', action='store_true', help='test gallery and query')
34 |
35 | opt = parser.parse_args()
36 |
37 | str_ids = opt.gpu_ids.split(',')
38 | #which_epoch = opt.which_epoch
39 | name = opt.name
40 | test_dir = opt.test_dir
41 |
42 | gpu_ids = []
43 | for str_id in str_ids:
44 | id = int(str_id)
45 | if id >=0:
46 | gpu_ids.append(id)
47 |
48 | # set gpu ids
49 | if len(gpu_ids)>0:
50 | torch.cuda.set_device(gpu_ids[0])
51 |
52 | ######################################################################
53 | # Load Data
54 | # ---------
55 | #
56 | # We will use torchvision and torch.utils.data packages for loading the
57 | # data.
58 | #
59 | data_transforms = transforms.Compose([
60 | transforms.Resize((256,128), interpolation=3),
61 | transforms.ToTensor(),
62 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
63 | ])
64 |
65 | if opt.PCB:
66 | data_transforms = transforms.Compose([
67 | transforms.Resize((384,192), interpolation=3),
68 | transforms.ToTensor(),
69 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
70 | ])
71 | h, w = 384, 192
72 |
73 | data_dir = test_dir
74 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
75 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
76 | shuffle=False, num_workers=4) for x in ['gallery','query']}
77 |
78 | class_names = image_datasets['query'].classes
79 | use_gpu = torch.cuda.is_available()
80 |
81 | ######################################################################
82 | # Load model
83 | #---------------------------
84 | def load_network(network):
85 | save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)
86 | network.load_state_dict(torch.load(save_path))
87 | return network
88 |
89 |
90 | ######################################################################
91 | # Extract feature
92 | # ----------------------
93 | #
94 | # Extract feature from a trained model.
95 | #
96 | def fliplr(img):
97 | '''flip horizontal'''
98 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
99 | img_flip = img.index_select(3,inv_idx)
100 | return img_flip
101 |
102 | def extract_feature(model,dataloaders):
103 | features = torch.FloatTensor()
104 | count = 0
105 | for data in dataloaders:
106 | img, label = data
107 | n, c, h, w = img.size()
108 | count += n
109 | print(count)
110 |         # both the dense and ResNet heads end in the 512-dim bottleneck
111 |         # once classifier.classifier is removed, so one buffer size works:
112 |         ff = torch.FloatTensor(n,512).zero_()
113 |
114 | if opt.PCB:
115 | ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts
116 | for i in range(2):
117 | if(i==1):
118 | img = fliplr(img)
119 | input_img = Variable(img.cuda())
120 | outputs = model(input_img)
121 | f = outputs.data.cpu()
122 | ff = ff+f
123 |
124 | # norm feature
125 | if opt.PCB:
126 | # feature size (n,2048,6)
127 | # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
128 | # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
129 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
130 | ff = ff.div(fnorm.expand_as(ff))
131 | ff = ff.view(ff.size(0), -1)
132 | else:
133 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
134 | ff = ff.div(fnorm.expand_as(ff))
135 |
136 | features = torch.cat((features,ff), 0)
137 | return features
138 |
139 | def get_id(img_path):
140 | camera_id = []
141 | labels = []
142 | for path, v in img_path:
143 | filename = path.split('/')[-1]
144 | label = filename[0:4]
145 | camera = filename.split('c')[1]
146 | if label[0:2]=='-1':
147 | labels.append(-1)
148 | else:
149 | labels.append(int(label))
150 | camera_id.append(int(camera[0]))
151 | return camera_id, labels
152 |
153 | gallery_path = image_datasets['gallery'].imgs
154 | query_path = image_datasets['query'].imgs
155 |
156 | gallery_cam,gallery_label = get_id(gallery_path)
157 | query_cam,query_label = get_id(query_path)
158 |
159 | ######################################################################
160 | # Load the trained model
161 | #print('-------test-----------')
162 | if opt.use_dense:
163 | model_structure = ft_net_dense(751)
164 | else:
165 | model_structure = ft_net(751)
166 |
167 | if opt.PCB:
168 | model_structure = PCB(751)
169 | model = load_network(model_structure)
170 |
171 |
172 | if opt.PCB:
173 | model = PCB_test(model)
174 | else:
175 | model.classifier.classifier= nn.Sequential()
176 | # Change to test mode
177 | model = model.eval()
178 | if use_gpu:
179 | model = model.cuda()
180 |
181 | model = torch.nn.DataParallel(model)
182 | # Extract feature
183 | if opt.test_all:
184 | gallery_feature = extract_feature(model,dataloaders['gallery'])
185 | result_g = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam}
186 | scipy.io.savemat('gallery_result.mat', result_g)
187 |
188 | query_feature = extract_feature(model,dataloaders['query'])
189 | result_q = {'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam}
190 | scipy.io.savemat('query_result.mat',result_q)
191 |
192 | os.system('python evaluate_gpu.py')
193 |
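
The script ends by shelling out to evaluate_gpu.py over the two .mat files saved above. Since the features are already unit-norm, the core of that evaluation reduces to an inner-product ranking; a rough sketch is below (the real evaluate_gpu.py also removes junk (-1) and same-camera matches, which this skips):

# Rough ranking sketch over the .mat files saved above; evaluate_gpu.py
# additionally filters junk (-1) and same-camera matches.
import numpy as np
import scipy.io

q = scipy.io.loadmat('query_result.mat')
g = scipy.io.loadmat('gallery_result.mat')
scores = q['query_f'] @ g['gallery_f'].T    # cosine similarity on unit-norm rows
ranks = np.argsort(-scores, axis=1)         # best gallery index first
rank1 = np.mean(g['gallery_label'][0][ranks[:, 0]] == q['query_label'][0])
print('naive rank-1 (no junk/camera filtering): %.3f' % rank1)
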
--------------------------------------------------------------------------------
/Cuisine-retrieval/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import print_function, division
4 |
5 | import argparse
6 | import torch
7 | import torch.nn as nn
8 | import torch.optim as optim
9 | from torch.optim import lr_scheduler
10 | from torch.autograd import Variable
11 | import torch.backends.cudnn as cudnn
12 | import numpy as np
13 | import torchvision
14 | from torchvision import datasets, models, transforms
15 | import time
16 | import os
17 | import scipy.io
18 | import yaml
19 | import math
20 | from model import ft_net, ft_net_dense, ft_net_NAS, PCB, PCB_test
21 | from utils import load_network
22 |
23 | #fp16
24 | try:
25 | from apex.fp16_utils import *
26 | except ImportError: # will be 3.x series
27 | print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
28 | ######################################################################
29 | # Options
30 | # --------
31 |
32 | parser = argparse.ArgumentParser(description='Training')
33 | parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
34 | parser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')
35 | parser.add_argument('--test_dir',default='./Food-cropped/pytorch',type=str, help='./test_data')
36 | parser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')
37 | parser.add_argument('--pool', default='avg', type=str, help='avg|max')
38 | parser.add_argument('--batchsize', default=256, type=int, help='batchsize')
39 | parser.add_argument('--h', default=256, type=int, help='height')
40 | parser.add_argument('--w', default=256, type=int, help='width')
41 | parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
42 | parser.add_argument('--PCB', action='store_true', help='use PCB' )
43 | parser.add_argument('--multi', action='store_true', help='use multiple query' )
44 | parser.add_argument('--fp16', action='store_true', help='use fp16.' )
45 | parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')
46 |
47 | opt = parser.parse_args()
48 | ###load config###
49 | # load the training config
50 | config_path = os.path.join('./model',opt.name,'opts.yaml')
51 | with open(config_path, 'r') as stream:
52 |     config = yaml.load(stream, Loader=yaml.FullLoader) # PyYAML>=5.1 requires an explicit Loader
53 | opt.fp16 = config['fp16']
54 | opt.PCB = config['PCB']
55 | opt.use_dense = config['use_dense']
56 | opt.use_NAS = config['use_NAS']
57 | opt.stride = config['stride']
58 |
59 | if 'h' in config:
60 | opt.h = config['h']
61 | opt.w = config['w']
62 |
63 | if 'nclasses' in config: # to be compatible with old config files
64 | opt.nclasses = config['nclasses']
65 | else:
66 | opt.nclasses = 751
67 |
68 | str_ids = opt.gpu_ids.split(',')
69 | #which_epoch = opt.which_epoch
70 | name = opt.name
71 | test_dir = opt.test_dir
72 |
73 | gpu_ids = []
74 | for str_id in str_ids:
75 | id = int(str_id)
76 | if id >=0:
77 | gpu_ids.append(id)
78 |
79 | print('We use the scale: %s'%opt.ms)
80 | str_ms = opt.ms.split(',')
81 | ms = []
82 | for s in str_ms:
83 | s_f = float(s)
84 | ms.append(math.sqrt(s_f))
85 |
86 | # set gpu ids
87 | if len(gpu_ids)>0:
88 | torch.cuda.set_device(gpu_ids[0])
89 | cudnn.benchmark = True
90 |
91 | ######################################################################
92 | # Load Data
93 | # ---------
94 | #
95 | # We will use torchvision and torch.utils.data packages for loading the
96 | # data.
97 | #
98 | data_transforms = transforms.Compose([
99 | transforms.Resize((opt.h, opt.w), interpolation=3),
100 | transforms.ToTensor(),
101 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
102 | ])
103 |
104 | if opt.PCB:
105 | data_transforms = transforms.Compose([
106 | transforms.Resize((384,192), interpolation=3),
107 | transforms.ToTensor(),
108 | transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
109 | ])
110 |
111 |
112 | data_dir = test_dir
113 |
114 | if opt.multi:
115 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query','multi-query']}
116 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
117 | shuffle=False, num_workers=16) for x in ['gallery','query','multi-query']}
118 | else:
119 | image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
120 | dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
121 | shuffle=False, num_workers=16) for x in ['gallery','query']}
122 | class_names = image_datasets['query'].classes
123 | use_gpu = torch.cuda.is_available()
124 |
125 | ######################################################################
126 | # Extract feature
127 | # ----------------------
128 | #
129 | # Extract feature from a trained model.
130 | #
131 | def fliplr(img):
132 | '''flip horizontal'''
133 | inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
134 | img_flip = img.index_select(3,inv_idx)
135 | return img_flip
136 |
137 | def extract_feature(model,dataloaders):
138 | features = torch.FloatTensor()
139 | labels = torch.LongTensor()
140 | count = 0
141 | for data in dataloaders:
142 | img, label = data
143 | n, c, h, w = img.size()
144 | count += n
145 | print(count)
146 | ff = torch.FloatTensor(n,512).zero_().cuda()
147 | if opt.PCB:
148 | ff = torch.FloatTensor(n,2048,6).zero_().cuda() # we have six parts
149 |
150 | for i in range(2):
151 | if(i==1):
152 | img = fliplr(img)
153 | input_img = Variable(img.cuda())
154 | for scale in ms:
155 | if scale != 1:
156 |                     # bicubic needs pytorch>=1.1, so bilinear is used here
157 | input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bilinear', align_corners=False)
158 | outputs = model(input_img)
159 | ff += outputs
160 | # norm feature
161 | if opt.PCB:
162 | # feature size (n,2048,6)
163 | # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
164 | # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
165 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
166 | ff = ff.div(fnorm.expand_as(ff))
167 | ff = ff.view(ff.size(0), -1)
168 | else:
169 | fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
170 | ff = ff.div(fnorm.expand_as(ff))
171 |
172 | features = torch.cat((features,ff.data.cpu()), 0)
173 | labels = torch.cat((labels,label.data), 0)
174 | return features, labels
175 |
176 |
177 | gallery_path = image_datasets['gallery'].imgs
178 | query_path = image_datasets['query'].imgs
179 |
180 | if opt.multi:
181 | mquery_path = image_datasets['multi-query'].imgs
182 |     mquery_cam,mquery_label = get_id(mquery_path) # NOTE: get_id is not defined in this file; copy it from test_only_query.py before using --multi
183 |
184 | ######################################################################
185 | # Load the trained model
186 | print('-------test-----------')
187 |
188 | model, _, epoch = load_network(opt.name, opt)
189 | model.classifier.classifier = nn.Sequential()
190 | model = model.eval()
191 | if use_gpu:
192 | model = model.cuda()
193 | print(model)
194 | # Extract feature
195 | with torch.no_grad():
196 | query_feature, query_label = extract_feature(model,dataloaders['query'])
197 | gallery_feature, gallery_label = extract_feature(model,dataloaders['gallery'])
198 | if opt.multi:
199 |         mquery_feature, _ = extract_feature(model,dataloaders['multi-query']) # extract_feature returns (features, labels) here
200 |
201 | # Save to Matlab for check
202 | result = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label.numpy(),'query_f':query_feature.numpy(),'query_label':query_label.numpy()}
203 | scipy.io.savemat('pytorch_result.mat',result)
204 | scipy.io.savemat('query.mat',result)
205 |
206 | print(opt.name)
207 | result = './model/%s/result.txt'%opt.name
208 | os.system('python evaluate_gpu.py | tee -a %s'%result)
209 |
210 | if opt.multi:
211 | result = {'mquery_f':mquery_feature.numpy(),'mquery_label':mquery_label,'mquery_cam':mquery_cam}
212 | scipy.io.savemat('multi_query.mat',result)
213 |
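
One detail in the multi-scale code above: each --ms value is square-rooted before being passed to interpolate, so a listed factor s scales image area (each side by sqrt(s)) rather than side length. A compact sketch of the same averaging, with a stand-in extractor `net`:

# Sketch of the multi-scale feature averaging in extract_feature();
# `net` is a stand-in extractor, not a model from this repo.
import math
import torch
import torch.nn as nn

def multiscale_features(net, img, scales=(1.0, 1.1)):
    ff = 0
    for s in scales:
        x = img if s == 1.0 else nn.functional.interpolate(
            img, scale_factor=math.sqrt(s), mode='bilinear', align_corners=False)
        ff = ff + net(x)
    fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
    return ff.div(fnorm)

net = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(3, 8))
print(multiscale_features(net, torch.randn(2, 3, 32, 32)).shape)  # (2, 8)
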
--------------------------------------------------------------------------------
/mnist/attack_mnist.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import torch.optim as optim
7 | from torch.autograd import Variable
8 | from torchvision import datasets, transforms
9 | from model import Net
10 | import matplotlib
11 | matplotlib.use('agg')
12 | import matplotlib.pyplot as plt
13 |
14 | import matplotlib.cm as cm
15 | import numpy as np
16 | from PIL import Image
17 |
18 | parser = argparse.ArgumentParser(description='Testing')
19 | parser.add_argument('--method_id', default=5, type=int, help='1.fast gradient sign || 2.iterative gradient sign || 3.iterative least-likely || 4.label smooth || 5.feature MSE')
20 | parser.add_argument('--rate', default=2, type=int, help='attack rate')
21 |
22 | opt = parser.parse_args()
23 |
24 | colors = cm.rainbow(np.linspace(0, 1, 10))
25 |
26 | ######################################################################
27 | # Load model
28 | #---------------------------
29 | def load_network(network):
30 | save_path = os.path.join('./model/best.pth')
31 | network.load_state_dict(torch.load(save_path))
32 | return network
33 |
34 | def get_feature(model, x):
35 | x = F.max_pool2d(model.conv1(x), 2)
36 | x = F.max_pool2d(model.conv2(x), 2)
37 | x = F.relu(model.conv3(x))
38 | x = x.view(-1, 500)
39 | x = model.fc1(x)
40 | return x
41 |
42 | def attack(model,dataloaders, method_id=5):
43 | count = 0.0
44 | score = 0.0
45 | score5 = 0.0
46 | for data in dataloaders:
47 | img, label = data
48 | n, c, h, w = img.size()
49 | count += n
50 | inputs = Variable(img.cuda(), requires_grad = True)
51 | outputs = model(inputs)
52 | #-----------------attack-------------------
53 |         # The input has been whitened (mean/std normalized), so a one-level
54 |         # step in [0,255] pixel space corresponds to alpha in input space.
55 | alpha = 1.0 / (0.3081 * 255.0)
56 | criterion = nn.CrossEntropyLoss()
57 | inputs_copy = Variable(inputs.data, requires_grad = False)
58 | diff = torch.FloatTensor(inputs.shape).zero_()
59 | diff = Variable(diff.cuda(), requires_grad = False)
60 | #1. FGSM, GradientSignAttack
61 | if method_id == 1:
62 | _, preds = torch.max(outputs.data, 1)
63 | labels = Variable(preds.cuda())
64 | loss = criterion(outputs, labels)
65 | loss.backward()
66 | inputs = inputs + torch.sign(inputs.grad) * opt.rate * alpha
67 | inputs = clip(inputs,n)
68 | #2. IterativeGradientSignAttack
69 | elif method_id == 2:
70 | _, preds = torch.max(outputs.data, 1)
71 | labels = Variable(preds.cuda())
72 | for iter in range( round(min(1.25 * opt.rate, opt.rate+4))):
73 | loss = criterion(outputs, labels)
74 | loss.backward()
75 | diff += torch.sign(inputs.grad)
76 | mask_diff = diff.abs() > opt.rate
77 | diff[mask_diff] = opt.rate * torch.sign(diff[mask_diff])
78 | inputs = inputs_copy + diff * 1.0 * alpha # we use 1 instead of opt.rate
79 | inputs = clip(inputs,n)
80 | inputs = Variable(inputs.data, requires_grad=True)
81 | outputs = model(inputs)
82 | #3. Iterative Least-likely method
83 | elif method_id == 3:
84 | # least likely label is fixed
85 | _, ll_preds = torch.min(outputs.data, 1)
86 | ll_label = Variable(ll_preds, requires_grad=False)
87 | for iter in range( round(min(1.25 * opt.rate, opt.rate+4))):
88 | loss = criterion(outputs, ll_label)
89 | loss.backward()
90 | diff += torch.sign(inputs.grad)
91 | mask_diff = diff.abs() > opt.rate
92 | diff[mask_diff] = opt.rate * torch.sign(diff[mask_diff])
93 | inputs = inputs_copy - diff * 1.0 * alpha # we use 1 instead of opt.rate
94 | inputs = clip(inputs,n)
95 | inputs = Variable(inputs.data, requires_grad=True)
96 | outputs = model(inputs)
97 | #4. Label-smooth method
98 | elif method_id == 4:
99 | batch_size = inputs.shape[0]
100 | smooth_label = torch.ones(batch_size, 10) /10.0
101 | target = Variable(smooth_label.cuda(), requires_grad=False)
102 | criterion2 = nn.MSELoss()
103 |         sm = nn.Softmax(dim = 1) #softmax works on the class dim (the 10 class probabilities sum to 1)
104 | for iter in range( round(min(1.25 * opt.rate, opt.rate+4))):
105 | sm_outputs = sm(outputs)
106 | loss2 = criterion2(sm_outputs, target)
107 | loss2.backward()
108 | prob,_ = torch.max(sm_outputs,1)
109 | #print('iter:%d smooth-loss:%4f max-pre:%4f'%(iter, loss2.data[0],torch.mean(prob)))
110 | diff += torch.sign(inputs.grad)
111 | mask_diff = diff.abs() > opt.rate
112 | diff[mask_diff] = opt.rate * torch.sign(diff[mask_diff])
113 | inputs = inputs_copy - diff * 1.0 * alpha
114 | inputs = clip(inputs,n)
115 | inputs = Variable(inputs.data, requires_grad=True)
116 | outputs = model(inputs)
117 | #5. MSE on feature
118 | elif method_id == 5:
119 | #remove classifier
120 | outputs = get_feature(model,inputs)
121 | #fnorm = torch.norm(outputs, p=2, dim=1, keepdim=True)
122 | #outputs = outputs.div(fnorm.expand_as(outputs))
123 | feature_dim = outputs.shape[1]
124 | batch_size = inputs.shape[0]
125 | #zero_feature = torch.zeros(batch_size,feature_dim)
126 | target = Variable(-outputs.data, requires_grad=False)
127 | criterion2 = nn.MSELoss()
128 | #s = target*target
129 | #print(torch.sum(s))
130 | for iter in range( round(min(1.25 * opt.rate, opt.rate+4))):
131 | loss2 = criterion2(outputs, target)
132 | loss2.backward()
133 | diff += torch.sign(inputs.grad)
134 | mask_diff = diff.abs() > opt.rate
135 | diff[mask_diff] = opt.rate * torch.sign(diff[mask_diff])
136 | inputs = inputs_copy - diff * 1.0 * alpha
137 | inputs = clip(inputs,n)
138 | inputs = Variable(inputs.data, requires_grad=True)
139 | outputs = get_feature(model, inputs)
140 | #fnorm = torch.norm(outputs, p=2, dim=1, keepdim=True)
141 | #outputs = outputs.div(fnorm.expand_as(outputs))
142 | #print( torch.sum(outputs*target))
143 | else:
144 |             print('unknown method id')
145 |
146 | outputs = model(inputs)
147 | location = get_feature(model, inputs)
148 | test(location, label)
149 | outputs = outputs.data.cpu()
150 | _, preds = outputs.topk(5, dim=1)
151 | correct = preds.eq(label.view(n,1).expand_as(preds))
152 | score += torch.sum(correct[:,0])
153 | score5 += torch.sum(correct)
154 | print( '%f | %f'%(score, score5))
155 | return score
156 |
157 |
158 | is_appear = np.zeros(10)
159 | def test(location, label): #location and label
160 | location = location.data.cpu()
161 | label = label.data.cpu().numpy()
162 | for i in range(location.size(0)):
163 | l = label[i]
164 | if is_appear[l]==0:
165 | is_appear[l] = 1
166 | ax.scatter( location[i, 0], location[i, 1], c=colors[l], s=10, label = l,
167 | alpha=0.7, edgecolors='none')
168 | else:
169 | ax.scatter( location[i, 0], location[i, 1], c=colors[l], s=10,
170 | alpha=0.7, edgecolors='none')
171 | return
172 |
173 | data_transform = transforms.Compose([
174 | transforms.ToTensor(),
175 | transforms.Normalize((0.1307,), (0.3081,))])
176 |
177 | test_loader = torch.utils.data.DataLoader(
178 | datasets.MNIST('../data', train=False, download = True, transform=data_transform),
179 | batch_size=100, shuffle=False)
180 |
181 | model = Net()
182 | model = load_network(model)
183 | model = model.eval()
184 | model = model.cuda()
185 |
186 | #######################################################################
187 | # Create upper and lower bounds for clipping
188 | # Clip
189 | zeros = np.zeros((28,28),dtype=np.uint8)
190 | zeros = Image.fromarray(zeros)
191 | zeros = data_transform(zeros)
192 |
193 | ones = 255*np.ones((28,28), dtype=np.uint8)
194 | ones = Image.fromarray(ones)
195 | ones = data_transform(ones)
196 |
197 | zeros,ones = zeros.cuda(),ones.cuda()
198 |
199 | def clip(inputs, batch_size):
200 | inputs = inputs.data
201 | for i in range(batch_size):
202 | inputs[i] = clip_single(inputs[i])
203 | inputs = Variable(inputs.cuda())
204 | return inputs
205 |
206 | def clip_single(input):
207 |     low_mask = input < zeros
208 |     up_mask = input > ones
209 | input[low_mask] = zeros[low_mask]
210 | input[up_mask] = ones[up_mask]
211 | return input
212 |
213 | #######################################################################
214 | # Main
215 | #test(model, test_loader)
216 | fig, ax = plt.subplots()
217 | score = attack(model, test_loader)
218 | ax.grid(True)
219 | ax.set_xlim(-200, 200)
220 | ax.set_ylim(-200, 200)
221 | ax.legend(loc='best')
222 | fig.savefig('train%d-%d.jpg'%(opt.rate, score))
223 |
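
The per-step sign updates and the iteration budget round(min(1.25*rate, rate+4)) above follow the basic iterative attack recipe of Kurakin et al., with alpha converting one intensity level of [0,255] pixel space into the whitened input space. A compact single-step FGSM sketch of the same ingredients, with a stand-in classifier:

# One FGSM step in whitened MNIST space; `model` is a stand-in classifier.
import torch
import torch.nn as nn

def fgsm_step(model, img, rate, std=0.3081):
    alpha = 1.0 / (std * 255.0)        # one [0,255] intensity level, whitened
    img = img.clone().requires_grad_(True)
    out = model(img)
    labels = out.argmax(dim=1)         # attack the model's own prediction
    nn.CrossEntropyLoss()(out, labels).backward()
    return img.detach() + rate * alpha * torch.sign(img.grad)

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
adv = fgsm_step(model, torch.randn(4, 1, 28, 28), rate=8)
print(adv.shape)                       # (4, 1, 28, 28)
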
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.nn import init
4 | from torchvision import models
5 | from torch.autograd import Variable
6 | import pretrainedmodels
7 | import timm
8 |
9 | ######################################################################
10 | def weights_init_kaiming(m):
11 | classname = m.__class__.__name__
12 | # print(classname)
13 | if classname.find('Conv') != -1:
14 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
15 | elif classname.find('Linear') != -1:
16 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
17 | init.constant_(m.bias.data, 0.0)
18 | elif classname.find('BatchNorm1d') != -1:
19 | init.normal_(m.weight.data, 1.0, 0.02)
20 | init.constant_(m.bias.data, 0.0)
21 |
22 | def weights_init_classifier(m):
23 | classname = m.__class__.__name__
24 | if classname.find('Linear') != -1:
25 | init.normal_(m.weight.data, std=0.001)
26 | init.constant_(m.bias.data, 0.0)
27 |
28 | # Defines the new fc layer and classification layer
29 | # |--Linear--|--bn--|--relu--|--Linear--|
30 | class ClassBlock(nn.Module):
31 | def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f = False):
32 | super(ClassBlock, self).__init__()
33 | self.return_f = return_f
34 | add_block = []
35 | if linear:
36 | add_block += [nn.Linear(input_dim, num_bottleneck)]
37 | else:
38 | num_bottleneck = input_dim
39 | if bnorm:
40 | add_block += [nn.BatchNorm1d(num_bottleneck)]
41 | if relu:
42 | add_block += [nn.LeakyReLU(0.1)]
43 | if droprate>0:
44 | add_block += [nn.Dropout(p=droprate)]
45 | add_block = nn.Sequential(*add_block)
46 | add_block.apply(weights_init_kaiming)
47 |
48 | classifier = []
49 | classifier += [nn.Linear(num_bottleneck, class_num)]
50 | classifier = nn.Sequential(*classifier)
51 | classifier.apply(weights_init_classifier)
52 |
53 | self.add_block = add_block
54 | self.classifier = classifier
55 | def forward(self, x):
56 | x = self.add_block(x)
57 | if self.return_f:
58 | f = x
59 | x = self.classifier(x)
60 | return [x,f]
61 | else:
62 | x = self.classifier(x)
63 | return x
64 |
65 | # Define the ResNet50-based Model
66 | class ft_net(nn.Module):
67 |
68 | def __init__(self, class_num=751, droprate=0.5, stride=2, circle=False):
69 | super(ft_net, self).__init__()
70 | model_ft = models.resnet50(pretrained=True)
71 | # avg pooling to global pooling
72 | if stride == 1:
73 | model_ft.layer4[0].downsample[0].stride = (1,1)
74 | model_ft.layer4[0].conv2.stride = (1,1)
75 | model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
76 | self.model = model_ft
77 | self.circle = circle
78 | self.classifier = ClassBlock(2048, class_num, droprate, return_f = circle)
79 |
80 | def forward(self, x):
81 | x = self.model.conv1(x)
82 | x = self.model.bn1(x)
83 | x = self.model.relu(x)
84 | x = self.model.maxpool(x)
85 | x = self.model.layer1(x)
86 | x = self.model.layer2(x)
87 | x = self.model.layer3(x)
88 | x = self.model.layer4(x)
89 | x = self.model.avgpool(x)
90 | x = x.view(x.size(0), x.size(1))
91 | x = self.classifier(x)
92 | return x
93 |
94 |
95 | # Define the swin_base_patch4_window7_224 Model
96 | # pytorch > 1.6
97 | class ft_net_swin(nn.Module):
98 |
99 | def __init__(self, class_num, droprate=0.5, stride=2, circle=False):
100 | super(ft_net_swin, self).__init__()
101 | model_ft = timm.create_model('swin_base_patch4_window7_224', pretrained=True)
102 | # avg pooling to global pooling
103 | #model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
104 | model_ft.head = nn.Sequential() # save memory
105 | self.model = model_ft
106 | self.circle = circle
107 | self.classifier = ClassBlock(1024, class_num, droprate, return_f = circle)
108 |
109 | def forward(self, x):
110 | x = self.model.forward_features(x)
111 | x = self.classifier(x)
112 | return x
113 |
114 |
115 | # Define the DenseNet121-based Model
116 | class ft_net_dense(nn.Module):
117 |
118 | def __init__(self, class_num, droprate=0.5, circle=False):
119 | super().__init__()
120 | model_ft = models.densenet121(pretrained=True)
121 | model_ft.features.avgpool = nn.AdaptiveAvgPool2d((1,1))
122 | model_ft.fc = nn.Sequential()
123 | self.model = model_ft
124 | self.circle = circle
125 | # For DenseNet, the feature dim is 1024
126 | self.classifier = ClassBlock(1024, class_num, droprate, return_f=circle)
127 |
128 | def forward(self, x):
129 | x = self.model.features(x)
130 | x = x.view(x.size(0), x.size(1))
131 | x = self.classifier(x)
132 | return x
133 |
134 | # Define the NAS-based Model
135 | class ft_net_NAS(nn.Module):
136 |
137 | def __init__(self, class_num, droprate=0.5):
138 | super().__init__()
139 | model_name = 'nasnetalarge'
140 | # pip install pretrainedmodels
141 | model_ft = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
142 | model_ft.avg_pool = nn.AdaptiveAvgPool2d((1,1))
143 | model_ft.dropout = nn.Sequential()
144 | model_ft.last_linear = nn.Sequential()
145 | self.model = model_ft
146 |         # For NASNet-A Large, the feature dim is 4032
147 | self.classifier = ClassBlock(4032, class_num, droprate)
148 |
149 | def forward(self, x):
150 | x = self.model.features(x)
151 | x = self.model.avg_pool(x)
152 | x = x.view(x.size(0), x.size(1))
153 | x = self.classifier(x)
154 | return x
155 |
156 | # Define the ResNet50-based Model (Middle-Concat)
157 | # In the spirit of "The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching." Yu, Qian, et al. arXiv:1711.08106 (2017).
158 | class ft_net_middle(nn.Module):
159 |
160 | def __init__(self, class_num=751, droprate=0.5):
161 | super(ft_net_middle, self).__init__()
162 | model_ft = models.resnet50(pretrained=True)
163 | # avg pooling to global pooling
164 | model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
165 | self.model = model_ft
166 | self.classifier = ClassBlock(2048, class_num, droprate)
167 |
168 | def forward(self, x):
169 | x = self.model.conv1(x)
170 | x = self.model.bn1(x)
171 | x = self.model.relu(x)
172 | x = self.model.maxpool(x)
173 | x = self.model.layer1(x)
174 | x = self.model.layer2(x)
175 | x = self.model.layer3(x)
176 | x = self.model.layer4(x)
177 | x = self.model.avgpool(x)
178 | x = torch.squeeze(x)
179 | x = self.classifier(x) #use our classifier.
180 | return x
181 |
182 | # Part Model proposed in Yifan Sun etal. (2018)
183 | class PCB(nn.Module):
184 | def __init__(self, class_num ):
185 | super(PCB, self).__init__()
186 |
187 | self.part = 6 # We cut the pool5 to 6 parts
188 | model_ft = models.resnet50(pretrained=True)
189 | self.model = model_ft
190 | self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
191 | self.dropout = nn.Dropout(p=0.5)
192 | # remove the final downsample
193 | self.model.layer4[0].downsample[0].stride = (1,1)
194 | self.model.layer4[0].conv2.stride = (1,1)
195 | # define 6 classifiers
196 | for i in range(self.part):
197 | name = 'classifier'+str(i)
198 | setattr(self, name, ClassBlock(2048, class_num, droprate=0.5, relu=False, bnorm=True, num_bottleneck=256))
199 |
200 | def forward(self, x):
201 | x = self.model.conv1(x)
202 | x = self.model.bn1(x)
203 | x = self.model.relu(x)
204 | x = self.model.maxpool(x)
205 |
206 | x = self.model.layer1(x)
207 | x = self.model.layer2(x)
208 | x = self.model.layer3(x)
209 | x = self.model.layer4(x)
210 | x = self.avgpool(x)
211 | x = self.dropout(x)
212 | part = {}
213 | predict = {}
214 | # get six part feature batchsize*2048*6
215 | for i in range(self.part):
216 | part[i] = x[:,:,i].view(x.size(0), x.size(1))
217 | name = 'classifier'+str(i)
218 | c = getattr(self,name)
219 | predict[i] = c(part[i])
220 |
221 | # sum prediction
222 | #y = predict[0]
223 | #for i in range(self.part-1):
224 | # y += predict[i+1]
225 | y = []
226 | for i in range(self.part):
227 | y.append(predict[i])
228 | return y
229 |
230 | class PCB_test(nn.Module):
231 | def __init__(self,model):
232 | super(PCB_test,self).__init__()
233 | self.part = 6
234 | self.model = model.model
235 | self.avgpool = nn.AdaptiveAvgPool2d((self.part,1))
236 | # remove the final downsample
237 | self.model.layer4[0].downsample[0].stride = (1,1)
238 | self.model.layer4[0].conv2.stride = (1,1)
239 |
240 | def forward(self, x):
241 | x = self.model.conv1(x)
242 | x = self.model.bn1(x)
243 | x = self.model.relu(x)
244 | x = self.model.maxpool(x)
245 |
246 | x = self.model.layer1(x)
247 | x = self.model.layer2(x)
248 | x = self.model.layer3(x)
249 | x = self.model.layer4(x)
250 | x = self.avgpool(x)
251 | y = x.view(x.size(0),x.size(1),x.size(2))
252 | return y
253 | '''
254 | # debug model structure
255 | # Run this code with:
256 | python model.py
257 | '''
258 | if __name__ == '__main__':
259 | # Here I left a simple forward function.
260 | # Test the model, before you train it.
261 | net = ft_net_swin(751, stride=1)
262 | net.classifier = nn.Sequential()
263 | print(net)
264 | input = Variable(torch.FloatTensor(8, 3, 224, 224))
265 | output = net(input)
266 | print('net output size:')
267 | print(output.shape)
268 |
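
For reference, PCB above returns a list of six per-part logits at training time (one ClassBlock per horizontal stripe of the pooled layer4 map), while PCB_test returns the raw (n, 2048, 6) part features. A quick shape check, assuming this model.py is importable and torchvision can download the ResNet-50 weights:

# Shape check for the PCB head: six logit tensors at train time,
# (n, 2048, 6) part features at test time.
import torch
from model import PCB, PCB_test

net = PCB(751)
y = net(torch.randn(2, 3, 384, 192))
print(len(y), y[0].shape)              # 6 parts, each (2, 751)

f = PCB_test(net)(torch.randn(2, 3, 384, 192))
print(f.shape)                         # (2, 2048, 6)
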
--------------------------------------------------------------------------------