├── IORROILstm.py
├── LICENSE
├── MDN.py
├── README.md
├── components.py
├── conv_lstm_cell.py
├── eval.py
├── figs
│   ├── overview.png
│   └── sample_results.jpg
├── fixation_duration.py
├── imutils.py
├── results
│   └── .DS_Store
├── sample_images
│   ├── .DS_Store
│   ├── img1.jpg
│   ├── img2.jpg
│   ├── img3.jpg
│   ├── sem1.npy
│   ├── sem2.npy
│   └── sem3.npy
├── utils.py
├── vis.py
└── visual_module
    ├── .DS_Store
    ├── vgg.py
    └── visual_features.py

/IORROILstm.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from conv_lstm_cell import ConvLSTMCell, IORLSTMCell
from utils import ChannelAttention, GaussianFilter


class IORROILstm(nn.Module):
    def __init__(self, input_dim, state_dim):
        super(IORROILstm, self).__init__()

        self.input_dim = input_dim
        self.state_dim = state_dim

        self.iorlstm = IORLSTMCell(self.input_dim, self.state_dim, 128)
        self.roilstm = ConvLSTMCell(self.input_dim, 128)

        self.channel_attention = ChannelAttention(self.input_dim, 128)
        self.smooth = GaussianFilter(1, 3)

    def forward(self, x, current_ROI, fix_duration, fix_tran, ior_state, roi_state):
        batch_size = x.size()[0]
        spatial_size = x.size()[2:]

        if ior_state is None:
            state_size = [batch_size, self.state_dim] + list(spatial_size)
            device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
            ior_state = (
                torch.zeros(state_size).to(device),
                torch.zeros(state_size).to(device)
            )
        if roi_state is None:
            state_size = [batch_size, self.state_dim] + list(spatial_size)
            device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
            roi_state = (
                torch.zeros(state_size).to(device),
                torch.zeros(state_size).to(device)
            )

        with torch.no_grad():
            # binarize the current ROI, smooth it, and resize it to the feature map resolution
            current_roi = current_ROI.clone()
            current_roi[current_roi > 0.15] = 1
            current_roi = self.smooth(current_roi)
            current_roi = F.interpolate(current_roi, size=spatial_size, mode='bilinear')

        fix_duration = fix_duration.reshape(batch_size, 1, 1, 1)
        fix_tran = fix_tran.reshape(batch_size, 1, 1, 1)
        ior_hidden, ior_cell = self.iorlstm(x, current_roi, fix_duration, fix_tran, ior_state, roi_state[0])

        ior_map = torch.mean(ior_cell, dim=1, keepdim=True)
        # suppress features at inhibited locations before predicting the next ROI
        xi = x * (1 - ior_cell)

        ca = self.channel_attention(xi, roi_state[0], current_roi)
        xic = xi * ca

        roi_hidden, roi_cell = self.roilstm(xic, roi_state)
        roi_latent = torch.mean(roi_hidden, dim=1, keepdim=True)

        return (ior_hidden, ior_cell), (roi_hidden, roi_cell), ior_map, roi_latent
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.


This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

0. Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

1. Exception to Section 3 of the GNU GPL.

You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

2. Conveying Modified Versions.

If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or

b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.

3. Object Code Incorporating Material from Library Header Files.

The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the object code with a copy of the GNU GPL and this license
document.

4. Combined Works.

You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.

b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.

c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.

d) Do one of the following:

0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.

1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.

e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)

5. Combined Libraries.

You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.

b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.

6. Revised Versions of the GNU Lesser General Public License.

The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
--------------------------------------------------------------------------------
/MDN.py:
--------------------------------------------------------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Categorical


class MDN(nn.Module):
    def __init__(self, input_dim, output_dim, num_gaussians):
        super(MDN, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_gaussians = num_gaussians

        self.pi = nn.Sequential(
            nn.Linear(self.input_dim, 256),
            nn.ReLU(),
            nn.Linear(256, self.num_gaussians),
            nn.Softmax(dim=-1)
        )
        self.mu = nn.Sequential(
            nn.Linear(self.input_dim, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim * self.num_gaussians)
        )
        self.std = nn.Sequential(
            nn.Linear(self.input_dim, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim * self.num_gaussians)
        )
        self.rho = nn.Sequential(
            nn.Linear(self.input_dim, 256),
            nn.ReLU(),
            nn.Linear(256, self.num_gaussians)
        )

        self.mu[-1].bias.data.copy_(torch.rand_like(self.mu[-1].bias))

    def forward(self, x):
        pi = self.pi(x)

        mu = self.mu(x)
        sigma = 1 + F.elu(self.std(x))
        sigma = torch.clamp(sigma, 0.06, 0.12)
        rho = torch.clamp(self.rho(x), -0.25, 0.25)
        mu = mu.reshape(-1, self.num_gaussians, self.output_dim)
        sigma = sigma.reshape(-1, self.num_gaussians, self.output_dim)
        rho = rho.reshape(-1, self.num_gaussians, 1)

        return pi, mu, sigma, rho


def gaussian_probability(mu, sigma, rho, data):
    # density of a bivariate Gaussian with correlation rho, evaluated at data
    mean_x, mean_y = torch.chunk(mu, 2, dim=-1)
    std_x, std_y = torch.chunk(sigma, 2, dim=-1)
    x, y = torch.chunk(data, 2, dim=1)
    dx = x - mean_x
    dy = y - mean_y
    std_xy = std_x * std_y
    z = (dx * dx) / (std_x * std_x) + (dy * dy) / (std_y * std_y) - (2 * rho * dx * dy) / std_xy
    # the usual 2*pi normalizer is replaced by a larger constant to keep the loss well scaled
    training_stabilizer = 20
    norm = 1 / (training_stabilizer * math.pi * std_x * std_y * torch.sqrt(1 - rho * rho))
    p = norm * torch.exp(-z / (1 - rho * rho) * 0.5)

    return p


def mixture_probability(pi, mu, sigma, rho, data):
    pi = pi.unsqueeze(-1)
    prob = pi * gaussian_probability(mu, sigma, rho, data)
    prob = torch.sum(prob, dim=1)

    return prob


def sample_mdn(pi, mu, sigma, rho):
    device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
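    # pick one mixture component per sample, then draw a fixation from that component's bivariate Gaussian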
    cat = Categorical(pi)
    pis = list(cat.sample().data)
    samples = list()
    for i, idx in enumerate(pis):
        loc = mu[i, idx]
        std = sigma[i, idx]
        std_x, std_y = std[0].item(), std[1].item()
        r = rho[i, idx].item()
        # assemble the 2x2 covariance matrix from the per-axis stds and the correlation
        cov_mat = torch.tensor([[std_x * std_x, std_x * std_y * r], [std_x * std_y * r, std_y * std_y]]).to(device)
        MN = MultivariateNormal(loc, covariance_matrix=cov_mat)

        samples.append(MN.sample().unsqueeze(0))

    return torch.cat(samples, dim=0)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Visual Scanpath Prediction
PyTorch implementation of the paper **"Visual Scanpath Prediction using IOR-ROI Recurrent Mixture Density Network"**

## Models

![](figs/overview.png)

The model has four major components: an image feature extractor, an ROI (region of interest) generation module, a fixation duration prediction module, and a saliency guidance network. Given an input image, the feature extractor extracts deep convolutional features and semantic features using off-the-shelf CNN architectures. The ROI generation module is composed of the IOR-ROI LSTM and a mixture density network (MDN). The IOR-ROI LSTM models the inhibition-of-return (IOR) dynamics and attention shift behavior. Rather than predicting a single ROI deterministically at each step, the MDN outputs the distribution of ROIs as a Gaussian mixture, and the next fixation is sampled from this mixture to model the stochasticity of human saccadic behavior. The fixation duration prediction module predicts fixation duration as a Gaussian distribution whose parameters are regressed by a fully connected network. The saliency guidance network is employed only during training; it encourages the feature extractor to encode more saliency information in the convolutional features.

## Run the code

To run the code, you need to download the pre-trained models from [here](https://mega.nz/file/KvxEXS5Z#p-ZxpjiJ6k9Tj9vxH8CGX0Ec9MQW0SJX_XSeEJcmvW0) and extract the files to the **data** folder in the root directory.

    # get the source code
    git clone https://github.com/sunwj/scanpath.git

    # run the code
    python3 eval.py -i image_file -s semantic_file

    # example
    python3 eval.py -i ./sample_images/img1.jpg -s ./sample_images/sem1.npy

The generated scanpaths are saved in the **results** folder.

## Sample results
![](figs/sample_results.jpg)

We provide images and semantic features for the OSIE dataset, which can be downloaded from [here](https://mega.nz/file/CjgE0QQL#rs_i7Dy1BB4b-s8kGrejAG7A-DdFbNfVoZz27RM-cXs). The semantic features are extracted using the [code and pre-trained models](https://github.com/ronghanghu/seg_every_thing); you can use that code to extract semantic features for other images.
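
## Dependencies

The scripts import PyTorch, torchvision, NumPy, SciPy, Matplotlib, and Pillow. A minimal environment can be set up as follows (the package list is inferred from the imports; the repository does not pin versions):

    # packages used by eval.py and its modules (exact versions are an assumption)
    pip3 install torch torchvision numpy scipy matplotlib Pillow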
--------------------------------------------------------------------------------
/components.py:
--------------------------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import MDN
from visual_module.visual_features import FeatureFusion, VGG
from IORROILstm import IORROILstm
from fixation_duration import FixationDuration
from utils import Sampler2D, ROIGenerator, OculomotorBias


first_fix_sampler = Sampler2D(np.load('./data/first_fix_dist.npy'))
ob = OculomotorBias('./data/ob.mat', 12)
roi_gen = ROIGenerator(400, 300, 30)

device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
feature_extractor = VGG(model='vgg19', fine_tune=False).to(device)
fuser = FeatureFusion(3001).to(device)
iorroi_lstm = IORROILstm(512, 512).to(device)
mdn = nn.Sequential(
    nn.Linear(450, 512),
    nn.Tanh(),
    MDN.MDN(512, 2, 10)
).to(device)
fix_duration = FixationDuration(512, 128).to(device)

feature_extractor.load_state_dict(torch.load('./data/weights/vgg19.pth'))
fuser.load_state_dict(torch.load('./data/weights/fuser.pth'))
iorroi_lstm.load_state_dict(torch.load('./data/weights/iorroi.pth'))
mdn.load_state_dict(torch.load('./data/weights/mdn.pth'))
fix_duration.load_state_dict(torch.load('./data/weights/fix_duration.pth'))
--------------------------------------------------------------------------------
/conv_lstm_cell.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


# define some constants
KSIZE = 5
PADDING = KSIZE // 2


class ConvLSTMCell(nn.Module):
    def __init__(self, input_dim, state_dim):
        super(ConvLSTMCell, self).__init__()

        self.input_dim = input_dim
        self.state_dim = state_dim

        self.compute_gates = nn.Conv2d(input_dim + state_dim, 4 * state_dim, KSIZE, padding=PADDING)
        self.compute_gates.bias.data[:state_dim].fill_(1.0)

    def forward(self, x, prev_state):
        batch_size = x.data.size()[0]
        spatial_size = x.data.size()[2:]

        if prev_state is None:
            state_size = [batch_size, self.state_dim] + list(spatial_size)
            device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
            prev_state = (
                torch.zeros(state_size).to(device),
                torch.zeros(state_size).to(device)
            )

        prev_hidden, prev_cell = prev_state

        stacked_input = torch.cat([x, prev_hidden], dim=1)
        gates = self.compute_gates(stacked_input)

        fgate, igate, ogate, g_content = gates.chunk(4, 1)

        igate = torch.sigmoid(igate)
        fgate = torch.sigmoid(fgate)
        ogate = torch.sigmoid(ogate)
        g = torch.tanh(g_content)

        current_cell = fgate * prev_cell + igate * g
        current_hidden = ogate * torch.tanh(current_cell)

        return current_hidden, current_cell


class IORLSTMCell(nn.Module):
    def __init__(self, input_dim, state_dim, roi_state_dim):
        super(IORLSTMCell, self).__init__()

        self.input_dim = input_dim
        self.state_dim = state_dim

        self.forget_gate = nn.Conv2d(input_dim + state_dim + 1 + roi_state_dim, state_dim, KSIZE, padding=PADDING)
        self.update_gate = nn.Conv2d(input_dim + state_dim + 1, state_dim, KSIZE, padding=PADDING)
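        # the update gate writes inhibition at the currently attended ROI, while the
        # forget gate decays the stored inhibition over time (see forward below)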
        self.output_gate = nn.Sequential(
            nn.Conv2d(input_dim + state_dim + 1, self.state_dim, KSIZE, padding=PADDING),
            nn.Sigmoid()
        )
        self.cell_transform = nn.Sequential(
            nn.Conv2d(self.state_dim, self.state_dim, KSIZE, padding=PADDING),
            nn.Tanh()
        )
        self.forget_gate.bias.data.fill_(1.0)

    def forward(self, x, ROI, fix_duration, fix_transition, prev_state, roi_hidden):
        batch_size = x.data.size()[0]
        spatial_size = x.data.size()[2:]

        if prev_state is None:
            state_size = [batch_size, self.state_dim] + list(spatial_size)
            device = torch.cuda.current_device() if torch.cuda.is_available() else torch.device('cpu')
            prev_state = (
                torch.zeros(state_size).to(device),
                torch.zeros(state_size).to(device)
            )

        prev_hidden, prev_cell = prev_state

        with torch.no_grad():
            fix_duration = torch.ones_like(ROI) * fix_duration
            fix_transition = torch.ones_like(ROI) * fix_transition
            duration = fix_duration + fix_transition

        ugate = self.update_gate(torch.cat([x * ROI, prev_hidden, fix_duration], dim=1))
        fgate = self.forget_gate(torch.cat([x, prev_hidden, roi_hidden, duration], dim=1))

        ugate = F.hardtanh(ugate, 0, 1)
        fgate = torch.sigmoid(fgate)

        # inhibition accumulates: the decayed previous state and the new write are merged with max
        current_cell = torch.max(fgate * prev_cell, ugate)
        current_hidden = self.cell_transform(current_cell) * self.output_gate(torch.cat([x, prev_hidden, duration], dim=1))

        return current_hidden, current_cell
--------------------------------------------------------------------------------
/eval.py:
--------------------------------------------------------------------------------
import os
import argparse
import matplotlib.pyplot as plt
from PIL import Image

import torch.nn.functional as F
from torchvision.transforms import Normalize, ToTensor, Compose

# the wildcard import also brings np, torch, device and the pre-trained modules into scope
from components import *
from imutils import pad_img_KAR, pad_array_KAR
from vis import draw_scanpath


parser = argparse.ArgumentParser('Visual scanpath prediction')
parser.add_argument('-i', '--image', type=str, required=True, help='path to the input image')
parser.add_argument('-s', '--semantic', type=str, required=True, help='path to the semantic file')
parser.add_argument('-l', '--length', type=int, default=8, help='scanpath length')
parser.add_argument('-n', '--num_scanpaths', type=int, default=10, help='number of scanpaths to generate')
args = parser.parse_args()

NUM_SCANPATHS = args.num_scanpaths
SCANPATH_LENGTH = args.length

torch.set_grad_enabled(False)

img_orig = Image.open(args.image)
imgs, (pad_w, pad_h) = pad_img_KAR(img_orig, 400, 300)
ratio = imgs.size[0] / 400
imgs = imgs.resize((400, 300))

transform = Compose([ToTensor(), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
imgs = transform(imgs).unsqueeze(0)
imgs = imgs.to(device)

sem_infos = np.load(args.semantic)
sem_infos, (_, _) = pad_array_KAR(sem_infos, 300, 400)
sem_infos = torch.LongTensor(np.int32(sem_infos)).unsqueeze(0).unsqueeze(0)
sem_infos = sem_infos.to(device)
fix_trans = torch.FloatTensor([0.19]).to(device)

# coordinate grid normalized by the image height (300), matching the MDN output scale
y, x = np.mgrid[0:300, 0:400]
x_t = torch.from_numpy(x / 300.).float().reshape(1, 1, -1)
y_t = torch.from_numpy(y / 300.).float().reshape(1, 1, -1)
xy_t = torch.cat([x_t, y_t], dim=1).to(device)

scanpaths = list()
for scanpath_idx in range(NUM_SCANPATHS):
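    # start each scanpath from a first fixation drawn from the empirical first-fixation distribution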
    first_fix = first_fix_sampler.sample()
    ob.set_last_fixation(first_fix[0], first_fix[1])
    pred_sp_x = [first_fix[0]]
    pred_sp_y = [first_fix[1]]
    pred_sp_fd = list()

    feature = feature_extractor(imgs)
    # nearest-neighbor resize (the interpolate default) keeps the integer semantic labels intact
    sem_infos = F.interpolate(sem_infos.float(), size=[feature.size(2), feature.size(3)]).long()
    sem_features = torch.zeros((feature.size(0), 3001, feature.size(2), feature.size(3))).float().to(device)
    sem_features[0, ...].scatter_(0, sem_infos[0, ...], 1)
    fused_feature = fuser(feature, sem_features)

    state_size = [1, 512] + list(fused_feature.size()[2:])
    ior_state = (torch.zeros(state_size).to(device), torch.zeros(state_size).to(device))
    state_size = [1, 128] + list(fused_feature.size()[2:])
    roi_state = (torch.zeros(state_size).to(device), torch.zeros(state_size).to(device))

    pred_xt = torch.tensor(int(pred_sp_x[-1])).float().to(device)
    pred_yt = torch.tensor(int(pred_sp_y[-1])).float().to(device)
    roi_map = roi_gen.generate_roi(pred_xt, pred_yt).unsqueeze(0).unsqueeze(0)
    pred_fd = fix_duration(fused_feature, roi_state[0], roi_map)
    pred_sp_fd.append(pred_fd[0, -1].item() * 750)

    for step in range(0, SCANPATH_LENGTH - 1):
        ior_state, roi_state, _, roi_latent = iorroi_lstm(fused_feature, roi_map, pred_fd, fix_trans, ior_state, roi_state)

        mdn_input = roi_latent.reshape(1, -1)
        pi, mu, sigma, rho = mdn(mdn_input)

        pred_roi_maps = MDN.mixture_probability(pi, mu, sigma, rho, xy_t).reshape((-1, 1, 300, 400))
        samples = list()
        for _ in range(30):
            samples.append(MDN.sample_mdn(pi, mu, sigma, rho).data.cpu().numpy().squeeze())

        samples = np.array(samples)
        samples[:, 0] = samples[:, 0] * 300
        samples[:, 1] = samples[:, 1] * 300
        # discard candidate fixations that fall outside the 400x300 image
        x_mask = (samples[:, 0] > 0) & (samples[:, 0] < 400)
        y_mask = (samples[:, 1] > 0) & (samples[:, 1] < 300)
        samples = samples[x_mask & y_mask, ...]
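
        # score each in-bounds candidate by its ROI probability weighted by the oculomotor bias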
        sample_idx = -1
        max_prob = 0
        roi_prob = pred_roi_maps.data.cpu().numpy().squeeze()
        for idx, sample in enumerate(samples):
            sample = np.int32(sample)
            p_ob = ob.prob(sample[0], sample[1])
            p_roi = roi_prob[sample[1], sample[0]]
            if p_ob * p_roi > max_prob:
                max_prob = p_ob * p_roi
                sample_idx = idx

        if sample_idx == -1:
            # no valid candidate survived; fall back to the first-fixation distribution
            fix = first_fix_sampler.sample()
            pred_sp_x.append(fix[0])
            pred_sp_y.append(fix[1])
        else:
            pred_sp_x.append(samples[sample_idx][0])
            pred_sp_y.append(samples[sample_idx][1])

        ob.set_last_fixation(pred_sp_x[-1], pred_sp_y[-1])

        pred_xt = torch.tensor(int(pred_sp_x[-1])).float().to(device)
        pred_yt = torch.tensor(int(pred_sp_y[-1])).float().to(device)
        roi_map = roi_gen.generate_roi(pred_xt, pred_yt).unsqueeze(0).unsqueeze(0)
        pred_fd = fix_duration(fused_feature, roi_state[0], roi_map)
        pred_sp_fd.append(pred_fd[0, -1].item() * 750)

    # map fixations back to the original image coordinates (undo resizing and padding)
    pred_sp_x = [x * ratio - pad_w // 2 for x in pred_sp_x]
    pred_sp_y = [y * ratio - pad_h // 2 for y in pred_sp_y]
    scanpaths.append(np.array(list(zip(pred_sp_x, pred_sp_y, pred_sp_fd))))

    plt.imshow(img_orig)
    plt.axis('off')
    draw_scanpath(pred_sp_x, pred_sp_y, pred_sp_fd)
    plt.show()

name = os.path.basename(args.image)
name = os.path.splitext(name)[0]
np.save(f'./results/{name}.npy', scanpaths)
--------------------------------------------------------------------------------
/figs/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/figs/overview.png
--------------------------------------------------------------------------------
/figs/sample_results.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/figs/sample_results.jpg
--------------------------------------------------------------------------------
/fixation_duration.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from utils import WeightedGlobalMeanPool


class FixationDuration(nn.Module):
    def __init__(self, feat_dim, roi_hidden_dim):
        super(FixationDuration, self).__init__()
        self.feat_dim = feat_dim
        self.roi_hidden_dim = roi_hidden_dim

        self.mu = nn.Sequential(
            nn.Linear(self.feat_dim + self.roi_hidden_dim, 128),
            nn.Tanh(),
            nn.Linear(128, 32),
            nn.Tanh(),
            nn.Linear(32, 1)
        )
        self.mu[-1].bias.data.copy_(torch.rand_like(self.mu[-1].bias))
        self.log_var = nn.Sequential(
            nn.Linear(self.feat_dim + self.roi_hidden_dim, 128),
            nn.Tanh(),
            nn.Linear(128, 32),
            nn.Tanh(),
            nn.Linear(32, 1)
        )
        self.pool = WeightedGlobalMeanPool(keepdim=False)

    def forward(self, feat, hidden, current_ROI):
        spatial_size = feat.size()[2:]
        with torch.no_grad():
            current_roi = F.interpolate(current_ROI, size=spatial_size, mode='bilinear')

        feat_vec = self.pool(feat, current_roi)
        hidden_vec = self.pool(hidden, current_roi)

        gaussian_input = torch.cat([feat_vec, hidden_vec], dim=-1)
        mu = self.mu(gaussian_input)
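        # keep the predicted mean within the normalized duration range [0, 4]; eval.py scales durations by 750 ms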
        mu = torch.clamp(mu, 0, 4)
        sigma_squared = torch.exp(self.log_var(gaussian_input))
        sigma = torch.sqrt(sigma_squared)
        sigma = sigma.clamp(0.05, 0.1)

        normal_distribution = Normal(mu, sigma)
        sample = normal_distribution.rsample()
        sample = sample.clamp(0, 4)

        return sample
--------------------------------------------------------------------------------
/imutils.py:
--------------------------------------------------------------------------------
import numpy as np
from PIL import Image


def pad_img_KAR(img, target_w, target_h, pad_value=(124, 116, 104)):
    """Pad an image to the target aspect ratio, keeping the content centered; returns (img, (pad_w, pad_h))."""
    w, h = img.size
    if w / h == target_w / target_h:
        return img, (0, 0)

    if w < h or w * (target_h / target_w) < h:
        new_w = int(h * (target_w / target_h))
        new_img = Image.new('RGB', (new_w, h), color=pad_value)
        new_img.paste(img, (int((new_w - w) // 2), 0))
        return new_img, (new_w - w, 0)
    else:
        new_h = int(w * (target_h / target_w))
        new_img = Image.new('RGB', (w, new_h), color=pad_value)
        new_img.paste(img, (0, int((new_h - h) // 2)))
        return new_img, (0, new_h - h)


def pad_array_KAR(arr, target_h, target_w, pad_value=np.array([[0]])):
    """Keep-aspect-ratio padding for a 2-D array; note the (pad_h, pad_w) return order here."""
    h, w = arr.shape
    if w / h == target_w / target_h:
        return arr, (0, 0)

    if w < h or w * (target_h / target_w) < h:
        new_w = int(h * (target_w / target_h))
        new_arr = np.ones((h, new_w)) * pad_value
        new_arr[:, int((new_w - w) // 2):int((new_w - w) // 2) + w] = arr
        return new_arr, (0, new_w - w)
    else:
        new_h = int(w * (target_h / target_w))
        new_arr = np.ones((new_h, w)) * pad_value
        new_arr[int((new_h - h) // 2):int((new_h - h) // 2) + h, :] = arr
        return new_arr, (new_h - h, 0)
--------------------------------------------------------------------------------
/results/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/results/.DS_Store
--------------------------------------------------------------------------------
/sample_images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/.DS_Store
--------------------------------------------------------------------------------
/sample_images/img1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/img1.jpg
--------------------------------------------------------------------------------
/sample_images/img2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/img2.jpg
--------------------------------------------------------------------------------
/sample_images/img3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/img3.jpg
--------------------------------------------------------------------------------
/sample_images/sem1.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/sem1.npy
--------------------------------------------------------------------------------
/sample_images/sem2.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/sem2.npy
--------------------------------------------------------------------------------
/sample_images/sem3.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/sample_images/sem3.npy
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import numpy as np
from scipy.io import loadmat
import torch
import torch.nn as nn
import torch.nn.functional as F
from bisect import bisect_left


class GlobalMeanPool(nn.Module):
    def __init__(self, keepdim=True):
        super(GlobalMeanPool, self).__init__()
        self.keepdim = keepdim

    def forward(self, x):
        b, c, h, w = x.size()
        x = x.reshape(b, c, -1)
        x = torch.mean(x, dim=2)
        if self.keepdim:
            x = x.reshape(b, c, 1, 1)

        return x


class ChannelAttention(nn.Module):
    def __init__(self, channels, state_dim):
        super(ChannelAttention, self).__init__()
        self.pool = GlobalMeanPool()
        self.U = nn.Sequential(
            nn.Conv2d(channels + state_dim, channels, 1),
            nn.Sigmoid()
        )

    def forward(self, x, roi_hidden, roi):
        u = self.U(torch.cat([x, roi_hidden], dim=1))
        # compare channel responses inside the ROI against the surrounding context
        u_roi = self.pool(u * roi)
        u_ctx = self.pool(u * (1 - roi))

        ca = 1 - u_roi * u_ctx
        b, c, h, w = ca.size()
        ca = ca.reshape(b, -1)
        ca = F.softmax(ca, dim=-1)
        ca = ca.reshape(b, c, h, w)

        return ca


class WeightedGlobalMeanPool(nn.Module):
    def __init__(self, keepdim=True):
        super(WeightedGlobalMeanPool, self).__init__()
        self.keepdim = keepdim

    def forward(self, x, weight):
        b, c, h, w = x.size()

        y = x * weight
        y = y.reshape(b, c, -1)
        y = torch.mean(y, dim=-1)
        if self.keepdim:
            y = y.reshape(b, c, 1, 1)

        return y


class Sampler2D:
    """Draw (x, y) samples from a 2-D probability map via its marginal and conditional CDFs."""
    def __init__(self, pdf):
        self.conditional = np.cumsum(pdf, axis=0)
        self.marginal = np.cumsum(self.conditional[-1, :])

    def sample(self):
        v = np.random.rand()
        ind_v = bisect_left(self.marginal, v)

        conditional = self.conditional[:, ind_v].flatten()
        conditional = conditional / conditional[-1]

        u = np.random.rand()
        ind_u = bisect_left(conditional, u)

        return ind_v, ind_u  # x, y


class Sampler1D:
    def __init__(self, pdf, bin_size):
        self.cdf = np.cumsum(pdf)
        assert self.cdf[-1] <= 1
        self.bin_size = bin_size

    def sample(self):
        u = np.random.rand()
        ind_u = bisect_left(self.cdf, u)
        ind_u_right = ind_u + 1
        portion = (u - self.cdf[ind_u]) / (self.cdf[ind_u_right] - self.cdf[ind_u] + 1e-8)

        return self.bin_size * ind_u_right - self.bin_size * portion


class OculomotorBias:
    def __init__(self, ob_file, pixels_per_degree):
        data = loadmat(ob_file, squeeze_me=True, struct_as_record=False)
        self.ob = data['distributionSmooth']
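        # distributionSmooth holds smoothed saccade statistics, indexed below as [amplitude bin, direction in degrees]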
        self.pixels_per_degree = pixels_per_degree
        self.last_x = None
        self.last_y = None

    def set_last_fixation(self, x, y):
        self.last_x = x
        self.last_y = y

    def prob(self, x, y, update=False):
        dx = x - self.last_x
        dy = y - self.last_y

        # saccade direction in [0, 2*pi), measured from the previous fixation
        if dx == 0:
            if dy < 0:
                ang = 3 * np.pi / 2
            else:
                ang = np.pi / 2
        elif dx > 0:
            if dy >= 0:
                ang = np.arctan(dy / dx)
            else:
                ang = 2 * np.pi - np.arctan(-dy / dx)
        else:
            if dy < 0:
                ang = np.pi + np.arctan(-dy / dx)
            else:
                ang = np.pi - np.arctan(dy / dx)

        ang = int(ang / np.pi * 180)
        amp = int(np.sqrt(dx ** 2 + dy ** 2) / self.pixels_per_degree * 4)
        amp = np.clip(amp, 0, 79)

        if update:
            self.set_last_fixation(x, y)

        return self.ob[int(amp), int(ang)]


class ROIGenerator:
    def __init__(self, img_w, img_h, radius):
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        cy, cx = np.meshgrid(np.arange(img_h), np.arange(img_w))

        self.cx = torch.from_numpy(cx.T).float().to(device)
        self.cy = torch.from_numpy(cy.T).float().to(device)
        self.radius = torch.tensor(radius).float().to(device)

    def generate_roi(self, x, y):
        # isotropic Gaussian blob centered on the fixation, truncated below 0.1
        e2 = (self.cx - x) ** 2 + (self.cy - y) ** 2
        roi = torch.exp(-e2 / (2 * self.radius ** 2))
        roi[roi < 0.1] = 0
        return roi


def _gaussian_kernel(size, size_y=None):
    size = int(size)

    if not size_y:
        size_y = size
    else:
        size_y = int(size_y)

    x, y = np.mgrid[-size: size + 1, -size_y: size_y + 1]
    g = np.exp(-(x ** 2 / float(size) + y ** 2 / float(size_y)))

    return g / g.sum()


def _LoG_kernel(size, sigma):
    x, y = np.mgrid[-size: size + 1, -size: size + 1]
    g = (x ** 2 + y ** 2 - 2 * sigma ** 2) / (4 * sigma ** 2) * np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))

    return g


class GaussianFilter(nn.Module):
    def __init__(self, input_channels, gaussian_ksize=3):
        super(GaussianFilter, self).__init__()

        self.input_channels = input_channels
        fgk_size = gaussian_ksize * 2 + 1

        gaussian_kernel = _gaussian_kernel(gaussian_ksize)
        # copy() makes the broadcast array contiguous and writable before handing it to torch
        gaussian_kernel = np.broadcast_to(gaussian_kernel, (self.input_channels, fgk_size, fgk_size)).copy()
        gaussian_kernel = nn.Parameter(torch.from_numpy(gaussian_kernel).float().unsqueeze(1))

        self.conv_gaussian = nn.Sequential(
            nn.ReflectionPad2d(gaussian_ksize),
            nn.Conv2d(self.input_channels, self.input_channels, kernel_size=fgk_size, stride=1, bias=False,
                      groups=self.input_channels)
        )

        self.conv_gaussian[1].weight = gaussian_kernel

        for p in self.parameters():
            p.requires_grad = False

    def forward(self, x):
        output = self.conv_gaussian(x)

        return output
--------------------------------------------------------------------------------
/vis.py:
--------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt

# COLOURS
# all colours are from the Tango colourmap, see:
# http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines#Color_Palette
COLORS = {"butter": ['#fce94f',
                     '#edd400',
                     '#c4a000'],
          "orange": ['#fcaf3e',
                     '#f57900',
                     '#ce5c00'],
          "chocolate": ['#e9b96e',
                        '#c17d11',
                        '#8f5902'],
          "chameleon": ['#8ae234',
                        '#73d216',
                        '#4e9a06'],
          "skyblue": ['#729fcf',
                      '#3465a4',
                      '#204a87'],
          "plum": ['#ad7fa8',
                   '#75507b',
                   '#5c3566'],
          "scarletred": ['#ef2929',
                         '#cc0000',
                         '#a40000'],
          "aluminium": ['#eeeeec',
                        '#d3d7cf',
                        '#babdb6',
                        '#888a85',
                        '#555753',
                        '#2e3436'],
          }

FONT = {'family': 'Cabin', 'size': 15}
matplotlib.rc('font', **FONT)


def draw_scanpath(fix_x, fix_y, fix_d, alpha=1, invert_y=False, ydim=None):
    if fix_d is None:
        # fall back to a constant duration so the per-fixation indexing below still works
        fix_d = [1] * len(fix_x)
    if invert_y:
        if ydim is None:
            raise RuntimeError('ydim must be provided')
        fix_y = [ydim - 1 - y for y in fix_y]

    # saccades as arrows between consecutive fixations
    for i in range(1, len(fix_x)):
        plt.arrow(fix_x[i - 1], fix_y[i - 1], fix_x[i] - fix_x[i - 1], fix_y[i] - fix_y[i - 1], alpha=alpha,
                  fc=COLORS['chameleon'][0], ec=COLORS['chameleon'][0], fill=True, shape='full', width=3, head_width=0,
                  head_starts_at_zero=False, overhang=0)

    # fixations as circles whose size encodes duration; the first is blue, the last is red
    for i in range(len(fix_x)):
        if i == 0:
            plt.plot(fix_x[i], fix_y[i], marker='o', ms=fix_d[i] / 10, mfc=COLORS['skyblue'][0], mec='black', alpha=0.7)
        elif i == len(fix_x) - 1:
            plt.plot(fix_x[i], fix_y[i], marker='o', ms=fix_d[i] / 10, mfc=COLORS['scarletred'][0], mec='black', alpha=0.7)
        else:
            plt.plot(fix_x[i], fix_y[i], marker='o', ms=fix_d[i] / 10, mfc=COLORS['aluminium'][0], mec='black', alpha=0.7)

    for i in range(len(fix_x)):
        plt.text(fix_x[i] - 4, fix_y[i] + 1, str(i + 1), color='black', ha='left', va='center',
                 multialignment='center', alpha=alpha)
--------------------------------------------------------------------------------
/visual_module/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunwj/scanpath/01a685d35d5228c22c57cf5e86d9a9e9ba1cbe3b/visual_module/.DS_Store
--------------------------------------------------------------------------------
/visual_module/vgg.py:
--------------------------------------------------------------------------------
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from collections import OrderedDict


__all__ = ['VGG', 'vgg16', 'vgg19']

model_urls = {
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'
}

cfg = {
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'D_2', 512, 512, 512],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512]
}


class VGG(nn.Module):
    """
    VGG model with only feature layers
    """
    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
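                # He initialization: zero-mean Gaussian with standard deviation sqrt(2 / n)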
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


def make_layers(cfg):
    layers = []
    in_channels = 3
    padding = 1
    dilation = 1
    idx = 0
    for v in cfg:
        if v == 'M':
            layers.append((str(idx), nn.MaxPool2d(kernel_size=2, stride=2)))
            idx += 1
        elif v == 'D_2':
            # assumed handling for the 'D_2' token: switch the remaining convolutions to
            # dilation 2 in place of a pooling stage (the padding/dilation variables above
            # are otherwise unused, so this branch is a reconstruction)
            padding = 2
            dilation = 2
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=padding, dilation=dilation)
            layers.append((str(idx), conv2d))
            idx += 1
            layers.append((str(idx), nn.ReLU(inplace=True)))
            idx += 1
            in_channels = v

    return nn.Sequential(OrderedDict(layers))


def vgg16(pretrained=False):
    """
    dilated vgg16 model with 8 times downscale
    :param pretrained: load parameters pretrained on ImageNet
    :return: dilated vgg16 feature layers before fully connected layer
    """
    model = VGG(make_layers(cfg['D']))

    if pretrained:
        params = model_zoo.load_url(model_urls['vgg16'])
        params = OrderedDict([(key, item) for (key, item) in params.items() if 'features' in key])
        model.load_state_dict(params)

    return model


def vgg19(pretrained=False):
    """
    vgg19 model with 16 times downscale
    :param pretrained: load parameters pretrained on ImageNet
    :return: vgg19 feature layers before fully connected layer
    """
    model = VGG(make_layers(cfg['E']))

    if pretrained:
        params = model_zoo.load_url(model_urls['vgg19'])
        params = OrderedDict([(key, item) for (key, item) in params.items() if 'features' in key])
        model.load_state_dict(params)

    return model
--------------------------------------------------------------------------------
/visual_module/visual_features.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vgg import vgg16, vgg19


class VGG(nn.Module):
    def __init__(self, model='vgg16', fine_tune=True):
        super(VGG, self).__init__()

        backend = vgg16(pretrained=False) if model == 'vgg16' else vgg19(pretrained=False)
        # backend.features[-1] = nn.Tanh()
        features = list(backend.features.children())

        if model == 'vgg16':
            self.front = nn.Sequential(*features[:24])
            self.back = nn.Sequential(*features[24:])
        else:
            self.front = nn.Sequential(*features[:28])
            self.back = nn.Sequential(*features[28:])

        self.fine_tune = fine_tune

        # the front layers are always frozen; the back layers are trainable only when fine-tuning
        for p in self.front.parameters():
            p.requires_grad_(False)

        if not self.fine_tune:
            for p in self.back.parameters():
                p.requires_grad_(False)

    def forward(self, x):
        with torch.no_grad():
            out = self.front(x)

        if self.fine_tune:
            out = self.back(out)
        else:
            with torch.no_grad():
                out = self.back(out)
        return out


class FeatureFusion(nn.Module):
    def __init__(self, sem_channels):
        super(FeatureFusion, self).__init__()

        self.sem_channels = sem_channels
        self.fusion = nn.Sequential(
            nn.Conv2d(512 + sem_channels, 512, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 1),
            nn.Tanh()
        )

    def forward(self, img_feature, sem_info):
        if self.sem_channels == 0:
            return img_feature
        else:
            return self.fusion(torch.cat([img_feature, sem_info], dim=1))
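
# Usage sketch (mirroring components.py and eval.py): the semantic label map is scattered
# into one-hot channels at the feature resolution before fusion, e.g.
#   fused = FeatureFusion(3001)(vgg_features, one_hot_semantics)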
--------------------------------------------------------------------------------