├── .gitignore
├── LICENSE
├── README.md
├── config.py
├── data
│   ├── D.mat
│   ├── D3_M100_lam0.05.mat
│   ├── D_conv.mat
│   ├── W.npy
│   ├── test_images
│   │   ├── Monarch.tif
│   │   ├── Parrots.tif
│   │   ├── barbara.tif
│   │   ├── boats.tif
│   │   ├── cameraman.tif
│   │   ├── couple.tif
│   │   ├── fingerprint.tif
│   │   ├── flinstones.tif
│   │   ├── foreman.tif
│   │   ├── house.tif
│   │   ├── lena256.tif
│   │   └── peppers256.tif
│   └── xtest_n500_p10.npy
├── experiments
│   └── m250_n500_k0.0_p0.1_sinf
│       └── prob.npz
├── main.py
├── matlabs
│   ├── CalculateW.m
│   └── CalculateW_conv.m
├── models
│   ├── ALISTA.py
│   ├── ALISTA_conv.py
│   ├── ALISTA_robust.py
│   ├── AtoW_grad.py
│   ├── LAMP.py
│   ├── LIHT.py
│   ├── LIHT_cs.py
│   ├── LISTA.py
│   ├── LISTA_base.py
│   ├── LISTA_cp.py
│   ├── LISTA_cp_conv.py
│   ├── LISTA_cpss.py
│   ├── LISTA_cpss_conv.py
│   ├── LISTA_cpss_cs.py
│   ├── LISTA_cpss_robust.py
│   ├── LISTA_cs.py
│   ├── LISTA_ss.py
│   ├── LISTA_ss_cs.py
│   ├── TiLISTA.py
│   └── __init__.py
└── utils
    ├── __init__.py
    ├── cs.py
    ├── data.py
    ├── prob.py
    ├── prob_conv.py
    ├── tf.py
    └── train.py

/.gitignore:
--------------------------------------------------------------------------------
1 | *.ipynb
2 | *.pyc
3 | *.npy
4 | *.npz
5 | *.mat
6 | *.png
7 | *.jpg
8 | *.eps
9 | *.sbatch
10 | *.output
11 | *.zip
12 | out.*
13 | README
14 | conv_dicts/
15 | algorithms/
16 | */__pycache__/
17 | .idea/
18 | logs/
19 | data/
20 | experiments/
21 | mat_weights/
22 | results/
23 | deprecated/
24 |
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 TAMU-VITA
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
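Note: the data files that follow are binary blobs referenced by URL. A minimal sketch for inspecting them from Python (the key names stored inside prob.npz depend on what utils/prob.py saved, so the keys are listed rather than assumed):

    import numpy as np

    # W.npy holds a pre-computed weight matrix (cf. matlabs/CalculateW.m).
    W = np.load('data/W.npy')
    print(W.shape)

    # prob.npz stores a saved synthetic problem instance; list its keys
    # before indexing into the archive.
    prob = np.load('experiments/m250_n500_k0.0_p0.1_sinf/prob.npz')
    print(list(prob.keys()))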
22 | -------------------------------------------------------------------------------- /data/D.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/D.mat -------------------------------------------------------------------------------- /data/D3_M100_lam0.05.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/D3_M100_lam0.05.mat -------------------------------------------------------------------------------- /data/D_conv.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/D_conv.mat -------------------------------------------------------------------------------- /data/W.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/W.npy -------------------------------------------------------------------------------- /data/test_images/Monarch.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/Monarch.tif -------------------------------------------------------------------------------- /data/test_images/Parrots.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/Parrots.tif -------------------------------------------------------------------------------- /data/test_images/barbara.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/barbara.tif -------------------------------------------------------------------------------- /data/test_images/boats.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/boats.tif -------------------------------------------------------------------------------- /data/test_images/cameraman.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/cameraman.tif -------------------------------------------------------------------------------- /data/test_images/couple.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/couple.tif -------------------------------------------------------------------------------- /data/test_images/fingerprint.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/fingerprint.tif -------------------------------------------------------------------------------- /data/test_images/flinstones.tif: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/flinstones.tif
--------------------------------------------------------------------------------
/data/test_images/foreman.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/foreman.tif
--------------------------------------------------------------------------------
/data/test_images/house.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/house.tif
--------------------------------------------------------------------------------
/data/test_images/lena256.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/lena256.tif
--------------------------------------------------------------------------------
/data/test_images/peppers256.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/test_images/peppers256.tif
--------------------------------------------------------------------------------
/data/xtest_n500_p10.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/data/xtest_n500_p10.npy
--------------------------------------------------------------------------------
/experiments/m250_n500_k0.0_p0.1_sinf/prob.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/experiments/m250_n500_k0.0_p0.1_sinf/prob.npz
--------------------------------------------------------------------------------
/matlabs/CalculateW.m:
--------------------------------------------------------------------------------
1 | %% A script to calculate the weight matrix W with minimal coherence with the dictionary D:
2 | %       min_W \|W^T D\|^2   subject to   diag(W^T D) = 1
3 | %  This is an implementation of Algorithm 1 in Appendix E.1 of (Liu et al., 2019).
4 | %  Just run this script without any arguments.
5 |
6 | %  Author: Jialin Liu, UCLA math department (danny19921123@gmail.com)
7 | %  Last Modified: 2019-2-15
8 |
9 | %% Note: if the algorithm diverges, try a smaller step size eta.
10 |
11 | %% Script starts.
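% The loop below is plain projected gradient descent:
%   residual : res = D' * W - eye(n)
%   gradient : gra = D * res
%   descent  : W <- W - eta * gra
%   project  : W <- W + (1 - diag(D'*W)') .* D   (applied column-wise)
% The projection restores diag(W'*D) = 1 exactly when the columns of D
% have unit l2-norm (the setting assumed in Liu et al., 2019).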
12 | clear;
13 |
14 | % load dictionary D
15 | load('./D.mat','D');
16 | [m,n] = size(D);
17 |
18 | % Initialization
19 | W = D;
20 | f = func(D,D);
21 |
22 | % Step size
23 | eta = 0.1;
24 |
25 | % Main iteration
26 | fprintf('Calculation Starts...\n');
27 | for t = 1: 1000
28 |
29 |     % calculate residual and gradient
30 |     res = D' * W - eye(n);
31 |     gra = D * res;
32 |
33 |     % gradient descent
34 |     W_next = W - eta * gra;
35 |
36 |     % projection
37 |     W_next = proj(W_next,D);
38 |
39 |     % calculate objective function value
40 |     f_next = func(W_next,D);
41 |
42 |     % stopping condition
43 |     if abs(f-f_next)/f < 1e-12, break; end
44 |
45 |     % update
46 |     W = W_next;
47 |     f = f_next;
48 |
49 |     % report function values
50 |     if mod(t,50) == 0, fprintf('t: %d\t, func: %f\n', t, f); end
51 | end
52 |
53 | % save to file
54 | save('W.mat','W');
55 | fprintf('Calculation ends. Results are saved in W.mat.\n');
56 |
57 | % visualization
58 | visualization(D,W);
59 |
60 |
61 | %% functions
62 | function f = func(W,D)
63 | % calculate function values
64 |     n = size(D,2);
65 |     res = D' * W - eye(n);
66 |     Q = ones(n,n)+eye(n)*(-1);
67 |     res = res .* sqrt(Q);
68 |     f = sum(sum(res.*res));
69 | end
70 |
71 | function W_next = proj(W,D)
72 | % conduct projection
73 |     aw = diag(D'*W);
74 |     aw = repmat(aw',[size(D,1), 1]);
75 |     W_next = W + (1-aw).*D;
76 | end
77 |
78 | function visualization(D,W)
79 | % function for visualizing the coherences between A and W
80 |     n = size(D,2);
81 |
82 |     res = D' * W - eye(n);
83 |     res0 = D' * D - eye(n);
84 |
85 |     figure ('Units', 'pixels', 'Position', [300 300 800 275]) ;
86 |
87 |     subplot(1,2,1);
88 |     histogram(res(~eye(n)),'BinWidth',1e-2);
89 |     hold on;
90 |     histogram(res0(~eye(n)),'BinWidth',1e-2);
91 |     title('off-diagonal');
92 |     legend('W','A');
93 |     hold off;
94 |
95 |     subplot(1,2,2);
96 |     histogram(res(logical(eye(n))),'BinWidth',1e-5);
97 |     hold on;
98 |     histogram(res0(logical(eye(n))),'BinWidth',1e-5);
99 |     hold off;
100 |     title('diagonal');
101 |
102 | end
103 |
--------------------------------------------------------------------------------
/matlabs/CalculateW_conv.m:
--------------------------------------------------------------------------------
1 | %% A script to calculate convolutional kernels with minimal coherence:
2 | %       min \|W_conv^T D_conv\|^2   subject to   diag(W_conv^T D_conv) = 1
3 | %  This is an implementation of Algorithm 2 in Appendix E.2 of (Liu et al., 2019).
4 | %  Just run this script without any arguments.
5 |
6 | %  Author: Jialin Liu, UCLA math department (danny19921123@gmail.com)
7 | %  Last Modified: 2019-2-15
8 |
9 | %% Note: if the algorithm diverges, try a smaller step size eta.
10 |
11 | %% Script starts.
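% Outline of the computation below: the Ds-by-Ds kernels are zero-padded
% to N = 2*Ds-1, the coherence objective is minimized by gradient descent
% on the 2-D FFTs Wf, and each iterate is mapped back to the spatial
% domain, cropped to Ds-by-Ds, and projected so that diag(W'*D) = 1
% holds kernel-wise (Algorithm 2 / Theorem 3 of Liu et al., 2019).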
12 | clear;
13 |
14 | % Load convolutional kernels
15 | load('./D_conv.mat','D');
16 |
17 | % Get the dimensions
18 | Ds = size(D,1);
19 | M = size(D,3);
20 | N = 2*Ds - 1; % Due to Theorem 3 in our paper
21 |
22 | % Initialization
23 | Df = fft2(D,N,N);
24 | Df = reshape(Df, [N,N,1,M]);
25 | Dft = reshape(Df, [N,N,M,1]);
26 | Dfh = reshape(conj(Df), [N,N,M,1]);
27 | W = reshape(D,[Ds,Ds,1,M]);
28 | Wf = Df;
29 |
30 | % Step size in the optimization
31 | eta = 0.002; % step size
32 |
33 | % Main iterations
34 | fprintf('Calculation Starts...\n');
35 | for t = 1:500
36 |
37 |     % calculate residuals and function values
38 |     res = bsxfun(@times, Dfh, Wf);
39 |     f = norm(res(:))^2;
40 |     if mod(t,50)==0, fprintf('t: %d\t f: %.3f\n',t,f); end
41 |
42 |     % calculate gradient
43 |     gra = bsxfun(@times, Dft, res);
44 |     gra = sum(gra, 3);
45 |
46 |     % gradient descent in the Fourier domain
47 |     Wf = Wf - eta * gra;
48 |
49 |     % back to the spatial domain and do projection
50 |     W = ifft2(Wf, 'symmetric');
51 |     W = reshape(W, [N,N,M]);
52 |     W = W(1:Ds,1:Ds,:);
53 |     W = proj(W,D);
54 |
55 |     % calculate FFT for the next step
56 |     Wf = fft2(W,N,N);
57 |     Wf = reshape(Wf, [N,N,1,M]);
58 | end
59 |
60 | % save to file
61 | save('W_conv.mat','W');
62 | fprintf('Calculation ends. Results are saved in W_conv.mat.\n');
63 |
64 | % Visualization
65 | % Please download SPORCO: http://brendt.wohlberg.net/software/SPORCO/
66 | % and copy "util/imdisp.m" and "util/tiledict.m" to the current folder.
67 | % Then uncomment the following two lines to get the visualization.
68 |
69 | % figure;
70 | % imdisp(tiledict(W));
71 |
72 |
73 | %% Functions
74 | function W_out = proj(W_in,D)
75 | % projection of the dictionary on "diag(W^TD)=1"
76 |     M = size(D,3);
77 |     N = size(D,1);
78 |     A = zeros(N*N,M);
79 |     W = zeros(N*N,M);
80 |     for ii = 1:M
81 |         A(:,ii) = reshape(D(:,:,ii),[N*N,1]);
82 |         W(:,ii) = reshape(W_in(:,:,ii),[N*N,1]);
83 |     end
84 |     aw = diag(A'*W);
85 |     aw = repmat(aw',[size(A,1), 1]);
86 |     W_next = W + (1-aw).*A;
87 |     W_out = zeros(N,N,M);
88 |     for ii = 1:M
89 |         W_out(:,:,ii) = reshape(W_next(:,ii),[N,N]);
90 |     end
91 | end
92 |
--------------------------------------------------------------------------------
/models/ALISTA.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | file  : ALISTA.py
6 | author: Xiaohan Chen
7 | email : chernxh@tamu.edu
8 | date  : 2019-02-21
9 |
10 | Implementation of ALISTA.
11 | """
12 |
13 | import numpy as np
14 | import tensorflow as tf
15 |
16 | from utils.tf import shrink_ss, is_tensor
17 | from models.LISTA_base import LISTA_base
18 |
19 |
20 | class ALISTA(LISTA_base):
21 |
22 |     """
23 |     Implementation of ALISTA: the weight W is pre-computed and only the
24 |     per-layer step sizes and thresholds are learned.
25 |     """
26 |
27 |     def __init__(self, A, T, lam, W, percent, max_percent, coord, scope):
28 |         """
29 |         :A   : Numpy ndarray. Dictionary/sensing matrix of shape (M, N).
30 |         :T   : Number of layers (depth) of this ALISTA model.
31 |         :lam : Initial value of the shrinkage thresholds.
32 |         :W   : Pre-computed weight (ndarray or TF tensor); :percent:/:max_percent: set the support-selection schedule; :coord:/:scope: as in LISTA.
32 | """ 33 | self._A = A.astype(np.float32) 34 | self._W = W 35 | self._T = T 36 | self._p = percent 37 | self._maxp = max_percent 38 | self._lam = lam 39 | self._M = self._A.shape[0] 40 | self._N = self._A.shape[1] 41 | 42 | self._scale = 1.001 * np.linalg.norm(A, ord=2)**2 43 | self._theta = (self._lam / self._scale).astype(np.float32) 44 | if coord: 45 | self._theta = np.ones((self._N, 1), dtype=np.float32) * self._theta 46 | 47 | self._ps = [(t+1) * self._p for t in range(self._T)] 48 | self._ps = np.clip(self._ps, 0.0, self._maxp) 49 | 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ Set up layers of ALISTA. 59 | """ 60 | alphas_ = [] # step sizes 61 | thetas_ = [] # thresholds 62 | 63 | with tf.variable_scope(self._scope, reuse=False) as vs: 64 | # constant 65 | self._kA_ = tf.constant(value=self._A, dtype=tf.float32) 66 | if not is_tensor(self._W): 67 | self._W_ = tf.constant(value=self._W, dtype=tf.float32) 68 | else: 69 | self._W_ = self._W 70 | self._Wt_ = tf.transpose(self._W_, perm=[1,0]) 71 | 72 | for t in range(self._T): 73 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 74 | dtype=tf.float32, 75 | initializer=1.0)) 76 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 77 | dtype=tf.float32, 78 | initializer=self._theta)) 79 | 80 | # Collection of all trainable variables in the model layer by layer. 81 | # We name it as `vars_in_layer` because we will use it in the manner: 82 | # vars_in_layer [t] 83 | self.vars_in_layer = list(zip(alphas_, thetas_)) 84 | 85 | 86 | def inference(self, y_, x0_=None): 87 | xhs_ = [] # collection of the regressed sparse codes 88 | 89 | if x0_ is None: 90 | batch_size = tf.shape(y_)[-1] 91 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 92 | else: 93 | xh_ = x0_ 94 | xhs_.append(xh_) 95 | 96 | with tf.variable_scope(self._scope, reuse=True) as vs: 97 | for t in range(self._T): 98 | alpha_, theta_ = self.vars_in_layer[t] 99 | percent = self._ps[t] 100 | 101 | res_ = y_ - tf.matmul(self._kA_, xh_) 102 | zh_ = xh_ + alpha_ * tf.matmul(self._Wt_, res_) 103 | xh_ = shrink_ss(zh_, theta_, percent) 104 | xhs_.append(xh_) 105 | 106 | return xhs_ 107 | 108 | 109 | -------------------------------------------------------------------------------- /models/ALISTA_robust.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : ALISTA_robust.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-21 9 | 10 | Implementation of ALISTA_robust, where the model will take both encoding model A 11 | and weight W as inputs. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | 17 | from utils.tf import shrink_ss, is_tensor 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class ALISTA_robust(LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, M, N, T, percent, max_percent, coord, scope): 28 | """ 29 | :prob: : Instance of Problem class, describing problem settings. 30 | :T : Number of layers (depth) of this LISTA model. 31 | :lam : Initial value of thresholds of shrinkage functions. 32 | :untied : Whether weights are shared within layers. 
33 | """ 34 | self._M = M 35 | self._N = N 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | 40 | self._ps = [(t+1) * self._p for t in range(self._T)] 41 | self._ps = np.clip(self._ps, 0.0, self._maxp) 42 | 43 | self._coord = coord 44 | self._scope = scope 45 | 46 | """ Set up layers.""" 47 | self.setup_layers() 48 | 49 | 50 | def setup_layers(self): 51 | """ Set up layers of ALISTA. 52 | """ 53 | alphas_ = [] # step sizes 54 | thetas_ = [] # thresholds 55 | 56 | theta_shape = (self._n, 1) if self._coord else () 57 | 58 | with tf.variable_scope(self._scope, reuse=False) as vs: 59 | for t in range(self._T): 60 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 61 | dtype=tf.float32, 62 | initializer=1.0)) 63 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 64 | shape=theta_shape, 65 | dtype=tf.float32)) 66 | 67 | # Collection of all trainable variables in the model layer by layer. 68 | # We name it as `vars_in_layer` because we will use it in the manner: 69 | # vars_in_layer [t] 70 | self.vars_in_layer = list(zip(alphas_, thetas_)) 71 | 72 | 73 | def inference(self, y_, A_, W_, x0_=None): 74 | assert A_.shape == W_.shape 75 | if len(A_.shape) > 2: 76 | return self.batch_inference(y_, A_, W_, x0_=None) 77 | 78 | xhs_ = [] # collection of the regressed sparse codes 79 | 80 | if x0_ is None: 81 | batch_size = tf.shape(y_)[-1] 82 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 83 | else: 84 | xh_ = x0_ 85 | xhs_.append(xh_) 86 | 87 | Wt_ = tf.transpose(W_) 88 | with tf.variable_scope(self._scope, reuse=True) as vs: 89 | for t in range(self._T): 90 | alpha_, theta_ = self.vars_in_layer[t] 91 | percent = self._ps[t] 92 | 93 | res_ = y_ - tf.matmul(A_, xh_) 94 | zh_ = xh_ + alpha_ * tf.matmul(Wt_, res_) 95 | xh_ = shrink_ss(zh_, theta_, percent) 96 | xhs_.append(xh_) 97 | 98 | return xhs_ 99 | 100 | def batch_inference(self, ys_, As_, Ws_, x0_=None): 101 | """ 102 | Batch inference. Iterate over ys_, As_ and Wts_. 103 | The first dimension of list_xhs_ stands for the time/iteration in the 104 | model. list_xhs_ [k] is the stacked outputs of all (y_, A_, Wt_) at the 105 | step/iteration k. 106 | """ 107 | # print(ys_.shape) 108 | # print(As_.shape) 109 | # print(Ws_.shape) 110 | list_xhs_ = [[] for i in range(self._T + 1)] 111 | 112 | # iterate over ys_, As_ and Wts_ 113 | batch_size = ys_.shape.as_list()[0] 114 | for i in range(batch_size): 115 | xhs_ = self.inference(ys_[i], As_[i], Ws_[i], x0_) 116 | # append xhs_[t] to list_xhs_[t] for all t 117 | for t, xh_ in enumerate(xhs_): 118 | list_xhs_[t].append(xh_) 119 | 120 | # stacking 121 | stacked_list_xhs_ = list(map(tf.stack, list_xhs_)) 122 | 123 | return stacked_list_xhs_ 124 | 125 | 126 | -------------------------------------------------------------------------------- /models/AtoW_grad.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : AtoW_grad.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-20 9 | """ 10 | 11 | import numpy as np 12 | import tensorflow as tf 13 | import utils.train 14 | 15 | from utils.tf import get_subgradient_func, bmxbm, mxbm 16 | 17 | class AtoW_grad(object): 18 | 19 | """Docstring for AtoW_grad. """ 20 | 21 | def __init__(self, m, n, T, Binit, eta, loss, Q, scope): 22 | """TODO: to be defined1. 
23 | 24 | :T: TODO 25 | :loss: TODO 26 | 27 | """ 28 | self._m = m 29 | self._n = n 30 | self._Binit = Binit 31 | self._T = T 32 | self._loss = loss 33 | self._eta = eta 34 | self._Q = Q 35 | self._scope = scope 36 | 37 | # subgradient function 38 | self._subgradient_func = get_subgradient_func(loss) 39 | 40 | # setup layers 41 | self.setup_layers (scope) 42 | 43 | def setup_layers(self, scope): 44 | """TODO: Docstring for setup_layers. 45 | :returns: TODO 46 | 47 | """ 48 | with tf.variable_scope (scope, reuse=False) as vs: 49 | # B initialization 50 | if isinstance(self._Binit, np.ndarray): 51 | Binit = (self._eta * self._Binit).astype(np.float32) 52 | self._Binit_ = tf.constant(value=Binit, 53 | dtype=tf.float32, 54 | name='Binit') 55 | elif Binit == 'uniform': 56 | self._Binit_ = tf.random_uniform_initializer(-0.01, 0.01, 57 | dtype=tf.float32) 58 | elif Binit == 'normal': 59 | self._Binit_ = tf.random_normal_initializer(0.0, 0.01, 60 | dtype=tf.float32) 61 | 62 | # weights 63 | for i in range (self._T): 64 | tf.get_variable (name='B_%d'%(i+1), 65 | dtype=tf.float32, 66 | initializer=self._Binit_) 67 | 68 | # Q matrix in loss and subgradient 69 | if self._Q is None: 70 | self._Q_ = None 71 | else: 72 | self._Q_ = tf.constant (value=self._Q, dtype=tf.float32, name='Q') 73 | 74 | # identity 75 | eye = np.eye (self._n) 76 | self._eye_ = tf.constant (value=eye, 77 | dtype=tf.float32, 78 | name='eye') 79 | 80 | def inference(self, A_): 81 | """TODO: Docstring for function. 82 | 83 | :A_: A tensor or placeholder with shape (batchsize, m, n) 84 | :returns: TODO 85 | 86 | """ 87 | At_ = tf.transpose (A_, [0,2,1]) 88 | W_ = A_ 89 | Q_ = self._Q_ 90 | with tf.variable_scope (self._scope, reuse=True) as vs: 91 | for i in range (self._T): 92 | Z_ = bmxbm (At_, W_, batch_first=True) - self._eye_ 93 | dF_ = self._subgradient_func (Z_, Q_) 94 | B_ = tf.get_variable ('B_%d'%(i+1)) 95 | W_ = W_ - mxbm (B_, dF_) 96 | 97 | return W_ 98 | 99 | def save_trainable_variables (self , sess , savefn): 100 | """ 101 | Save trainable variables in the model to npz file with current value of each 102 | variable in tf.trainable_variables(). 103 | 104 | :sess: Tensorflow session. 105 | :savefn: File name of saved file. 106 | 107 | """ 108 | state = getattr (self , 'state' , {}) 109 | utils.train.save_trainable_variables( 110 | sess, savefn, self._scope, **state ) 111 | 112 | def load_trainable_variables (self, sess, savefn): 113 | """ 114 | Load trainable variables from saved file. 115 | 116 | :sess: TODO 117 | :savefn: TODO 118 | :returns: TODO 119 | 120 | """ 121 | self.state = utils.train.load_trainable_variables(sess, savefn) 122 | 123 | 124 | -------------------------------------------------------------------------------- /models/LAMP.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LAMP.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified: 2018-10-15 9 | 10 | Implementation of Learned AMP model. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_lamp 18 | from models.LISTA_base import LISTA_base 19 | 20 | class LAMP (LISTA_base): 21 | 22 | """ 23 | Implementation of Learned AMP model. 24 | """ 25 | 26 | def __init__(self, A, T, lam, untied, coord, scope): 27 | """ 28 | :A : Instance of Problem class, describing problem settings. 29 | :T : Number of layers (depth) of this LISTA model. 
30 | :lam : Initial value of thresholds of shrinkage functions. 31 | :untied : Whether weights are shared within layers. 32 | :coord : 33 | :scope : 34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._M = self._A.shape [0] 38 | self._N = self._A.shape [1] 39 | 40 | self._lam = lam 41 | if coord: 42 | self._lam = np.ones ((self._N, 1), dtype=np.float32) * self._lam 43 | 44 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 45 | 46 | self._untied = untied 47 | self._coord = coord 48 | self._scope = scope 49 | 50 | """ Set up layers.""" 51 | self.setup_layers() 52 | 53 | 54 | def setup_layers(self): 55 | """ 56 | Implementation of LISTA model proposed by LeCun in 2010. 57 | 58 | :prob: Problem setting. 59 | :T: Number of layers in LISTA. 60 | :returns: 61 | :layers: List of tuples ( name, xh_, var_list ) 62 | :name: description of layers. 63 | :xh: estimation of sparse code at current layer. 64 | :var_list: list of variables to be trained seperately. 65 | 66 | """ 67 | Bs_ = [] 68 | lams_ = [] 69 | 70 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 71 | 72 | with tf.variable_scope (self._scope, reuse=False) as vs: 73 | # constant 74 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 75 | 76 | if not self._untied: # tied model 77 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 78 | initializer=B)) 79 | Bs_ = Bs_ * self._T 80 | 81 | for t in range (self._T): 82 | lams_.append (tf.get_variable (name="lam_%d"%(t+1), 83 | dtype=tf.float32, 84 | initializer=self._lam)) 85 | if self._untied: # untied model 86 | Bs_.append (tf.get_variable (name='B_%d'%(t+1), 87 | dtype=tf.float32, 88 | initializer=B)) 89 | 90 | # Collection of all trainable variables in the model layer by layer. 91 | # We name it as `vars_in_layer` because we will use it in the manner: 92 | # vars_in_layer [t] 93 | self.vars_in_layer = list (zip (Bs_, lams_)) 94 | 95 | 96 | def inference (self, y_, x0_=None, return_recon=False): 97 | xhs_ = [] # collection of the regressed sparse codes 98 | if return_recon: 99 | yhs_ = [] # collection of the reconstructed signals 100 | 101 | if x0_ is None: 102 | batch_size = tf.shape (y_) [-1] 103 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 104 | else: 105 | xh_ = x0_ 106 | xhs_.append (xh_) 107 | 108 | OneOverM = tf.constant (float(1)/self._M, dtype=tf.float32) 109 | NOverM = tf.constant (float(self._N)/self._M, dtype=tf.float32) 110 | vt_ = tf.zeros_like (y_, dtype=tf.float32) 111 | 112 | with tf.variable_scope (self._scope, reuse=True) as vs: 113 | for t in range (self._T): 114 | B_, lam_ = self.vars_in_layer [t] 115 | 116 | yh_ = tf.matmul (self._kA_, xh_) 117 | if return_recon: 118 | yhs_.append (yh_) 119 | 120 | xhl0_ = tf.reduce_mean (tf.to_float (tf.abs (xh_)>0), axis=0) 121 | bt_ = xhl0_ * NOverM 122 | 123 | vt_ = y_ - yh_ + bt_ * vt_ 124 | rvar_ = tf.reduce_sum (tf.square (vt_), axis=0) * OneOverM 125 | rh_ = xh_ + tf.matmul(B_, vt_) 126 | 127 | xh_ = shrink_lamp (rh_, rvar_, lam_) 128 | xhs_.append (xh_) 129 | 130 | if return_recon: 131 | yhs_.append (tf.matmul (self._kA_, xh_)) 132 | return xhs_, yhs_ 133 | else: 134 | return xhs_ 135 | 136 | # B = A.T / (1.001 * la.norm(A,2)**2) 137 | # B_ = tf.Variable(B,dtype=tf.float32,name='B_1') 138 | # By_ = tf.matmul( B_ , self.prob.y_ ) 139 | 140 | # lam_ = tf.Variable(self.init_lam, dtype=tf.float32, name='lam_1') 141 | # rvar_ = tf.reduce_sum(tf.square(self.prob.y_), axis=0) * OneOverM 142 | # xh_, xhl0_ = eta( By_, rvar_ , lam_ ) 143 | # self.layers.append( 
('LAMP T=1', xh_, (B_, lam_,) ) )
144 |
145 |         # self.xhs_ = [self.x0_, xh_]
146 |
147 |         # vt_ = self.prob.y_
148 |         # for t in range(1, self.T):
149 |         #     bt_ = xhl0_ * NOverM
150 |         #     vt_ = self.prob.y_ - tf.matmul( self.prob.A_ , xh_ ) + bt_ * vt_
151 |         #     rvar_ = tf.reduce_sum(tf.square(vt_), axis=0) * OneOverM
152 |         #     lam_ = tf.Variable(self.init_lam,name='lam_'+str(t+1))
153 |
154 |         #     if self.untied:
155 |         #         B_ = tf.Variable(B, dtype=tf.float32, name='B_'+str(t+1))
156 |         #         var_list = (B_, lam_, )
157 |         #     else:
158 |         #         var_list = (lam_, )
159 |
160 |         #     rh_ = xh_ + tf.matmul(B_, vt_)
161 |         #     xh_, xhl0_ = eta( rh_ , rvar_ , lam_ )
162 |         #     self.xhs_.append (xh_)
163 |         #     self.layers.append( ('LAMP T={}'.format(t+1), xh_, var_list ) )
164 |
165 |
--------------------------------------------------------------------------------
/models/LIHT.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | file  : LIHT.py
6 | author: xhchrn
7 | email : chernxh@tamu.edu
8 | date  : 2018-10-21
9 |
10 | Implementation of Learned Iterative Hard Thresholding (LIHT): a LISTA-style
11 | unrolled network that applies the hard-thresholding operator `hard_shrink`.
12 | """
13 |
14 | import numpy as np
15 | import tensorflow as tf
16 | import utils.train
17 |
18 | from utils.tf import hard_shrink
19 | from models.LISTA_base import LISTA_base
20 |
21 | class LIHT (LISTA_base):
22 |
23 |     """
24 |     Implementation of the LIHT network.
25 |     """
26 |
27 |     def __init__ (self, A, T, lam, untied, coord, scope):
28 |         """
29 |         :A      : Numpy ndarray. Dictionary/sensing matrix.
30 |         :T      : Number of layers (depth) of this LIHT model.
31 |         :lam    : Initial value of thresholds of shrinkage functions.
32 |         :untied : Whether weights are shared within layers.
33 |         """
34 |         self._A = A.astype (np.float32)
35 |         self._T = T
36 |         self._lam = lam
37 |         self._M = self._A.shape [0]
38 |         self._N = self._A.shape [1]
39 |
40 |         self._theta = np.sqrt (self._lam)
41 |         if coord:
42 |             self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta
43 |
44 |         self._untied = untied
45 |         self._coord = coord
46 |         self._scope = scope
47 |
48 |         """ Set up layers."""
49 |         self.setup_layers()
50 |
51 |
52 |     def setup_layers(self):
53 |         """
54 |         Set up the layers of LIHT.
55 |
56 |         :prob: Problem setting.
57 |         :T: Number of layers in LIHT.
58 |         :returns:
59 |             :layers: List of tuples ( name, xh_, var_list )
60 |                 :name: description of layers.
61 |                 :xh: estimation of sparse code at current layer.
62 |                 :var_list: list of variables to be trained separately.
63 | 64 | """ 65 | Bs_ = [] 66 | Ws_ = [] 67 | thetas_ = [] 68 | 69 | B = (np.transpose (self._A)).astype (np.float32) 70 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 71 | 72 | with tf.variable_scope (self._scope, reuse=False) as vs: 73 | # constant 74 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 75 | 76 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 77 | initializer=B)) 78 | Bs_ = Bs_ * self._T 79 | if not self._untied: # tied model 80 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 81 | initializer=W)) 82 | Ws_ = Ws_ * self._T 83 | 84 | for t in range (self._T): 85 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 86 | dtype=tf.float32, 87 | initializer=self._theta)) 88 | if self._untied: # untied model 89 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 90 | dtype=tf.float32, 91 | initializer=W)) 92 | 93 | # Collection of all trainable variables in the model layer by layer. 94 | # We name it as `vars_in_layer` because we will use it in the manner: 95 | # vars_in_layer [t] 96 | self.vars_in_layer = list (zip (Bs_, Ws_, thetas_)) 97 | 98 | 99 | def inference (self, y_, x0_=None): 100 | xhs_ = [] # collection of the regressed sparse codes 101 | 102 | if x0_ is None: 103 | batch_size = tf.shape (y_) [-1] 104 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 105 | else: 106 | xh_ = x0_ 107 | xhs_.append (xh_) 108 | 109 | with tf.variable_scope (self._scope, reuse=True) as vs: 110 | for t in range (self._T): 111 | B_, W_, theta_ = self.vars_in_layer [t] 112 | 113 | By_ = tf.matmul (B_, y_) 114 | xh_ = hard_shrink (tf.matmul (W_, xh_) + By_, theta_) 115 | xhs_.append (xh_) 116 | 117 | return xhs_ 118 | 119 | -------------------------------------------------------------------------------- /models/LIHT_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LIHT_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of the original Learned ISTA for real world image compressive 11 | sensing experiments. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import hard_shrink 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LIHT_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 34 | """ 35 | self._Phi = Phi.astype (np.float32) 36 | self._D = D.astype (np.float32) 37 | self._A = np.matmul (self._Phi, self._D) 38 | self._T = T 39 | self._lam = lam 40 | self._M = self._Phi.shape [0] 41 | self._F = self._Phi.shape [1] 42 | self._N = self._D.shape [1] 43 | 44 | self._theta = np.sqrt (self._lam) 45 | if coord: 46 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 47 | 48 | self._untied = untied 49 | self._coord = coord 50 | self._scope = scope 51 | 52 | """ Set up layers.""" 53 | self.setup_layers() 54 | 55 | 56 | def setup_layers(self): 57 | """ 58 | Implementation of LISTA model proposed by LeCun in 2010. 59 | 60 | :prob: Problem setting. 61 | :T: Number of layers in LISTA. 
62 |         :returns:
63 |             :layers: List of tuples ( name, xh_, var_list )
64 |                 :name: description of layers.
65 |                 :xh: estimation of sparse code at current layer.
66 |                 :var_list: list of variables to be trained separately.
67 |
68 |         """
69 |         Bs_ = []
70 |         Ws_ = []
71 |         thetas_ = []
72 |
73 |         B = np.transpose (self._A).astype (np.float32)  # plain A^T, as in LIHT; this class defines no `_scale`
74 |         W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A)
75 |
76 |         with tf.variable_scope (self._scope, reuse=False) as vs:
77 |             # constant
78 |             self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32)
79 |             self._kD_ = tf.constant (value=self._D, dtype=tf.float32)
80 |             self._kA_ = tf.constant (value=self._A, dtype=tf.float32)
81 |
82 |             # variables
83 |             self._vD_ = tf.get_variable (name='D', dtype=tf.float32,
84 |                                          initializer=self._D)
85 |             Bs_.append (tf.get_variable (name='B', dtype=tf.float32,
86 |                                          initializer=B))
87 |             Bs_ = Bs_ * self._T
88 |             if not self._untied: # tied model
89 |                 Ws_.append (tf.get_variable (name='W', dtype=tf.float32,
90 |                                              initializer=W))
91 |                 Ws_ = Ws_ * self._T
92 |
93 |             for t in range (self._T):
94 |                 thetas_.append (tf.get_variable (name="theta_%d"%(t+1),
95 |                                                  dtype=tf.float32,
96 |                                                  initializer=self._theta))
97 |                 if self._untied: # untied model
98 |                     Ws_.append (tf.get_variable (name="W_%d"%(t+1),
99 |                                                  dtype=tf.float32,
100 |                                                  initializer=W))
101 |
102 |         # Collection of all trainable variables in the model layer by layer.
103 |         # We name it as `vars_in_layer` because we will use it in the manner:
104 |         #     vars_in_layer [t]
105 |         # Note here the last element of `self.vars_in_layer` is
106 |         #     (B_, W_, theta_, vD_)
107 |         self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1]))
108 |         self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, ))
109 |
110 |
111 |     def inference (self, y_, x0_=None):
112 |         xhs_ = []  # collection of the regressed sparse codes
113 |         fhs_ = []  # collection of the regressed signals
114 |
115 |         if x0_ is None:
116 |             batch_size = tf.shape (y_) [-1]
117 |             xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32)
118 |         else:
119 |             xh_ = x0_
120 |         xhs_.append (xh_)
121 |         fhs_.append (tf.matmul (self._kD_, xh_))
122 |
123 |         with tf.variable_scope (self._scope, reuse=True) as vs:
124 |             for t in range (self._T):
125 |                 if t < self._T - 1:
126 |                     B_, W_, theta_ = self.vars_in_layer [t]
127 |                     D_ = self._kD_
128 |                 else:
129 |                     B_, W_, theta_, D_ = self.vars_in_layer [t]
130 |
131 |                 By_ = tf.matmul (B_, y_)
132 |                 xh_ = hard_shrink (tf.matmul (W_, xh_) + By_, theta_)
133 |                 xhs_.append (xh_)
134 |
135 |                 fhs_.append (tf.matmul (D_, xh_))
136 |
137 |         return xhs_, fhs_
138 |
--------------------------------------------------------------------------------
/models/LISTA.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | file  : LISTA.py
6 | author: Xiaohan Chen
7 | email : chernxh@tamu.edu
8 | last_modified : 2018-10-21
9 |
10 | Implementation of Learned ISTA (LISTA) proposed by Gregor & LeCun in 2010.
11 | """
12 |
13 | import numpy as np
14 | import tensorflow as tf
15 | import utils.train
16 |
17 | from utils.tf import shrink
18 | from models.LISTA_base import LISTA_base
19 |
20 | class LISTA (LISTA_base):
21 |
22 |     """
23 |     Implementation of the LISTA model of Gregor & LeCun (2010).
24 |     """
25 |
26 |     def __init__(self, A, T, lam, untied, coord, scope):
27 |         """
28 |         :A      : Numpy ndarray. Dictionary/Sensing matrix.
29 |         :T      : Integer. Number of layers (depth) of this LISTA model.
30 | :lam : Float. The initial weight of l1 loss term in LASSO. 31 | :untied : Boolean. Flag of whether weights are shared within layers. 32 | :scope : String. Scope name of the model. 33 | """ 34 | self._A = A.astype (np.float32) 35 | self._T = T 36 | self._lam = lam 37 | self._M = self._A.shape [0] 38 | self._N = self._A.shape [1] 39 | 40 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 41 | self._theta = (self._lam / self._scale).astype(np.float32) 42 | if coord: 43 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 44 | 45 | self._untied = untied 46 | self._coord = coord 47 | self._scope = scope 48 | 49 | """ Set up layers.""" 50 | self.setup_layers() 51 | 52 | 53 | def setup_layers(self): 54 | """ 55 | Implementation of LISTA model proposed by LeCun in 2010. 56 | 57 | :prob: Problem setting. 58 | :T: Number of layers in LISTA. 59 | :returns: 60 | :layers: List of tuples ( name, xh_, var_list ) 61 | :name: description of layers. 62 | :xh: estimation of sparse code at current layer. 63 | :var_list: list of variables to be trained seperately. 64 | 65 | """ 66 | Bs_ = [] 67 | Ws_ = [] 68 | thetas_ = [] 69 | 70 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 71 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 72 | 73 | with tf.variable_scope (self._scope, reuse=False) as vs: 74 | # constant 75 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 76 | 77 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 78 | initializer=B)) 79 | Bs_ = Bs_ * self._T 80 | if not self._untied: # tied model 81 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 82 | initializer=W)) 83 | Ws_ = Ws_ * self._T 84 | 85 | for t in range (self._T): 86 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 87 | dtype=tf.float32, 88 | initializer=self._theta)) 89 | if self._untied: # untied model 90 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 91 | dtype=tf.float32, 92 | initializer=W)) 93 | 94 | # Collection of all trainable variables in the model layer by layer. 95 | # We name it as `vars_in_layer` because we will use it in the manner: 96 | # vars_in_layer [t] 97 | self.vars_in_layer = list (zip (Bs_, Ws_, thetas_)) 98 | 99 | def inference (self, y_, x0_=None): 100 | xhs_ = [] # collection of the regressed sparse codes 101 | 102 | if x0_ is None: 103 | batch_size = tf.shape (y_) [-1] 104 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 105 | else: 106 | xh_ = x0_ 107 | xhs_.append (xh_) 108 | 109 | with tf.variable_scope (self._scope, reuse=True) as vs: 110 | for t in range (self._T): 111 | B_, W_, theta_ = self.vars_in_layer [t] 112 | 113 | By_ = tf.matmul (B_, y_) 114 | xh_ = shrink (tf.matmul (W_, xh_) + By_, theta_) 115 | xhs_.append (xh_) 116 | 117 | return xhs_ 118 | 119 | -------------------------------------------------------------------------------- /models/LISTA_base.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_base.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2019-02-18 9 | 10 | A base class for all LISTA networks. 11 | """ 12 | 13 | import numpy as np 14 | import numpy.linalg as la 15 | import tensorflow as tf 16 | 17 | import utils.train 18 | 19 | class LISTA_base (object): 20 | 21 | """ 22 | Implementation of deep neural network model. 
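    Base class shared by the unrolled models in models/: subclasses implement
    `setup_layers` and `inference` and inherit the save/load/training helpers
    below. A minimal usage sketch (TF1-style; `y_` follows this repo's
    (M, batch_size) column convention; the numbers are placeholders):

        import numpy as np
        import tensorflow as tf
        from models.LISTA import LISTA

        A = np.random.randn(250, 500).astype(np.float32)
        model = LISTA(A, T=16, lam=0.4, untied=False, coord=True, scope='LISTA')
        y_ = tf.placeholder(tf.float32, shape=(250, None))
        xhs_ = model.inference(y_)  # list of length T+1: x0 plus one estimate per layer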
23 | """ 24 | 25 | def __init__ (self): 26 | pass 27 | 28 | def setup_layers (self): 29 | pass 30 | 31 | def inference (self): 32 | pass 33 | 34 | def save_trainable_variables (self , sess , savefn): 35 | """ 36 | Save trainable variables in the model to npz file with current value of each 37 | variable in tf.trainable_variables(). 38 | 39 | :sess: Tensorflow session. 40 | :savefn: File name of saved file. 41 | 42 | """ 43 | state = getattr (self , 'state' , {}) 44 | utils.train.save_trainable_variables( 45 | sess, savefn, self._scope, **state ) 46 | 47 | def load_trainable_variables (self, sess, savefn): 48 | """ 49 | Load trainable variables from saved file. 50 | 51 | :sess: TODO 52 | :savefn: TODO 53 | :returns: TODO 54 | 55 | """ 56 | self.state = utils.train.load_trainable_variables(sess, savefn) 57 | 58 | def do_training(self, sess, stages, savefn, scope, 59 | val_step, maxit, better_wait): 60 | """ 61 | Do training actually. Refer to utils/train.py. 62 | 63 | :sess : Tensorflow session, in which we will run the training. 64 | :stages : List of tuples. Training stages obtained via 65 | `utils.train.setup_training`. 66 | :savefn : String. Path where the trained model is saved. 67 | :batch_size : Integer. Training batch size. 68 | :val_step : Integer. How many steps between two validation. 69 | :maxit : Integer. Max number of iterations in each training stage. 70 | :better_wait: Integer. Jump to next stage if no better performance after 71 | certain # of iterations. 72 | 73 | """ 74 | self.state = utils.train.do_training( 75 | sess, stages, savefn, scope, val_step, maxit, better_wait) 76 | 77 | -------------------------------------------------------------------------------- /models/LISTA_cp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cp.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified : 2018-10-21 9 | 10 | Implementation of Learned ISTA with weight coupling. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_free 18 | from models.LISTA_base import LISTA_base 19 | 20 | class LISTA_cp (LISTA_base): 21 | 22 | """ 23 | Implementation of learned ISTA with weight coupling constraint. 24 | """ 25 | def __init__ (self, A, T, lam, untied, coord, scope): 26 | """ 27 | :prob: : Instance of Problem class, describing problem settings. 28 | :T : Number of layers (depth) of this LISTA model. 29 | :lam : Initial value of thresholds of shrinkage functions. 30 | :untied : Whether weights are shared within layers. 31 | """ 32 | self._A = A.astype (np.float32) 33 | self._T = T 34 | self._lam = lam 35 | self._M = self._A.shape [0] 36 | self._N = self._A.shape [1] 37 | 38 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 39 | self._theta = (self._lam / self._scale).astype(np.float32) 40 | if coord: 41 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 42 | 43 | self._untied = untied 44 | self._coord = coord 45 | self._scope = scope 46 | 47 | """ Set up layers.""" 48 | self.setup_layers() 49 | 50 | 51 | def setup_layers(self): 52 | """ 53 | Implementation of LISTA model proposed by LeCun in 2010. 54 | 55 | :prob: Problem setting. 56 | :T: Number of layers in LISTA. 57 | :returns: 58 | :layers: List of tuples ( name, xh_, var_list ) 59 | :name: description of layers. 60 | :xh: estimation of sparse code at current layer. 
61 | :var_list: list of variables to be trained seperately. 62 | 63 | """ 64 | Ws_ = [] 65 | thetas_ = [] 66 | 67 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 68 | 69 | with tf.variable_scope (self._scope, reuse=False) as vs: 70 | # constant 71 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 72 | 73 | if not self._untied: # tied model 74 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 75 | initializer=W)) 76 | Ws_ = Ws_ * self._T 77 | 78 | for t in range (self._T): 79 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 80 | dtype=tf.float32, 81 | initializer=self._theta)) 82 | if self._untied: # untied model 83 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 84 | dtype=tf.float32, 85 | initializer=W)) 86 | 87 | # Collection of all trainable variables in the model layer by layer. 88 | # We name it as `vars_in_layer` because we will use it in the manner: 89 | # vars_in_layer [t] 90 | self.vars_in_layer = list (zip (Ws_, thetas_)) 91 | 92 | 93 | def inference (self, y_, x0_=None): 94 | xhs_ = [] # collection of the regressed sparse codes 95 | 96 | if x0_ is None: 97 | batch_size = tf.shape (y_) [-1] 98 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 99 | else: 100 | xh_ = x0_ 101 | xhs_.append (xh_) 102 | 103 | with tf.variable_scope (self._scope, reuse=True) as vs: 104 | for t in range (self._T): 105 | W_, theta_ = self.vars_in_layer [t] 106 | 107 | res_ = y_ - tf.matmul (self._kA_, xh_) 108 | xh_ = shrink_free (xh_ + tf.matmul (W_, res_), theta_) 109 | xhs_.append (xh_) 110 | 111 | return xhs_ 112 | 113 | -------------------------------------------------------------------------------- /models/LISTA_cp_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cp_conv.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-17 9 | """ 10 | 11 | import numpy as np 12 | import tensorflow as tf 13 | import utils.train 14 | 15 | from utils.tf import shrink_free 16 | from models.LISTA_base import LISTA_base 17 | 18 | class LISTA_cp_conv(LISTA_base): 19 | 20 | """ 21 | Implementation of convolutional learned ISTA for with weight coupling constraint. 22 | """ 23 | def __init__(self, filters, T, lam, alpha, untied, scope): 24 | """TODO: to be defined1. 
25 | 26 | :T: TODO 27 | :filters: TODO 28 | :lam: TODO 29 | :alpha: TODO 30 | :untied: TODO 31 | :scope: TODO 32 | 33 | """ 34 | self._T = T 35 | self._fh = filters.shape [0] 36 | self._fw = filters.shape [1] 37 | self._fn = filters.shape [2] 38 | self._lam = lam 39 | self._alpha = alpha 40 | self._untied = untied 41 | self._scope = scope 42 | 43 | # default threshld 44 | self._theta = self._lam * self._alpha 45 | 46 | # get the transpose of filters 47 | tfilters = self._alpha * np.rot90 (filters, k=2, axes=(0,1)) 48 | # set the _fs and _ft by adding new axes to them 49 | # _fs convs feature maps into image: (fh, fw, fn, 1) 50 | # _ft convs residual (one image) into feature maps: (fh, fw, 1, fn) 51 | self._fs = filters.reshape ((self._fh, self._fw, self._fn, 1)) 52 | self._ft = tfilters.reshape ((self._fh, self._fw, 1, self._fn)) 53 | 54 | # set up layers 55 | self.setup_layers () 56 | 57 | 58 | def setup_layers (self): 59 | # lists that hold parameters in the network 60 | self._Ws_ = [] 61 | self._thetas_ = [] 62 | with tf.variable_scope (self._scope, reuse=False) as vs: 63 | # tf constant for filters 64 | self._fs_const_ = tf.constant (value=self._fs, 65 | dtype=tf.float32, name='fs') 66 | 67 | self._ft_const_ = tf.constant (value=self._ft, 68 | dtype=tf.float32, name='ft') 69 | 70 | if self._untied == False: 71 | # tied model 72 | self._Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 73 | initializer=self._ft_const_)) 74 | self._Ws_ = self._Ws_ * self._T 75 | 76 | for i in range (self._T): 77 | self._thetas_.append (tf.get_variable (name='theta_'+str(i+1), 78 | dtype=tf.float32, 79 | initializer=self._theta)) 80 | if self._untied == True: 81 | # untied model 82 | self._Ws_.append (tf.get_variable (name='W_'+str(i+1), 83 | dtype=tf.float32, 84 | initializer=self._ft_const_)) 85 | 86 | 87 | def inference(self, input_, init_feature_=None): 88 | """TODO: Docstring for inference. 89 | 90 | :input_: Batch of images of size (batch_size, h, w, channel=1). 91 | :init_feature_: Batch of feature maps to be updated of size 92 | (batch_size, h+fh-1, w+fw-1, channel=self._fn). 93 | None means starting from all zero feature maps. 94 | :returns: TODO 95 | 96 | """ 97 | # list of features estimated in each layer 98 | features_ = [] 99 | recons_ = [] 100 | 101 | with tf.variable_scope (self._scope, reuse=True) as vs: 102 | # set paddding const for residual padding 103 | ph, pw = self._fh - 1, self._fw - 1 104 | paddings_ = tf.constant ([[0, 0], [ph, ph], [pw, pw], [0,0]]) 105 | # NOTE: the [0, 0] padding here is for the batch_size axis 106 | 107 | if init_feature_ is None: 108 | shape = tf.shape (input_) 109 | batch_size = shape [0] 110 | h = shape [1] 111 | w = shape [2] 112 | feature_ = tf.zeros (shape=(batch_size, 113 | h + self._fh - 1, 114 | w + self._fw - 1, 115 | self._fn), 116 | dtype=tf.float32, name='x_0') 117 | else: 118 | feature_ = init_feature_ 119 | features_.append (feature_) 120 | 121 | for t in range (self._T): 122 | # conv layer to get the reconstructed image 123 | conv_ = tf.nn.conv2d (input=feature_, 124 | filter=self._fs_const_, 125 | strides=(1,1,1,1), 126 | padding='VALID', 127 | use_cudnn_on_gpu=True, 128 | data_format='NHWC', 129 | name='conv_%d' % (t+1)) 130 | recons_.append (conv_) 131 | 132 | residual_ = input_ - conv_ 133 | # residual padding from (bs, h, w, 1) to 134 | # (bs, h+2fh-2, w+2fw-2, 1) 135 | padded_res_ = tf.pad (residual_, paddings_, "REFLECT") 136 | 137 | # deconv to calcualte the gradients w.r.t. 
feature maps 138 | W_ = self._Ws_ [t] 139 | grad_ = tf.nn.conv2d(input=padded_res_, 140 | filter=W_, 141 | strides=(1,1,1,1), 142 | padding="VALID", 143 | use_cudnn_on_gpu=True, 144 | data_format='NHWC', 145 | name='deconv_%d' % (t+1)) 146 | 147 | # feature_ update 148 | feature_ = feature_ + grad_ 149 | 150 | # thresholding 151 | theta_ = self._thetas_ [t] 152 | feature_ = shrink_free(feature_, theta_) 153 | # append feature_ to feature list 154 | features_.append (feature_) 155 | 156 | conv_ = tf.nn.conv2d(input=feature_, 157 | filter=self._fs_const_, 158 | strides=(1,1,1,1), 159 | padding='VALID', 160 | use_cudnn_on_gpu=True, 161 | data_format='NHWC', 162 | name='conv_%d' % (t+1)) 163 | recons_.append(conv_) 164 | return features_, recons_ 165 | 166 | 167 | # def setup_training (self, input_, label_, input_val_, label_val_, 168 | # init_feature_, init_lr, decay_rate, lr_decay): 169 | # """TODO: Docstring for setup_training. 170 | 171 | # :input_: Tensorflow placeholder or tensor. Input of training set. 172 | # :label_: Tensorflow placeholder or tensor. Label for the sparse feature 173 | # maps of training set. If `loss_type` is `recon`, `label_` should be 174 | # `input_` in noiseless reconstruction or noisy image in denoising. 175 | # :input_val_: Tensorflow placeholder or tensor. Input of validation set. 176 | # :label_val_: Tensorflow placeholder or tensor. Label for the sparse 177 | # feature maps of validation set. If `loss_type` is `recon`, 178 | # `label_` should be `input_` in noiseless reconstruction or noisy 179 | # image in denoising. 180 | # :init_feature_: TensorFlow tensor. Initial estimation of feature maps. 181 | # :init_lr: TODO 182 | # :decay_rate: TODO 183 | # :lr_decay: TODO 184 | # :returns: 185 | # :training_stages: list of training stages 186 | 187 | # """ 188 | # # infer feature_, feature_val_ from input_, input_val_ 189 | # # predictions are the reconstructions 190 | # _, predicts_ = self.inference (input_, init_feature_, True) 191 | # _, predicts_val_ = self.inference (input_val_, init_feature_, True) 192 | # assert len (predicts_) == self._T + 1 193 | # assert len (predicts_val_) == self._T + 1 194 | # nmse_denom_ = tf.nn.l2_loss (label_) 195 | # nmse_denom_val_ = tf.nn.l2_loss (label_val_) 196 | 197 | # # start setting up training 198 | # training_stages = [] 199 | 200 | # lrs = [init_lr * decay for decay in lr_decay] 201 | 202 | # # setup self.lr_multiplier dictionary 203 | # # learning rate multipliers of each variables 204 | # lr_multiplier = dict() 205 | # for var in tf.trainable_variables(): 206 | # lr_multiplier[var.op.name] = 1.0 207 | 208 | # # initialize self.train_vars list 209 | # # variables which will be updated in next training stage 210 | # train_vars = [] 211 | 212 | # for t in range (self._T): 213 | # # layer information for training monitoring 214 | # layer_info = "{scope} T={time}".format (scope=self._scope, time=t+1) 215 | 216 | # # set up loss_ and nmse_ 217 | # loss_ = tf.nn.l2_loss (predicts_ [t+1] - label_) 218 | # nmse_ = loss_ / nmse_denom_ 219 | # loss_val_ = tf.nn.l2_loss (predicts_val_ [t+1] - label_val_) 220 | # nmse_val_ = loss_val_ / nmse_denom_val_ 221 | 222 | # W_ = self._Ws_ [t] 223 | # theta_ = self._thetas_ [t] 224 | 225 | # # train parameters in current layer with initial learning rate 226 | # if W_ not in train_vars: 227 | # var_list = (W_, theta_, ) 228 | # else: 229 | # var_list = (theta_, ) 230 | # op_ = tf.train.AdamOptimizer (init_lr).minimize (loss_, 231 | # var_list=var_list) 232 | # training_stages.append 
((layer_info, loss_, nmse_, 233 | # loss_val_, nmse_val_, op_, var_list)) 234 | 235 | # for var in var_list: 236 | # train_vars.append (var) 237 | 238 | # # train all variables in current and former layers with decayed 239 | # # learning rate 240 | # for lr in lrs: 241 | # op_ = get_train_op (loss_, train_vars, lr, lr_multiplier) 242 | # training_stages.append ((layer_info + ' lr={}'.format (lr), 243 | # loss_, 244 | # nmse_, 245 | # loss_val_, 246 | # nmse_val_, 247 | # op_, 248 | # tuple (train_vars), )) 249 | 250 | # # decay learning rates for trained variables 251 | # for var in train_vars: 252 | # lr_multiplier [var.op.name] *= decay_rate 253 | 254 | # return training_stages 255 | 256 | 257 | -------------------------------------------------------------------------------- /models/LISTA_cpss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cpss.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-17 9 | 10 | Implementation of Learned ISTA with support selection and coupled weights. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class LISTA_cpss (LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, 28 | untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | self._lam = lam 40 | self._M = self._A.shape [0] 41 | self._N = self._A.shape [1] 42 | 43 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 44 | self._theta = (self._lam / self._scale).astype(np.float32) 45 | if coord: 46 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 47 | 48 | self._ps = [(t+1) * self._p for t in range (self._T)] 49 | self._ps = np.clip (self._ps, 0.0, self._maxp) 50 | 51 | self._untied = untied 52 | self._coord = coord 53 | self._scope = scope 54 | 55 | """ Set up layers.""" 56 | self.setup_layers() 57 | 58 | 59 | def setup_layers(self): 60 | """ 61 | Implementation of LISTA model proposed by LeCun in 2010. 62 | 63 | :prob: Problem setting. 64 | :T: Number of layers in LISTA. 65 | :returns: 66 | :layers: List of tuples ( name, xh_, var_list ) 67 | :name: description of layers. 68 | :xh: estimation of sparse code at current layer. 69 | :var_list: list of variables to be trained seperately. 
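        The support-selection schedule built in `__init__` is
            ps[t] = min((t+1) * percent, max_percent),
        so e.g. percent=1.2 and max_percent=6.0 give
        ps = [1.2, 2.4, 3.6, 4.8, 6.0, 6.0, ...]: the percentage of
        largest-magnitude entries that `shrink_ss` exempts from
        soft-thresholding at layer t.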
70 | 71 | """ 72 | Ws_ = [] 73 | thetas_ = [] 74 | 75 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 76 | 77 | with tf.variable_scope (self._scope, reuse=False) as vs: 78 | # constant 79 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 80 | 81 | if not self._untied: # tied model 82 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 83 | initializer=W)) 84 | Ws_ = Ws_ * self._T 85 | 86 | for t in range (self._T): 87 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 88 | dtype=tf.float32, 89 | initializer=self._theta)) 90 | if self._untied: # untied model 91 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 92 | dtype=tf.float32, 93 | initializer=W)) 94 | 95 | # Collection of all trainable variables in the model layer by layer. 96 | # We name it as `vars_in_layer` because we will use it in the manner: 97 | # vars_in_layer [t] 98 | self.vars_in_layer = list (zip (Ws_, thetas_)) 99 | 100 | 101 | def inference (self, y_, x0_=None): 102 | xhs_ = [] # collection of the regressed sparse codes 103 | 104 | if x0_ is None: 105 | batch_size = tf.shape (y_) [-1] 106 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 107 | else: 108 | xh_ = x0_ 109 | xhs_.append (xh_) 110 | 111 | with tf.variable_scope (self._scope, reuse=True) as vs: 112 | for t in range (self._T): 113 | W_, theta_ = self.vars_in_layer [t] 114 | percent = self._ps [t] 115 | 116 | res_ = y_ - tf.matmul (self._kA_, xh_) 117 | xh_ = shrink_ss (xh_ + tf.matmul (W_, res_), theta_, percent) 118 | xhs_.append (xh_) 119 | 120 | return xhs_ 121 | 122 | -------------------------------------------------------------------------------- /models/LISTA_cpss_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cpss_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of Learned ISTA with support selection and coupled weights for 11 | real world image compressive sensing experiments. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink_ss 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_cpss_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, percent, max_percent, 29 | untied, coord, scope): 30 | """ 31 | :prob: : Instance of Problem class, describing problem settings. 32 | :T : Number of layers (depth) of this LISTA model. 33 | :lam : Initial value of thresholds of shrinkage functions. 34 | :untied : Whether weights are shared within layers. 
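:Phi         : Numpy ndarray. Sensing matrix of shape (M, F).
:D           : Numpy ndarray. Sparsifying dictionary of shape (F, N); the
               effective dictionary is A = Phi @ D.
:percent     : Float. Base percentage of entries selected as support in
               each layer.
:max_percent : Float. Cap on the support-selection percentage; layer t
               uses min((t+1) * percent, max_percent) (see `self._ps`).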
35 | """ 36 | self._Phi = Phi.astype (np.float32) 37 | self._D = D.astype (np.float32) 38 | self._A = np.matmul (self._Phi, self._D) 39 | self._T = T 40 | self._p = percent 41 | self._maxp = max_percent 42 | self._lam = lam 43 | self._M = self._Phi.shape [0] 44 | self._F = self._Phi.shape [1] 45 | self._N = self._D.shape [1] 46 | 47 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 48 | self._theta = (self._lam / self._scale).astype(np.float32) 49 | if coord: 50 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 51 | 52 | self._ps = [(t+1) * self._p for t in range (self._T)] 53 | self._ps = np.clip (self._ps, 0.0, self._maxp) 54 | 55 | self._untied = untied 56 | self._coord = coord 57 | self._scope = scope 58 | 59 | """ Set up layers.""" 60 | self.setup_layers() 61 | 62 | 63 | def setup_layers(self): 64 | """ 65 | Implementation of LISTA model proposed by LeCun in 2010. 66 | 67 | :prob: Problem setting. 68 | :T: Number of layers in LISTA. 69 | :returns: 70 | :layers: List of tuples ( name, xh_, var_list ) 71 | :name: description of layers. 72 | :xh: estimation of sparse code at current layer. 73 | :var_list: list of variables to be trained seperately. 74 | 75 | """ 76 | Ws_ = [] 77 | thetas_ = [] 78 | 79 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 80 | 81 | with tf.variable_scope (self._scope, reuse=False) as vs: 82 | # constant 83 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 84 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 85 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 86 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 87 | initializer=self._D) 88 | 89 | if not self._untied: # tied model 90 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 91 | initializer=W)) 92 | Ws_ = Ws_ * self._T 93 | 94 | for t in range (self._T): 95 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 96 | dtype=tf.float32, 97 | initializer=self._theta)) 98 | if self._untied: # untied model 99 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 100 | dtype=tf.float32, 101 | initializer=W)) 102 | 103 | # Collection of all trainable variables in the model layer by layer. 
104 | # We name it as `vars_in_layer` because we will use it in the manner: 105 | # vars_in_layer [t] 106 | # Note here the last element of `self.vars_in_layer` is 107 | # (W_, theta_, vD_) 108 | self.vars_in_layer = list (zip (Ws_ [:-1], thetas_ [:-1])) 109 | self.vars_in_layer.append ((Ws_ [-1], thetas_ [-1], self._vD_, )) 110 | 111 | 112 | def inference (self, y_, x0_=None): 113 | xhs_ = [] # collection of the regressed sparse codes 114 | fhs_ = [] # collection of the regressed signals 115 | 116 | if x0_ is None: 117 | batch_size = tf.shape (y_) [-1] 118 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 119 | else: 120 | xh_ = x0_ 121 | xhs_.append (xh_) 122 | fhs_.append (tf.matmul (self._kD_, xh_)) 123 | 124 | with tf.variable_scope (self._scope, reuse=True) as vs: 125 | for t in range (self._T): 126 | if t < self._T - 1: 127 | W_, theta_ = self.vars_in_layer [t] 128 | D_ = self._kD_ 129 | else: 130 | W_, theta_, D_ = self.vars_in_layer [t] 131 | percent = self._ps [t] 132 | 133 | res_ = y_ - tf.matmul (self._kA_, xh_) 134 | xh_ = shrink_ss (xh_ + tf.matmul (W_, res_), theta_, percent) 135 | xhs_.append (xh_) 136 | 137 | fhs_.append (tf.matmul (D_, xh_)) 138 | 139 | return xhs_, fhs_ 140 | 141 | -------------------------------------------------------------------------------- /models/LISTA_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of the original Learned ISTA for real world image compressive 11 | sensing experiments. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 34 | """ 35 | self._Phi = Phi.astype (np.float32) 36 | self._D = D.astype (np.float32) 37 | self._A = np.matmul (self._Phi, self._D) 38 | self._T = T 39 | self._lam = lam 40 | self._M = self._Phi.shape [0] 41 | self._F = self._Phi.shape [1] 42 | self._N = self._D.shape [1] 43 | 44 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 45 | self._theta = (self._lam / self._scale).astype(np.float32) 46 | if coord: 47 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 48 | 49 | self._untied = untied 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ 59 | Implementation of LISTA model proposed by LeCun in 2010. 60 | 61 | :prob: Problem setting. 62 | :T: Number of layers in LISTA. 63 | :returns: 64 | :layers: List of tuples ( name, xh_, var_list ) 65 | :name: description of layers. 66 | :xh: estimation of sparse code at current layer. 67 | :var_list: list of variables to be trained seperately. 
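Parameterization used below (the original LISTA form):
B = A^T / L and W = I - B @ A with L = 1.001 * ||A||_2^2, so each layer
computes xh <- shrink(W @ xh + B @ y, theta), an unrolled ISTA step with
learnable B, W and theta. The last layer additionally carries the
trainable dictionary `vD_` used for the final reconstruction.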
68 | 69 | """ 70 | Bs_ = [] 71 | Ws_ = [] 72 | thetas_ = [] 73 | 74 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 75 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 76 | 77 | with tf.variable_scope (self._scope, reuse=False) as vs: 78 | # constant 79 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 80 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 81 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 82 | 83 | # variables 84 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 85 | initializer=self._D) 86 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 87 | initializer=B)) 88 | Bs_ = Bs_ * self._T 89 | if not self._untied: # tied model 90 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 91 | initializer=W)) 92 | Ws_ = Ws_ * self._T 93 | 94 | for t in range (self._T): 95 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 96 | dtype=tf.float32, 97 | initializer=self._theta)) 98 | if self._untied: # untied model 99 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 100 | dtype=tf.float32, 101 | initializer=W)) 102 | 103 | # Collection of all trainable variables in the model layer by layer. 104 | # We name it as `vars_in_layer` because we will use it in the manner: 105 | # vars_in_layer [t] 106 | # Note here the last element of `self.vars_in_layer` is 107 | # (W_, theta_, vD_) 108 | self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1])) 109 | self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, )) 110 | 111 | 112 | def inference (self, y_, x0_=None): 113 | xhs_ = [] # collection of the regressed sparse codes 114 | fhs_ = [] # collection of the regressed signals 115 | 116 | if x0_ is None: 117 | batch_size = tf.shape (y_) [-1] 118 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 119 | else: 120 | xh_ = x0_ 121 | xhs_.append (xh_) 122 | fhs_.append (tf.matmul (self._kD_, xh_)) 123 | 124 | with tf.variable_scope (self._scope, reuse=True) as vs: 125 | for t in range (self._T): 126 | if t < self._T - 1: 127 | B_, W_, theta_ = self.vars_in_layer [t] 128 | D_ = self._kD_ 129 | else: 130 | B_, W_, theta_, D_ = self.vars_in_layer [t] 131 | 132 | By_ = tf.matmul (B_, y_) 133 | xh_ = shrink (tf.matmul (W_, xh_) + By_, theta_) 134 | xhs_.append (xh_) 135 | 136 | fhs_.append (tf.matmul (D_, xh_)) 137 | 138 | return xhs_, fhs_ 139 | 140 | -------------------------------------------------------------------------------- /models/LISTA_ss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_ss.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified : 2018-10-21 9 | 10 | Implementation of Learned ISTA with support selection technique. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class LISTA_ss (LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, 28 | untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 
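:percent     : Float. Base percentage of entries selected as support in
               each layer.
:max_percent : Float. Cap on the schedule; layer t selects
               min((t+1) * percent, max_percent) percent of entries.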
34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | self._lam = lam 40 | self._M = self._A.shape [0] 41 | self._N = self._A.shape [1] 42 | 43 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 44 | self._theta = (self._lam / self._scale).astype(np.float32) 45 | if coord: 46 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 47 | 48 | self._ps = [(t+1) * self._p for t in range (self._T)] 49 | self._ps = np.clip (self._ps, 0.0, self._maxp) 50 | 51 | self._untied = untied 52 | self._coord = coord 53 | self._scope = scope 54 | 55 | """ Set up layers.""" 56 | self.setup_layers() 57 | 58 | 59 | def setup_layers(self): 60 | """ 61 | Implementation of LISTA model proposed by LeCun in 2010. 62 | 63 | :prob: Problem setting. 64 | :T: Number of layers in LISTA. 65 | :returns: 66 | :layers: List of tuples ( name, xh_, var_list ) 67 | :name: description of layers. 68 | :xh: estimation of sparse code at current layer. 69 | :var_list: list of variables to be trained seperately. 70 | 71 | """ 72 | Bs_ = [] 73 | Ws_ = [] 74 | thetas_ = [] 75 | 76 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 77 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 78 | 79 | with tf.variable_scope (self._scope, reuse=False) as vs: 80 | # constant 81 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 82 | 83 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 84 | initializer=B)) 85 | Bs_ = Bs_ * self._T 86 | if not self._untied: # tied model 87 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 88 | initializer=W)) 89 | Ws_ = Ws_ * self._T 90 | 91 | for t in range (self._T): 92 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 93 | dtype=tf.float32, 94 | initializer=self._theta)) 95 | if self._untied: # untied model 96 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 97 | dtype=tf.float32, 98 | initializer=W)) 99 | 100 | # Collection of all trainable variables in the model layer by layer. 101 | # We name it as `vars_in_layer` because we will use it in the manner: 102 | # vars_in_layer [t] 103 | self.vars_in_layer = list (zip (Bs_, Ws_, thetas_)) 104 | 105 | 106 | def inference (self, y_, x0_=None): 107 | xhs_ = [] # collection of the regressed sparse codes 108 | 109 | if x0_ is None: 110 | batch_size = tf.shape (y_) [-1] 111 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 112 | else: 113 | xh_ = x0_ 114 | xhs_.append (xh_) 115 | 116 | with tf.variable_scope (self._scope, reuse=True) as vs: 117 | for t in range (self._T): 118 | B_, W_, theta_ = self.vars_in_layer [t] 119 | percent = self._ps [t] 120 | 121 | By_ = tf.matmul (B_, y_) 122 | xh_ = shrink_ss (tf.matmul (W_, xh_) + By_, theta_, percent) 123 | xhs_.append (xh_) 124 | 125 | return xhs_ 126 | 127 | -------------------------------------------------------------------------------- /models/LISTA_ss_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_ss_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-25 9 | 10 | Implementation of Learned ISTA with only support selection real world image 11 | compressive sensing experiments. 
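This model keeps the original two-matrix LISTA update,
xh <- shrink_ss(W @ xh + B @ y, theta, percent), adds the layer-wise
support-selection schedule, and makes the dictionary D trainable in the
final layer only, where it produces the reconstruction f = D @ xh.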
12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink_ss 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_ss_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, percent, max_percent, 29 | untied, coord, scope): 30 | """ 31 | :prob: : Instance of Problem class, describing problem settings. 32 | :T : Number of layers (depth) of this LISTA model. 33 | :lam : Initial value of thresholds of shrinkage functions. 34 | :untied : Whether weights are shared within layers. 35 | """ 36 | self._Phi = Phi.astype (np.float32) 37 | self._D = D.astype (np.float32) 38 | self._A = np.matmul (self._Phi, self._D) 39 | self._T = T 40 | self._p = percent 41 | self._maxp = max_percent 42 | self._lam = lam 43 | self._M = self._Phi.shape [0] 44 | self._F = self._Phi.shape [1] 45 | self._N = self._D.shape [1] 46 | 47 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 48 | self._theta = (self._lam / self._scale).astype(np.float32) 49 | if coord: 50 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 51 | 52 | self._ps = [(t+1) * self._p for t in range (self._T)] 53 | self._ps = np.clip (self._ps, 0.0, self._maxp) 54 | 55 | self._untied = untied 56 | self._coord = coord 57 | self._scope = scope 58 | 59 | """ Set up layers.""" 60 | self.setup_layers() 61 | 62 | 63 | def setup_layers(self): 64 | """ 65 | Implementation of LISTA model proposed by LeCun in 2010. 66 | 67 | :prob: Problem setting. 68 | :T: Number of layers in LISTA. 69 | :returns: 70 | :layers: List of tuples ( name, xh_, var_list ) 71 | :name: description of layers. 72 | :xh: estimation of sparse code at current layer. 73 | :var_list: list of variables to be trained seperately. 74 | 75 | """ 76 | Bs_ = [] 77 | Ws_ = [] 78 | thetas_ = [] 79 | 80 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 81 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 82 | 83 | with tf.variable_scope (self._scope, reuse=False) as vs: 84 | # constant 85 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 86 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 87 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 88 | 89 | # variables 90 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 91 | initializer=self._D) 92 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 93 | initializer=B)) 94 | Bs_ = Bs_ * self._T 95 | if not self._untied: # tied model 96 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 97 | initializer=W)) 98 | Ws_ = Ws_ * self._T 99 | 100 | for t in range (self._T): 101 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 102 | dtype=tf.float32, 103 | initializer=self._theta)) 104 | if self._untied: # untied model 105 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 106 | dtype=tf.float32, 107 | initializer=W)) 108 | 109 | # Collection of all trainable variables in the model layer by layer. 
110 | # We name it as `vars_in_layer` because we will use it in the manner: 111 | # vars_in_layer [t] 112 | # Note here the last element of `self.vars_in_layer` is 113 | # (W_, theta_, vD_) 114 | self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1])) 115 | self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, )) 116 | 117 | 118 | def inference (self, y_, x0_=None): 119 | xhs_ = [] # collection of the regressed sparse codes 120 | fhs_ = [] # collection of the regressed signals 121 | 122 | if x0_ is None: 123 | batch_size = tf.shape (y_) [-1] 124 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 125 | else: 126 | xh_ = x0_ 127 | xhs_.append (xh_) 128 | fhs_.append (tf.matmul (self._kD_, xh_)) 129 | 130 | with tf.variable_scope (self._scope, reuse=True) as vs: 131 | for t in range (self._T): 132 | if t < self._T - 1: 133 | B_, W_, theta_ = self.vars_in_layer [t] 134 | D_ = self._kD_ 135 | else: 136 | B_, W_, theta_, D_ = self.vars_in_layer [t] 137 | percent = self._ps [t] 138 | 139 | By_ = tf.matmul (B_, y_) 140 | xh_ = shrink_ss (tf.matmul (W_, xh_) + By_, theta_, percent) 141 | xhs_.append (xh_) 142 | 143 | fhs_.append (tf.matmul (D_, xh_)) 144 | 145 | return xhs_, fhs_ 146 | 147 | -------------------------------------------------------------------------------- /models/TiLISTA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : TiLISTA.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-17 9 | 10 | Implementation of TiLISTA --- LISTA with tied weight. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class TiLISTA(LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, coord, scope): 28 | """ 29 | :prob: : Instance of Problem class, describing problem settings. 30 | :T : Number of layers (depth) of this LISTA model. 31 | :lam : Initial value of thresholds of shrinkage functions. 32 | :untied : Whether weights are shared within layers. 33 | """ 34 | self._A = A.astype(np.float32) 35 | self._T = T 36 | self._p = percent 37 | self._maxp = max_percent 38 | self._lam = lam 39 | self._M = self._A.shape[0] 40 | self._N = self._A.shape[1] 41 | 42 | self._scale = 1.001 * np.linalg.norm(A, ord=2)**2 43 | self._theta = (self._lam / self._scale).astype(np.float32) 44 | if coord: 45 | self._theta = np.ones((self._N, 1), dtype=np.float32) * self._theta 46 | 47 | self._ps = [(t+1) * self._p for t in range(self._T)] 48 | self._ps = np.clip(self._ps, 0.0, self._maxp) 49 | 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ Set up layers of ALISTA. 
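(TiLISTA variant: a single weight matrix W tied across all T layers,
plus per-layer step sizes alpha_t and thresholds theta_t; see
`vars_in_layer` below. ALISTA proper keeps W fixed rather than
trainable.)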
59 | """ 60 | Ws_ = [] # weight 61 | alphas_ = [] # step sizes 62 | thetas_ = [] # thresholds 63 | 64 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 65 | 66 | with tf.variable_scope(self._scope, reuse=False) as vs: 67 | # constant 68 | self._kA_ = tf.constant(value=self._A, dtype=tf.float32) 69 | # tied weight in TiLISTA 70 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 71 | initializer=W)) 72 | Ws_ = Ws_ * self._T 73 | 74 | for t in range(self._T): 75 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 76 | dtype=tf.float32, 77 | initializer=1.0)) 78 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 79 | dtype=tf.float32, 80 | initializer=self._theta)) 81 | 82 | # Collection of all trainable variables in the model layer by layer. 83 | # We name it as `vars_in_layer` because we will use it in the manner: 84 | # vars_in_layer [t] 85 | self.vars_in_layer = list(zip(Ws_, alphas_, thetas_)) 86 | 87 | 88 | def inference(self, y_, x0_=None): 89 | xhs_ = [] # collection of the regressed sparse codes 90 | 91 | if x0_ is None: 92 | batch_size = tf.shape(y_)[-1] 93 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 94 | else: 95 | xh_ = x0_ 96 | xhs_.append(xh_) 97 | 98 | with tf.variable_scope(self._scope, reuse=True) as vs: 99 | for t in range(self._T): 100 | W_, alpha_, theta_ = self.vars_in_layer[t] 101 | percent = self._ps[t] 102 | 103 | res_ = y_ - tf.matmul(self._kA_, xh_) 104 | zh_ = xh_ + alpha_ * tf.matmul(W_, res_) 105 | xh_ = shrink_ss(zh_, theta_, percent) 106 | xhs_.append(xh_) 107 | 108 | return xhs_ 109 | 110 | 111 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/models/__init__.py -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/utils/__init__.py -------------------------------------------------------------------------------- /utils/cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : cs.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-18 9 | 10 | Utility functions for natural images compressive sensing. 
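The three helpers below form a patch-based round trip: `imread_CS_py`
pads an image so that a (patch_size, stride) grid covers it,
`img2col_py` flattens the overlapping patches into columns, and
`col2im_CS_py` puts the columns back and averages the overlaps. A
hypothetical usage sketch (file name and sizes are placeholders):

    im_org, H, W, im_pad, H_pad, W_pad = imread_CS_py('house.tif', 16, 8)
    cols   = img2col_py(im_pad, 16, 8)      # shape (16 * 16, num_patches)
    im_rec = col2im_CS_py(cols, 16, 8, H, W, H_pad, W_pad)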
11 | """ 12 | 13 | import numpy as np 14 | from PIL import Image 15 | 16 | def imread_CS_py(im_fn, patch_size, stride): 17 | im_org = np.array (Image.open (im_fn), dtype='float32') 18 | H, W = im_org.shape 19 | num_rpatch = (H - patch_size + stride - 1) // stride + 1 20 | num_cpatch = (W - patch_size + stride - 1) // stride + 1 21 | H_pad = patch_size + (num_rpatch - 1) * stride 22 | W_pad = patch_size + (num_cpatch - 1) * stride 23 | im_pad = np.zeros ((H_pad, W_pad), dtype=np.float32) 24 | im_pad [:H, :W] = im_org 25 | 26 | return im_org, H, W, im_pad, H_pad, W_pad 27 | 28 | def img2col_py(im_pad, patch_size, stride): 29 | [H, W] = im_pad.shape 30 | num_rpatch = (H - patch_size) / stride + 1 31 | num_cpatch = (W - patch_size) / stride + 1 32 | num_patches = int (num_rpatch * num_cpatch) 33 | img_col = np.zeros ([patch_size**2, num_patches]) 34 | count = 0 35 | for x in range(0, H-patch_size+1, stride): 36 | for y in range(0, W-patch_size+1, stride): 37 | img_col[:, count] = im_pad[x:x+patch_size, y:y+patch_size].reshape([-1]) 38 | count = count + 1 39 | return img_col 40 | 41 | def col2im_CS_py(X_col, patch_size, stride, H, W, H_pad, W_pad): 42 | X0_rec = np.zeros ((H_pad, W_pad)) 43 | counts = np.zeros ((H_pad, W_pad)) 44 | k = 0 45 | for x in range(0, H_pad-patch_size+1, stride): 46 | for y in range(0, W_pad-patch_size+1, stride): 47 | X0_rec[x:x+patch_size, y:y+patch_size] += X_col[:,k].\ 48 | reshape([patch_size, patch_size]) 49 | counts[x:x+patch_size, y:y+patch_size] += 1 50 | k = k + 1 51 | X0_rec /= counts 52 | X_rec = X0_rec[:H, :W] 53 | return X_rec 54 | 55 | -------------------------------------------------------------------------------- /utils/data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : data.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-18 9 | 10 | Utility methods for data handling for natural images denoising and compressive 11 | sensing experiments. 12 | """ 13 | 14 | import os 15 | import sys 16 | import glob 17 | import argparse 18 | import numpy as np 19 | import tensorflow as tf 20 | 21 | sys.path.append(os.getcwd()) 22 | 23 | from PIL import Image 24 | from tqdm import tqdm 25 | from utils.prob import load_problem 26 | from sklearn.feature_extraction.image import extract_patches_2d 27 | tqdm.monitor_interval = 0 28 | 29 | def _int64_feature(value): 30 | return tf.train.Feature (int64_list=tf.train.Int64List (value=[value])) 31 | 32 | def _bytes_feature(value): 33 | return tf.train.Feature (bytes_list=tf.train.BytesList (value=[value])) 34 | 35 | def dir2tfrecords_cs (data_dir, out_path, Phi, patch_size, patches_per_image, suffix): 36 | Phi = Phi.astype (np.float32) 37 | if isinstance (patch_size, int): 38 | patch_size = (16,16) 39 | 40 | if not out_path.endswith(".tfrecords"): 41 | out_path += ".tfrecords" 42 | writer = tf.python_io.TFRecordWriter (out_path) 43 | for fn in tqdm (glob.glob (os.path.join (data_dir, "*." 
+ suffix))) :
44 |         """Read images (and convert to grayscale)."""
45 |         im = Image.open (fn)
46 |         if im.mode == 'RGB':
47 |             im = im.convert ('L')
48 |         im = np.asarray (im)
49 | 
50 |         """Extract patches."""
51 |         patches = extract_patches_2d (im, patch_size)
52 |         perm = np.random.permutation (len (patches))
53 |         patches = patches [perm [:patches_per_image]]
54 | 
55 |         """Vectorize patches."""
56 |         fs = patches.reshape (len (patches), -1)
57 | 
58 |         """Demean and normalize."""
59 |         fs = fs - np.mean (fs, axis=1, keepdims=True)
60 |         fs = (fs / 255.0).astype (np.float32)
61 | 
62 |         """Measure the signal using sensing matrix `Phi`."""
63 |         ys = np.transpose (Phi.dot (np.transpose (fs)))
64 | 
65 |         """Write signals and measurements to tfrecords file."""
66 |         for y, f in zip (ys, fs):
67 |             yraw = y.tostring ()
68 |             fraw = f.tostring ()
69 |             example = tf.train.Example (features=tf.train.Features (
70 |                 feature={
71 |                     'y': _bytes_feature (yraw),
72 |                     'f': _bytes_feature (fraw)
73 |                 }
74 |             ))
75 | 
76 |             writer.write (example.SerializeToString ())
77 | 
78 |     writer.close ()
79 | 
80 | 
81 | def dir2tfrecords_denoise (data_dir, out_dir, name, suffix, rgb2gray=False):
82 |     images = glob.glob (os.path.join (data_dir, '*.' + suffix))
83 |     num_examples = len (images)
84 | 
85 |     if len (images) < 1:
86 |         raise ValueError ('No images in {suffix} format found in {data_dir}'.format (suffix=suffix, data_dir=data_dir))
87 | 
88 |     if not name.endswith(".tfrecords"):
89 |         name += ".tfrecords"
90 |     out_fname = os.path.join(out_dir, name)
91 |     print ('Writing', out_fname)
92 |     with tf.python_io.TFRecordWriter(out_fname) as writer:
93 |         for fname in images:
94 |             image = Image.open(fname)
95 |             width, height = image.size
96 |             if image.mode == 'L':
97 |                 depth = 1
98 |             elif image.mode == 'RGB':
99 |                 if rgb2gray:
100 |                     image = image.convert('L')
101 |                     depth = 1
102 |                 else:
103 |                     depth = 3
104 |             image_raw = np.array(image).tostring()
105 |             example = tf.train.Example(
106 |                 features=tf.train.Features(
107 |                     feature={
108 |                         'height': _int64_feature(height),
109 |                         'width' : _int64_feature(width),
110 |                         'depth' : _int64_feature(depth),
111 |                         'image_raw': _bytes_feature(image_raw)
112 |                     }))
113 |             writer.write (example.SerializeToString ())
114 | 
115 | 
116 | """*****************************************************************************
117 | Input pipeline for real images compressive sensing on preprocessed BSD500
118 | datasets.
119 | *****************************************************************************"""
120 | def cs_decode(serialized_example):
121 |     """Parses one (y, f) pair from the given `serialized_example`."""
122 |     features = tf.parse_single_example (
123 |         serialized_example,
124 |         features={
125 |             'y' : tf.FixedLenFeature ([], tf.string),
126 |             'f' : tf.FixedLenFeature ([], tf.string),
127 |         })
128 | 
129 |     # convert the scalar string tensors into flat float32 tensors:
130 |     # the measurement y and the vectorized signal patch f
131 |     y_ = tf.decode_raw (features ['y'], tf.float32)
132 |     f_ = tf.decode_raw (features ['f'], tf.float32)
133 | 
134 |     return y_, f_
135 | 
136 | 
137 | def bsd500_cs_inputs (file_path, batch_size, num_epochs):
138 |     if not num_epochs:
139 |         num_epochs = None
140 | 
141 |     with tf.name_scope ('input'):
142 |         # TFRecordDataset opens a binary file and reads one record at a time.
143 |         # `filename` could also be a list of filenames, which will be read in order.
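        # Each serialized record was written by `dir2tfrecords_cs` above and
        # holds two float32 byte strings: 'y' (a length-M measurement) and
        # 'f' (a vectorized image patch). `cs_decode` restores them as flat
        # float32 tensors.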
144 | dataset = tf.data.TFRecordDataset (file_path) 145 | 146 | # The map transformation takes a function and applies it to every element of 147 | # the dataset 148 | dataset = dataset.map (cs_decode, num_parallel_calls=4) 149 | 150 | # The shuffle transformation uses a finite-sized buffer to shuffle elements 151 | # in memory. The parameter is the number of elements in the buffer. For 152 | # completely uniform shuffling, set the parameter to be the same as the 153 | # number of elements in the dataset. 154 | dataset = dataset.apply ( 155 | tf.contrib.data.shuffle_and_repeat (50000, num_epochs)) 156 | 157 | dataset = dataset.batch (batch_size) 158 | dataset = dataset.prefetch (batch_size) 159 | 160 | iterator = dataset.make_one_shot_iterator () 161 | 162 | # After this step, the y_ and f_ will be of shape: 163 | # (batch_size, M) and (batch_size, F). Transpose them into shape 164 | # (M, batch_size) and (F, batch_size). 165 | y_, f_ = iterator.get_next () 166 | 167 | return tf.transpose (y_, [1,0]) , tf.transpose (f_, [1,0]) 168 | 169 | 170 | """***************************************************************************** 171 | Input pipeline for real images denoising on BSD500 and VOC2012 datasets. 172 | *****************************************************************************""" 173 | def denoise_decode (serialized_example): 174 | """Parses an image from the given `serialized_example`.""" 175 | features = tf.parse_single_example ( 176 | serialized_example, 177 | features={ 178 | 'image_raw': tf.FixedLenFeature ([], tf.string), 179 | 'height': tf.FixedLenFeature ([], tf.int64), 180 | 'width' : tf.FixedLenFeature ([], tf.int64), 181 | 'depth' : tf.FixedLenFeature ([], tf.int64), 182 | }) 183 | 184 | # convert from a scalar string tensor to a uint8 tensor with 185 | # shape (heigth, width, depth) 186 | image = tf.decode_raw (features ['image_raw'], tf.uint8) 187 | 188 | height = tf.cast (features ['height'], tf.int32) 189 | width = tf.cast (features ['width'], tf.int32) 190 | depth = tf.cast (features ['depth'], tf.int32) 191 | image = tf.cast (tf.reshape (image, (height, width, depth)), tf.float32) 192 | 193 | return image 194 | 195 | def normalization (image): 196 | """Convert `image` from [0, 255] -> [0, 1] floats and then de-mean.""" 197 | image = image * (1. / 255) 198 | return image - tf.reduce_mean (image) 199 | 200 | def crop (image, height_crop, width_crop): 201 | """Randomly crop images to size (height_crop, width_crop).""" 202 | image = tf.random_crop (image, [height_crop, width_crop, 1]) 203 | return image 204 | 205 | def bsd500_denoise_inputs (dataset_dir, filename, batch_size, 206 | height_crop, width_crop, num_epochs): 207 | if not num_epochs: 208 | num_epochs = None 209 | filename = os.path.join (dataset_dir, filename) 210 | 211 | with tf.name_scope ('input'): 212 | # TFRecordDataset opens a binary file and reads one record at a time. 213 | # `filename` could also be a list of filenames, which will be read in order. 214 | dataset = tf.data.TFRecordDataset (filename) 215 | 216 | # The map transformation takes a function and applies it to every element of 217 | # the dataset 218 | dataset = dataset.map(denoise_decode, num_parallel_calls=4) 219 | dataset = dataset.map(lambda x: crop(x, height_crop, width_crop), 220 | num_parallel_calls=4) 221 | dataset = dataset.map(normalization, num_parallel_calls=4) 222 | 223 | # The shuffle transformation uses a finite-sized buffer to shuffle elements 224 | # in memory. The parameter is the number of elements in the buffer. 
For
225 |         # completely uniform shuffling, set the parameter to be the same as the
226 |         # number of elements in the dataset.
227 |         dataset = dataset.apply (
228 |             tf.data.experimental.shuffle_and_repeat (400, num_epochs))
229 | 
230 |         dataset = dataset.batch (batch_size)
231 |         dataset = dataset.prefetch (batch_size)
232 | 
233 |         iterator = dataset.make_one_shot_iterator ()
234 | 
235 |         image_ = iterator.get_next ()
236 | 
237 |         return image_
238 | 
239 | 
240 | parser = argparse.ArgumentParser()
241 | parser.add_argument(
242 |     "--task_type", type=str, help="Denoising (`denoise`) or compressive sensing (`cs`).")
243 | parser.add_argument(
244 |     "--dataset_dir", type=str, help="Path to the directory that holds images.")
245 | parser.add_argument(
246 |     "--out_dir", type=str, help="Path to the output directory that holds the TFRecords file.")
247 | parser.add_argument(
248 |     "--out_file", type=str, help="File name of the output file.")
249 | parser.add_argument(
250 |     "--suffix", type=str, help="Format of input images. PNG or JPG or other format.")
251 | # Arguments for compressive sensing
252 | parser.add_argument(
253 |     "--sensing", type=str, help="Sensing matrix file (saved Problem instance; the matrix is read from key 'A').")
254 | parser.add_argument(
255 |     "--patch_size", type=int, help="Size of extracted patches.")
256 | parser.add_argument(
257 |     "--patches_per_img", type=int, help="How many patches to extract from each image.")
258 | 
259 | if __name__ == "__main__":
260 |     config, unparsed = parser.parse_known_args()
261 |     if config.task_type == "cs":
262 |         Phi = np.load(config.sensing)["A"]
263 |         dir2tfrecords_cs(config.dataset_dir,
264 |                          os.path.join(config.out_dir, config.out_file),
265 |                          Phi, config.patch_size, config.patches_per_img, config.suffix)
266 |     elif config.task_type == "denoise":
267 |         dir2tfrecords_denoise(config.dataset_dir, config.out_dir, config.out_file,
268 |                               config.suffix, rgb2gray=True)
269 | 
270 | 
-------------------------------------------------------------------------------- /utils/matlabs/CalculateW.m: --------------------------------------------------------------------------------
1 | %% A script to calculate the dictionary with minimal coherence with D:
2 | %    min_W \|W^T D\|^2   subject to   diag(W^T D) = 1
3 | % This is an implementation of Algorithm 1 in Appendix E.1 of (Liu et al., 2019).
4 | % Just run this script without any arguments.
5 | 
6 | % Author: Jialin Liu, UCLA math department (danny19921123@gmail.com)
7 | % Last Modified: 2019-2-15
8 | 
9 | %% Note: if the algorithm diverges, then try a smaller step size: eta.
10 | 
11 | %% Script starts.
12 | clear;
13 | 
14 | % load dictionary D
15 | load('./D.mat','D');
16 | [m,n] = size(D);
17 | 
18 | % Initialization
19 | W = D;
20 | f = func(D,D);
21 | 
22 | % Step size
23 | eta = 0.1;
24 | 
25 | % Main iteration
26 | fprintf('Calculation Starts...\n');
27 | for t = 1: 1000
28 | 
29 |     % calculate residual and gradient
30 |     res = D' * W - eye(n);
31 |     gra = D * res;
32 | 
33 |     % gradient descent
34 |     W_next = W - eta * gra;
35 | 
36 |     % projection
37 |     W_next = proj(W_next,D);
38 | 
39 |     % calculate objective function value
40 |     f_next = func(W_next,D);
41 | 
42 |     % stopping condition
43 |     if abs(f-f_next)/f < 1e-12, break; end
44 | 
45 |     % update
46 |     W = W_next;
47 |     f = f_next;
48 | 
49 |     % report function values
50 |     if mod(t,50) == 0, fprintf('t: %d\t, func: %f\n', t, f); end
51 | end
52 | 
53 | % save to file
54 | save('W.mat','W');
55 | fprintf('Calculation ends.
Results are saved in W.mat.\n'); 56 | 57 | % visualization 58 | visualization(D,W); 59 | 60 | 61 | %% functions 62 | function f = func(W,D) 63 | % calculate function values 64 | n = size(D,2); 65 | res = D' * W - eye(n); 66 | Q = ones(n,n)+eye(n)*(-1); 67 | res = res .* sqrt(Q); 68 | f = sum(sum(res.*res)); 69 | end 70 | 71 | function W_next = proj(W,D) 72 | % conduct projection 73 | aw = diag(D'*W); 74 | aw = repmat(aw',[size(D,1), 1]); 75 | W_next = W + (1-aw).*D; 76 | end 77 | 78 | function visualization(D,W) 79 | % function for visualizing the coherences between A and W 80 | n = size(D,2); 81 | 82 | res = D' * W - eye(n); 83 | res0 = D' * D - eye(n); 84 | 85 | figure ('Units', 'pixels', 'Position', [300 300 800 275]) ; 86 | 87 | subplot(1,2,1); 88 | histogram(res(~eye(n)),'BinWidth',1e-2); 89 | hold on; 90 | histogram(res0(~eye(n)),'BinWidth',1e-2); 91 | title('off-diagonal'); 92 | legend('W','A'); 93 | hold off; 94 | 95 | subplot(1,2,2); 96 | histogram(res(logical(eye(n))),'BinWidth',1e-5); 97 | hold on; 98 | histogram(res0(logical(eye(n))),'BinWidth',1e-5); 99 | hold off; 100 | title('diagonal'); 101 | 102 | end 103 | -------------------------------------------------------------------------------- /utils/matlabs/CalculateW_conv.m: -------------------------------------------------------------------------------- 1 | %% A script to calculate convolutional kernels with minimal coherence: 2 | % min \|W_conv^T D_conv\|^2 subject to diag(W_conv^T D_conv) = 1 3 | % This is an implement of Algorithm 2 in Appendix E.2 of(Liu et al, 2019). 4 | % Just run this script without any arguments. 5 | 6 | % Author: Jialin Liu, UCLA math department (danny19921123@gmail.com) 7 | % Last Modified: 2019-2-15 8 | 9 | %% Note: if the algorithm diverges, then try a smaller step size: eta. 10 | 11 | %% Script starts. 12 | clear; 13 | 14 | % Load convolutional kernels 15 | load('./D_conv.mat','D'); 16 | 17 | % Get the dimensions 18 | Ds = size(D,1); 19 | M = size(D,3); 20 | N = 2*Ds - 1; % Due to Theorem 3 in our paper 21 | 22 | % Initialization 23 | Df = fft2(D,N,N); 24 | Df = reshape(Df, [N,N,1,M]); 25 | Dft = reshape(Df, [N,N,M,1]); 26 | Dfh = reshape(conj(Df), [N,N,M,1]); 27 | W = reshape(D,[Ds,Ds,1,M]); 28 | Wf = Df; 29 | 30 | % Step size in the optimization 31 | eta = 0.002; % step size 32 | 33 | % Main iterations 34 | fprintf('Calculation Starts...\n'); 35 | for t = 1:500 36 | 37 | % calculate residuals and function values 38 | res = bsxfun(@times, Dfh, Wf); 39 | f = norm(res(:))^2; 40 | if mod(t,50)==0, fprintf('t: %d\t f: %.3f\n',t,f); end 41 | 42 | % calculate gradient 43 | gra = bsxfun(@times, Dft, res); 44 | gra = sum(gra, 3); 45 | 46 | % gradient descent in the fourier domain 47 | Wf = Wf - eta * gra; 48 | 49 | % back to the spacial domain and do projection 50 | W = ifft2(Wf, 'symmetric'); 51 | W = reshape(W, [N,N,M]); 52 | W = W(1:Ds,1:Ds,:); 53 | W = proj(W,D); 54 | 55 | % calculate FFT for the next step 56 | Wf = fft2(W,N,N); 57 | Wf = reshape(Wf, [N,N,1,M]); 58 | end 59 | 60 | % save to file 61 | save('W_conv.mat','W'); 62 | fprintf('Calculation ends. Results are saved in W_conv.mat.\n'); 63 | 64 | % Visualization 65 | % Please download SPORCO: http://brendt.wohlberg.net/software/SPORCO/ 66 | % And copy "util/imdisp.m" and "util/tiledict.m" to the current folder 67 | % Then uncomment the following two lines. You can get the visualization. 
68 | 69 | % figure; 70 | % imdisp(tiledict(W)); 71 | 72 | 73 | %% Functions 74 | function W_out = proj(W_in,D) 75 | % projection of the dictionary on "diag(W^TD)=1" 76 | M = size(D,3); 77 | N = size(D,1); 78 | A = zeros(N*N,M); 79 | W = zeros(N*N,M); 80 | for ii = 1:M 81 | A(:,ii) = reshape(D(:,:,ii),[N*N,1]); 82 | W(:,ii) = reshape(W_in(:,:,ii),[N*N,1]); 83 | end 84 | aw = diag(A'*W); 85 | aw = repmat(aw',[size(A,1), 1]); 86 | W_next = W + (1-aw).*A; 87 | W_out = zeros(N,N,M); 88 | for ii = 1:M 89 | W_out(:,:,ii) = reshape(W_next(:,ii),[N,N]); 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /utils/models/ALISTA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : ALISTA.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-21 9 | 10 | Implementation of ALISTA. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | 16 | from utils.tf import shrink_ss, is_tensor 17 | from models.LISTA_base import LISTA_base 18 | 19 | 20 | class ALISTA(LISTA_base): 21 | 22 | """ 23 | Implementation of deep neural network model. 24 | """ 25 | 26 | def __init__(self, A, T, lam, W, percent, max_percent, coord, scope): 27 | """ 28 | :prob: : Instance of Problem class, describing problem settings. 29 | :T : Number of layers (depth) of this LISTA model. 30 | :lam : Initial value of thresholds of shrinkage functions. 31 | :untied : Whether weights are shared within layers. 32 | """ 33 | self._A = A.astype(np.float32) 34 | self._W = W 35 | self._T = T 36 | self._p = percent 37 | self._maxp = max_percent 38 | self._lam = lam 39 | self._M = self._A.shape[0] 40 | self._N = self._A.shape[1] 41 | 42 | self._scale = 1.001 * np.linalg.norm(A, ord=2)**2 43 | self._theta = (self._lam / self._scale).astype(np.float32) 44 | if coord: 45 | self._theta = np.ones((self._N, 1), dtype=np.float32) * self._theta 46 | 47 | self._ps = [(t+1) * self._p for t in range(self._T)] 48 | self._ps = np.clip(self._ps, 0.0, self._maxp) 49 | 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ Set up layers of ALISTA. 59 | """ 60 | alphas_ = [] # step sizes 61 | thetas_ = [] # thresholds 62 | 63 | with tf.variable_scope(self._scope, reuse=False) as vs: 64 | # constant 65 | self._kA_ = tf.constant(value=self._A, dtype=tf.float32) 66 | if not is_tensor(self._W): 67 | self._W_ = tf.constant(value=self._W, dtype=tf.float32) 68 | else: 69 | self._W_ = self._W 70 | self._Wt_ = tf.transpose(self._W_, perm=[1,0]) 71 | 72 | for t in range(self._T): 73 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 74 | dtype=tf.float32, 75 | initializer=1.0)) 76 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 77 | dtype=tf.float32, 78 | initializer=self._theta)) 79 | 80 | # Collection of all trainable variables in the model layer by layer. 
81 | # We name it as `vars_in_layer` because we will use it in the manner: 82 | # vars_in_layer [t] 83 | self.vars_in_layer = list(zip(alphas_, thetas_)) 84 | 85 | 86 | def inference(self, y_, x0_=None): 87 | xhs_ = [] # collection of the regressed sparse codes 88 | 89 | if x0_ is None: 90 | batch_size = tf.shape(y_)[-1] 91 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 92 | else: 93 | xh_ = x0_ 94 | xhs_.append(xh_) 95 | 96 | with tf.variable_scope(self._scope, reuse=True) as vs: 97 | for t in range(self._T): 98 | alpha_, theta_ = self.vars_in_layer[t] 99 | percent = self._ps[t] 100 | 101 | res_ = y_ - tf.matmul(self._kA_, xh_) 102 | zh_ = xh_ + alpha_ * tf.matmul(self._Wt_, res_) 103 | xh_ = shrink_ss(zh_, theta_, percent) 104 | xhs_.append(xh_) 105 | 106 | return xhs_ 107 | 108 | 109 | -------------------------------------------------------------------------------- /utils/models/ALISTA_robust.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : ALISTA_robust.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-21 9 | 10 | Implementation of ALISTA_robust, where the model will take both encoding model A 11 | and weight W as inputs. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | 17 | from utils.tf import shrink_ss, is_tensor 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class ALISTA_robust(LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, M, N, T, percent, max_percent, coord, scope): 28 | """ 29 | :prob: : Instance of Problem class, describing problem settings. 30 | :T : Number of layers (depth) of this LISTA model. 31 | :lam : Initial value of thresholds of shrinkage functions. 32 | :untied : Whether weights are shared within layers. 33 | """ 34 | self._M = M 35 | self._N = N 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | 40 | self._ps = [(t+1) * self._p for t in range(self._T)] 41 | self._ps = np.clip(self._ps, 0.0, self._maxp) 42 | 43 | self._coord = coord 44 | self._scope = scope 45 | 46 | """ Set up layers.""" 47 | self.setup_layers() 48 | 49 | 50 | def setup_layers(self): 51 | """ Set up layers of ALISTA. 52 | """ 53 | alphas_ = [] # step sizes 54 | thetas_ = [] # thresholds 55 | 56 | theta_shape = (self._n, 1) if self._coord else () 57 | 58 | with tf.variable_scope(self._scope, reuse=False) as vs: 59 | for t in range(self._T): 60 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 61 | dtype=tf.float32, 62 | initializer=1.0)) 63 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 64 | shape=theta_shape, 65 | dtype=tf.float32)) 66 | 67 | # Collection of all trainable variables in the model layer by layer. 
68 | # We name it as `vars_in_layer` because we will use it in the manner: 69 | # vars_in_layer [t] 70 | self.vars_in_layer = list(zip(alphas_, thetas_)) 71 | 72 | 73 | def inference(self, y_, A_, W_, x0_=None): 74 | assert A_.shape == W_.shape 75 | if len(A_.shape) > 2: 76 | return self.batch_inference(y_, A_, W_, x0_=None) 77 | 78 | xhs_ = [] # collection of the regressed sparse codes 79 | 80 | if x0_ is None: 81 | batch_size = tf.shape(y_)[-1] 82 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 83 | else: 84 | xh_ = x0_ 85 | xhs_.append(xh_) 86 | 87 | Wt_ = tf.transpose(W_) 88 | with tf.variable_scope(self._scope, reuse=True) as vs: 89 | for t in range(self._T): 90 | alpha_, theta_ = self.vars_in_layer[t] 91 | percent = self._ps[t] 92 | 93 | res_ = y_ - tf.matmul(A_, xh_) 94 | zh_ = xh_ + alpha_ * tf.matmul(Wt_, res_) 95 | xh_ = shrink_ss(zh_, theta_, percent) 96 | xhs_.append(xh_) 97 | 98 | return xhs_ 99 | 100 | def batch_inference(self, ys_, As_, Ws_, x0_=None): 101 | """ 102 | Batch inference. Iterate over ys_, As_ and Wts_. 103 | The first dimension of list_xhs_ stands for the time/iteration in the 104 | model. list_xhs_ [k] is the stacked outputs of all (y_, A_, Wt_) at the 105 | step/iteration k. 106 | """ 107 | # print(ys_.shape) 108 | # print(As_.shape) 109 | # print(Ws_.shape) 110 | list_xhs_ = [[] for i in range(self._T + 1)] 111 | 112 | # iterate over ys_, As_ and Wts_ 113 | batch_size = ys_.shape.as_list()[0] 114 | for i in range(batch_size): 115 | xhs_ = self.inference(ys_[i], As_[i], Ws_[i], x0_) 116 | # append xhs_[t] to list_xhs_[t] for all t 117 | for t, xh_ in enumerate(xhs_): 118 | list_xhs_[t].append(xh_) 119 | 120 | # stacking 121 | stacked_list_xhs_ = list(map(tf.stack, list_xhs_)) 122 | 123 | return stacked_list_xhs_ 124 | 125 | 126 | -------------------------------------------------------------------------------- /utils/models/AtoW_grad.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : AtoW_grad.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-20 9 | """ 10 | 11 | import numpy as np 12 | import tensorflow as tf 13 | import utils.train 14 | 15 | from utils.tf import get_subgradient_func, bmxbm, mxbm 16 | 17 | class AtoW_grad(object): 18 | 19 | """Docstring for AtoW_grad. """ 20 | 21 | def __init__(self, m, n, T, Binit, eta, loss, Q, scope): 22 | """TODO: to be defined1. 23 | 24 | :T: TODO 25 | :loss: TODO 26 | 27 | """ 28 | self._m = m 29 | self._n = n 30 | self._Binit = Binit 31 | self._T = T 32 | self._loss = loss 33 | self._eta = eta 34 | self._Q = Q 35 | self._scope = scope 36 | 37 | # subgradient function 38 | self._subgradient_func = get_subgradient_func(loss) 39 | 40 | # setup layers 41 | self.setup_layers (scope) 42 | 43 | def setup_layers(self, scope): 44 | """TODO: Docstring for setup_layers. 
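Creates, under `scope`: the per-step matrices B_1..B_T (initialized from
`Binit`, which is scaled by the step size `eta` when given as an ndarray,
or drawn randomly for 'uniform' / 'normal'), the optional constant
weighting matrix Q used in the loss subgradient, and an n x n identity
used to form the residual A^T @ W - I in `inference`.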
45 | :returns: TODO 46 | 47 | """ 48 | with tf.variable_scope (scope, reuse=False) as vs: 49 | # B initialization 50 | if isinstance(self._Binit, np.ndarray): 51 | Binit = (self._eta * self._Binit).astype(np.float32) 52 | self._Binit_ = tf.constant(value=Binit, 53 | dtype=tf.float32, 54 | name='Binit') 55 | elif Binit == 'uniform': 56 | self._Binit_ = tf.random_uniform_initializer(-0.01, 0.01, 57 | dtype=tf.float32) 58 | elif Binit == 'normal': 59 | self._Binit_ = tf.random_normal_initializer(0.0, 0.01, 60 | dtype=tf.float32) 61 | 62 | # weights 63 | for i in range (self._T): 64 | tf.get_variable (name='B_%d'%(i+1), 65 | dtype=tf.float32, 66 | initializer=self._Binit_) 67 | 68 | # Q matrix in loss and subgradient 69 | if self._Q is None: 70 | self._Q_ = None 71 | else: 72 | self._Q_ = tf.constant (value=self._Q, dtype=tf.float32, name='Q') 73 | 74 | # identity 75 | eye = np.eye (self._n) 76 | self._eye_ = tf.constant (value=eye, 77 | dtype=tf.float32, 78 | name='eye') 79 | 80 | def inference(self, A_): 81 | """TODO: Docstring for function. 82 | 83 | :A_: A tensor or placeholder with shape (batchsize, m, n) 84 | :returns: TODO 85 | 86 | """ 87 | At_ = tf.transpose (A_, [0,2,1]) 88 | W_ = A_ 89 | Q_ = self._Q_ 90 | with tf.variable_scope (self._scope, reuse=True) as vs: 91 | for i in range (self._T): 92 | Z_ = bmxbm (At_, W_, batch_first=True) - self._eye_ 93 | dF_ = self._subgradient_func (Z_, Q_) 94 | B_ = tf.get_variable ('B_%d'%(i+1)) 95 | W_ = W_ - mxbm (B_, dF_) 96 | 97 | return W_ 98 | 99 | def save_trainable_variables (self , sess , savefn): 100 | """ 101 | Save trainable variables in the model to npz file with current value of each 102 | variable in tf.trainable_variables(). 103 | 104 | :sess: Tensorflow session. 105 | :savefn: File name of saved file. 106 | 107 | """ 108 | state = getattr (self , 'state' , {}) 109 | utils.train.save_trainable_variables( 110 | sess, savefn, self._scope, **state ) 111 | 112 | def load_trainable_variables (self, sess, savefn): 113 | """ 114 | Load trainable variables from saved file. 115 | 116 | :sess: TODO 117 | :savefn: TODO 118 | :returns: TODO 119 | 120 | """ 121 | self.state = utils.train.load_trainable_variables(sess, savefn) 122 | 123 | 124 | -------------------------------------------------------------------------------- /utils/models/LAMP.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LAMP.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified: 2018-10-15 9 | 10 | Implementation of Learned AMP model. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_lamp 18 | from models.LISTA_base import LISTA_base 19 | 20 | class LAMP (LISTA_base): 21 | 22 | """ 23 | Implementation of Learned AMP model. 24 | """ 25 | 26 | def __init__(self, A, T, lam, untied, coord, scope): 27 | """ 28 | :A : Instance of Problem class, describing problem settings. 29 | :T : Number of layers (depth) of this LISTA model. 30 | :lam : Initial value of thresholds of shrinkage functions. 31 | :untied : Whether weights are shared within layers. 
32 | :coord : 33 | :scope : 34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._M = self._A.shape [0] 38 | self._N = self._A.shape [1] 39 | 40 | self._lam = lam 41 | if coord: 42 | self._lam = np.ones ((self._N, 1), dtype=np.float32) * self._lam 43 | 44 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 45 | 46 | self._untied = untied 47 | self._coord = coord 48 | self._scope = scope 49 | 50 | """ Set up layers.""" 51 | self.setup_layers() 52 | 53 | 54 | def setup_layers(self): 55 | """ 56 | Implementation of LISTA model proposed by LeCun in 2010. 57 | 58 | :prob: Problem setting. 59 | :T: Number of layers in LISTA. 60 | :returns: 61 | :layers: List of tuples ( name, xh_, var_list ) 62 | :name: description of layers. 63 | :xh: estimation of sparse code at current layer. 64 | :var_list: list of variables to be trained seperately. 65 | 66 | """ 67 | Bs_ = [] 68 | lams_ = [] 69 | 70 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 71 | 72 | with tf.variable_scope (self._scope, reuse=False) as vs: 73 | # constant 74 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 75 | 76 | if not self._untied: # tied model 77 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 78 | initializer=B)) 79 | Bs_ = Bs_ * self._T 80 | 81 | for t in range (self._T): 82 | lams_.append (tf.get_variable (name="lam_%d"%(t+1), 83 | dtype=tf.float32, 84 | initializer=self._lam)) 85 | if self._untied: # untied model 86 | Bs_.append (tf.get_variable (name='B_%d'%(t+1), 87 | dtype=tf.float32, 88 | initializer=B)) 89 | 90 | # Collection of all trainable variables in the model layer by layer. 91 | # We name it as `vars_in_layer` because we will use it in the manner: 92 | # vars_in_layer [t] 93 | self.vars_in_layer = list (zip (Bs_, lams_)) 94 | 95 | 96 | def inference (self, y_, x0_=None, return_recon=False): 97 | xhs_ = [] # collection of the regressed sparse codes 98 | if return_recon: 99 | yhs_ = [] # collection of the reconstructed signals 100 | 101 | if x0_ is None: 102 | batch_size = tf.shape (y_) [-1] 103 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 104 | else: 105 | xh_ = x0_ 106 | xhs_.append (xh_) 107 | 108 | OneOverM = tf.constant (float(1)/self._M, dtype=tf.float32) 109 | NOverM = tf.constant (float(self._N)/self._M, dtype=tf.float32) 110 | vt_ = tf.zeros_like (y_, dtype=tf.float32) 111 | 112 | with tf.variable_scope (self._scope, reuse=True) as vs: 113 | for t in range (self._T): 114 | B_, lam_ = self.vars_in_layer [t] 115 | 116 | yh_ = tf.matmul (self._kA_, xh_) 117 | if return_recon: 118 | yhs_.append (yh_) 119 | 120 | xhl0_ = tf.reduce_mean (tf.to_float (tf.abs (xh_)>0), axis=0) 121 | bt_ = xhl0_ * NOverM 122 | 123 | vt_ = y_ - yh_ + bt_ * vt_ 124 | rvar_ = tf.reduce_sum (tf.square (vt_), axis=0) * OneOverM 125 | rh_ = xh_ + tf.matmul(B_, vt_) 126 | 127 | xh_ = shrink_lamp (rh_, rvar_, lam_) 128 | xhs_.append (xh_) 129 | 130 | if return_recon: 131 | yhs_.append (tf.matmul (self._kA_, xh_)) 132 | return xhs_, yhs_ 133 | else: 134 | return xhs_ 135 | 136 | # B = A.T / (1.001 * la.norm(A,2)**2) 137 | # B_ = tf.Variable(B,dtype=tf.float32,name='B_1') 138 | # By_ = tf.matmul( B_ , self.prob.y_ ) 139 | 140 | # lam_ = tf.Variable(self.init_lam, dtype=tf.float32, name='lam_1') 141 | # rvar_ = tf.reduce_sum(tf.square(self.prob.y_), axis=0) * OneOverM 142 | # xh_, xhl0_ = eta( By_, rvar_ , lam_ ) 143 | # self.layers.append( ('LAMP T=1', xh_, (B_, lam_,) ) ) 144 | 145 | # self.xhs_ = [self.x0_, xh_] 146 | 147 | # vt_ = self.prob.y_ 148 | # for t 
in range(1, self.T): 149 | # bt_ = xhl0_ * NOverM 150 | # vt_ = self.prob.y_ - tf.matmul( self.prob.A_ , xh_ ) + bt_ * vt_ 151 | # rvar_ = tf.reduce_sum(tf.square(vt_), axis=0) * OneOverM 152 | # lam_ = tf.Variable(self.init_lam,name='lam_'+str(t+1)) 153 | 154 | # if self.untied: 155 | # B_ = tf.Variable(B, dtype=tf.float32, name='B_'+str(t+1)) 156 | # var_list = (B_, lam_, ) 157 | # else: 158 | # var_list = (lam_, ) 159 | 160 | # rh_ = xh_ + tf.matmul(B_, vt_) 161 | # xh_, xhl0_ = eta( rh_ , rvar_ , lam_ ) 162 | # self.xhs_.append (xh_) 163 | # self.layers.append( ('LAMP T={}'.format(t+1), xh_, var_list ) ) 164 | 165 | -------------------------------------------------------------------------------- /utils/models/LIHT.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | LISTA_cpss_corrected.py 6 | author: xhchrn 7 | chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of Learned ISTA with support selection and coupled weights like 11 | in LAMP, without setting thresholds to zeros if they are minor to zero. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import hard_shrink 19 | from models.LISTA_base import LISTA_base 20 | 21 | class LIHT (LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__ (self, A, T, lam, untied, coord, scope): 28 | """ 29 | :prob: : Instance of Problem class, describing problem settings. 30 | :T : Number of layers (depth) of this LISTA model. 31 | :lam : Initial value of thresholds of shrinkage functions. 32 | :untied : Whether weights are shared within layers. 33 | """ 34 | self._A = A.astype (np.float32) 35 | self._T = T 36 | self._lam = lam 37 | self._M = self._A.shape [0] 38 | self._N = self._A.shape [1] 39 | 40 | self._theta = np.sqrt (self._lam) 41 | if coord: 42 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 43 | 44 | self._untied = untied 45 | self._coord = coord 46 | self._scope = scope 47 | 48 | """ Set up layers.""" 49 | self.setup_layers() 50 | 51 | 52 | def setup_layers(self): 53 | """ 54 | Implementation of LISTA model proposed by LeCun in 2010. 55 | 56 | :prob: Problem setting. 57 | :T: Number of layers in LISTA. 58 | :returns: 59 | :layers: List of tuples ( name, xh_, var_list ) 60 | :name: description of layers. 61 | :xh: estimation of sparse code at current layer. 62 | :var_list: list of variables to be trained seperately. 63 | 64 | """ 65 | Bs_ = [] 66 | Ws_ = [] 67 | thetas_ = [] 68 | 69 | B = (np.transpose (self._A)).astype (np.float32) 70 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 71 | 72 | with tf.variable_scope (self._scope, reuse=False) as vs: 73 | # constant 74 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 75 | 76 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 77 | initializer=B)) 78 | Bs_ = Bs_ * self._T 79 | if not self._untied: # tied model 80 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 81 | initializer=W)) 82 | Ws_ = Ws_ * self._T 83 | 84 | for t in range (self._T): 85 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 86 | dtype=tf.float32, 87 | initializer=self._theta)) 88 | if self._untied: # untied model 89 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 90 | dtype=tf.float32, 91 | initializer=W)) 92 | 93 | # Collection of all trainable variables in the model layer by layer. 
94 |         # We name it as `vars_in_layer` because we will use it in the manner:
95 |         # vars_in_layer [t]
96 |         self.vars_in_layer = list (zip (Bs_, Ws_, thetas_))
97 | 
98 | 
99 |     def inference (self, y_, x0_=None):
100 |         xhs_ = [] # collection of the regressed sparse codes
101 | 
102 |         if x0_ is None:
103 |             batch_size = tf.shape (y_) [-1]
104 |             xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32)
105 |         else:
106 |             xh_ = x0_
107 |         xhs_.append (xh_)
108 | 
109 |         with tf.variable_scope (self._scope, reuse=True) as vs:
110 |             for t in range (self._T):
111 |                 B_, W_, theta_ = self.vars_in_layer [t]
112 | 
113 |                 By_ = tf.matmul (B_, y_)
114 |                 xh_ = hard_shrink (tf.matmul (W_, xh_) + By_, theta_)
115 |                 xhs_.append (xh_)
116 | 
117 |         return xhs_
118 | 
119 | 
--------------------------------------------------------------------------------
/utils/models/LIHT_cs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | 
4 | """
5 | file : LIHT_cs.py
6 | author: xhchrn
7 | email : chernxh@tamu.edu
8 | date : 2018-10-21
9 | 
10 | Implementation of Learned IHT (LISTA with hard thresholding) for real-world
11 | image compressive sensing experiments.
12 | """
13 | 
14 | import numpy as np
15 | import tensorflow as tf
16 | import utils.train
17 | 
18 | from utils.tf import hard_shrink
19 | from models.LISTA_base import LISTA_base
20 | 
21 | 
22 | class LIHT_cs (LISTA_base):
23 | 
24 |     """
25 |     Implementation of deep neural network model.
26 |     """
27 | 
28 |     def __init__(self, Phi, D, T, lam, untied, coord, scope):
29 |         """
30 |         :Phi, D : Numpy ndarrays. Sensing matrix (M, F) and dictionary (F, N).
31 |         :T      : Number of layers (depth) of this LIHT model.
32 |         :lam    : Initial value of thresholds of shrinkage functions.
33 |         :untied : Whether weights are shared within layers.
34 |         """
35 |         self._Phi = Phi.astype (np.float32)
36 |         self._D = D.astype (np.float32)
37 |         self._A = np.matmul (self._Phi, self._D)
38 |         self._T = T
39 |         self._lam = lam
40 |         self._M = self._Phi.shape [0]
41 |         self._F = self._Phi.shape [1]
42 |         self._N = self._D.shape [1]
43 |         self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2  # used to scale B in setup_layers, as in LISTA_cs
44 |         self._theta = np.sqrt (self._lam)
45 |         if coord:
46 |             self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta
47 | 
48 |         self._untied = untied
49 |         self._coord = coord
50 |         self._scope = scope
51 | 
52 |         """ Set up layers."""
53 |         self.setup_layers()
54 | 
55 | 
56 |     def setup_layers(self):
57 |         """
58 |         Set up the layers of the LIHT_cs model.
59 | 
60 |         :prob: Problem setting.
61 |         :T: Number of layers in LISTA.
62 |         :returns:
63 |             :layers: List of tuples ( name, xh_, var_list )
64 |             :name: description of layers.
65 |             :xh: estimation of sparse code at current layer.
66 |             :var_list: list of variables to be trained separately.
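            Note: unlike the soft shrinkage used in LISTA, `hard_shrink (r_, theta_)`
            applies hard thresholding: entries whose magnitudes do not exceed
            theta_ are zeroed, and all remaining entries are kept unchanged.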
67 | 68 | """ 69 | Bs_ = [] 70 | Ws_ = [] 71 | thetas_ = [] 72 | 73 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 74 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 75 | 76 | with tf.variable_scope (self._scope, reuse=False) as vs: 77 | # constant 78 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 79 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 80 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 81 | 82 | # variables 83 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 84 | initializer=self._D) 85 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 86 | initializer=B)) 87 | Bs_ = Bs_ * self._T 88 | if not self._untied: # tied model 89 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 90 | initializer=W)) 91 | Ws_ = Ws_ * self._T 92 | 93 | for t in range (self._T): 94 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 95 | dtype=tf.float32, 96 | initializer=self._theta)) 97 | if self._untied: # untied model 98 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 99 | dtype=tf.float32, 100 | initializer=W)) 101 | 102 | # Collection of all trainable variables in the model layer by layer. 103 | # We name it as `vars_in_layer` because we will use it in the manner: 104 | # vars_in_layer [t] 105 | # Note here the last element of `self.vars_in_layer` is 106 | # (W_, theta_, vD_) 107 | self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1])) 108 | self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, )) 109 | 110 | 111 | def inference (self, y_, x0_=None): 112 | xhs_ = [] # collection of the regressed sparse codes 113 | fhs_ = [] # collection of the regressed signals 114 | 115 | if x0_ is None: 116 | batch_size = tf.shape (y_) [-1] 117 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 118 | else: 119 | xh_ = x0_ 120 | xhs_.append (xh_) 121 | fhs_.append (tf.matmul (self._kD_, xh_)) 122 | 123 | with tf.variable_scope (self._scope, reuse=True) as vs: 124 | for t in range (self._T): 125 | if t < self._T - 1: 126 | B_, W_, theta_ = self.vars_in_layer [t] 127 | D_ = self._kD_ 128 | else: 129 | B_, W_, theta_, D_ = self.vars_in_layer [t] 130 | 131 | By_ = tf.matmul (B_, y_) 132 | xh_ = hard_shrink (tf.matmul (W_, xh_) + By_, theta_) 133 | xhs_.append (xh_) 134 | 135 | fhs_.append (tf.matmul (D_, xh_)) 136 | 137 | return xhs_, fhs_ 138 | 139 | -------------------------------------------------------------------------------- /utils/models/LISTA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified : 2018-10-21 9 | 10 | Implementation of Learned ISTA proposed by LeCun et al in 2010. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink 18 | from models.LISTA_base import LISTA_base 19 | 20 | class LISTA (LISTA_base): 21 | 22 | """ 23 | Implementation of LISTA model proposed by LeCun in 2010. 24 | """ 25 | 26 | def __init__(self, A, T, lam, untied, coord, scope): 27 | """ 28 | :A : Numpy ndarray. Dictionary/Sensing matrix. 29 | :T : Integer. Number of layers (depth) of this LISTA model. 30 | :lam : Float. The initial weight of l1 loss term in LASSO. 31 | :untied : Boolean. Flag of whether weights are shared within layers. 32 | :scope : String. Scope name of the model. 
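        :coord  : Boolean. If True, the threshold is a coordinate-wise vector
                  of shape (N, 1) rather than a single shared scalar.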
33 | """ 34 | self._A = A.astype (np.float32) 35 | self._T = T 36 | self._lam = lam 37 | self._M = self._A.shape [0] 38 | self._N = self._A.shape [1] 39 | 40 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 41 | self._theta = (self._lam / self._scale).astype(np.float32) 42 | if coord: 43 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 44 | 45 | self._untied = untied 46 | self._coord = coord 47 | self._scope = scope 48 | 49 | """ Set up layers.""" 50 | self.setup_layers() 51 | 52 | 53 | def setup_layers(self): 54 | """ 55 | Implementation of LISTA model proposed by LeCun in 2010. 56 | 57 | :prob: Problem setting. 58 | :T: Number of layers in LISTA. 59 | :returns: 60 | :layers: List of tuples ( name, xh_, var_list ) 61 | :name: description of layers. 62 | :xh: estimation of sparse code at current layer. 63 | :var_list: list of variables to be trained seperately. 64 | 65 | """ 66 | Bs_ = [] 67 | Ws_ = [] 68 | thetas_ = [] 69 | 70 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 71 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 72 | 73 | with tf.variable_scope (self._scope, reuse=False) as vs: 74 | # constant 75 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 76 | 77 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 78 | initializer=B)) 79 | Bs_ = Bs_ * self._T 80 | if not self._untied: # tied model 81 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 82 | initializer=W)) 83 | Ws_ = Ws_ * self._T 84 | 85 | for t in range (self._T): 86 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 87 | dtype=tf.float32, 88 | initializer=self._theta)) 89 | if self._untied: # untied model 90 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 91 | dtype=tf.float32, 92 | initializer=W)) 93 | 94 | # Collection of all trainable variables in the model layer by layer. 95 | # We name it as `vars_in_layer` because we will use it in the manner: 96 | # vars_in_layer [t] 97 | self.vars_in_layer = list (zip (Bs_, Ws_, thetas_)) 98 | 99 | def inference (self, y_, x0_=None): 100 | xhs_ = [] # collection of the regressed sparse codes 101 | 102 | if x0_ is None: 103 | batch_size = tf.shape (y_) [-1] 104 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 105 | else: 106 | xh_ = x0_ 107 | xhs_.append (xh_) 108 | 109 | with tf.variable_scope (self._scope, reuse=True) as vs: 110 | for t in range (self._T): 111 | B_, W_, theta_ = self.vars_in_layer [t] 112 | 113 | By_ = tf.matmul (B_, y_) 114 | xh_ = shrink (tf.matmul (W_, xh_) + By_, theta_) 115 | xhs_.append (xh_) 116 | 117 | return xhs_ 118 | 119 | -------------------------------------------------------------------------------- /utils/models/LISTA_base.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_base.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2019-02-18 9 | 10 | A base class for all LISTA networks. 11 | """ 12 | 13 | import numpy as np 14 | import numpy.linalg as la 15 | import tensorflow as tf 16 | 17 | import utils.train 18 | 19 | class LISTA_base (object): 20 | 21 | """ 22 | Implementation of deep neural network model. 
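    Subclasses override `setup_layers` and `inference`; this base class
    contributes saving and loading of trainable variables and the stage-wise
    training loop that wraps `utils.train`.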
23 | """ 24 | 25 | def __init__ (self): 26 | pass 27 | 28 | def setup_layers (self): 29 | pass 30 | 31 | def inference (self): 32 | pass 33 | 34 | def save_trainable_variables (self , sess , savefn): 35 | """ 36 | Save trainable variables in the model to npz file with current value of each 37 | variable in tf.trainable_variables(). 38 | 39 | :sess: Tensorflow session. 40 | :savefn: File name of saved file. 41 | 42 | """ 43 | state = getattr (self , 'state' , {}) 44 | utils.train.save_trainable_variables( 45 | sess, savefn, self._scope, **state ) 46 | 47 | def load_trainable_variables (self, sess, savefn): 48 | """ 49 | Load trainable variables from saved file. 50 | 51 | :sess: TODO 52 | :savefn: TODO 53 | :returns: TODO 54 | 55 | """ 56 | self.state = utils.train.load_trainable_variables(sess, savefn) 57 | 58 | def do_training(self, sess, stages, savefn, scope, 59 | val_step, maxit, better_wait): 60 | """ 61 | Do training actually. Refer to utils/train.py. 62 | 63 | :sess : Tensorflow session, in which we will run the training. 64 | :stages : List of tuples. Training stages obtained via 65 | `utils.train.setup_training`. 66 | :savefn : String. Path where the trained model is saved. 67 | :batch_size : Integer. Training batch size. 68 | :val_step : Integer. How many steps between two validation. 69 | :maxit : Integer. Max number of iterations in each training stage. 70 | :better_wait: Integer. Jump to next stage if no better performance after 71 | certain # of iterations. 72 | 73 | """ 74 | self.state = utils.train.do_training( 75 | sess, stages, savefn, scope, val_step, maxit, better_wait) 76 | 77 | -------------------------------------------------------------------------------- /utils/models/LISTA_cp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cp.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified : 2018-10-21 9 | 10 | Implementation of Learned ISTA with weight coupling. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_free 18 | from models.LISTA_base import LISTA_base 19 | 20 | class LISTA_cp (LISTA_base): 21 | 22 | """ 23 | Implementation of learned ISTA with weight coupling constraint. 24 | """ 25 | def __init__ (self, A, T, lam, untied, coord, scope): 26 | """ 27 | :prob: : Instance of Problem class, describing problem settings. 28 | :T : Number of layers (depth) of this LISTA model. 29 | :lam : Initial value of thresholds of shrinkage functions. 30 | :untied : Whether weights are shared within layers. 31 | """ 32 | self._A = A.astype (np.float32) 33 | self._T = T 34 | self._lam = lam 35 | self._M = self._A.shape [0] 36 | self._N = self._A.shape [1] 37 | 38 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 39 | self._theta = (self._lam / self._scale).astype(np.float32) 40 | if coord: 41 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 42 | 43 | self._untied = untied 44 | self._coord = coord 45 | self._scope = scope 46 | 47 | """ Set up layers.""" 48 | self.setup_layers() 49 | 50 | 51 | def setup_layers(self): 52 | """ 53 | Implementation of LISTA model proposed by LeCun in 2010. 54 | 55 | :prob: Problem setting. 56 | :T: Number of layers in LISTA. 57 | :returns: 58 | :layers: List of tuples ( name, xh_, var_list ) 59 | :name: description of layers. 60 | :xh: estimation of sparse code at current layer. 
61 |             :var_list: list of variables to be trained separately.
62 | 
63 |         """
64 |         Ws_ = []
65 |         thetas_ = []
66 | 
67 |         W = (np.transpose (self._A) / self._scale).astype (np.float32)
68 | 
69 |         with tf.variable_scope (self._scope, reuse=False) as vs:
70 |             # constant
71 |             self._kA_ = tf.constant (value=self._A, dtype=tf.float32)
72 | 
73 |             if not self._untied: # tied model
74 |                 Ws_.append (tf.get_variable (name='W', dtype=tf.float32,
75 |                                              initializer=W))
76 |                 Ws_ = Ws_ * self._T
77 | 
78 |             for t in range (self._T):
79 |                 thetas_.append (tf.get_variable (name="theta_%d"%(t+1),
80 |                                                  dtype=tf.float32,
81 |                                                  initializer=self._theta))
82 |                 if self._untied: # untied model
83 |                     Ws_.append (tf.get_variable (name="W_%d"%(t+1),
84 |                                                  dtype=tf.float32,
85 |                                                  initializer=W))
86 | 
87 |         # Collection of all trainable variables in the model layer by layer.
88 |         # We name it as `vars_in_layer` because we will use it in the manner:
89 |         # vars_in_layer [t]
90 |         self.vars_in_layer = list (zip (Ws_, thetas_))
91 | 
92 | 
93 |     def inference (self, y_, x0_=None):
94 |         xhs_ = [] # collection of the regressed sparse codes
95 | 
96 |         if x0_ is None:
97 |             batch_size = tf.shape (y_) [-1]
98 |             xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32)
99 |         else:
100 |             xh_ = x0_
101 |         xhs_.append (xh_)
102 | 
103 |         with tf.variable_scope (self._scope, reuse=True) as vs:
104 |             for t in range (self._T):
105 |                 W_, theta_ = self.vars_in_layer [t]
106 | 
107 |                 res_ = y_ - tf.matmul (self._kA_, xh_)
108 |                 xh_ = shrink_free (xh_ + tf.matmul (W_, res_), theta_)
109 |                 xhs_.append (xh_)
110 | 
111 |         return xhs_
112 | 
113 | 
--------------------------------------------------------------------------------
/utils/models/LISTA_cp_conv.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | 
4 | """
5 | file : LISTA_cp_conv.py
6 | author: Xiaohan Chen
7 | email : chernxh@tamu.edu
8 | date : 2019-02-17
9 | """
10 | 
11 | import numpy as np
12 | import tensorflow as tf
13 | import utils.train
14 | 
15 | from utils.tf import shrink_free
16 | from models.LISTA_base import LISTA_base
17 | 
18 | class LISTA_cp_conv(LISTA_base):
19 | 
20 |     """
21 |     Implementation of convolutional learned ISTA with weight coupling constraint.
22 |     """
23 |     def __init__(self, filters, T, lam, alpha, untied, scope):
24 |         """Build a convolutional LISTA-CP network.
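        Each layer reconstructs the image by convolving the current feature
        maps with the decoder filters, reflect-pads the residual against the
        input, convolves it with a learnable filter bank W to approximate the
        gradient step, and then thresholds the updated feature maps with
        `shrink_free`.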
25 | 26 | :T: TODO 27 | :filters: TODO 28 | :lam: TODO 29 | :alpha: TODO 30 | :untied: TODO 31 | :scope: TODO 32 | 33 | """ 34 | self._T = T 35 | self._fh = filters.shape [0] 36 | self._fw = filters.shape [1] 37 | self._fn = filters.shape [2] 38 | self._lam = lam 39 | self._alpha = alpha 40 | self._untied = untied 41 | self._scope = scope 42 | 43 | # default threshld 44 | self._theta = self._lam * self._alpha 45 | 46 | # get the transpose of filters 47 | tfilters = self._alpha * np.rot90 (filters, k=2, axes=(0,1)) 48 | # set the _fs and _ft by adding new axes to them 49 | # _fs convs feature maps into image: (fh, fw, fn, 1) 50 | # _ft convs residual (one image) into feature maps: (fh, fw, 1, fn) 51 | self._fs = filters.reshape ((self._fh, self._fw, self._fn, 1)) 52 | self._ft = tfilters.reshape ((self._fh, self._fw, 1, self._fn)) 53 | 54 | # set up layers 55 | self.setup_layers () 56 | 57 | 58 | def setup_layers (self): 59 | # lists that hold parameters in the network 60 | self._Ws_ = [] 61 | self._thetas_ = [] 62 | with tf.variable_scope (self._scope, reuse=False) as vs: 63 | # tf constant for filters 64 | self._fs_const_ = tf.constant (value=self._fs, 65 | dtype=tf.float32, name='fs') 66 | 67 | self._ft_const_ = tf.constant (value=self._ft, 68 | dtype=tf.float32, name='ft') 69 | 70 | if self._untied == False: 71 | # tied model 72 | self._Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 73 | initializer=self._ft_const_)) 74 | self._Ws_ = self._Ws_ * self._T 75 | 76 | for i in range (self._T): 77 | self._thetas_.append (tf.get_variable (name='theta_'+str(i+1), 78 | dtype=tf.float32, 79 | initializer=self._theta)) 80 | if self._untied == True: 81 | # untied model 82 | self._Ws_.append (tf.get_variable (name='W_'+str(i+1), 83 | dtype=tf.float32, 84 | initializer=self._ft_const_)) 85 | 86 | 87 | def inference(self, input_, init_feature_=None): 88 | """TODO: Docstring for inference. 89 | 90 | :input_: Batch of images of size (batch_size, h, w, channel=1). 91 | :init_feature_: Batch of feature maps to be updated of size 92 | (batch_size, h+fh-1, w+fw-1, channel=self._fn). 93 | None means starting from all zero feature maps. 94 | :returns: TODO 95 | 96 | """ 97 | # list of features estimated in each layer 98 | features_ = [] 99 | recons_ = [] 100 | 101 | with tf.variable_scope (self._scope, reuse=True) as vs: 102 | # set paddding const for residual padding 103 | ph, pw = self._fh - 1, self._fw - 1 104 | paddings_ = tf.constant ([[0, 0], [ph, ph], [pw, pw], [0,0]]) 105 | # NOTE: the [0, 0] padding here is for the batch_size axis 106 | 107 | if init_feature_ is None: 108 | shape = tf.shape (input_) 109 | batch_size = shape [0] 110 | h = shape [1] 111 | w = shape [2] 112 | feature_ = tf.zeros (shape=(batch_size, 113 | h + self._fh - 1, 114 | w + self._fw - 1, 115 | self._fn), 116 | dtype=tf.float32, name='x_0') 117 | else: 118 | feature_ = init_feature_ 119 | features_.append (feature_) 120 | 121 | for t in range (self._T): 122 | # conv layer to get the reconstructed image 123 | conv_ = tf.nn.conv2d (input=feature_, 124 | filter=self._fs_const_, 125 | strides=(1,1,1,1), 126 | padding='VALID', 127 | use_cudnn_on_gpu=True, 128 | data_format='NHWC', 129 | name='conv_%d' % (t+1)) 130 | recons_.append (conv_) 131 | 132 | residual_ = input_ - conv_ 133 | # residual padding from (bs, h, w, 1) to 134 | # (bs, h+2fh-2, w+2fw-2, 1) 135 | padded_res_ = tf.pad (residual_, paddings_, "REFLECT") 136 | 137 | # deconv to calcualte the gradients w.r.t. 
feature maps 138 | W_ = self._Ws_ [t] 139 | grad_ = tf.nn.conv2d(input=padded_res_, 140 | filter=W_, 141 | strides=(1,1,1,1), 142 | padding="VALID", 143 | use_cudnn_on_gpu=True, 144 | data_format='NHWC', 145 | name='deconv_%d' % (t+1)) 146 | 147 | # feature_ update 148 | feature_ = feature_ + grad_ 149 | 150 | # thresholding 151 | theta_ = self._thetas_ [t] 152 | feature_ = shrink_free(feature_, theta_) 153 | # append feature_ to feature list 154 | features_.append (feature_) 155 | 156 | conv_ = tf.nn.conv2d(input=feature_, 157 | filter=self._fs_const_, 158 | strides=(1,1,1,1), 159 | padding='VALID', 160 | use_cudnn_on_gpu=True, 161 | data_format='NHWC', 162 | name='conv_%d' % (t+1)) 163 | recons_.append(conv_) 164 | return features_, recons_ 165 | 166 | 167 | # def setup_training (self, input_, label_, input_val_, label_val_, 168 | # init_feature_, init_lr, decay_rate, lr_decay): 169 | # """TODO: Docstring for setup_training. 170 | 171 | # :input_: Tensorflow placeholder or tensor. Input of training set. 172 | # :label_: Tensorflow placeholder or tensor. Label for the sparse feature 173 | # maps of training set. If `loss_type` is `recon`, `label_` should be 174 | # `input_` in noiseless reconstruction or noisy image in denoising. 175 | # :input_val_: Tensorflow placeholder or tensor. Input of validation set. 176 | # :label_val_: Tensorflow placeholder or tensor. Label for the sparse 177 | # feature maps of validation set. If `loss_type` is `recon`, 178 | # `label_` should be `input_` in noiseless reconstruction or noisy 179 | # image in denoising. 180 | # :init_feature_: TensorFlow tensor. Initial estimation of feature maps. 181 | # :init_lr: TODO 182 | # :decay_rate: TODO 183 | # :lr_decay: TODO 184 | # :returns: 185 | # :training_stages: list of training stages 186 | 187 | # """ 188 | # # infer feature_, feature_val_ from input_, input_val_ 189 | # # predictions are the reconstructions 190 | # _, predicts_ = self.inference (input_, init_feature_, True) 191 | # _, predicts_val_ = self.inference (input_val_, init_feature_, True) 192 | # assert len (predicts_) == self._T + 1 193 | # assert len (predicts_val_) == self._T + 1 194 | # nmse_denom_ = tf.nn.l2_loss (label_) 195 | # nmse_denom_val_ = tf.nn.l2_loss (label_val_) 196 | 197 | # # start setting up training 198 | # training_stages = [] 199 | 200 | # lrs = [init_lr * decay for decay in lr_decay] 201 | 202 | # # setup self.lr_multiplier dictionary 203 | # # learning rate multipliers of each variables 204 | # lr_multiplier = dict() 205 | # for var in tf.trainable_variables(): 206 | # lr_multiplier[var.op.name] = 1.0 207 | 208 | # # initialize self.train_vars list 209 | # # variables which will be updated in next training stage 210 | # train_vars = [] 211 | 212 | # for t in range (self._T): 213 | # # layer information for training monitoring 214 | # layer_info = "{scope} T={time}".format (scope=self._scope, time=t+1) 215 | 216 | # # set up loss_ and nmse_ 217 | # loss_ = tf.nn.l2_loss (predicts_ [t+1] - label_) 218 | # nmse_ = loss_ / nmse_denom_ 219 | # loss_val_ = tf.nn.l2_loss (predicts_val_ [t+1] - label_val_) 220 | # nmse_val_ = loss_val_ / nmse_denom_val_ 221 | 222 | # W_ = self._Ws_ [t] 223 | # theta_ = self._thetas_ [t] 224 | 225 | # # train parameters in current layer with initial learning rate 226 | # if W_ not in train_vars: 227 | # var_list = (W_, theta_, ) 228 | # else: 229 | # var_list = (theta_, ) 230 | # op_ = tf.train.AdamOptimizer (init_lr).minimize (loss_, 231 | # var_list=var_list) 232 | # training_stages.append 
((layer_info, loss_, nmse_, 233 | # loss_val_, nmse_val_, op_, var_list)) 234 | 235 | # for var in var_list: 236 | # train_vars.append (var) 237 | 238 | # # train all variables in current and former layers with decayed 239 | # # learning rate 240 | # for lr in lrs: 241 | # op_ = get_train_op (loss_, train_vars, lr, lr_multiplier) 242 | # training_stages.append ((layer_info + ' lr={}'.format (lr), 243 | # loss_, 244 | # nmse_, 245 | # loss_val_, 246 | # nmse_val_, 247 | # op_, 248 | # tuple (train_vars), )) 249 | 250 | # # decay learning rates for trained variables 251 | # for var in train_vars: 252 | # lr_multiplier [var.op.name] *= decay_rate 253 | 254 | # return training_stages 255 | 256 | 257 | -------------------------------------------------------------------------------- /utils/models/LISTA_cpss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cpss.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-17 9 | 10 | Implementation of Learned ISTA with support selection and coupled weights. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class LISTA_cpss (LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, 28 | untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | self._lam = lam 40 | self._M = self._A.shape [0] 41 | self._N = self._A.shape [1] 42 | 43 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 44 | self._theta = (self._lam / self._scale).astype(np.float32) 45 | if coord: 46 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 47 | 48 | self._ps = [(t+1) * self._p for t in range (self._T)] 49 | self._ps = np.clip (self._ps, 0.0, self._maxp) 50 | 51 | self._untied = untied 52 | self._coord = coord 53 | self._scope = scope 54 | 55 | """ Set up layers.""" 56 | self.setup_layers() 57 | 58 | 59 | def setup_layers(self): 60 | """ 61 | Implementation of LISTA model proposed by LeCun in 2010. 62 | 63 | :prob: Problem setting. 64 | :T: Number of layers in LISTA. 65 | :returns: 66 | :layers: List of tuples ( name, xh_, var_list ) 67 | :name: description of layers. 68 | :xh: estimation of sparse code at current layer. 69 | :var_list: list of variables to be trained seperately. 
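            Support selection: at layer t, a percentage p_t = min ((t+1) *
            percent, max_percent) of the entries with the largest magnitudes
            is treated as trusted support and is exempted from soft
            thresholding inside `shrink_ss`.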
70 | 71 | """ 72 | Ws_ = [] 73 | thetas_ = [] 74 | 75 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 76 | 77 | with tf.variable_scope (self._scope, reuse=False) as vs: 78 | # constant 79 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 80 | 81 | if not self._untied: # tied model 82 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 83 | initializer=W)) 84 | Ws_ = Ws_ * self._T 85 | 86 | for t in range (self._T): 87 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 88 | dtype=tf.float32, 89 | initializer=self._theta)) 90 | if self._untied: # untied model 91 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 92 | dtype=tf.float32, 93 | initializer=W)) 94 | 95 | # Collection of all trainable variables in the model layer by layer. 96 | # We name it as `vars_in_layer` because we will use it in the manner: 97 | # vars_in_layer [t] 98 | self.vars_in_layer = list (zip (Ws_, thetas_)) 99 | 100 | 101 | def inference (self, y_, x0_=None): 102 | xhs_ = [] # collection of the regressed sparse codes 103 | 104 | if x0_ is None: 105 | batch_size = tf.shape (y_) [-1] 106 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 107 | else: 108 | xh_ = x0_ 109 | xhs_.append (xh_) 110 | 111 | with tf.variable_scope (self._scope, reuse=True) as vs: 112 | for t in range (self._T): 113 | W_, theta_ = self.vars_in_layer [t] 114 | percent = self._ps [t] 115 | 116 | res_ = y_ - tf.matmul (self._kA_, xh_) 117 | xh_ = shrink_ss (xh_ + tf.matmul (W_, res_), theta_, percent) 118 | xhs_.append (xh_) 119 | 120 | return xhs_ 121 | 122 | -------------------------------------------------------------------------------- /utils/models/LISTA_cpss_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cpss_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of Learned ISTA with support selection and coupled weights for 11 | real world image compressive sensing experiments. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink_ss 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_cpss_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, percent, max_percent, 29 | untied, coord, scope): 30 | """ 31 | :prob: : Instance of Problem class, describing problem settings. 32 | :T : Number of layers (depth) of this LISTA model. 33 | :lam : Initial value of thresholds of shrinkage functions. 34 | :untied : Whether weights are shared within layers. 
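        :Phi    : Numpy ndarray. Sensing matrix of shape (M, F).
        :D      : Numpy ndarray. Sparsifying dictionary of shape (F, N); the
                  effective dictionary used by the layers is A = Phi @ D.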
35 | """ 36 | self._Phi = Phi.astype (np.float32) 37 | self._D = D.astype (np.float32) 38 | self._A = np.matmul (self._Phi, self._D) 39 | self._T = T 40 | self._p = percent 41 | self._maxp = max_percent 42 | self._lam = lam 43 | self._M = self._Phi.shape [0] 44 | self._F = self._Phi.shape [1] 45 | self._N = self._D.shape [1] 46 | 47 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 48 | self._theta = (self._lam / self._scale).astype(np.float32) 49 | if coord: 50 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 51 | 52 | self._ps = [(t+1) * self._p for t in range (self._T)] 53 | self._ps = np.clip (self._ps, 0.0, self._maxp) 54 | 55 | self._untied = untied 56 | self._coord = coord 57 | self._scope = scope 58 | 59 | """ Set up layers.""" 60 | self.setup_layers() 61 | 62 | 63 | def setup_layers(self): 64 | """ 65 | Implementation of LISTA model proposed by LeCun in 2010. 66 | 67 | :prob: Problem setting. 68 | :T: Number of layers in LISTA. 69 | :returns: 70 | :layers: List of tuples ( name, xh_, var_list ) 71 | :name: description of layers. 72 | :xh: estimation of sparse code at current layer. 73 | :var_list: list of variables to be trained seperately. 74 | 75 | """ 76 | Ws_ = [] 77 | thetas_ = [] 78 | 79 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 80 | 81 | with tf.variable_scope (self._scope, reuse=False) as vs: 82 | # constant 83 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 84 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 85 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 86 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 87 | initializer=self._D) 88 | 89 | if not self._untied: # tied model 90 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 91 | initializer=W)) 92 | Ws_ = Ws_ * self._T 93 | 94 | for t in range (self._T): 95 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 96 | dtype=tf.float32, 97 | initializer=self._theta)) 98 | if self._untied: # untied model 99 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 100 | dtype=tf.float32, 101 | initializer=W)) 102 | 103 | # Collection of all trainable variables in the model layer by layer. 
104 | # We name it as `vars_in_layer` because we will use it in the manner: 105 | # vars_in_layer [t] 106 | # Note here the last element of `self.vars_in_layer` is 107 | # (W_, theta_, vD_) 108 | self.vars_in_layer = list (zip (Ws_ [:-1], thetas_ [:-1])) 109 | self.vars_in_layer.append ((Ws_ [-1], thetas_ [-1], self._vD_, )) 110 | 111 | 112 | def inference (self, y_, x0_=None): 113 | xhs_ = [] # collection of the regressed sparse codes 114 | fhs_ = [] # collection of the regressed signals 115 | 116 | if x0_ is None: 117 | batch_size = tf.shape (y_) [-1] 118 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 119 | else: 120 | xh_ = x0_ 121 | xhs_.append (xh_) 122 | fhs_.append (tf.matmul (self._kD_, xh_)) 123 | 124 | with tf.variable_scope (self._scope, reuse=True) as vs: 125 | for t in range (self._T): 126 | if t < self._T - 1: 127 | W_, theta_ = self.vars_in_layer [t] 128 | D_ = self._kD_ 129 | else: 130 | W_, theta_, D_ = self.vars_in_layer [t] 131 | percent = self._ps [t] 132 | 133 | res_ = y_ - tf.matmul (self._kA_, xh_) 134 | xh_ = shrink_ss (xh_ + tf.matmul (W_, res_), theta_, percent) 135 | xhs_.append (xh_) 136 | 137 | fhs_.append (tf.matmul (D_, xh_)) 138 | 139 | return xhs_, fhs_ 140 | 141 | -------------------------------------------------------------------------------- /utils/models/LISTA_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-21 9 | 10 | Implementation of the original Learned ISTA for real world image compressive 11 | sensing experiments. 12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 34 | """ 35 | self._Phi = Phi.astype (np.float32) 36 | self._D = D.astype (np.float32) 37 | self._A = np.matmul (self._Phi, self._D) 38 | self._T = T 39 | self._lam = lam 40 | self._M = self._Phi.shape [0] 41 | self._F = self._Phi.shape [1] 42 | self._N = self._D.shape [1] 43 | 44 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 45 | self._theta = (self._lam / self._scale).astype(np.float32) 46 | if coord: 47 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 48 | 49 | self._untied = untied 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ 59 | Implementation of LISTA model proposed by LeCun in 2010. 60 | 61 | :prob: Problem setting. 62 | :T: Number of layers in LISTA. 63 | :returns: 64 | :layers: List of tuples ( name, xh_, var_list ) 65 | :name: description of layers. 66 | :xh: estimation of sparse code at current layer. 67 | :var_list: list of variables to be trained seperately. 
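            Initialization: B = A^T / scale and W = I - B @ A, so that an
            untrained layer performs exactly one ISTA iteration with step
            size 1 / scale.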
68 | 69 | """ 70 | Bs_ = [] 71 | Ws_ = [] 72 | thetas_ = [] 73 | 74 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 75 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 76 | 77 | with tf.variable_scope (self._scope, reuse=False) as vs: 78 | # constant 79 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 80 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 81 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 82 | 83 | # variables 84 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 85 | initializer=self._D) 86 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 87 | initializer=B)) 88 | Bs_ = Bs_ * self._T 89 | if not self._untied: # tied model 90 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 91 | initializer=W)) 92 | Ws_ = Ws_ * self._T 93 | 94 | for t in range (self._T): 95 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 96 | dtype=tf.float32, 97 | initializer=self._theta)) 98 | if self._untied: # untied model 99 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 100 | dtype=tf.float32, 101 | initializer=W)) 102 | 103 | # Collection of all trainable variables in the model layer by layer. 104 | # We name it as `vars_in_layer` because we will use it in the manner: 105 | # vars_in_layer [t] 106 | # Note here the last element of `self.vars_in_layer` is 107 | # (W_, theta_, vD_) 108 | self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1])) 109 | self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, )) 110 | 111 | 112 | def inference (self, y_, x0_=None): 113 | xhs_ = [] # collection of the regressed sparse codes 114 | fhs_ = [] # collection of the regressed signals 115 | 116 | if x0_ is None: 117 | batch_size = tf.shape (y_) [-1] 118 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 119 | else: 120 | xh_ = x0_ 121 | xhs_.append (xh_) 122 | fhs_.append (tf.matmul (self._kD_, xh_)) 123 | 124 | with tf.variable_scope (self._scope, reuse=True) as vs: 125 | for t in range (self._T): 126 | if t < self._T - 1: 127 | B_, W_, theta_ = self.vars_in_layer [t] 128 | D_ = self._kD_ 129 | else: 130 | B_, W_, theta_, D_ = self.vars_in_layer [t] 131 | 132 | By_ = tf.matmul (B_, y_) 133 | xh_ = shrink (tf.matmul (W_, xh_) + By_, theta_) 134 | xhs_.append (xh_) 135 | 136 | fhs_.append (tf.matmul (D_, xh_)) 137 | 138 | return xhs_, fhs_ 139 | 140 | -------------------------------------------------------------------------------- /utils/models/LISTA_ss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_ss.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | last_modified : 2018-10-21 9 | 10 | Implementation of Learned ISTA with support selection technique. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class LISTA_ss (LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, 28 | untied, coord, scope): 29 | """ 30 | :prob: : Instance of Problem class, describing problem settings. 31 | :T : Number of layers (depth) of this LISTA model. 32 | :lam : Initial value of thresholds of shrinkage functions. 33 | :untied : Whether weights are shared within layers. 
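        :percent     : Float. Percentage of entries added to the trusted
                       support with each additional layer.
        :max_percent : Float. Upper bound on the selected percentage.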
34 | """ 35 | self._A = A.astype (np.float32) 36 | self._T = T 37 | self._p = percent 38 | self._maxp = max_percent 39 | self._lam = lam 40 | self._M = self._A.shape [0] 41 | self._N = self._A.shape [1] 42 | 43 | self._scale = 1.001 * np.linalg.norm (A, ord=2)**2 44 | self._theta = (self._lam / self._scale).astype(np.float32) 45 | if coord: 46 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 47 | 48 | self._ps = [(t+1) * self._p for t in range (self._T)] 49 | self._ps = np.clip (self._ps, 0.0, self._maxp) 50 | 51 | self._untied = untied 52 | self._coord = coord 53 | self._scope = scope 54 | 55 | """ Set up layers.""" 56 | self.setup_layers() 57 | 58 | 59 | def setup_layers(self): 60 | """ 61 | Implementation of LISTA model proposed by LeCun in 2010. 62 | 63 | :prob: Problem setting. 64 | :T: Number of layers in LISTA. 65 | :returns: 66 | :layers: List of tuples ( name, xh_, var_list ) 67 | :name: description of layers. 68 | :xh: estimation of sparse code at current layer. 69 | :var_list: list of variables to be trained seperately. 70 | 71 | """ 72 | Bs_ = [] 73 | Ws_ = [] 74 | thetas_ = [] 75 | 76 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 77 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 78 | 79 | with tf.variable_scope (self._scope, reuse=False) as vs: 80 | # constant 81 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 82 | 83 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 84 | initializer=B)) 85 | Bs_ = Bs_ * self._T 86 | if not self._untied: # tied model 87 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 88 | initializer=W)) 89 | Ws_ = Ws_ * self._T 90 | 91 | for t in range (self._T): 92 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 93 | dtype=tf.float32, 94 | initializer=self._theta)) 95 | if self._untied: # untied model 96 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 97 | dtype=tf.float32, 98 | initializer=W)) 99 | 100 | # Collection of all trainable variables in the model layer by layer. 101 | # We name it as `vars_in_layer` because we will use it in the manner: 102 | # vars_in_layer [t] 103 | self.vars_in_layer = list (zip (Bs_, Ws_, thetas_)) 104 | 105 | 106 | def inference (self, y_, x0_=None): 107 | xhs_ = [] # collection of the regressed sparse codes 108 | 109 | if x0_ is None: 110 | batch_size = tf.shape (y_) [-1] 111 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 112 | else: 113 | xh_ = x0_ 114 | xhs_.append (xh_) 115 | 116 | with tf.variable_scope (self._scope, reuse=True) as vs: 117 | for t in range (self._T): 118 | B_, W_, theta_ = self.vars_in_layer [t] 119 | percent = self._ps [t] 120 | 121 | By_ = tf.matmul (B_, y_) 122 | xh_ = shrink_ss (tf.matmul (W_, xh_) + By_, theta_, percent) 123 | xhs_.append (xh_) 124 | 125 | return xhs_ 126 | 127 | -------------------------------------------------------------------------------- /utils/models/LISTA_ss_cs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : LISTA_ss_cs.py 6 | author: xhchrn 7 | email : chernxh@tamu.edu 8 | date : 2018-10-25 9 | 10 | Implementation of Learned ISTA with only support selection real world image 11 | compressive sensing experiments. 
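The model mirrors LISTA_cs, but replaces the plain soft-thresholding operator
with the support-selection operator `shrink_ss`.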
12 | """ 13 | 14 | import numpy as np 15 | import tensorflow as tf 16 | import utils.train 17 | 18 | from utils.tf import shrink_ss 19 | from models.LISTA_base import LISTA_base 20 | 21 | 22 | class LISTA_ss_cs (LISTA_base): 23 | 24 | """ 25 | Implementation of deep neural network model. 26 | """ 27 | 28 | def __init__(self, Phi, D, T, lam, percent, max_percent, 29 | untied, coord, scope): 30 | """ 31 | :prob: : Instance of Problem class, describing problem settings. 32 | :T : Number of layers (depth) of this LISTA model. 33 | :lam : Initial value of thresholds of shrinkage functions. 34 | :untied : Whether weights are shared within layers. 35 | """ 36 | self._Phi = Phi.astype (np.float32) 37 | self._D = D.astype (np.float32) 38 | self._A = np.matmul (self._Phi, self._D) 39 | self._T = T 40 | self._p = percent 41 | self._maxp = max_percent 42 | self._lam = lam 43 | self._M = self._Phi.shape [0] 44 | self._F = self._Phi.shape [1] 45 | self._N = self._D.shape [1] 46 | 47 | self._scale = 1.001 * np.linalg.norm (self._A, ord=2)**2 48 | self._theta = (self._lam / self._scale).astype(np.float32) 49 | if coord: 50 | self._theta = np.ones ((self._N, 1), dtype=np.float32) * self._theta 51 | 52 | self._ps = [(t+1) * self._p for t in range (self._T)] 53 | self._ps = np.clip (self._ps, 0.0, self._maxp) 54 | 55 | self._untied = untied 56 | self._coord = coord 57 | self._scope = scope 58 | 59 | """ Set up layers.""" 60 | self.setup_layers() 61 | 62 | 63 | def setup_layers(self): 64 | """ 65 | Implementation of LISTA model proposed by LeCun in 2010. 66 | 67 | :prob: Problem setting. 68 | :T: Number of layers in LISTA. 69 | :returns: 70 | :layers: List of tuples ( name, xh_, var_list ) 71 | :name: description of layers. 72 | :xh: estimation of sparse code at current layer. 73 | :var_list: list of variables to be trained seperately. 74 | 75 | """ 76 | Bs_ = [] 77 | Ws_ = [] 78 | thetas_ = [] 79 | 80 | B = (np.transpose (self._A) / self._scale).astype (np.float32) 81 | W = np.eye (self._N, dtype=np.float32) - np.matmul (B, self._A) 82 | 83 | with tf.variable_scope (self._scope, reuse=False) as vs: 84 | # constant 85 | self._kPhi_ = tf.constant (value=self._Phi, dtype=tf.float32) 86 | self._kD_ = tf.constant (value=self._D, dtype=tf.float32) 87 | self._kA_ = tf.constant (value=self._A, dtype=tf.float32) 88 | 89 | # variables 90 | self._vD_ = tf.get_variable (name='D', dtype=tf.float32, 91 | initializer=self._D) 92 | Bs_.append (tf.get_variable (name='B', dtype=tf.float32, 93 | initializer=B)) 94 | Bs_ = Bs_ * self._T 95 | if not self._untied: # tied model 96 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 97 | initializer=W)) 98 | Ws_ = Ws_ * self._T 99 | 100 | for t in range (self._T): 101 | thetas_.append (tf.get_variable (name="theta_%d"%(t+1), 102 | dtype=tf.float32, 103 | initializer=self._theta)) 104 | if self._untied: # untied model 105 | Ws_.append (tf.get_variable (name="W_%d"%(t+1), 106 | dtype=tf.float32, 107 | initializer=W)) 108 | 109 | # Collection of all trainable variables in the model layer by layer. 
110 | # We name it as `vars_in_layer` because we will use it in the manner: 111 | # vars_in_layer [t] 112 | # Note here the last element of `self.vars_in_layer` is 113 | # (W_, theta_, vD_) 114 | self.vars_in_layer = list (zip (Bs_ [:-1], Ws_ [:-1], thetas_ [:-1])) 115 | self.vars_in_layer.append ((Bs_ [-1], Ws_ [-1], thetas_ [-1], self._vD_, )) 116 | 117 | 118 | def inference (self, y_, x0_=None): 119 | xhs_ = [] # collection of the regressed sparse codes 120 | fhs_ = [] # collection of the regressed signals 121 | 122 | if x0_ is None: 123 | batch_size = tf.shape (y_) [-1] 124 | xh_ = tf.zeros (shape=(self._N, batch_size), dtype=tf.float32) 125 | else: 126 | xh_ = x0_ 127 | xhs_.append (xh_) 128 | fhs_.append (tf.matmul (self._kD_, xh_)) 129 | 130 | with tf.variable_scope (self._scope, reuse=True) as vs: 131 | for t in range (self._T): 132 | if t < self._T - 1: 133 | B_, W_, theta_ = self.vars_in_layer [t] 134 | D_ = self._kD_ 135 | else: 136 | B_, W_, theta_, D_ = self.vars_in_layer [t] 137 | percent = self._ps [t] 138 | 139 | By_ = tf.matmul (B_, y_) 140 | xh_ = shrink_ss (tf.matmul (W_, xh_) + By_, theta_, percent) 141 | xhs_.append (xh_) 142 | 143 | fhs_.append (tf.matmul (D_, xh_)) 144 | 145 | return xhs_, fhs_ 146 | 147 | -------------------------------------------------------------------------------- /utils/models/TiLISTA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | file : TiLISTA.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-17 9 | 10 | Implementation of TiLISTA --- LISTA with tied weight. 11 | """ 12 | 13 | import numpy as np 14 | import tensorflow as tf 15 | import utils.train 16 | 17 | from utils.tf import shrink_ss 18 | from models.LISTA_base import LISTA_base 19 | 20 | 21 | class TiLISTA(LISTA_base): 22 | 23 | """ 24 | Implementation of deep neural network model. 25 | """ 26 | 27 | def __init__(self, A, T, lam, percent, max_percent, coord, scope): 28 | """ 29 | :prob: : Instance of Problem class, describing problem settings. 30 | :T : Number of layers (depth) of this LISTA model. 31 | :lam : Initial value of thresholds of shrinkage functions. 32 | :untied : Whether weights are shared within layers. 33 | """ 34 | self._A = A.astype(np.float32) 35 | self._T = T 36 | self._p = percent 37 | self._maxp = max_percent 38 | self._lam = lam 39 | self._M = self._A.shape[0] 40 | self._N = self._A.shape[1] 41 | 42 | self._scale = 1.001 * np.linalg.norm(A, ord=2)**2 43 | self._theta = (self._lam / self._scale).astype(np.float32) 44 | if coord: 45 | self._theta = np.ones((self._N, 1), dtype=np.float32) * self._theta 46 | 47 | self._ps = [(t+1) * self._p for t in range(self._T)] 48 | self._ps = np.clip(self._ps, 0.0, self._maxp) 49 | 50 | self._coord = coord 51 | self._scope = scope 52 | 53 | """ Set up layers.""" 54 | self.setup_layers() 55 | 56 | 57 | def setup_layers(self): 58 | """ Set up layers of ALISTA. 
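        TiLISTA ties one weight matrix W across all layers while keeping a
        per-layer step size alpha_t and threshold theta_t, i.e. each layer
        computes x^{t+1} = shrink_ss (x^t + alpha_t * W (y - A x^t), theta_t, p_t).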
59 | """ 60 | Ws_ = [] # weight 61 | alphas_ = [] # step sizes 62 | thetas_ = [] # thresholds 63 | 64 | W = (np.transpose (self._A) / self._scale).astype (np.float32) 65 | 66 | with tf.variable_scope(self._scope, reuse=False) as vs: 67 | # constant 68 | self._kA_ = tf.constant(value=self._A, dtype=tf.float32) 69 | # tied weight in TiLISTA 70 | Ws_.append (tf.get_variable (name='W', dtype=tf.float32, 71 | initializer=W)) 72 | Ws_ = Ws_ * self._T 73 | 74 | for t in range(self._T): 75 | alphas_.append(tf.get_variable(name="alpha_%d"%(t+1), 76 | dtype=tf.float32, 77 | initializer=1.0)) 78 | thetas_.append(tf.get_variable(name="theta_%d"%(t+1), 79 | dtype=tf.float32, 80 | initializer=self._theta)) 81 | 82 | # Collection of all trainable variables in the model layer by layer. 83 | # We name it as `vars_in_layer` because we will use it in the manner: 84 | # vars_in_layer [t] 85 | self.vars_in_layer = list(zip(Ws_, alphas_, thetas_)) 86 | 87 | 88 | def inference(self, y_, x0_=None): 89 | xhs_ = [] # collection of the regressed sparse codes 90 | 91 | if x0_ is None: 92 | batch_size = tf.shape(y_)[-1] 93 | xh_ = tf.zeros(shape=(self._N, batch_size), dtype=tf.float32) 94 | else: 95 | xh_ = x0_ 96 | xhs_.append(xh_) 97 | 98 | with tf.variable_scope(self._scope, reuse=True) as vs: 99 | for t in range(self._T): 100 | W_, alpha_, theta_ = self.vars_in_layer[t] 101 | percent = self._ps[t] 102 | 103 | res_ = y_ - tf.matmul(self._kA_, xh_) 104 | zh_ = xh_ + alpha_ * tf.matmul(W_, res_) 105 | xh_ = shrink_ss(zh_, theta_, percent) 106 | xhs_.append(xh_) 107 | 108 | return xhs_ 109 | 110 | 111 | -------------------------------------------------------------------------------- /utils/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VITA-Group/ALISTA/23f3637f01066a919d37af76fd60ee03d33188b9/utils/models/__init__.py -------------------------------------------------------------------------------- /utils/prob.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | 4 | """ 5 | file : prob.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | date : 2019-02-23 9 | 10 | Define problem class that is used experiments. 11 | """ 12 | 13 | import os 14 | import argparse 15 | import numpy as np 16 | import numpy.linalg as la 17 | from scipy.io import savemat, loadmat 18 | 19 | def str2bool(v): 20 | return v.lower() in ('true', '1') 21 | 22 | class Problem(object): 23 | 24 | """ 25 | Problem class is a abstraction of the problem we want to solve: recover 26 | sparse code x in R^n space from undersampled measurement y = Ax in R^m 27 | space, where A is a m times n measurement matrix. 
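    Validation pairs are generated as y = A x (plus Gaussian noise at the
    given SNR), where x is Bernoulli-Gaussian with nonzero probability pnz.
    A minimal usage sketch, relying only on the helpers defined at the
    bottom of this file:

        A = random_A (250, 500)
        prob = setup_problem (A, L=1000, pnz=0.1, SNR=40.0,
                              con_num=0.0, col_normalized=True)
        y, x = prob.gen_samples (64)    # y: (250, 64), x: (500, 64)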
28 | 29 | In every problem, we define: 30 | :A : numpy array, measurement matrix 31 | :M, N : integers, # of rows and cols of matrix A 32 | :yval, xval: numpy arrays, a set of validation data 33 | :L : integer, size of validation data 34 | :pnz : hyperparameter about how many non-zero entris in sparse code x 35 | :SNR : noise level in measurements 36 | """ 37 | 38 | def __init__(self): 39 | pass 40 | 41 | def build_prob(self, A, L=1000, pnz=0.1, SNR=40.0, con_num=0.0, 42 | col_normalized=True): 43 | self.A = A 44 | # self.A_ = tf.constant( A, dtype=tf.float32, name='A' ) 45 | self.M, self.N = A.shape 46 | self.L = L 47 | 48 | self.con_num = con_num 49 | self.pnz = pnz 50 | self.SNR = SNR 51 | 52 | self.yval, self.xval \ 53 | = self.gen_samples( self.L ) 54 | 55 | if con_num > 0: 56 | U, _, V = la.svd (A, full_matrices=False) 57 | s = np.logspace (0, np.log10 (1 / con_num), self.M) 58 | A = np.dot (U * (s * np.sqrt(self.N) / la.norm(s)), V).astype (np.float32) 59 | if col_normalized: 60 | A = A / np.sqrt(np.sum(np.square(A), axis=0, keepdims=True)) 61 | # self.x_ = tf.placeholder( tf.float32, (self.N, None), name='x' ) 62 | # self.y_ = tf.placeholder( tf.float32, (self.M, None), name='y' ) 63 | 64 | def measure (self, x, SNR=None): 65 | """ 66 | Measure sparse code x with matrix A and return the measurement. 67 | TODO: 68 | Only consider noiseless setting now. 69 | """ 70 | if SNR is None: 71 | SNR = self.SNR 72 | y = np.matmul (self.A, x) 73 | std = np.std (y, axis=0) * np.power (10.0, -SNR/20.0) 74 | ## The following line is for the compatibility for older versions of 75 | ## `Numpy` pacakge where the `scale` parameter in `np.randon.normal` 76 | ## is not allowed to be zero. 77 | std = np.maximum (std, 10e-50) 78 | noise = np.random.normal (size=y.shape , scale=std).astype (np.float32) 79 | 80 | return y + noise 81 | 82 | def gen_samples(self, size, pnz=None, SNR=None, probability=None): 83 | """ 84 | Generate samples (y, x) in current problem setting. 85 | TODO: 86 | - About how to generate sparse code x, need to refer to Wotao' paper 87 | about the strength of signal x. 88 | Here I just use standard Gaussian. 89 | """ 90 | if pnz is None: 91 | pnz = self.pnz 92 | 93 | if SNR is None: 94 | SNR = self.SNR 95 | 96 | if probability is None: 97 | probability = pnz 98 | else: 99 | assert len (probability) == self.N 100 | assert np.abs (np.sum (probability) - self.N * pnz) < 1 101 | 102 | bernoulli = np.random.uniform (size=(self.N, size)) <= probability 103 | bernoulli = bernoulli.astype (np.float32) 104 | x = bernoulli * np.random.normal (size=(self.N, size)).\ 105 | astype(np.float32) 106 | 107 | y = self.measure (x, SNR) 108 | return y, x 109 | 110 | def save(self, path, ftype='npz'): 111 | """Save current problem settings to npz file or mat file.""" 112 | D = dict(A=self.A, 113 | M=self.M, 114 | N=self.N, 115 | L=self.L, 116 | pnz=self.pnz, 117 | SNR=self.SNR, 118 | con_num=self.con_num, 119 | y=self.yval, 120 | x=self.xval) 121 | 122 | if path[-4:] != '.' + ftype: 123 | path = path + '.' + ftype 124 | 125 | if ftype == 'npz': 126 | np.savez( path, **D ) 127 | elif ftype == 'mat': 128 | savemat( path, D, oned_as='column' ) 129 | else: 130 | raise ValueError( 'invalid file type {}'.format( ftype ) ) 131 | 132 | print("problem saved to {}".format(path)) 133 | 134 | 135 | def read(self, fname): 136 | """ 137 | Read saved problem from file. 
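        Accepts the .npz / .mat files written by `save` above and requires at
        least the matrix `A`; see also the `load_problem` helper below.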
138 | """ 139 | if not os.path.exists( fname ): 140 | raise ValueError('saved problem file {} not found'.format( fname )) 141 | if fname[-4:] == '.npz': 142 | # read from saved npz file 143 | D = np.load( fname ) 144 | elif fname[-4:] == '.mat': 145 | # read from saved mat file 146 | D = loadmat( fname ) 147 | else: 148 | raise ValueError('invalid file type; npz or mat file required') 149 | 150 | if not 'A' in D.keys(): 151 | raise ValueError('invalid input file; matrix A missing') 152 | 153 | for k, v in D.items(): 154 | if k == 'y': 155 | setattr( self, 'yval' ,v ) 156 | elif k == 'x': 157 | setattr( self, 'xval', v ) 158 | else: 159 | setattr( self, k, v ) 160 | 161 | self.M, self.N = self.A.shape 162 | _ , self.L = self.xval.shape 163 | 164 | # # tensorflow part 165 | # self.x_ = tf.placeholder( tf.float32, (self.N, None), name='x' ) 166 | # self.y_ = tf.placeholder( tf.float32, (self.M, None), name='y' ) 167 | # self.A_ = tf.constant( self.A, dtype=tf.float32, name='A' ) 168 | 169 | print( "problem {} successfully loaded".format( fname ) ) 170 | 171 | 172 | def random_A(M, N, con_num=0, col_normalized=True): 173 | """ 174 | Randomly sample measurement matrix A. 175 | Curruently I sample A from i.i.d Gaussian distribution with 1./M variance and 176 | normalize columns. 177 | TODO: check assumptions on measurement matrix A referring to Wotao Yin's Bregman 178 | ISS paper. 179 | 180 | :M: integer, dimension of measurement y 181 | :N: integer, dimension of sparse code x 182 | :col_normalized: 183 | boolean, indicating whether normalize columns, default to True 184 | :returns: 185 | A: numpy array of shape (M, N) 186 | 187 | """ 188 | A = np.random.normal( scale=1.0/np.sqrt(M), size=(M,N) ).astype(np.float32) 189 | return A 190 | 191 | def setup_problem (A, L, pnz, SNR, con_num, col_normalized): 192 | prob = Problem() 193 | prob.build_prob(A, L, pnz, SNR, con_num, col_normalized) 194 | return prob 195 | 196 | def load_problem( fname ): 197 | prob = Problem() 198 | prob.read (fname) 199 | return prob 200 | 201 | parser = argparse.ArgumentParser() 202 | parser.add_argument( 203 | '-M', '--M', type=int, default=250, 204 | help="Dimension of measurements.") 205 | parser.add_argument( 206 | '-N', '--N', type=int, default=500, 207 | help="Dimension of sparse codes.") 208 | parser.add_argument( 209 | '-L', '--L', type=int, default=0, 210 | help="Number of samples for validation (deprecated. please use default).") 211 | parser.add_argument( 212 | '-P', '--pnz', type=float, default=0.1, 213 | help="Percent of nonzero entries in sparse codes.") 214 | parser.add_argument( 215 | '-S', '--SNR', type=str, default='inf', 216 | help="Strength of noises in measurements.") 217 | parser.add_argument( 218 | '-C', '--con_num', type=float, default=0.0, 219 | help="Condition number of measurement matrix. 
0 for no modification on condition number.") 220 | parser.add_argument( 221 | '-CN', '--col_normalized', type=str2bool, default=True, 222 | help="Flag of whether normalize the columns of the dictionary or sensing matrix.") 223 | parser.add_argument( 224 | "-lA", "--load_A", type=str, default=None, 225 | help="Path to the measurement matrix to be loaded.") 226 | parser.add_argument( 227 | '-ef', '--exp_folder', type=str, default='./experiments', 228 | help="Root folder for problems and momdels.") 229 | parser.add_argument( 230 | "-pfn", "--prob_file", type=str, default="prob.npz", 231 | help="The (base) file name of problem file.") 232 | 233 | if __name__ == "__main__": 234 | config, unparsed = parser.parse_known_args() 235 | if not config.load_A is None: 236 | try: 237 | A = np.load(config.load_A) 238 | print("matrix loaded from {}. will be used to generate the problem" 239 | .format(config.load_A)) 240 | except Exception as e: 241 | raise ValueError("invalid file {}".format(config.load_A)) 242 | config.M, config.N = A.shape 243 | else: 244 | A = np.random.normal(scale=1.0/np.sqrt(config.M), 245 | size=(config.M, config.N)).astype(np.float32) 246 | prob_desc = ('m{}_n{}_k{}_p{}_s{}'.format( 247 | config.M, config.N, config.con_num, config.pnz, config.SNR)) 248 | prob_folder = os.path.join(config.exp_folder, prob_desc) 249 | if not os.path.exists(prob_folder): 250 | os.makedirs(prob_folder) 251 | out_file = os.path.join(config.exp_folder, prob_desc, config.prob_file) 252 | if os.path.exists(out_file): 253 | raise ValueError("specified problem file {} already exists".format(out_file)) 254 | if config.SNR == "inf": 255 | SNR = np.inf 256 | else: 257 | try: 258 | SNR = float(config.SNR) 259 | except Exception as e: 260 | raise ValueError("invalid SNR. use 'inf' or a float number.") 261 | p = setup_problem(A, config.L, config.pnz, SNR, config.con_num, 262 | config.col_normalized) 263 | p.save(out_file, ftype="npz") 264 | 265 | -------------------------------------------------------------------------------- /utils/prob_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | 4 | """ 5 | file : prob_conv.py 6 | author: Xiaohan Chen 7 | email : chernxh@tamu.edu 8 | data : 2019-02-23 9 | """ 10 | 11 | import os 12 | import argparse 13 | import numpy as np 14 | import numpy.linalg as la 15 | from scipy.io import savemat, loadmat 16 | from scipy.signal import convolve2d 17 | 18 | def str2bool(v): 19 | return v.lower() in ('true', '1') 20 | 21 | class ProblemConv(object): 22 | 23 | """ 24 | Define the model of convolutional sparse coding. 25 | 26 | In every problem, we define: 27 | :fs : Numpy array of size (fh, fw, fn). Convolutional dictionary. 28 | :fh : Integer. Height of filters. 29 | :fw : Integer. Width of filters. 30 | :fn : Integer. Number of filters. 31 | :pnz : Float. Percentage of entries in feature maps that are not zeros. 32 | :SNR : Float. Noise level in measurements. 33 | """ 34 | 35 | def __init__(self): 36 | pass 37 | 38 | def build_prob(self, fs , pnz=0.1, SNR=40.0): 39 | self._fs = fs 40 | self._ft = np.rot90 (self._fs, axes=(0,1), k=2) 41 | self._fh = self._fs.shape [0] 42 | self._fw = self._fs.shape [1] 43 | self._fn = self._fs.shape [2] 44 | self._pnz = pnz 45 | self._SNR = SNR 46 | 47 | # TODO: how to calculate noise level? SNR -> noise variace? 
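        # For an SNR given in dB, noise_var = signal_power * 10**(-SNR / 10);
        # the commented-out line below is one such estimate. Until that is
        # settled, we default to an effectively noiseless setting: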
        self._noise_var = 1e-50
        # self.noise_var = pnz * self.N / self.M * np.power (10.0 , -SNR/10.0 )

        # self.yval, self.xval = self.gen_samples( self.L )

    def measure(self, features, noise_var=None):
        """
        Convolve the feature maps using self._fs.

        :features : Numpy array of size (bs, h + fh - 1, w + fw - 1, fn).
        :noise_var: Float. The variance of the noise.

        Only the noiseless setting is considered for now.
        """
        if noise_var is None:
            noise_var = self._noise_var

        bs, fmh, fmw, fmn = features.shape
        assert fmn == self._fn  # the number of feature maps must equal
                                # the number of filters
        ih = fmh - self._fh + 1  # image height
        iw = fmw - self._fw + 1  # image width

        # do the convolution
        convs = []
        for feature in features:
            conv = np.zeros(shape=(ih, iw), dtype=np.float32)
            for i in range(self._fn):
                # NOTE: convolve with the 180-degree-rotated filters self._ft
                # because convolve2d flips its kernel; the result equals a
                # correlation with the original filters self._fs.
                conv += convolve2d(feature[:, :, i],
                                   self._ft[:, :, i],
                                   mode='valid')
            convs.append(conv)
        convs = np.asarray(convs, dtype=np.float32)

        # add noise
        noise = np.random.normal(size=convs.shape,
                                 scale=np.sqrt(noise_var)).astype(np.float32)

        return convs + noise
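    # Shape note (added for clarity): measure() uses 'valid' convolutions, so
    # feature maps of size (ih + fh - 1, iw + fw - 1) produce images of size
    # (ih, iw); e.g., 3x3 filters turn 34x34 feature maps into 32x32 images.
    # gen_samples() below allocates its feature maps with exactly this margin.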
    def gen_samples(self, bs, ih, iw, pnz=None, SNR=None, probability=None):
        """
        Generate samples (y, x) in the current problem setting.

        :bs: Integer. Batch size, the number of images to be generated.
        :ih: Integer. Height of the generated images.
        :iw: Integer. Width of the generated images.
        :pnz: Float. Fraction of nonzero entries in the feature maps.
        :SNR: Float. Signal-to-Noise Ratio of the measurements.
        :probability: Float or numpy.ndarray. Probability (or probability map)
            that entries in the feature maps are non-zero.
        :returns: (images, features), where images is a numpy.ndarray of shape
            (bs, ih, iw) and features one of shape (bs, fmh, fmw, fn).

        """
        if pnz is None:
            pnz = self._pnz

        if SNR is None:
            noise_var = self._noise_var
        else:
            # TODO: extend to the noisy case, e.g. in the spirit of the
            # commented dense-case formula in build_prob:
            # noise_var = (self.pnz * self.N / self.M *
            #              np.power (10.0 , -SNR/10.0 ))
            noise_var = self._noise_var
        noise_var = np.max([noise_var, 1e-50])

        fmh, fmw, fmn = ih + self._fh - 1, iw + self._fw - 1, self._fn

        if probability is None:
            probability = pnz
        else:
            assert probability.shape == (fmh, fmw, fmn)
            assert np.abs(np.sum(probability) - fmh * fmw * fmn * pnz) < 1

        bernoulli = np.random.uniform(size=(bs, fmh, fmw, fmn)) <= probability
        bernoulli = bernoulli.astype(np.float32)
        features = (bernoulli * (np.random.normal(size=(bs, fmh, fmw, fmn))
                                 .astype(np.float32)))

        images = self.measure(features, noise_var)
        return images, features

    def save(self, path, ftype='npz'):
        """
        Save the current problem settings to an npz or mat file.
        """
        D = dict(fs=self._fs,
                 ft=self._ft,
                 fh=self._fh,
                 fw=self._fw,
                 fn=self._fn,
                 pnz=self._pnz,
                 SNR=self._SNR,
                 noise_var=self._noise_var)
        if path[-4:] != '.' + ftype:
            path = path + '.' + ftype

        if ftype == 'npz':
            np.savez(path, **D)
        elif ftype == 'mat':
            savemat(path, D, oned_as='column')
        else:
            raise ValueError('invalid file type {}'.format(ftype))

    def read(self, fname):
        """
        Read a saved problem from an npz/mat file.
        """
        if not os.path.exists(fname):
            raise ValueError('saved problem file {} not found'.format(fname))
        if fname[-4:] == '.npz':
            # read from a saved npz file
            D = np.load(fname)
        elif fname[-4:] == '.mat':
            # read from a saved mat file
            D = loadmat(fname)
        else:
            raise ValueError('invalid file type; npz or mat file required')

        if 'fs' not in D.keys():
            raise ValueError('invalid input file; filters fs missing')

        for k, v in D.items():
            setattr(self, "_" + k, v)

        print("problem {} successfully loaded".format(fname))


def random_fs(shape):
    """
    Randomly sample filters from an i.i.d. Gaussian and normalize each filter.

    :shape: Tuple of integers of the form (fh, fw, fn).
    :returns:
        fs: numpy.ndarray of shape (fh, fw, fn).

    """
    if len(shape) != 3:
        raise ValueError("The shape of the filters should be of the form "
                         "(height, width, channels).")

    fs = np.random.normal(size=shape).astype(np.float32)

    # normalization
    norms = np.sqrt(np.sum(np.square(fs), axis=(0, 1)))
    return fs / norms


def setup_problem(fs, pnz, SNR):
    # create and build a problem for convolutional sparse coding
    prob = ProblemConv()
    prob.build_prob(fs, pnz, SNR)
    return prob


def load_problem(fname):
    prob = ProblemConv()
    prob.read(fname)
    return prob
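# Usage sketch (an illustration, not part of the original file): build a tiny
# convolutional problem and draw a batch of (image, feature-map) pairs.
#     fs = random_fs((3, 3, 16))                  # 16 normalized 3x3 filters
#     prob = setup_problem(fs, pnz=0.01, SNR=np.inf)
#     images, features = prob.gen_samples(bs=4, ih=32, iw=32)
#     # images: (4, 32, 32); features: (4, 34, 34, 16), ~1% entries nonzero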
parser = argparse.ArgumentParser()
parser.add_argument(
    '-cd', '--conv_d', type=int, default=3,
    help="The size of the kernels in the convolutional dictionary.")
parser.add_argument(
    '-cm', '--conv_m', type=int, default=100,
    help="The number of kernels in the convolutional dictionary.")
parser.add_argument(
    '-clam', '--conv_lam', type=float, default=0.05,
    help="The weight in the objective function used to learn the convolutional dictionary.")
parser.add_argument(
    '-L', '--L', type=int, default=0,
    help="Number of samples for validation (deprecated; please use the default).")
parser.add_argument(
    '-P', '--pnz', type=float, default=0.001,
    help="Fraction of nonzero entries in the feature maps.")
parser.add_argument(
    '-S', '--SNR', type=str, default='inf',
    help="Strength of the noise in the measurements; 'inf' for noiseless.")
parser.add_argument(
    '-C', '--con_num', type=float, default=0.0,
    help="Condition number of the measurement matrix; "
         "0 leaves the condition number unmodified.")
parser.add_argument(
    '-CN', '--col_normalized', type=str2bool, default=True,
    help="Whether to normalize the columns of the dictionary or sensing matrix.")
parser.add_argument(
    "-ld", "--load_dict", type=str, default=None,
    help="Path to the convolutional dictionary to be loaded.")
parser.add_argument(
    '-ef', '--exp_folder', type=str, default='./experiments',
    help="Root folder for problems and models.")
parser.add_argument(
    "-pfn", "--prob_file", type=str, default="prob.npz",
    help="The (base) file name of the problem file.")

if __name__ == "__main__":
    config, unparsed = parser.parse_known_args()
    if config.load_dict is not None:
        if config.load_dict.endswith(".npy"):
            D = np.load(config.load_dict)
            print("matrix loaded from {}; will be used to generate the problem"
                  .format(config.load_dict))
        elif config.load_dict.endswith(".mat"):
            D = loadmat(config.load_dict)['D']
            # NOTE: rotate the filters in `D` by 180 degrees because the
            #       conv2d in TensorFlow is actually the correlation operation
            #       in convolutional-dictionary-learning terminology.
            D = np.rot90(D, axes=(0, 1), k=2)
        else:
            raise ValueError("invalid file {}".format(config.load_dict))
        config.conv_d, config.conv_m = D.shape[0], D.shape[2]
    else:
        shape = (config.conv_d, config.conv_d, config.conv_m)
        config.conv_lam = 0.0
        D = random_fs(shape)
    prob_desc = ('denoise_d{}_m{}_lam{}'.format(config.conv_d, config.conv_m,
                                                config.conv_lam))
    prob_folder = os.path.join(config.exp_folder, prob_desc)
    if not os.path.exists(prob_folder):
        os.makedirs(prob_folder)
    out_file = os.path.join(prob_folder, config.prob_file)
    if os.path.exists(out_file):
        raise ValueError("specified problem file {} already exists".format(out_file))
    if config.SNR == "inf":
        SNR = np.inf
    else:
        try:
            SNR = float(config.SNR)
        except ValueError:
            raise ValueError("invalid SNR; use 'inf' or a float number.")
    p = setup_problem(D, config.pnz, SNR)
    p.save(out_file, ftype="npz")
    print("problem saved to {}".format(out_file))
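    # Example invocation (hypothetical paths, mirroring the defaults above):
    #     python utils/prob_conv.py -cd 3 -cm 100 -P 0.001 -S inf
    # saves a random-dictionary problem to
    # ./experiments/denoise_d3_m100_lam0.0/prob.npz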
-------------------------------------------------------------------------------- /utils/tf.py: --------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
file  : utils/tf.py
author: Xiaohan Chen
email : chernxh@tamu.edu
date  : 2019-02-18

Utility functions implemented in TensorFlow, including:
    - miscellaneous helpers
    - shrinkage functions (activations)
    - circular padding
    - subgradient functions
    - loss functions
    - batched matrix multiplication operators
"""

import tensorflow as tf


############################################################
#######################    Misc    #########################
############################################################
def is_tensor(x):
    return isinstance(x, (tf.Tensor, tf.SparseTensor, tf.Variable))


############################################################
####################    Shrinkage    #######################
############################################################
def shrink(input_, theta_):
    """
    Soft thresholding function with input input_ and threshold theta_.
    """
    theta_ = tf.maximum(theta_, 0.0)
    return tf.sign(input_) * tf.maximum(tf.abs(input_) - theta_, 0.0)


def shrink_free(input_, theta_):
    """
    Soft shrinkage function without the constraint that the thresholds must be
    greater than zero.
    """
    return tf.sign(input_) * tf.maximum(tf.abs(input_) - theta_, 0.0)


def shrink_ss(inputs_, theta_, q):
    """
    Special shrink that does not apply soft shrinkage to the entries of top q%
    magnitudes.

    :inputs_: Tensor of shape (N, batch_size); the percentile is computed over
              the first axis, i.e., per sample.
    :theta_ : Tensor of thresholds.
    :q      : Float. Percentage of entries per sample to exclude from shrinkage.
    :returns: Tensor of the same shape as inputs_.

    """
    abs_ = tf.abs(inputs_)
    thres_ = tf.contrib.distributions.percentile(
        abs_, 100.0 - q, axis=0, keep_dims=True)

    # Entries that simultaneously exceed the thresholds and the top-q%
    # percentile are selected into the support, and thus are not sent to the
    # shrinkage function.
    index_ = tf.logical_and(abs_ > theta_, abs_ > thres_)
    index_ = tf.to_float(index_)
    # Stop gradients through index_, treating it as a constant.
    index_ = tf.stop_gradient(index_)
    cindex_ = 1.0 - index_  # complementary index

    return (tf.multiply(index_, inputs_) +
            shrink_free(tf.multiply(cindex_, inputs_), theta_))
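# Worked example (illustrative numbers): take one column [3.0, -0.2, 1.5, 0.1]
# with q = 25 and theta_ = 0.15. Only 3.0 exceeds both the top-25% magnitude
# percentile and the threshold, so it is passed through untouched (with its
# selection treated as a constant for gradients); the rest are soft-
# thresholded, giving approximately [3.0, -0.05, 1.35, 0.0].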
96 | """ 97 | index_ = tf.logical_and(abs_ > theta_, abs_ > thres_) 98 | index_ = tf.to_float(index_) 99 | """Stop gradient at index_, considering it as constant.""" 100 | index_ = tf.stop_gradient(index_) 101 | cindex_ = 1.0 - index_ # complementary index 102 | 103 | return (tf.multiply(index_, inputs_) + 104 | shrink_free(tf.multiply(cindex_, inputs_), theta_ )) 105 | 106 | 107 | def shrink_lamp(r_, rvar_, lam_): 108 | """ 109 | Implementation of thresholding neuron in Learned AMP model. 110 | """ 111 | theta_ = tf.maximum(tf.sqrt(rvar_) * lam_, 0.0) 112 | xh_ = tf.sign(r_) * tf.maximum(tf.abs(r_) - theta_, 0.0) 113 | return xh_ 114 | # shrink = tf.abs(r_) - theta_ 115 | # xh_ = tf.sign(r_) * tf.maximum(shrink, 0.0) 116 | # xhl0_ = tf.reduce_mean(tf.to_float(shrink>0), axis=0) 117 | # return xh_, xhl0_ 118 | 119 | def hard_shrink(r_, tau_): 120 | """ 121 | Implementation of hard thresholding neuron. 122 | """ 123 | return tf.nn.relu(tf.sign(tf.abs(r_) - tau_)) * r_ 124 | 125 | 126 | ############################################################ 127 | #################### Padding ######################### 128 | ############################################################ 129 | 130 | def circular_pad(input_, filter_, paddings): 131 | """TODO: Docstring for circular_pad . 132 | 133 | :input_: TODO 134 | :filter_: TODO 135 | :paddings: TODO 136 | :returns: TODO 137 | 138 | """ 139 | pass 140 | 141 | 142 | ############################################################ 143 | ################## Subgradient ####################### 144 | ############################################################ 145 | 146 | def subgradient_l1(inputs_, Q_): 147 | if Q_ is None: 148 | return tf.sign(inputs_) 149 | else: 150 | return tf.sign(inputs_) * Q_ 151 | 152 | def subgradient_l2(inputs_, Q_): 153 | if Q_ is None: 154 | return inputs_ 155 | else: 156 | return inputs_ * Q_ * Q_ 157 | 158 | def subgradient_expl1(inputs_, Q_): 159 | if Q_ is None: 160 | return tf.sign(inputs_) * tf.exp(tf.abs(inputs_)) 161 | else: 162 | return tf.sign(inputs_) * tf.exp(tf.abs(inputs_)) * Q_ 163 | 164 | def subgradient_expl2(inputs_, Q_): 165 | if Q_ is None: 166 | return inputs_ * tf.exp(tf.square(inputs_)) 167 | else: 168 | return inputs_ * tf.exp(tf.square(inputs_)) * Q_ 169 | 170 | subgradient_funcs = { 171 | # NOTE: here reweighted l_p norms use the same subgradient function as 172 | # the normal l_p norms; the difference is only the Q_ parameter 173 | "l1" : subgradient_l1, 174 | "l2" : subgradient_l2, 175 | "rel2" : subgradient_l2, 176 | "expl1": subgradient_expl1, 177 | "expl2": subgradient_expl2 178 | } 179 | 180 | def get_subgradient_func(norm): 181 | return subgradient_funcs[norm] 182 | 183 | 184 | 185 | ############################################################ 186 | ################## Loss Functions ##################### 187 | ############################################################ 188 | 189 | def loss_l1(residual_, Q_): 190 | if Q_ is None: 191 | return tf.reduce_sum(tf.abs(residual_)) 192 | else: 193 | return tf.reduce_sum(tf.abs(residual_) * Q_) 194 | 195 | def loss_l2(residual_, Q_): 196 | if Q_ is None: 197 | return tf.reduce_sum(tf.square(residual_)) / 2.0 198 | else: 199 | return tf.reduce_sum(tf.square(residual_) * Q_ * Q_) / 2.0 200 | 201 | def loss_expl1(residual_, Q_): 202 | if Q_ is None: 203 | return tf.reduce_sum(tf.exp(tf.abs(residual_))) 204 | else: 205 | return tf.reduce_sum(tf.exp(tf.abs(residual_)) * Q_) 206 | 207 | def loss_expl2(residual_, Q_): 208 | if Q_ is None: 209 | return 
############################################################
##################    Loss Functions    ####################
############################################################

def loss_l1(residual_, Q_):
    if Q_ is None:
        return tf.reduce_sum(tf.abs(residual_))
    else:
        return tf.reduce_sum(tf.abs(residual_) * Q_)

def loss_l2(residual_, Q_):
    if Q_ is None:
        return tf.reduce_sum(tf.square(residual_)) / 2.0
    else:
        return tf.reduce_sum(tf.square(residual_) * Q_ * Q_) / 2.0

def loss_expl1(residual_, Q_):
    if Q_ is None:
        return tf.reduce_sum(tf.exp(tf.abs(residual_)))
    else:
        return tf.reduce_sum(tf.exp(tf.abs(residual_)) * Q_)

def loss_expl2(residual_, Q_):
    if Q_ is None:
        return tf.reduce_sum(tf.exp(tf.square(residual_))) / 2.0
    else:
        return tf.reduce_sum(tf.exp(tf.square(residual_)) * Q_) / 2.0

loss_funcs = {
    # NOTE: here the reweighted l_p norms use the same loss function as the
    # plain l_p norms; the difference is only the Q_ parameter
    "l1"     : loss_l1,
    "rel1"   : loss_l1,
    "l2"     : loss_l2,
    "rel2"   : loss_l2,
    "expl1"  : loss_expl1,
    "reexpl1": loss_expl1,
    "expl2"  : loss_expl2,
    "reexpl2": loss_expl2
}

def get_loss_func(loss, Q):
    return lambda residual: loss_funcs[loss](residual, Q)


############################################################
#####################    Operator    #######################
############################################################

def bmxbm(s, t, batch_first=True):
    """
    Batched matrix and batched matrix multiplication.
    """
    if batch_first:
        equation = "aij,ajk->aik"
    else:
        equation = "ija,jka->ika"

    return tf.einsum(equation, s, t)


def bmxm(s, t, batch_first=True):
    """
    Batched matrix and normal matrix multiplication.
    """
    if batch_first:
        equation = "aij,jk->aik"
    else:
        equation = "ija,jk->ika"

    return tf.einsum(equation, s, t)


def mxbm(s, t, batch_first=True):
    """
    Normal matrix and batched matrix multiplication.
    """
    if batch_first:
        equation = "ij,ajk->aik"
    else:
        equation = "ij,jka->ika"

    return tf.einsum(equation, s, t)

--------------------------------------------------------------------------------
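A quick shape check for the einsum helpers in utils/tf.py (a sketch under the
TF1-style API these utilities assume; the shapes are the only claim made):

    import tensorflow as tf
    from utils.tf import bmxbm, bmxm, mxbm

    s = tf.zeros([8, 4, 5])   # a batch of eight 4x5 matrices
    t = tf.zeros([8, 5, 6])   # a batch of eight 5x6 matrices
    m = tf.zeros([5, 3])      # a single, unbatched 5x3 matrix

    print(bmxbm(s, t).shape)  # (8, 4, 6): batchwise products
    print(bmxm(s, m).shape)   # (8, 4, 3): one right factor shared by the batch
    print(mxbm(m, tf.zeros([8, 3, 7])).shape)  # (8, 5, 7): shared left factor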