├── .gitignore
├── ASC
│   ├── ASC_utils.pyx
│   ├── ASC_utils.pyxbld
│   ├── STA_STC.py
│   ├── __init__.py
│   ├── frame_reader.py
│   ├── image_sparser.py
│   ├── sparse_manager.py
│   └── train_sparse_coding.py
├── LICENSE.TXT
├── README.md
├── install_ubuntu_dependencies.sh
├── requirements.txt
├── setup.py
└── setup.sh

/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.bak 3 | venv/ 4 | 5 | *.egg-info 6 | *.pyxbldc 7 | *.pkl 8 | 9 | build/ 10 | dist/ 11 | -------------------------------------------------------------------------------- /ASC/ASC_utils.pyx: -------------------------------------------------------------------------------- 1 | # ============================================================================ 2 | # Copyright 2015 BRAIN Corporation. All rights reserved. This software is 3 | # provided to you under BRAIN Corporation's Beta License Agreement and 4 | # your use of the software is governed by the terms of that Beta License 5 | # Agreement, found at http://www.braincorporation.com/betalicense. 6 | # ============================================================================ 7 | import cython 8 | import numpy as np 9 | cimport numpy as np 10 | from cython.parallel import prange, parallel, threadid 11 | 12 | import scipy.sparse 13 | 14 | cdef extern from *: 15 | pass 16 | 17 | 18 | cdef extern from "xmmintrin.h": 19 | void _mm_setcsr(unsigned int) 20 | unsigned int _mm_getcsr() 21 | 22 | 23 | def set_flush_denormals(): 24 | """ 25 | This call modifies the Control Status Register (CSR) to instruct the CPU to flush denormals. 26 | 27 | Very small numbers are treated differently in order to gain a small amount of extra precision; however, 28 | this extra precision comes with a very significant computational cost. By flushing denormals to 0, 29 | we lose a small amount of precision, but all arithmetic operations then run at a consistent speed. 30 | 31 | This code only works for x86 SSE. Flush-to-zero is the default on the ARM architecture. 32 | """ 33 | _mm_setcsr((_mm_getcsr() & ~0x0040) | (0x0040)) 34 | 35 |
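# A quick way to see what flushing buys (a hypothetical sketch, not part of this module; it
# assumes the extension has been built, e.g. via pyximport as done in image_sparser.py):
#
#     import time
#     import numpy as np
#     import pyximport; pyximport.install()
#     import ASC_utils
#
#     a = np.full(10**7, 1e-310)        # denormal doubles, below ~2.2e-308
#     t = time.time(); b = a * 1.0001; print "with denormals:", time.time() - t
#     ASC_utils.set_flush_denormals()   # sets the DAZ bit (0x0040) in MXCSR
#     t = time.time(); b = a * 1.0001; print "after flushing:", time.time() - t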
32 | """ 33 | _mm_setcsr((_mm_getcsr() & ~0x0040) | (0x0040)) 34 | 35 | 36 | # a is a vector of length N 37 | # B is a matrix of MxN (C) or NxM (Fortran) 38 | # o is a preallocated vector of length M 39 | @cython.nonecheck(False) 40 | @cython.boundscheck(False) 41 | @cython.wraparound(False) 42 | cdef inline void dot(const int N, const double *a, const int M, const double *B, double *o) nogil: 43 | cdef double dot1, dot2, dot3, dot4 44 | cdef int i, j 45 | cdef const double *Bj 46 | 47 | for j in range(M): 48 | Bj = &B[j*N] 49 | i = 0 50 | dot1 = dot2 = dot3 = dot4 = 0 51 | while i < N-3: 52 | dot1 += a[i] * Bj[i]; 53 | dot2 += a[i + 1] * Bj[i + 1]; 54 | dot3 += a[i + 2] * Bj[i + 2]; 55 | dot4 += a[i + 3] * Bj[i + 3]; 56 | i += 4 57 | 58 | while i < N: 59 | dot1 += a[i] * Bj[i]; 60 | i += 1 61 | 62 | o[j] = dot1 + dot2 + dot3 + dot4; 63 | 64 | 65 | @cython.nonecheck(False) 66 | @cython.boundscheck(False) 67 | @cython.wraparound(False) 68 | cdef double single_ASC(const unsigned int K, const unsigned int N, const double* D, const double* DtD, const double* x, unsigned int target_l0, double* a, double* z, double ave_stop_l) nogil: 69 | cdef unsigned int i, j, num_nonzero, cnt, iK 70 | cdef double ai, ainew, l, aidelta, start_l 71 | 72 | for i in range(N): 73 | if x[i]: 74 | break 75 | else: # if x all zeros 76 | return ave_stop_l 77 | 78 | dot(N, x, K, D, z) 79 | 80 | l = 0.0 81 | for i in xrange(K): 82 | if l < z[i]: 83 | l = z[i] 84 | 85 | start_l = l 86 | 87 | if ave_stop_l <= 0.0: 88 | ave_stop_l = 1.0 89 | 90 | l *= ave_stop_l 91 | 92 | cnt = 0 93 | num_nonzero = 0 94 | while (num_nonzero != target_l0) and l > 0.0 and cnt < 25: 95 | num_nonzero = 0 96 | for i in xrange(K): 97 | ai = a[i] 98 | ainew = ai + z[i] - l 99 | if ainew < 0: 100 | ainew = 0 101 | 102 | if ai != ainew: 103 | a[i] = ainew 104 | iK = i*K 105 | aidelta = ainew-ai 106 | for j in xrange(K): 107 | z[j] -= DtD[iK+j]*aidelta 108 | if ainew != 0: 109 | num_nonzero += 1 110 | cnt += 1 111 | l *= (1.0+2.0/cnt) if num_nonzero > target_l0 else (1.0-0.75/cnt) 112 | return l/start_l if start_l > 0 else ave_stop_l 113 | 114 | 115 | @cython.boundscheck(False) 116 | @cython.wraparound(False) 117 | def ASC(np.ndarray[double, ndim=2,] D, np.ndarray[double, ndim=2,] DtD, np.ndarray[double, ndim=2,] X, unsigned int target_l0, unsigned int num_threads=1, bint add_one=False, double ave_stop_l=1.0, np.ndarray inv_std=None): 118 | cdef unsigned int K, N, M, 119 | cdef int i, tid, j, k, k_stop 120 | cdef np.ndarray[double, ndim=2,] A, Z, stop_ls 121 | cdef double* Ap 122 | cdef double* Zp 123 | cdef double* Xp 124 | cdef double* DtDp 125 | cdef double* Dp 126 | cdef double * stop_lsp 127 | cdef double cum_stop_ls = 0 128 | 129 | K = D.shape[1] 130 | N = X.shape[0] 131 | M = X.shape[1] 132 | 133 | cdef np.ndarray[double, ndim=1,] data = np.zeros((target_l0+1)*M) 134 | cdef np.ndarray[unsigned int, ndim=1,] rows = np.zeros((target_l0+1)*M, dtype=np.uint32) 135 | cdef np.ndarray[unsigned int, ndim=1,] indptr = np.arange(0,(target_l0+1)*M+1,(target_l0+1), dtype=np.uint32) 136 | 137 | cdef double* datap = &data[0] 138 | cdef unsigned int* rowsp = &rows[0] 139 | cdef double* inv_stdp = NULL 140 | 141 | if inv_std is not None: 142 | assert inv_std.size >= K 143 | inv_stdp = inv_std.data 144 | 145 | D=np.asfortranarray(D) 146 | X=np.asfortranarray(X) 147 | # DtD is symatric so C or Fortran ordering is fine 148 | 149 | A = np.zeros((X.shape[1], K)) 150 | Z = np.zeros((num_threads, K)) 151 | stop_ls = np.zeros((num_threads, 1)) 152 | Ap=&A[0,0] 153 | 
181 | @cython.nonecheck(False) 182 | @cython.boundscheck(False) 183 | @cython.wraparound(False) 184 | def parallel_dense_add_a_dot_at(np.ndarray[double, ndim=2,] A, np.ndarray[int, ndim=2,] rows, np.ndarray[double, ndim=2,] data, unsigned int num_threads=1): 185 | """ Computes A += D.dot(D.T), where D is a sparse matrix represented by matrices 'rows' (D.indices) and 'data' (D.data).""" 186 | cdef int num_rows = rows.shape[0] 187 | cdef int num_elements = rows.shape[1] 188 | cdef int K = A.shape[0] 189 | cdef int i, j, k, ind, tid 190 | cdef np.ndarray[int, ndim=2,] row_inds = np.zeros((num_threads, num_rows), dtype=np.int32) 191 | cdef double d_ind 192 | 193 | cdef int* rowsp = &rows[0, 0] 194 | cdef double* datap = &data[0, 0] 195 | cdef double* Ap = &A[0, 0] 196 | cdef int* row_indsp = &row_inds[0, 0] 197 | 198 | with nogil, parallel(num_threads=num_threads): 199 | for i in prange(K, schedule='dynamic'): 200 | tid = threadid() 201 | for j in range(num_rows): 202 | # find row i amongst the rows, exploit the fact that rows are sorted and appear only once 203 | ind = row_indsp[tid*num_rows+j] 204 | while rowsp[j*num_elements+ind] < i: 205 | ind = ind + 1 206 | if rowsp[j*num_elements+ind] == i: 207 | row_indsp[tid*num_rows+j] = ind + 1 208 | 209 | d_ind = datap[j*num_elements+ind] 210 | for k in range(num_elements): 211 | Ap[i*K + rowsp[j*num_elements+k]] += d_ind * datap[j*num_elements+k] 212 | 213 | 214 | @cython.nonecheck(False) 215 | @cython.boundscheck(False) 216 | @cython.wraparound(False) 217 | def parallel_dense_add_dense_dot_at(np.ndarray[double, ndim=2,] A, np.ndarray[double, ndim=2,] X, np.ndarray[int, ndim=2,] rows, np.ndarray[double, ndim=2,] data, unsigned int num_threads=1): 218 | """ Computes A += D.dot(X.T).T, where D is a sparse matrix represented by matrices 'rows' (D.indices) and 'data' (D.data). 
""" 219 | cdef int num_rows = rows.shape[0] 220 | cdef int num_elements = rows.shape[1] 221 | cdef int K = A.shape[1] 222 | cdef int M = X.shape[0] # number of inputs 223 | cdef int i, j, k, ind, tid 224 | cdef np.ndarray[int, ndim=2,] row_inds = np.zeros((num_threads, num_rows), dtype=np.int32) 225 | cdef double d_ind 226 | 227 | assert A.shape[0] == M and A.shape[1] == K, "%d %d %d %d" % (A.shape[0], M, A.shape[1], K) 228 | assert X.shape[0] == M and X.shape[1] == num_rows, "%d %d %d %d" % (X.shape[0], M, X.shape[1], num_rows) 229 | 230 | X = np.asfortranarray(X) # make sure the first dimension of X is contiguous so that for Xp[j*M+k], k indexes along the first dimension, instead of the second for normal numpy arrays 231 | A = np.asfortranarray(A) # make sure the first dimension of A is contiguous so that for Ap[i*M+k], k indexes along the first dimension, instead of the second for normal numpy arrays 232 | 233 | cdef int* rowsp = &rows[0, 0] 234 | cdef double* datap = &data[0, 0] 235 | cdef double* Ap = &A[0, 0] 236 | cdef double* Xp = &X[0, 0] 237 | cdef int* row_indsp = &row_inds[0, 0] 238 | 239 | with nogil, parallel(num_threads=num_threads): 240 | for i in prange(K, schedule='dynamic'): 241 | tid = threadid() 242 | for j in range(num_rows): 243 | # find row i amongst the rows, exploit the fact that rows are sorted and appear only once 244 | ind = row_indsp[tid*num_rows+j] 245 | while rowsp[j*num_elements+ind] < i: 246 | ind = ind + 1 247 | if rowsp[j*num_elements+ind] == i: 248 | row_indsp[tid*num_rows+j] = ind + 1 249 | 250 | d_ind = datap[j*num_elements+ind] 251 | for k in range(M): 252 | Ap[i*M+k] += d_ind * Xp[j*M+k] 253 | return A 254 | 255 | 256 | @cython.nonecheck(False) 257 | @cython.boundscheck(False) 258 | @cython.wraparound(False) 259 | def parallel_dot(np.ndarray[double, ndim=2,] A, np.ndarray[double, ndim=2,] X, unsigned int num_threads=1, str AX_order='C', AX=None): 260 | """ Compute matrix multiply, A*X, in parallel and returns the solution in provided matrix AX. 
if AX is None then a new matrix will be created.""" 261 | cdef int An = A.shape[0] 262 | cdef int Am = A.shape[1] 263 | cdef int Xn = X.shape[0] 264 | cdef int Xm = X.shape[1] 265 | cdef int i, j, k 266 | cdef int A_order_C = not np.isfortran(A) 267 | cdef int X_order_C = not np.isfortran(X) 268 | 269 | cdef np.ndarray[double, ndim=2,] AX_local 270 | if AX is None: 271 | AX_local = np.zeros((An, Xm), order=AX_order) 272 | else: 273 | assert AX.shape[0] == An and AX.shape[1] == Xm 274 | AX_local = AX 275 | 276 | assert Am == Xn 277 | 278 | cdef double* Ap = &A[0, 0] 279 | cdef double* AXp = &AX_local[0, 0] 280 | cdef double* Xp = &X[0, 0] 281 | cdef double cum 282 | cdef int AX_order_C = AX_order == 'C' 283 | 284 | with nogil, parallel(num_threads=num_threads): 285 | for i in prange(An, schedule='dynamic'): 286 | for j in range(Xm): 287 | cum = 0 288 | if A_order_C: 289 | if X_order_C: 290 | for k in range(Xn): # Xn == Am 291 | cum = cum + Ap[i*Am+k] * Xp[j+k*Xm] 292 | else: 293 | for k in range(Xn): # Xn == Am 294 | cum = cum + Ap[i*Am+k] * Xp[j*Xn+k] 295 | else: 296 | if X_order_C: 297 | for k in range(Xn): # Xn == Am 298 | cum = cum + Ap[k*An+i] * Xp[j+k*Xm] 299 | else: 300 | for k in range(Xn): # Xn == Am 301 | cum = cum + Ap[k*An+i] * Xp[j*Xn+k] 302 | if AX_order_C: 303 | AXp[i*Xm+j] = cum 304 | else: 305 | AXp[j*An+i] = cum 306 | 307 | return AX_local 308 | 309 | @cython.nonecheck(False) 310 | @cython.boundscheck(False) 311 | @cython.wraparound(False) 312 | def add_scale(np.ndarray[double, ndim=2,] A, np.ndarray[double, ndim=2,] X, double scale, unsigned int num_threads=1): 313 | """ Computes: A += X*scale, where A and X are matrices of the same size and data order. """ 314 | cdef double* Ap = &A[0, 0] 315 | cdef double* Xp = &X[0, 0] 316 | cdef int N = A.shape[0] * A.shape[1] 317 | 318 | assert np.isfortran(A) == np.isfortran(X) 319 | assert N == X.shape[0] * X.shape[1] 320 | 321 | with nogil: 322 | for i in range(N): 323 | Ap[i] += Xp[i]*scale 324 | -------------------------------------------------------------------------------- /ASC/ASC_utils.pyxbld: -------------------------------------------------------------------------------- 1 | # ============================================================================ 2 | # Copyright 2015 BRAIN Corporation. All rights reserved. This software is 3 | # provided to you under BRAIN Corporation's Beta License Agreement and 4 | # your use of the software is governed by the terms of that Beta License 5 | # Agreement, found at http://www.braincorporation.com/betalicense. 
6 | # ============================================================================ 7 | 8 | import os.path 9 | 10 | def make_ext(modname, pyxfilename): 11 | from distutils.extension import Extension 12 | ext = Extension(name = modname, 13 | sources=[pyxfilename], 14 | extra_compile_args=['-fopenmp','-march=native','-funroll-loops'], 15 | extra_link_args=['-fopenmp','-march=native'], 16 | ) 17 | return ext 18 | 19 | def make_setup_args(): 20 | return dict(script_args=["--verbose"]) 21 | -------------------------------------------------------------------------------- /ASC/STA_STC.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import numpy as np 4 | import numpy.linalg 5 | import pyximport 6 | pyximport.install(reload_support=True) 7 | import ASC 8 | import cPickle as pickle 9 | from matplotlib import pylab as plt 10 | 11 | if __name__ == '__main__': 12 | assert len(sys.argv) == 2, "USAGE: python STA_STC.py [path to model.pkl]" 13 | filename = sys.argv[1] 14 | 15 | SM=pickle.load(open(filename, "r")) 16 | SM.stop_learning() 17 | 18 | num_frames = 10000 19 | feed_multiple = 2 20 | 21 | V1S_STAs = 0 22 | V1C_STAs = 0 23 | V1S_STCs = 0 24 | V1C_STCs = 0 25 | outer_stim = np.zeros((num_frames, SM.Vs[0].m * SM.Vs[0].m)) 26 | 27 | for k in range(100): 28 | stim=np.random.randn(num_frames, SM.Vs[0].m) 29 | V1S=[] 30 | V1C=[] 31 | im = np.zeros((80, 80, 3), dtype=np.uint8) 32 | for i in range(num_frames): 33 | im[0:10, 0:10, :] = np.minimum(255, np.maximum(0, stim[i, :].reshape((10,10,3))*127/3+127.5)) 34 | for j in range(feed_multiple): 35 | respS, respC = SM.feed(im) 36 | V1S.append(respS[0][:,0].todense()) 37 | V1C.append(respC[0][:,0]) 38 | 39 | V1S=np.array(V1S).squeeze() 40 | V1C=np.array(V1C).squeeze() 41 | 42 | V1S_STAs = V1S_STAs + ASC.parallel_dot(V1S.T, stim, num_threads=20) 43 | V1C_STAs = V1C_STAs + ASC.parallel_dot(V1C.T, stim, num_threads=20) 44 | 45 | print k 46 | SM.Vs[0].show((V1S_STAs/(0.0001+np.sqrt(np.sum(V1S_STAs**2, axis=1)[...,None])))[0:SM.Vs[0].K,:].T, "V1S_STA_%d_"%k) 47 | SM.Vs[0].show((V1C_STAs/(0.0001+np.sqrt(np.sum(V1C_STAs**2, axis=1)[...,None])))[0:SM.Vs[0].K,:].T, "V1C_STA_%d_"%k) 48 | 49 | for i in range(num_frames): 50 | outer_stim[i, :] = np.outer(stim[i, :], stim[i, :]).reshape((1, -1)) 51 | 52 | V1S_STCs = V1S_STCs + ASC.parallel_dot(V1S.T, outer_stim, num_threads=20).astype(np.float32) 53 | V1C_STCs = V1C_STCs + ASC.parallel_dot(V1C.T, outer_stim, num_threads=20).astype(np.float32) 54 | 55 | # save data for later 56 | pickle.dump((V1S_STCs, V1C_STCs), open("STCs.pkl", "w")) 57 | 58 | V1S_STCs=V1S_STCs.reshape((-1, SM.Vs[0].m, SM.Vs[0].m)) 59 | V1C_STCs=V1C_STCs.reshape((-1, SM.Vs[0].m, SM.Vs[0].m)) 60 | 61 | for i in range(0, SM.Vs[0].K, SM.Vs[0].K/20): 62 | u, e, v = numpy.linalg.svd(V1C_STCs[i,:,:].squeeze(), full_matrices=0, compute_uv=1) 63 | SM.Vs[0].show(u[:, np.hstack((np.arange(0, 50), np.arange(-50, 0)))], "STC_%d_"%i) 64 | plt.clf() 65 | plt.plot(e/np.mean(e),'.') 66 | plt.axis("off") 67 | plt.gcf().set_size_inches(1, 1) 68 | plt.xlim(-10, 310) 69 | plt.savefig("Eigvals_%d"%i, dpi=100) 70 | 71 | -------------------------------------------------------------------------------- /ASC/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================================== 2 | # Copyright (c) 2016, Brain Corporation 3 | # 4 | # This software is released under Creative Commons 5 | # 
Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license. 6 | # Full text available here in LICENSE.TXT file as well as: 7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode 8 | # 9 | # In summary - you are free to: 10 | # 11 | # Share - copy and redistribute the material in any medium or format 12 | # Adapt - remix, transform, and build upon the material 13 | # 14 | # The licensor cannot revoke these freedoms as long as you follow the license terms. 15 | # 16 | # Under the following terms: 17 | # * Attribution - You must give appropriate credit, provide a link to the 18 | # license, and indicate if changes were made. You may do so 19 | # in any reasonable manner, but not in any way that suggests 20 | # the licensor endorses you or your use. 21 | # * NonCommercial - You may not use the material for commercial purposes. 22 | # * ShareAlike - If you remix, transform, or build upon the material, you 23 | # must distribute your contributions under the same license 24 | # as the original. 25 | # * No additional restrictions - You may not apply legal terms or technological 26 | # measures that legally restrict others from 27 | # doing anything the license permits. 28 | # ================================================================================== 29 | 30 | import image_sparser 31 | import frame_reader 32 | -------------------------------------------------------------------------------- /ASC/frame_reader.py: -------------------------------------------------------------------------------- 1 | # ================================================================================== 2 | # Copyright (c) 2016, Brain Corporation 3 | # 4 | # This software is released under Creative Commons 5 | # Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license. 6 | # Full text available here in LICENSE.TXT file as well as: 7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode 8 | # 9 | # In summary - you are free to: 10 | # 11 | # Share - copy and redistribute the material in any medium or format 12 | # Adapt - remix, transform, and build upon the material 13 | # 14 | # The licensor cannot revoke these freedoms as long as you follow the license terms. 15 | # 16 | # Under the following terms: 17 | # * Attribution - You must give appropriate credit, provide a link to the 18 | # license, and indicate if changes were made. You may do so 19 | # in any reasonable manner, but not in any way that suggests 20 | # the licensor endorses you or your use. 21 | # * NonCommercial - You may not use the material for commercial purposes. 22 | # * ShareAlike - If you remix, transform, or build upon the material, you 23 | # must distribute your contributions under the same license 24 | # as the original. 25 | # * No additional restrictions - You may not apply legal terms or technological 26 | # measures that legally restrict others from 27 | # doing anything the license permits. 
28 | # ================================================================================== 29 | import cv2 30 | import os.path 31 | import random 32 | 33 | 34 | class FrameReader(object): 35 | def __init__(self, video_list, step=1, cut_front=0000, cut_back=0000, randomize=False, shape=(100, 100)): 36 | self.shape = shape 37 | self.loaded = False 38 | self.randomize = randomize 39 | self.frames = [] 40 | self._frame_gen = self.load_frames(video_list, step, cut_front, cut_back) 41 | if randomize: 42 | for img in self._frame_gen: 43 | self.frames.append(img) 44 | random.shuffle(self.frames) 45 | self.loaded = True 46 | 47 | self.cnt = 0 48 | 49 | def read(self): 50 | if not self.loaded: 51 | try: 52 | img = next(self._frame_gen) 53 | self.frames.append(img) 54 | self.cnt += 1 55 | return img 56 | except StopIteration: 57 | self.loaded = True 58 | img = self.frames[self.cnt % len(self.frames)] 59 | 60 | self.cnt += 1 61 | return img 62 | 63 | def load_frames(self, video_list, step=1, cut_front=5000, cut_back=5000): 64 | frames = [] 65 | for file in video_list: 66 | try: 67 | from tracker_tools.tsartifacts import TSArtifacts 68 | ts = TSArtifacts() 69 | full_file = ts.get(file) 70 | if full_file is None: 71 | full_file = file 72 | except: 73 | print "Could not download training video", file, "using local copy instead." 74 | full_file = file 75 | if full_file[-4:] == '.pkl': 76 | from tracker_base.labeled_movie import FrameCollection 77 | fc = FrameCollection() 78 | 79 | fc.load_from_file(full_file) 80 | for i in xrange(cut_front/step, len(fc)/step-cut_back/step): 81 | img = fc.Frame(i*step).get_image() 82 | img = cv2.resize(img, dsize=(self.shape), interpolation=cv2.INTER_AREA) 83 | yield img 84 | else: 85 | if cut_back > 0: 86 | print "Warning, ignoring cut_back while reading non-pkl videos." 87 | 88 | assert os.path.exists(full_file), "Error, file not found: "+full_file 89 | # use opencv to read everything else 90 | cap = cv2.VideoCapture(full_file) 91 | for j in range(cut_front): # skip the first cut_front frames once, not on every iteration 92 | ret, img = cap.read() 93 | while(cap.isOpened()): 94 | for j in range(step): 95 | ret, img = cap.read() 96 | if not ret: 97 | break # end of video: stop so that read() falls back to the cached frames 98 | img = cv2.resize(img, dsize=(self.shape), interpolation=cv2.INTER_AREA) 99 | yield img 100 | --------------------------------------------------------------------------------
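FrameReader streams frames into the model and, once a source is exhausted, replays its cached frames in a loop. A minimal usage sketch (hypothetical file name; any video OpenCV can decode, or a labeled-movie .pkl, works):

    from ASC.frame_reader import FrameReader

    fr = FrameReader(video_list=['training.avi'], step=2, shape=(80, 80))
    for i in range(1000):
        img = fr.read()  # resized BGR frame; wraps around after the last frame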
/ASC/image_sparser.py: -------------------------------------------------------------------------------- 1 | # ================================================================================== 2 | # Copyright (c) 2016, Brain Corporation 3 | # 4 | # This software is released under Creative Commons 5 | # Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license. 6 | # Full text available here in LICENSE.TXT file as well as: 7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode 8 | # 9 | # In summary - you are free to: 10 | # 11 | # Share - copy and redistribute the material in any medium or format 12 | # Adapt - remix, transform, and build upon the material 13 | # 14 | # The licensor cannot revoke these freedoms as long as you follow the license terms. 15 | # 16 | # Under the following terms: 17 | # * Attribution - You must give appropriate credit, provide a link to the 18 | # license, and indicate if changes were made. You may do so 19 | # in any reasonable manner, but not in any way that suggests 20 | # the licensor endorses you or your use. 21 | # * NonCommercial - You may not use the material for commercial purposes. 22 | # * ShareAlike - If you remix, transform, or build upon the material, you 23 | # must distribute your contributions under the same license 24 | # as the original. 25 | # * No additional restrictions - You may not apply legal terms or technological 26 | # measures that legally restrict others from 27 | # doing anything the license permits. 28 | # ================================================================================== 29 | 30 | import os 31 | #os.putenv('OMP_WAIT_POLICY', 'active') 32 | import cv2 33 | import numpy as np 34 | import scipy.sparse 35 | import scipy.sparse.linalg 36 | import time 37 | import multiprocessing as mp 38 | import cPickle as pickle 39 | 40 | import pyximport 41 | pyximport.install(reload_support=True) 42 | import ASC_utils 43 | 44 | np.seterr(invalid='raise', divide='raise', over='raise') # generate exceptions when invalid computations (Nan,Inf) are encountered 45 | 46 | try: 47 | ASC_utils.set_flush_denormals() 48 | except: 49 | # Unable to set the flags 50 | print "Setting flush denormals CPU flag not available." 51 | 52 | 53 | class ImageSparser(object): 54 | def __init__(self, K, Xb, Yb, num_color=1, num_time=1, target_active_cnt=70, show_D=False, use_feedback=True, top_level=False, 55 | update_D_steps=1000, num_threads=-1, name="model", positive_input=False, use_neighborhood=5, max_epoch_size=np.Inf): 56 | self.K = K 57 | self.Xb = Xb 58 | self.Yb = Yb 59 | self.positive_input = positive_input 60 | if positive_input: 61 | self.num_color = num_color * 2 62 | else: 63 | self.num_color = num_color 64 | self.num_time = num_time 65 | self.m = self.Xb * self.Yb * self.num_color * self.num_time 66 | self.input_history = None 67 | self.D = np.asfortranarray(np.random.randint(low=0, high=256, size=(self.m, self.K)).astype(np.float)/255.0-0.5) 68 | self.D /= np.sqrt(np.sum(self.D**2, axis=0))+0.001 69 | self.DtD = np.dot(self.D.T, self.D) 70 | 71 | self.target_active_cnt = target_active_cnt # N from the paper 72 | self.A = 0 # matrix E from the paper 73 | self.B = 0 74 | self.dA = np.zeros((self.K+1, self.K+1)) 75 | self.dB = np.zeros((self.m, self.K+1)) 76 | self.A_cnt = 0 77 | self.dA_cnt = 0 78 | self.var_Complex = 0 79 | self.inv_std_Complex = None 80 | self.learning = True 81 | self.learn_Complex = True 82 | self.learn_Simple = True 83 | self.show_D = show_D 84 | self.step = 0 85 | self.update_D_steps = update_D_steps 86 | self.sparsify_time = np.zeros(3) 87 | self.last_update = 0 88 | self.check_point = True 89 | self.ave_cnts = target_active_cnt 90 | self.name = name 91 | self.ave_stop_l = 1.0 92 | self.cum_stop_l = 0.0 93 | self.stop_l_cnt = 0 94 | self.max_epoch_size = max_epoch_size 95 | 96 | self.num_threads_auto = (num_threads == -1) 97 | if self.num_threads_auto: 98 | num_threads = mp.cpu_count()/2 99 | assert num_threads > 0 100 | self.num_threads = num_threads 101 | 102 | self.inv_std_Simple = None 103 | self.inv_std_Simple_sp = None 104 | 105 | assert use_feedback is False or use_feedback == 1 or use_feedback == 4 106 | self.use_feedback = False if top_level else use_feedback 107 | assert use_neighborhood is False or use_neighborhood == 1 or use_neighborhood == 5 108 | if use_neighborhood == 4 and top_level: 109 | use_neighborhood = 1 110 | self.use_neighborhood = use_neighborhood 111 | self.prev_context = None 112 | self.C = None # delay creation of the C matrix until the size of the context input is known 113 | self.dC = 0 114 | self.dC_cnt = 0 115 | self.cum_Cerr = 0 116 | self.oldD = 0 117 | self.prev_Complex_alpha = None 118 | self._dC = None 119 |
120 | def __setstate__(self, dict): # called when unpickled 121 | """ __setstate__() is called during unpickling. Here we can convert old pickled formats to the current one.""" 122 | self.__dict__.update(dict) 123 | 124 | if 'num_C' in dict: 125 | self.num_color = self.num_C 126 | if 'num_T' in dict: 127 | self.num_time = self.num_T 128 | if 'S' in dict: 129 | self.C = self.S # C matrix used to be called S 130 | self._dC = self._dS 131 | self.dC_cnt = self.dS_cnt 132 | self.var_Complex = self.var_C 133 | if 'use_feedback' not in dict: 134 | self.use_feedback = (self.C.shape[0]/self.C.shape[1] == 7) 135 | if 'inv_std_S' in dict: 136 | self.inv_std_Simple = self.inv_std_S 137 | self.inv_std_Simple_sp = self.inv_std_S_sp 138 | self.inv_std_Complex = self.inv_std_C 139 | self.prev_Complex_alpha = self.prev_C_alpha 140 | if 'learn_D' in dict: 141 | self.learn_Simple = self.learn_D 142 | self.learn_Complex = self.learn_S 143 | if 'positive_input' not in dict: 144 | self.positive_input = False 145 | if 'use_neighborhood' not in dict: 146 | self.use_neighborhood = False 147 | self.dave_context = 0 148 | self.ave_context = 0 149 | self.prev_context = None 150 | if 'num_threads_auto' not in dict or self.num_threads_auto: 151 | num_threads = mp.cpu_count()/2 152 | self.num_threads = num_threads 153 | 154 | def tile_img(self, img=None): 155 | """ reorders and reshapes image into (self.Xb*self.Yb*self.num_color*self.num_time, num_tiles) """ 156 | if img is None: 157 | return self.tiled_img, self.tile_num_Xb, self.tile_num_Yb 158 | num_Xb = img.shape[0]/self.Xb 159 | num_Yb = img.shape[1]/self.Yb 160 | img = img.reshape((num_Xb, self.Xb, num_Yb, self.Yb, self.num_color*self.num_time)).transpose((1, 3, 4, 0, 2)).reshape((self.Xb*self.Yb*self.num_color*self.num_time, num_Xb*num_Yb)) 161 | return img, num_Xb, num_Yb 162 | 163 | def sparsify(self, img, context=None): 164 | if self.step == 0 and not self.learning: 165 | return None, None 166 | 167 | assert img.dtype == np.float 168 | 169 | t = time.time() 170 | 171 | if self.positive_input: 172 | img = np.concatenate((np.maximum(0, img), np.maximum(0, -img)), axis=img.ndim-1) 173 | 174 | if self.num_time > 1: 175 | if self.input_history is None: 176 | self.input_history = np.zeros(tuple(list(img.shape) + [self.num_time])) 177 | self.input_history[..., 1:] = self.input_history[..., 0:-1] * (1.0 - 1.0/self.num_time) 178 | self.input_history[..., 0] = img 179 | img = self.input_history.reshape((img.shape[0], img.shape[1], -1)) 180 | 181 | X, num_Xb, num_Yb = self.tile_img(img) 182 | self.tiled_img = X 183 | self.tile_num_Xb = num_Xb 184 | self.tile_num_Yb = num_Yb 185 | X = np.asfortranarray(X.astype(np.float)) 186 | 187 | # Adaptive Sparse Coding 188 | Simple_alpha, stop_l = ASC_utils.ASC(self.D, self.DtD, X, self.target_active_cnt, num_threads=self.num_threads, 189 | add_one=True, ave_stop_l=self.ave_stop_l, 190 | inv_std=None if self.learn_Simple and self.learning else self.inv_std_Simple) 191 | 192 | if self.learning and self.learn_Simple: 193 | self.cum_stop_l += stop_l 194 | self.stop_l_cnt += 1 195 | 196 | self.ave_cnts = self.ave_cnts * 0.9 + 0.1*np.sum(Simple_alpha.data>0)/Simple_alpha.shape[1] 197 | 198 | self.sparsify_time[0] += time.time()-t 199 | 200 | if self.learning: 201 | self.step += 1 202 | if self.learn_Simple and self.step % self.update_D_steps == 0: 203 | # if no unit has responded in the update period, fake a response to a single random input 204 | zero_inds = np.where(self.dA.diagonal()==0)[0] 205 | if len(zero_inds) > 0:
206 | m = Simple_alpha.max() 207 | rinds = np.random.randint(Simple_alpha.shape[1], size=len(zero_inds)) 208 | Simple_alpha[zero_inds, rinds] = m 209 | 210 | t2=time.time() 211 | if self.learn_Simple: 212 | if Simple_alpha.indices.size == Simple_alpha.shape[1]*(self.target_active_cnt+1): 213 | rows = Simple_alpha.indices.reshape((Simple_alpha.shape[1], self.target_active_cnt+1)) 214 | data = Simple_alpha.data.reshape((Simple_alpha.shape[1], self.target_active_cnt+1)) 215 | ASC_utils.parallel_dense_add_a_dot_at(self.dA, rows, data, num_threads=self.num_threads) 216 | self.dB = ASC_utils.parallel_dense_add_dense_dot_at(self.dB, X, rows, data, num_threads=self.num_threads) 217 | else: 218 | print self.step, "sparse matrix data is not rectangular, falling back to slower operation" 219 | self.dA = self.dA + Simple_alpha.dot(Simple_alpha.T) # sparse matrix does not have += operator 220 | self.dB += Simple_alpha.dot(X.T).T 221 | self.dA_cnt += Simple_alpha.shape[1] 222 | 223 | # if we don't even have the rates of each unit yet, don't compute Complex layer 224 | if self.inv_std_Simple is None: 225 | if self.learning and self.step % self.update_D_steps == 0: 226 | self.update_D() 227 | self.sparsify_time[1] += time.time()-t2 228 | self.sparsify_time[2] += time.time()-t 229 | self.prev_Complex_alpha = np.zeros(Simple_alpha.shape) 230 | return None, None 231 | 232 | if self.learn_Simple and self.learning: 233 | Simple_alpha = Simple_alpha.multiply(self.inv_std_Simple_sp) # normalization happens in ASC() after D is learned 234 | 235 | if self.use_neighborhood: 236 | if self.use_neighborhood == 5: 237 | sN = int(np.sqrt(self.prev_Complex_alpha.shape[1])) 238 | v_zeros = np.zeros((sN, 1, self.K+1)) 239 | h_zeros = v_zeros.transpose((1, 0, 2)) 240 | prev_Complex_alpha = self.prev_Complex_alpha.T.reshape((sN, sN, -1)) 241 | neighborhood_list = [Simple_alpha.T.toarray(), self.prev_Complex_alpha.T, 242 | np.concatenate((prev_Complex_alpha[1:, :, :], h_zeros), axis=0).reshape((sN*sN, -1)), 243 | np.concatenate((h_zeros, prev_Complex_alpha[:-1, :, :]), axis=0).reshape((sN*sN, -1)), 244 | np.concatenate((prev_Complex_alpha[:, 1:, :], v_zeros), axis=1).reshape((sN*sN, -1)), 245 | np.concatenate((v_zeros, prev_Complex_alpha[:, :-1, :]), axis=1).reshape((sN*sN, -1)) 246 | ] 247 | 248 | if context is not None: 249 | neighborhood_list.append(context) 250 | 251 | context = np.hstack(neighborhood_list) 252 | else: 253 | if context is None: 254 | context = np.hstack((Simple_alpha.T.toarray(), self.prev_Complex_alpha.T)) 255 | else: 256 | context = np.hstack((Simple_alpha.T.toarray(), self.prev_Complex_alpha.T, context)) 257 | else: 258 | if context is None: 259 | context = Simple_alpha.T.toarray() 260 | else: 261 | context = np.hstack((Simple_alpha.T.toarray(), context)) 262 | 263 | if self.C is None: 264 | self.C = np.asfortranarray(np.random.rand(context.shape[1], self.K+1)*0.0) 265 | 266 | if type(self.C) == int: 267 | # no complex weight matrix yet, need to wait for first update 268 | Complex_alpha = Simple_alpha.T.toarray() 269 | else: 270 | # use alpha.T.dot(self.C)).T because alpha is sparse and self.C is dense; dense.dot(sparse) doesn't work 271 | if scipy.sparse.issparse(context): 272 | Complex_alpha = np.array(context.dot(self.C)).T 273 | else: 274 | Complex_alpha = ASC_utils.parallel_dot(context, self.C, num_threads=self.num_threads).T 275 | Complex_alpha = np.maximum(0, Complex_alpha) 276 | 277 | if self.prev_context is None: 278 | self.prev_context = context 279 | 280 | if self.learning: 281 | if 
self.learn_Complex: 282 | if False and self.name[-1] == '1': 283 | ind = np.argmax(np.abs(Simple_alpha.toarray()-self.prev_Complex_alpha)) 284 | row = ind / Simple_alpha.shape[1] 285 | col = ind % Simple_alpha.shape[1] 286 | print row, col, Simple_alpha[row,col], self.prev_Complex_alpha[row,col], np.max(np.abs(self.C[:,row])) 287 | self.cum_Cerr += np.sum(np.mean(np.array(Simple_alpha-self.prev_Complex_alpha)**2, axis=0)**0.5) 288 | delta = (np.array(Simple_alpha - self.prev_Complex_alpha)*((self.prev_Complex_alpha>0)+0.01)).T/Complex_alpha.shape[1] 289 | if scipy.sparse.issparse(self.prev_context): 290 | dC = np.asfortranarray(self.prev_context.T.dot(delta)) 291 | else: 292 | self._dC = ASC_utils.parallel_dot(self.prev_context.T, delta, num_threads=self.num_threads, AX_order='F', AX=self._dC) 293 | dC = self._dC 294 | learning_rate = 1.0/(10000.0+self.step/10.0) 295 | self.C *= 1.0-learning_rate*0.00001 296 | diag = np.arange(0,self.C.shape[1]) 297 | self.C[diag,diag] *= 1.0-learning_rate*0.9 298 | max_abs = np.maximum(np.max(dC), -np.min(dC)) 299 | ASC_utils.add_scale(self.C, dC, learning_rate/np.maximum(1.0, max_abs+0.0000001)) 300 | self.dC_cnt += Complex_alpha.shape[1] 301 | self.var_Complex = self.var_Complex*(1.0-learning_rate) + learning_rate*np.mean(Complex_alpha**2, axis=1) 302 | self.inv_std_Complex = (1.0/(np.sqrt(self.var_Complex)+0.0000001)).reshape((self.K+1, 1)) 303 | 304 | self.sparsify_time[1] += time.time()-t2 305 | 306 | self.prev_context = context 307 | 308 | self.prev_Complex_alpha = Complex_alpha 309 | 310 | Complex_alpha = self.inv_std_Complex * Complex_alpha if self.inv_std_Complex is not None else Simple_alpha.toarray() 311 | 312 | if self.learning and self.step % self.update_D_steps == 0: 313 | self.update_D() 314 | 315 | self.sparsify_time[2] += time.time()-t 316 | return Simple_alpha, Complex_alpha 317 | 318 | def update_D(self): 319 | if self.learn_Simple: 320 | self.A_cnt = self.A_cnt/2.0 + self.dA_cnt 321 | self.A = np.array(self.A/2.0 + self.dA) 322 | var_S = np.diag(self.A).reshape(-1)/self.A_cnt 323 | self.inv_std_Simple = (1.0/(np.sqrt(var_S)+0.0000001)).reshape((self.K+1, 1)) #np.max(var_S)/(100000.0**2) 324 | self.inv_std_Simple_sp = scipy.sparse.csc_matrix(self.inv_std_Simple) 325 | 326 | self.B = self.B/2.0 + self.dB 327 | 328 | dA = np.diag(self.A)[0:self.K] 329 | A = scipy.sparse.csc_matrix(self.A) 330 | self.oldD = self.D.copy() 331 | for i in range(self.K): 332 | self.D[:, i] += (self.B[:, i]-np.array(A[0:self.K, i].T.dot(self.D.T)).reshape(-1))/(dA[i]+0.0000001) 333 | self.D[:, i] /= np.sqrt(np.sum(self.D[:, i]**2))+0.0000001 334 | self.DtD = np.dot(self.D.T, self.D) 335 | 336 | self.ave_stop_l = self.cum_stop_l / self.stop_l_cnt 337 | self.stop_l_cnt = 0 338 | self.cum_stop_l = 0.0 339 | 340 | print time.ctime(), self.step, "updating D, average FPS", (self.step - self.last_update)/self.sparsify_time[2], self.ave_cnts, "mean L2 D", np.mean((self.D-self.oldD)**2)**0.5, self.cum_Cerr/(self.dC_cnt+0.0001) 341 | 342 | self.dC_cnt = 0 343 | self.cum_Cerr = 0 344 | self.dA_cnt = 0 345 | self.dA = np.zeros((self.K+1, self.K+1)) 346 | self.dB = np.zeros((self.m, self.K+1)) 347 | self.last_update = self.step 348 | self.update_D_steps += np.minimum(self.max_epoch_size, self.update_D_steps / 10) 349 | self.sparsify_time[:] = 0 350 | 351 | if self.show_D: 352 | self.show() 353 | self.show(self.D-self.oldD, self.name+"_Diff_") 354 | 355 | if self.check_point: 356 | pickle.dump(self, open(self.name+".pkl", "w"), protocol=-1) 357 | 358 | def imgs_D(self, D): 
359 | if D is None: 360 | D = self.D 361 | W=int(np.sqrt(D.shape[1])) 362 | H=int(np.ceil(D.shape[1]/float(W))) 363 | 364 | dx = (self.Xb+1) 365 | dy = (self.Yb+1) 366 | imgs = [] 367 | for j in range(self.num_time): 368 | img = np.zeros((W*dx, H*dy, 3)) + np.max(D) 369 | for i in range(D.shape[1]): 370 | x=i / H 371 | y=i % H 372 | if self.num_color == 6: 373 | img6=D[:, i].reshape((self.Xb, self.Yb, self.num_color, self.num_time))[:, :, :, j] 374 | img[x*dx:(x+1)*dx-1, y*dy:(y+1)*dy-1, :] = img6[:, :, 0:3] - img6[:, :, 3:6] 375 | else: 376 | img[x*dx:(x+1)*dx-1, y*dy:(y+1)*dy-1, :] = D[:, i].reshape((self.Xb, self.Yb, self.num_color, self.num_time))[:, :, :, j] 377 | img /= np.maximum(np.abs(np.min(D)),np.max(D)) 378 | img += 1.0 379 | img /= 2.0/255 380 | img = img.astype(np.uint8) 381 | imgs.append(img) 382 | 383 | return imgs 384 | 385 | def show(self, D=None, prefix=None): 386 | if prefix is None: 387 | prefix = self.name+"_" 388 | if D is None: 389 | D = self.D 390 | imgs = self.imgs_D(D) 391 | for i, img in enumerate(imgs): 392 | cv2.imwrite("%s%d_%d.png" % (prefix, self.step, i), cv2.resize(img, (img.shape[0]*3, img.shape[1]*3), interpolation=cv2.INTER_NEAREST)) 393 | -------------------------------------------------------------------------------- /ASC/sparse_manager.py: -------------------------------------------------------------------------------- 1 | # ================================================================================== 2 | # Copyright (c) 2016, Brain Corporation 3 | # 4 | # This software is released under Creative Commons 5 | # Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license. 6 | # Full text available here in LICENSE.TXT file as well as: 7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode 8 | # 9 | # In summary - you are free to: 10 | # 11 | # Share - copy and redistribute the material in any medium or format 12 | # Adapt - remix, transform, and build upon the material 13 | # 14 | # The licensor cannot revoke these freedoms as long as you follow the license terms. 15 | # 16 | # Under the following terms: 17 | # * Attribution - You must give appropriate credit, provide a link to the 18 | # license, and indicate if changes were made. You may do so 19 | # in any reasonable manner, but not in any way that suggests 20 | # the licensor endorses you or your use. 21 | # * NonCommercial - You may not use the material for commercial purposes. 22 | # * ShareAlike - If you remix, transform, or build upon the material, you 23 | # must distribute your contributions under the same license 24 | # as the original. 25 | # * No additional restrictions - You may not apply legal terms or technological 26 | # measures that legally restrict others from 27 | # doing anything the license permits. 
28 | # ================================================================================== 29 | 30 | import cv2 31 | import os.path 32 | import numpy as np 33 | import cPickle as pickle 34 | import multiprocessing as mp 35 | from ASC.image_sparser import ImageSparser 36 | 37 | import pyximport 38 | pyximport.install(reload_support=True) 39 | import ASC_utils 40 | 41 | 42 | class SparseManager(object): 43 | def __init__(self, name='SparseManager', im_shape=(80, 80), have_display=True, feed_multiple=1): 44 | self.im_shape = im_shape 45 | self.Vs = [] 46 | self.done_priming = False 47 | self.priming_steps = 0 48 | self.num_resets = 0 49 | self.name = name 50 | self.have_display = have_display 51 | self.feed_multiple = feed_multiple 52 | self.prime_data = None 53 | self.prime_labels = None 54 | 55 | def __setstate__(self, dict): # called when unpickled 56 | self.__dict__.update(dict) 57 | 58 | if 'V1' in dict: 59 | self.Vs = [Vn for Vn in [self.V1, self.V2, self.V3, self.V4, self.V5] if Vn is not None] 60 | if 'feed_multiple' not in dict: 61 | self.feed_multiple = 1 62 | if 'prime_data' not in dict: 63 | self.prime_data = None 64 | self.prime_labels = None 65 | 66 | def create_V1(self, Xb=8, Yb=8, num_color=3, **kwargs): 67 | assert (self.im_shape[0]/Xb)*Xb==self.im_shape[0] 68 | assert (self.im_shape[1]/Yb)*Yb==self.im_shape[1] 69 | 70 | V1 = ImageSparser(name=self.name+"_V1", Xb=Xb, Yb=Yb, num_color=num_color, **kwargs) 71 | if len(self.Vs) < 1: 72 | self.Vs.append([]) 73 | self.Vs[0] = V1 74 | 75 | def create_Vn(self, n=2, **kwargs): 76 | assert len(self.Vs) >= n-1, "must create V%d before you create V%d" % (n-1, n) 77 | assert ((self.im_shape[0]/self.Vs[0].Xb) % 2**(n-1)) == 0, "number of X tiles must be multiple of %d" % (2**(n-1)) 78 | assert ((self.im_shape[1]/self.Vs[0].Yb) % 2**(n-1)) == 0, "number of Y tiles must be multiple of %d" % (2**(n-1)) 79 | 80 | Vn = ImageSparser(name=self.name+"_V%d"%(n), Xb=(self.Vs[0].K+1)*4, Yb=1, **kwargs) 81 | if len(self.Vs) < n: 82 | self.Vs.append([]) 83 | self.Vs[n-1] = Vn 84 | 85 | def create_all(self, num_levels=4, Xb=8, Yb=8, num_color=3, **kwargs): 86 | self.create_V1(Xb=Xb, Yb=Yb, num_color=num_color, **kwargs) 87 | 88 | for i in range(1, num_levels): 89 | n = i + 1 90 | self.create_Vn(n, top_level=n==num_levels, **kwargs) 91 | 92 | def load_all(self, base_filename): 93 | def get_filename(base_filename, n): 94 | return base_filename+"_V%d.pkl" % (n) 95 | 96 | self.Vs = [] 97 | n = 1 98 | while os.path.exists(get_filename(base_filename, n)): 99 | Vn = pickle.load(open(get_filename(base_filename, n), "r")) 100 | if n==1: 101 | assert (self.im_shape[0]/Vn.Xb)*Vn.Xb==self.im_shape[0] 102 | assert (self.im_shape[1]/Vn.Yb)*Vn.Yb==self.im_shape[1] 103 | else: 104 | assert ((self.im_shape[0]/self.Vs[0].Xb) % 2**(n-1)) == 0, "number of X tiles must be multiple of %d" % (2**(n-1)) 105 | assert ((self.im_shape[1]/self.Vs[0].Yb) % 2**(n-1)) == 0, "number of Y tiles must be multiple of %d" % (2**(n-1)) 106 | self.Vs.append(Vn) 107 | n += 1 108 | assert n > 1, "no level files found with base filename %s" % base_filename 109 | 110 | def stop_learning(self): 111 | for V in self.Vs: 112 | V.learning = False 113 | 114 | def group_NxN_input(self, im, N, numInput, numX, numY): 115 | return im.reshape((numInput, numX/N, N, numY/N, N)).transpose((2, 4, 0, 1, 3)).reshape((N*N*numInput, -1)) 116 |
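# Note: group_NxN_input() collapses each NxN block of tiles into a single column; the result has
# N*N*numInput rows and (numX/N)*(numY/N) columns. feed() calls it with N=2, so a 2x2 neighborhood
# of Complex responses becomes one input vector for the level above.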
117 | def gen_context(self, Vn, numX, numY): 118 | if Vn is None: 119 | return None 120 | 121 | if Vn.use_feedback == 4: # allow all 4 tiles to have different feedback weights, very expensive 122 | context = np.zeros((4, Vn.K+1, numX, numY)) 123 | if Vn.prev_Complex_alpha is not None: 124 | c = Vn.prev_Complex_alpha.reshape((Vn.K+1, numX/2, numY/2)) 125 | context[0, :, 0::2, 0::2] = c 126 | context[1, :, 1::2, 0::2] = c 127 | context[2, :, 0::2, 1::2] = c 128 | context[3, :, 1::2, 1::2] = c 129 | 130 | return context.reshape((4*(Vn.K+1), numX*numY)).T 131 | else: 132 | context = np.zeros((Vn.K+1, numX, numY)) 133 | if Vn.prev_Complex_alpha is not None: 134 | c = Vn.prev_Complex_alpha.reshape((Vn.K+1, numX/2, numY/2)) 135 | context[:, 0::2, 0::2] = c 136 | context[:, 1::2, 0::2] = c 137 | context[:, 0::2, 1::2] = c 138 | context[:, 1::2, 1::2] = c 139 | 140 | return context.reshape((Vn.K+1, numX*numY)).T 141 | 142 | def feed(self, im): 143 | assert im.dtype == np.uint8 144 | im = cv2.resize(im, dsize=self.im_shape, interpolation=cv2.INTER_AREA) 145 | im = im - 127.5 146 | 147 | ss = [None]*len(self.Vs) 148 | cs = [None]*len(self.Vs) 149 | inp = im 150 | for i, Vn in enumerate(self.Vs): 151 | n = i + 1 152 | if self.Vs[i].use_feedback and (i+1) < len(self.Vs): 153 | context = self.gen_context(self.Vs[i+1], self.im_shape[0]/self.Vs[0].Xb/(2**i), self.im_shape[1]/self.Vs[0].Yb/(2**i)) 154 | else: 155 | context = None # top level doesn't have any feedback 156 | s, c = self.Vs[i].sparsify(inp, context=context) 157 | ss[i] = s 158 | cs[i] = c 159 | 160 | if c is None: # sparsify returns None if a layer isn't trained enough to return a response 161 | break 162 | if n < len(self.Vs): 163 | # input for next level 164 | inp = self.group_NxN_input(c, 2, self.Vs[0].K+1, self.im_shape[0]/self.Vs[0].Xb/(2**i), self.im_shape[1]/self.Vs[0].Yb/(2**i)) 165 | 166 | return ss, cs 167 | 168 | def prime(self, im, priming_mask, debug=False): 169 | self.priming_steps += 1 170 | assert not self.done_priming 171 | 172 | if self.have_display and debug: 173 | cv2.imshow("im", im) 174 | 175 | for i in range(self.feed_multiple): 176 | ss, cs = self.feed(im) 177 | 178 | if self.prime_data is None: 179 | self.prime_data = [[] for v in self.Vs] 180 | self.prime_labels = [[] for v in self.Vs] 181 | 182 | V1_Xb = self.Vs[0].Xb 183 | V1_Yb = self.Vs[0].Yb 184 | num_Xb = self.im_shape[0]/V1_Xb 185 | num_Yb = self.im_shape[1]/V1_Yb 186 | for i, c in enumerate(cs): 187 | n = i + 1 188 | s = 2**i 189 | mask = (cv2.resize(priming_mask, dsize=(self.im_shape[0]/s, self.im_shape[1]/s), interpolation=cv2.INTER_AREA) > 127)*1.0 190 | tmp_im = cv2.resize(im, dsize=(self.im_shape[0]/s, self.im_shape[1]/s), interpolation=cv2.INTER_AREA) 191 | 192 | Vn_mask = mask.reshape((num_Xb/s, V1_Xb, num_Yb/s, V1_Yb, 1)).transpose((1, 3, 4, 0, 2)).reshape((V1_Xb*V1_Yb, num_Xb*num_Yb/s/s)) 193 | tmp_im = tmp_im.reshape((num_Xb/s, V1_Xb, num_Yb/s, V1_Yb, 3)).transpose((1, 3, 4, 0, 2)).reshape((V1_Xb*V1_Yb*3, num_Xb*num_Yb/s/s)) 194 | 195 | for j in range(c.shape[1]): 196 | self.prime_data[i].append(c[:,j]) 197 | self.prime_labels[i].append(Vn_mask[:,j]) 198 | 199 | if self.have_display and debug: 200 | cv2.waitKey(10) 201 | 202 | def train_tracker(self): 203 | self.done_priming = True 204 | self.heatmap_models = [] 205 | # smallest_len = np.min([len(d) for d in self.prime_data]) 206 | smallest_len = 1024 207 | for i in range(len(self.Vs)): 208 | num_threads = mp.cpu_count()/2 209 | 210 | block_size = 1024 211 | SI = 0 212 | StS = 0 213 | for j in range(int(np.ceil(len(self.prime_data[i])/float(block_size)))): 214 | inds = np.arange(j*block_size, np.minimum(len(self.prime_data[i]),(j+1)*block_size), dtype=np.int32) 215 | data = np.array([self.prime_data[i][ind] for ind in inds]).T # (K+1) x num_samples 216 | labels = np.array([self.prime_labels[i][ind] for ind in inds]).T # num_positions x num_samples 217 | SI = SI + ASC_utils.parallel_dot(data, labels.T, num_threads=num_threads) 218 | StS = StS + ASC_utils.parallel_dot(data, data.T, num_threads=num_threads) 219 | W = np.asfortranarray(np.linalg.solve((StS+np.eye(StS.shape[0])*0.000001), SI))
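# The accumulators above are SI = sum(data.dot(labels.T)) and StS = sum(data.dot(data.T)), so
# this solve() is ridge-regularized least squares, W = (StS + eps*I)^-1 SI: a closed-form linear
# map from sparse codes to priming-mask targets, which the gradient loop below then refines.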
220 | 221 | # W = np.asfortranarray(np.random.randn(self.Vs[i].K+1, len(self.prime_labels[i][0]))/self.Vs[i].K/100) 222 | dW = None 223 | small_step_cnt = 0 224 | 225 | prev_mean_error = np.inf 226 | learning_rate = 0.05 227 | improved_last = 0 228 | for k in range(50): 229 | if k-improved_last>5: 230 | break 231 | inds = np.random.permutation(len(self.prime_data[i]))[0:smallest_len] 232 | data = np.array([self.prime_data[i][ind] for ind in inds]).T # (K+1) x num_samples 233 | labels = np.array([self.prime_labels[i][ind] for ind in inds]).T # num_positions x num_samples 234 | 235 | inds2 = np.random.permutation(len(self.prime_data[i]))[0:smallest_len*16] 236 | data2 = np.array([self.prime_data[i][ind] for ind in inds2]).T # (K+1) x num_samples 237 | labels2 = np.array([self.prime_labels[i][ind] for ind in inds2]).T # num_positions x num_samples 238 | 239 | # normalize the data 240 | # data /= np.sqrt(np.mean(data**2, axis=1, keepdims=True)) + 0.1/smallest_len 241 | # labels /= np.mean(labels, axis=1, keepdims=True) + 0.1/smallest_len 242 | 243 | prev_W = W.copy(order='F') 244 | a2 = ASC_utils.parallel_dot(W.T, data2, num_threads=num_threads) 245 | a2 = np.maximum(0, a2)# + np.minimum(0, a)*0.01 246 | e2 = labels2 - a2 247 | 248 | mean_error = np.mean(e2**2) 249 | for j in range(100): 250 | a = ASC_utils.parallel_dot(W.T, data, num_threads=num_threads) 251 | a = np.maximum(0, a)# + np.minimum(0, a)*0.01 252 | e = labels - a 253 | 254 | d = e / data.shape[1] * ((a>=0) + 0.01)#*(a<0)) 255 | dW = ASC_utils.parallel_dot(data, d.T, num_threads=num_threads, AX_order='F', AX=dW) 256 | prev_W = W.copy(order='F') 257 | ASC_utils.add_scale(W, dW, learning_rate) 258 | 259 | a2 = ASC_utils.parallel_dot(W.T, data2, num_threads=num_threads) 260 | a2 = np.maximum(0, a2)# + np.minimum(0, a)*0.01 261 | e2 = labels2 - a2 262 | 263 | mean_error = np.mean(e2**2) 264 | if prev_mean_error - mean_error < 0.00001: 265 | if j == 0: 266 | learning_rate /= 2 267 | else: 268 | improved_last = k 269 | # print k, j, data.shape[1], prev_mean_error, learning_rate 270 | W = prev_W.copy(order='F') 271 | break 272 | else: 273 | prev_mean_error = mean_error 274 | 275 | 276 | a = np.minimum(1,np.maximum(0, ASC_utils.parallel_dot(W.T, data, num_threads=num_threads))) 277 | l = ((labels>0)*1.0) 278 | 279 | V1_Xb = self.Vs[0].Xb 280 | V1_Yb = self.Vs[0].Yb 281 | a = a.reshape((V1_Xb, V1_Yb, -1)).transpose((2, 0, 1)).reshape((-1, V1_Yb))[:,:,None][:,:,[0, 0, 0]] 282 | l = l.reshape((V1_Xb, V1_Yb, -1)).transpose((2, 0, 1)).reshape((-1, V1_Yb))[:,:,None][:,:,[0, 0, 0]] 283 | 284 | self.heatmap_models.append(W) 285 | self.prime_data = None 286 | self.prime_labels = None 287 | 288 | def track(self, im, debug=False): 289 | if not self.done_priming: 290 | self.train_tracker() 291 | 292 | for i in range(self.feed_multiple): 293 | ss, cs = self.feed(im) 294 | self.tracking_steps += 1 295 | 296 | heatmaps = [] 297 | 298 | V1_Xb = self.Vs[0].Xb 299 | V1_Yb = self.Vs[0].Yb 300 | num_Xb = self.im_shape[0]/V1_Xb 301 | num_Yb = self.im_shape[1]/V1_Yb 302 | for i, c in enumerate(cs): 303 | n = i + 1 304 | s = 2**i
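# (The per-level readout W maps the Complex code c back to per-tile mask values; the reshape and
#  transpose below lay the V1_Xb x V1_Yb tile patches back out as a full image before upscaling.)
305 |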
Vn_heatmap = np.maximum(0, ASC_utils.parallel_dot(self.heatmap_models[i].T, c)) 306 | Vn_heatmap_im = Vn_heatmap.reshape((V1_Xb, V1_Yb, 1, num_Xb/s, num_Yb/s)).transpose((3, 0, 4, 1, 2)).reshape((num_Xb*V1_Xb/s, num_Yb*V1_Yb/s)) 307 | Vn_heatmap_im = cv2.resize(Vn_heatmap_im, self.im_shape, interpolation=cv2.INTER_NEAREST) 308 | heatmaps.append(Vn_heatmap_im) 309 | 310 | if len(heatmaps) >= 4 and debug: 311 | heatmaps_im = np.minimum(1.0, np.maximum(0.0, 0.5*np.hstack((np.vstack((heatmaps[0], heatmaps[1])), np.vstack((heatmaps[2], heatmaps[3]))))[..., None])) 312 | if self.have_display: 313 | cv2.imshow("im_att", np.hstack((np.vstack((im/255.0, np.zeros(im.shape))), heatmaps_im[:,:,[0, 0, 0]]))) 314 | cv2.imwrite("im_att%d_%04d.png" % (self.num_resets, self.tracking_steps), (np.hstack((np.vstack((im/255.0, np.zeros(im.shape))), heatmaps_im[:,:,[0, 0, 0]]))*255).astype(np.uint8)) 315 | 316 | return heatmaps 317 | 318 | def reset_priming(self): 319 | self.priming_steps = 0 320 | self.tracking_steps = 0 321 | if 'num_resets' not in self.__dict__: 322 | self.num_resets = 0 323 | else: 324 | self.num_resets += 1 325 | self.done_priming = False 326 | self.attention_models = [] 327 | 328 | def set_num_threads(self, num_threads=-1): 329 | if num_threads == -1: 330 | num_threads = mp.cpu_count()/2 331 | 332 | for Vn in self.Vs: 333 | Vn.num_threads = num_threads 334 | 335 | def save(self, filename=None): 336 | if filename is None: 337 | filename = self.name+".pkl" 338 | pickle.dump(self, open(filename, 'w'), protocol=-1) 339 | -------------------------------------------------------------------------------- /ASC/train_sparse_coding.py: -------------------------------------------------------------------------------- 1 | # ================================================================================== 2 | # Copyright (c) 2016, Brain Corporation 3 | # 4 | # This software is released under Creative Commons 5 | # Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license. 6 | # Full text available here in LICENSE.TXT file as well as: 7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode 8 | # 9 | # In summary - you are free to: 10 | # 11 | # Share - copy and redistribute the material in any medium or format 12 | # Adapt - remix, transform, and build upon the material 13 | # 14 | # The licensor cannot revoke these freedoms as long as you follow the license terms. 15 | # 16 | # Under the following terms: 17 | # * Attribution - You must give appropriate credit, provide a link to the 18 | # license, and indicate if changes were made. You may do so 19 | # in any reasonable manner, but not in any way that suggests 20 | # the licensor endorses you or your use. 21 | # * NonCommercial - You may not use the material for commercial purposes. 22 | # * ShareAlike - If you remix, transform, or build upon the material, you 23 | # must distribute your contributions under the same license 24 | # as the original. 25 | # * No additional restrictions - You may not apply legal terms or technological 26 | # measures that legally restrict others from 27 | # doing anything the license permits. 
28 | # ================================================================================== 29 | 30 | import sys 31 | import numpy as np 32 | import cPickle as pickle 33 | from ASC.sparse_manager import SparseManager 34 | from ASC.frame_reader import FrameReader 35 | 36 | 37 | def run(prefix, video_list, im_shape=(80,80), profiling=False, **kwargs): 38 | 39 | SM = SparseManager(prefix, im_shape=im_shape) 40 | SM.create_all(**kwargs) 41 | 42 | FR = FrameReader(video_list=video_list, step=1, randomize=False, shape=(SM.im_shape[0], SM.im_shape[1])) 43 | for i in range(3000000): 44 | SM.feed(FR.read()) 45 | if profiling and i==15000: 46 | break 47 | SM.save() 48 | 49 | IS=SM.Vs[0] 50 | for i in range(0,IS.K,IS.K/20): 51 | inds = np.argsort(-IS.C[0:IS.K,i]) 52 | IS.show((IS.D*np.abs(IS.C[0:IS.K,i]))[:,inds],prefix+"_C_"+str(i)+"_") 53 | IS.show(IS.D[:,inds[0:16]],prefix+"_C16_"+str(i)+"_") 54 | 55 | 56 | if __name__ == '__main__': 57 | if len(sys.argv) == 1: 58 | video_list = ('DARPA/TrainingData/Parrots.pkl', 'DARPA/TrainingData/AmazingLife.pkl', 'DARPA/TrainingData/HoneyBadgers.pkl') 59 | max_epoch_size=300000 # how many frames before the stimulus repeats 60 | else: 61 | video_list = [sys.argv[i] for i in range(1, len(sys.argv)-1)] 62 | max_epoch_size=int(sys.argv[-1]) # how many frames before the stimulus repeats 63 | 64 | profiling = False 65 | num_levels = 4 # how many levels to create 66 | K = 400 # number of units in Simple and Complex layers shared across all levels 67 | target_active_cnt = 70 68 | Xb = 10 # tile width in pixels 69 | Yb = 10 # tile height in pixels 70 | im_shape = (Xb*(2**(num_levels-1)), Yb*(2**(num_levels-1))) # input image size 71 | positive_input = False # when True, generate a 6-channel retina instead of 3 72 | use_neighborhood = 5 # can be False for no lateral input, 1 to receive input only from the previous Complex layer, or 5 to receive input from the 5-neighborhood 73 | use_feedback = 1 # allow top-down connections, can be False, 1 or 4 74 | num_time = 1 # set to higher than 1 to learn motion 75 | prefix = "ASC_tiles%dx%d_px%dx%d_V%d_K%d" % (im_shape[0]/Xb, im_shape[1]/Yb, Xb, Yb, num_levels, K) 76 | 77 | if use_neighborhood is not None: 78 | prefix = prefix + "_N%d" % (use_neighborhood) 79 | if use_feedback: 80 | prefix = prefix + "_feedback" 81 | 82 | if not profiling: 83 | run(prefix=prefix, im_shape=im_shape, video_list=video_list, K=K, num_time=num_time, target_active_cnt=target_active_cnt, Xb=Xb, Yb=Yb, positive_input=positive_input, 84 | num_levels=num_levels, use_neighborhood=use_neighborhood, use_feedback=use_feedback, max_epoch_size=max_epoch_size) 85 | else: 86 | import line_profiler 87 | SM = SparseManager() 88 | SM.create_V1(K=K) # need to create a dummy V1 so we can get a handle to the sparsify method 89 | 90 | lp = line_profiler.LineProfiler(SM.Vs[0].sparsify, SM.feed) 91 | lp.run('run(profiling=True, prefix="profiling", im_shape=im_shape, video_list=video_list, K=K, num_time=num_time, target_active_cnt=target_active_cnt, Xb=Xb, Yb=Yb, positive_input=positive_input, num_levels=num_levels, use_neighborhood=use_neighborhood, use_feedback=use_feedback, max_epoch_size=max_epoch_size)') 92 | lp.print_stats() 93 | -------------------------------------------------------------------------------- /LICENSE.TXT: -------------------------------------------------------------------------------- 1 | 2 | License 3 | 4 | THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE").
THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. 5 | 6 | BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. 7 | 8 | 1. Definitions 9 | 10 | "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with one or more other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. 11 | "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 12 | "Licensor" means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License. 13 | "Original Author" means the individual, individuals, entity or entities who created the Work. 14 | "Work" means the copyrightable work of authorship offered under the terms of this License. 15 | "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. 16 | "License Elements" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, Noncommercial, ShareAlike. 17 | 18 | 2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. 19 | 20 | 3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: 21 | 22 | to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; 23 | to create and reproduce Derivative Works provided that any such Derivative Work, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. 
For example, a translation could be marked "The original work was translated from English to Spanish," or a modification could indicate "The original work has been modified."; 24 | to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; 25 | to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works; 26 | 27 | The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved, including but not limited to the rights set forth in Sections 4(e) and 4(f). 28 | 29 | 4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: 30 | 31 | You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of a recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. When You distribute, publicly display, publicly perform, or publicly digitally perform the Work, You may not impose any technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by Section 4(d), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by Section 4(d), as requested. 32 | You may distribute, publicly display, publicly perform, or publicly digitally perform a Derivative Work only under: (i) the terms of this License; (ii) a later version of this License with the same License Elements as this License; or, (iii) either the unported Creative Commons license or a Creative Commons license for another jurisdiction (either this or a later license version) that contains the same License Elements as this License (e.g. Attribution-NonCommercial-ShareAlike 3.0 (Unported)) ("the Applicable License"). You must include a copy of, or the Uniform Resource Identifier for, the Applicable License with every copy or phonorecord of each Derivative Work You distribute, publicly display, publicly perform, or publicly digitally perform. 
You may not offer or impose any terms on the Derivative Works that restrict the terms of the Applicable License or the ability of a recipient of the Work to exercise the rights granted to that recipient under the terms of the Applicable License. You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties. When You distribute, publicly display, publicly perform, or publicly digitally perform the Derivative Work, You may not impose any technological measures on the Derivative Work that restrict the ability of a recipient of the Derivative Work from You to exercise the rights granted to that recipient under the terms of the Applicable License. This Section 4(b) applies to the Derivative Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Derivative Work itself to be made subject to the terms of the Applicable License. 33 | You may not exercise any of the rights granted to You in Section 3 above in any manner that is primarily intended for or directed toward commercial advantage or private monetary compensation. The exchange of the Work for other copyrighted works by means of digital file-sharing or otherwise shall not be considered to be intended for or directed toward commercial advantage or private monetary compensation, provided there is no payment of any monetary compensation in connection with the exchange of copyrighted works. 34 | If You distribute, publicly display, publicly perform, or publicly digitally perform the Work (as defined in Section 1 above) or any Derivative Works (as defined in Section 1 above) or Collective Works (as defined in Section 1 above), You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution ("Attribution Parties") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and, consistent with Section 3(b) in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). The credit required by this Section 4(d) may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear, if a credit for all contributing authors of the Derivative Work or Collective Work appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. 
For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties. 35 | 36 | For the avoidance of doubt, where the Work is a musical composition: 37 | Performance Royalties Under Blanket Licenses. Licensor reserves the exclusive right to collect whether individually or, in the event that Licensor is a member of a performance rights society (e.g. ASCAP, BMI, SESAC), via that society, royalties for the public performance or public digital performance (e.g. webcast) of the Work if that performance is primarily intended for or directed toward commercial advantage or private monetary compensation. 38 | Mechanical Rights and Statutory Royalties. Licensor reserves the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions), if Your distribution of such cover version is primarily intended for or directed toward commercial advantage or private monetary compensation. 39 | Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor reserves the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions), if Your public digital performance is primarily intended for or directed toward commercial advantage or private monetary compensation. 40 | 41 | 5. Representations, Warranties and Disclaimer 42 | 43 | UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND ONLY TO THE EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK BY THE LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MARKETABILITY, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. 44 | 45 | 6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 46 | 47 | 7. Termination 48 | 49 | This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. 
Individuals or entities who have received Derivative Works (as defined in Section 1 above) or Collective Works (as defined in Section 1 above) from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. 50 | Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. 51 | 52 | 8. Miscellaneous 53 | 54 | Each time You distribute or publicly digitally perform the Work (as defined in Section 1 above) or a Collective Work (as defined in Section 1 above), the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. 55 | Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. 56 | If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 57 | No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. 58 | This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. 59 | 60 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Adaptive Sparse Coding Cortical Model 2 | 3 | ## Introduction 4 | This repository contains the core code used for the model published in the paper "Fundamental principles of cortical computation: unsupervised learning with prediction, compression and feedback". 5 | 6 | ## Starting up 7 | 8 | 9 | Make sure that virtualenv is installed on your system first. 
10 | Then clone the repo and run the following commands:
11 |
12 | ```
13 | git clone git@github.com:braincorp/ASC
14 | cd ASC
15 |
16 | sudo ./install_ubuntu_dependencies.sh
17 |
18 | ./setup.sh
19 |
20 | source venv/bin/activate
21 | python ASC/train_sparse_coding.py [list of video files separated by spaces] [max epoch size: how many frames before the stimulus repeats]
22 |
23 | ```
24 |
25 |
--------------------------------------------------------------------------------
/install_ubuntu_dependencies.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | apt-get -y install python-opencv python-pip python-virtualenv python-dev libblas-dev liblapack-dev
3 | apt-get -y install gcc g++ gfortran
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # pip requirements file
2 | numpy>=1.8.0
3 | cython>=0.23.0
4 | scipy>=0.17.0
5 | ipython
6 | # multiprocessing is part of the Python 2.6+ standard library and does not need to be pip-installed
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # ==================================================================================
2 | # Copyright (c) 2016, Brain Corporation
3 | #
4 | # This software is released under Creative Commons
5 | # Attribution-NonCommercial-ShareAlike 3.0 (BY-NC-SA) license.
6 | # Full text available here in LICENSE.TXT file as well as:
7 | # https://creativecommons.org/licenses/by-nc-sa/3.0/us/legalcode
8 | #
9 | # In summary - you are free to:
10 | #
11 | # Share - copy and redistribute the material in any medium or format
12 | # Adapt - remix, transform, and build upon the material
13 | #
14 | # The licensor cannot revoke these freedoms as long as you follow the license terms.
15 | #
16 | # Under the following terms:
17 | # * Attribution - You must give appropriate credit, provide a link to the
18 | # license, and indicate if changes were made. You may do so
19 | # in any reasonable manner, but not in any way that suggests
20 | # the licensor endorses you or your use.
21 | # * NonCommercial - You may not use the material for commercial purposes.
22 | # * ShareAlike - If you remix, transform, or build upon the material, you
23 | # must distribute your contributions under the same license
24 | # as the original.
25 | # * No additional restrictions - You may not apply legal terms or technological
26 | # measures that legally restrict others from
27 | # doing anything the license permits.
28 | # ==================================================================================
29 | from setuptools import setup, find_packages
30 |
31 |
32 | setup(
33 | name='ASC',
34 | author='Brain Corporation',
35 | author_email='richert@braincorporation.com',
36 | url='https://github.com/braincorp/ASC',
37 | long_description='',
38 | version='1.0',
39 | packages=find_packages(),
40 | include_package_data=True,
41 | install_requires=[])
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | virtualenv --system-site-packages ./venv
3 | source venv/bin/activate
4 | pip install -r requirements.txt
5 | python setup.py install
--------------------------------------------------------------------------------
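
A note on reusing a trained model: `SparseManager.save()` pickles the entire manager to `<name>.pkl`, so the hierarchy can be reloaded later for inspection or further training. Below is a minimal, illustrative sketch only — the filename is a hypothetical example of the prefix produced by train_sparse_coding.py, and it uses just the attributes that script itself relies on (`Vs`, `K`, `D`, `show`, `set_num_threads`, `feed`); it is not part of the repository.

```
import cPickle as pickle

# NOTE: the ASC package must be importable (e.g. run from the repo root inside
# the venv), since pickle stores classes by their module path.
# The filename below is an example; save() writes the manager's name + ".pkl".
with open("ASC_tiles8x8_px10x10_V4_K400_N5_feedback.pkl", "rb") as f:
    SM = pickle.load(f)

SM.set_num_threads()   # default of -1 uses half of the available cores
IS = SM.Vs[0]          # first (V1) level of the hierarchy
print "V1 has %d units; dictionary shape %s" % (IS.K, str(IS.D.shape))
IS.show(IS.D, "loaded_D_")  # dump dictionary elements as images, as run() does
```

Because the whole object graph is restored, the loaded manager should also accept further frames via `SM.feed()` exactly as during training.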