├── .gitignore ├── old ├── experience_map.py ├── pose_cell_network.py ├── ratslam.py ├── simple_vision.py ├── sketch.py ├── video_source.py └── visual_odometer.py ├── ratslam ├── README ├── __init__.py ├── experience_map.py ├── pose_cell_network.py ├── simulation.py ├── video.py ├── view_templates.py └── visual_odometer.py ├── scripts └── ratslam └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /old/experience_map.py: -------------------------------------------------------------------------------- 1 | 2 | class Experience: 3 | "A point in the experience map" 4 | 5 | def __init__(self, id, x_pc, y_pc, th_pc, x_m, y_m, facing_rad, vt_id): 6 | self.id = id 7 | self.x_pc = x_pc 8 | self.y_pc = y_pc 9 | self.th_pc = th_pc 10 | self.x_m = x_m 11 | self.y_m = y_m 12 | self.facing_rad = facing_rad 13 | self.vt_id = vt_id 14 | 15 | self.links = [] 16 | 17 | def add_link(self, target, d, heading_rad, facing_rad): 18 | link = ExperienceLink(target, d, heading_rad, facing_rad) 19 | self.links.append(links) 20 | 21 | class ExperienceLink: 22 | "A link between two Experience objects" 23 | 24 | def __init__(self, target, d, heading_rad, facing_rad): 25 | self.target = target 26 | self.d = d 27 | self.heading_rad = heading_rad 28 | self.facing_rad = facing_rad 29 | 30 | class ExperienceMap: 31 | 32 | def __init__(self, pose_cell_network, view_templates, **kwargs): 33 | self.exps = [] 34 | self.curr_exp_id = 0 35 | self.pc_network_shape = pose_cell_network.shape 36 | self.view_templates = view_templates 37 | 38 | self.delta_pc_threshold = kwargs.pop("delta_pc_threshold", 1.0) 39 | self.correction_factor = kwargs.pop("correction_factor", 0.5) 40 | self.exp_loops = kwargs.pop("exp_loops", 100) 41 | 42 | def update(self, vt_id, trans_est, rot_est, pose_cell_packet_center): 43 | """ Update the experience map 44 | 45 | (Matlab version: 
equivalent to rs_experience_map_iteration) 46 | """ 47 | (x_pc, y_pc, th_pc) = pose_cell_packet_center 48 | vt = self.view_templates[vt_id] 49 | 50 | # integrate the delta x, y, facing 51 | accum_delta_facing = self.clip_rad_180(accum_delta_facing + rot_est); 52 | accum_delta_x = accum_delta_x + trans_est * cos(accum_delta_facing); 53 | accum_delta_y = accum_delta_y + trans_est * sin(accum_delta_facing); 54 | 55 | delta_pc = sqrt( self.min_delta( self.exps[curr_exp_id].x_pc, 56 | x_pc, self.pc_network_shape[0])**2 + \ 57 | self.min_delta( self.exps[curr_exp_id].y_pc, 58 | y_pc, self.pc_network_shape[1])**2 + \ 59 | self.min_delta( self.exps[curr_exp_id].th_pc, 60 | th_pc, self.pc_network_shape[2])**2 ) 61 | 62 | # if the vt is new or the pc x,y,th has changed enough create a new 63 | # experience 64 | if (vt.numexps == 0) or (delta_pc > self.delta_pc_threshold): 65 | 66 | self.add_new_exp(self.curr_exp_id, x_pc, y_pc, th_pc, vt_id) 67 | 68 | self.prev_exp_id = self.curr_exp_id 69 | self.curr_exp_id = self.numexps 70 | 71 | accum_delta_x = 0 72 | accum_delta_y = 0 73 | accum_delta_facing = self.exps[self.curr_exp_id].facing_rad 74 | 75 | curr_exp = self.exps[self.curr_exp_id] 76 | 77 | elif vt_id != prev_vt_id: 78 | 79 | # find the exp associated with the current vt and that is under the 80 | # threshold distance to the centre of pose cell activity 81 | # if multiple exps are under the threshold then don't match (to 82 | # reduce hash collisions) 83 | matched_exp_id = 0 84 | matched_exp_count = 0 85 | 86 | for search_id in range(0, vt.numexps): 87 | search_exp = self.exps[vt.exps[search_id].id] 88 | search_x_pc = search_exp.x_pc 89 | search_y_pc = search_exp.y_pc 90 | search_th_pc = search_exp.th_pc 91 | delta_pc[search_id] = \ 92 | sqrt(self.min_delta( search_x_pc, 93 | x_pc, self.pc_network_shape[0])**2+\ 94 | self.min_delta( search_y_pc, 95 | y_pc, self.pc_network_shape[1])**2+\ 96 | self.min_delta( search_th_pc, 97 | th_pc, self.pc_network_shape[2])**2) 98 | 99 
| if delta_pc[search_id] < self.delta_pc_threshold: 100 | matched_exp_count += 1; 101 | 102 | if matched_exp_count > 1: 103 | # this means we aren't sure about which experience is a match due 104 | # to hash table collision 105 | # instead of a false posivitive which may create blunder links in 106 | # the experience map keep the previous experience 107 | # matched_exp_count 108 | pass 109 | else: 110 | min_delta = min(delta_pc) 111 | min_delta_id = argmin(delta_pc) 112 | 113 | if min_delta < self.delta_pc_threshold: 114 | 115 | matched_exp_id = vt.exps[min_delta_id].id 116 | 117 | # see if the prev exp already has a link to the current exp 118 | link_exists = False 119 | for link_id in range(0, len(self.exps[curr_exp_id].links)): 120 | if curr_exp.links[link_id].exp_id == matched_exp_id: 121 | link_exists = True 122 | break 123 | 124 | # if we didn't find a link then create the link between 125 | # current experience and the expereince for the current vt 126 | if not link_exists: 127 | 128 | d = sqrt(accum_delta_x**2 + accum_delta_y**2) 129 | heading_rad = self.signed_delta_rad(curr_exp.facing_rad, 130 | arctan2(accum_delta_y, 131 | accum_delta_x)) 132 | facing_rad = self.signed_delta_rad(curr_exp.facing_rad, 133 | accum_delta_facing) 134 | curr_exp.add_link(self.exps[matched_expid], 135 | d, heading_rad, facing_rad) 136 | 137 | if matched_exp_id == 0: 138 | newexp = self.add_new_exp(curr_exp_id,x_pc,y_pc,th_pc,vt_id) 139 | matched_exp_id = newexp.id 140 | 141 | self.prev_exp_id = self.curr_exp_id 142 | self.curr_exp_id = matched_exp_id 143 | 144 | accum_delta_x = 0 145 | accum_delta_y = 0 146 | accum_delta_facing = self.exps[curr_exp_id].facing_rad 147 | 148 | for i in range(0, self.exp_loops): 149 | # loop through experiences 150 | for exp_id in range(0, self.numexps): 151 | this_exp = self.exps[exp_id] 152 | for link_id in range(0, this_exp.numlinks): 153 | # experience 0 has a link to experience 1 154 | e0 = exp_id 155 | e1 = this_exp.links[link_id].exp_id 156 
| linked_exp = self.exps[e1] 157 | 158 | # work out where e0 thinks e1 (x,y) should be based on 159 | # the stored link information 160 | lx = this_exp.x_m + \ 161 | this_exp.links[link_id].d * \ 162 | cos(this_exp.facing_rad + \ 163 | this_exp.links[link_id].heading_rad) 164 | 165 | ly = this_exp.y_m + \ 166 | this_exp.links[link_id].d * \ 167 | sin(this_exp.facing_rad + \ 168 | this_exp.links[link_id].heading_rad) 169 | 170 | # correct e0 and e1 (x,y) by equal but opposite amounts 171 | # a 0.5 correction parameter means that e0 and e1 will 172 | # be fully corrected based on e0's link information 173 | this_exp.x_m = this_exp.x_m + \ 174 | (linked_exp.x_m-lx) * self.correction_factor 175 | 176 | this_exp.y_m = this_exp.y_m + \ 177 | (linked_exp.y_m-ly) * self.correction_factor 178 | 179 | linked_exp.x_m = linked_exp.x_m - \ 180 | (linked_exp.x_m-lx)*self.correction_factor 181 | 182 | linked_exp.y_m = linked_exp.y_m - \ 183 | (linked_exp.y_m-ly)*self.correction_factor 184 | 185 | # determine the angle between where e0 thinks e1's 186 | # facing should be based on the link information 187 | df = self.signed_delta_rad( 188 | (this_exp.facing_rad + \ 189 | this_exp.links[link_id].facing_rad), 190 | linked_exp.facing_rad) 191 | 192 | # correct e0 and e1 facing by equal but opposite amounts 193 | # a 0.5 correction parameter means that e0 and e1 will be fully 194 | # corrected based on e0's link information 195 | this_exp.facing_rad = self.clip_rad_180( 196 | this_exp.facing_rad + df * \ 197 | self.correction_factor) 198 | linked_exp.facing_rad = self.clip_rad_180( 199 | linked_exp.facing_rad - df * \ 200 | self.correction_factor) 201 | 202 | self.exp_history.append(self.curr_exp_id) 203 | 204 | def get_current_experience(self): 205 | return self.exps[self.curr_exp_id] 206 | 207 | def add_new_experience(self, curr_exp_id, 208 | new_exp_id, x_pc, y_pc, th_pc, vt_id): 209 | "Create a new experience and link current experience to it" 210 | 211 | # add link information to 
the current experience for the new experience 212 | # including the experience_id, odo distance to the experience, odo 213 | # heading (relative to the current experience's facing) to the 214 | # experience, odo delta facing (relative to the current expereience's 215 | # facing). 216 | 217 | curr_exp = self.exps[self.curr_exp_id] 218 | 219 | exp_id = len(self.exps) 220 | 221 | # create the new experience which will have no links to being with 222 | new_exp = Experience(exp_id, x_pc, y_pc, th_pc, vt_id, 223 | curr_exp.x_m + accum_delta_x, # x_m 224 | curr_exp.y_m + accum_delta_y, # y_m 225 | self.clip_rad_180(accum_delta_facing), #facing_rad 226 | ) 227 | 228 | # compute properties of the link to the new experience 229 | d = sqrt(accum_delta_x**2 + accum_delta_y**2); 230 | heading_rad = self.signed_delta_rad(curr_exp.facing_rad, 231 | arctan2(accum_delta_y, 232 | accum_delta_x)) 233 | facing_rad = self.signed_delta_rad(curr_exp.facing_rad, 234 | accum_delta_facing) 235 | 236 | # add the link to the new experience 237 | curr_exp.add_link(new_exp, d, heading_rad, facing_rad) 238 | 239 | # add this experience id to the vt for efficient lookup 240 | self.view_templates[vt_id].exps.append(new_exp_id) 241 | 242 | # add the new Experience to the list 243 | self.exps.append(new_exp) 244 | 245 | return new_exp 246 | 247 | def clip_rad_360(self, angle): 248 | "Clip the input angle to between 0 and 2pi radians" 249 | while angle < 0: 250 | angle += 2*pi 251 | while angle >= 2*pi: 252 | angle -= 2*pi 253 | return angle 254 | 255 | def clip_rad_180(self, angle): 256 | "Clip the input angle to between -pi and pi radians" 257 | while angle > pi: 258 | angle -= 2*pi 259 | while angle <= -pi: 260 | angle += 2*pi 261 | return angle 262 | 263 | def min_delta(self, d1, d2, maxd): 264 | """ Get the minimum delta distance between two values assuming a wrap 265 | to zero at max""" 266 | return min((abs(d1 - d2), maxd - abs(d1 - d2))) 267 | 268 | def signed_delta_rad(self, angle1, angle2): 
269 | """ Get the signed delta angle from angle1 to angle2 handling the 270 | wrap from 2pi to 0.""" 271 | 272 | direction = self.clip_rad_180(angle2 - angle1) 273 | 274 | delta_angle = abs(self.clip_rad_360(angle1) - \ 275 | self.clip_rad_360(angle2)) 276 | 277 | if (delta_angle) < (2*pi - delta_angle): 278 | if (direction > 0): 279 | angle = delta_angle 280 | else: 281 | angle = -delta_angle 282 | 283 | else: 284 | if (dir > 0): 285 | angle = 2*pi - delta_angle 286 | else: 287 | angle = -(2*pi - delta_angle) 288 | 289 | return angle -------------------------------------------------------------------------------- /old/pose_cell_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class PoseCellNetwork: 4 | 5 | def __init__(self, shape): 6 | self.activities = np.zeros(shape) 7 | self.shape = shape 8 | 9 | self.view_template_inject_energy = None # TODO 10 | 11 | # excitatory parameters 12 | self.e_xy_wrap = None # TODO 13 | self.e_th_wrap = None # TODO 14 | self.e_w_dim = None # TODO 15 | self.excitatory_weights = None # TODO 16 | 17 | # inhibitory parameters 18 | self.i_xy_wrap = None # TODO 19 | self.i_th_wrap = None # TODO 20 | self.i_w_dim = None # TODO 21 | self.inhibitory_weights = None # TODO 22 | 23 | self.global_inhibition = 0.00002 24 | 25 | self.c_size_th = None # TODO 26 | 27 | def deafult_pose_cell_weight_distribution(self, dim, var): 28 | """ Create a 3D normalized distribution of size dim^3, 29 | with a variance of var 30 | 31 | (Matlab version: equivalent to rs_create_posecell_weights) 32 | """ 33 | 34 | dim_center = floor(dim/2.); 35 | weights = zeros((dim,dim,dim)) 36 | for x in range(0, dim): 37 | for y in range(0, dim): 38 | for z in range(0, dim): 39 | weights[x,y,z] = \ 40 | 1./(var*sqrt(2*pi))*exp((-(x-dim_center)**2 - \ 41 | (y-dim_center)**2-(z-dim_center)**2)/(2*var^2)); 42 | total = sum( weights.ravel() ) 43 | return weights / total 44 | 45 | def update(self, 
view_template, trans_est, rot_est): 46 | """ Evolve the network, given a visual template input and estimates of 47 | translation and rotation (e.g. from visual odometry) 48 | 49 | (Matlab version: equivalent to rs_posecell_iteration) 50 | """ 51 | 52 | if not view_template.first: 53 | act_x = min( max(round(view_template.x_pc),0), 54 | self.activities.shape[0]-1 ) 55 | act_y = min( max(round(view_template.y_pc),0), 56 | self.activities.shape[1]-1 ) 57 | act_th = min( max(round(view_template.th_pc),0), 58 | self.activities.shape[2]-1) 59 | 60 | energy = self.view_template_inject_energy * (1/30.) * \ 61 | (30 - exp(1.2 * view_template.template_decay)) 62 | 63 | if energy > 0: 64 | self.activities[act_x, act_y, act_th] += energy 65 | 66 | # local excitation 67 | new_activity = np.zeros_like(self.activities) 68 | for x in range(0, self.activities.shape[0]): 69 | for y in range(0, self.activities.shape[1]): 70 | for z in range(0, self.activities.shape[2]): 71 | if self.activities[x,y,z] != 0: 72 | xs = self.e_xy_wrap[x:x+self.w_e_dim-1] 73 | ys = self.e_xy_wrap[y:y+self.w_e_dim-1] 74 | ths = self.e_th_wrap[z:z+self.w_e_dim-1] 75 | new_activity[xs,ys,th] += self.activities[x,y,s] * \ 76 | self.excitatory_weights 77 | 78 | self.activities = new_activity 79 | 80 | # local inhibition 81 | new_activity = np.zeros_like(self.activities) 82 | for x in range(0, self.activities.shape[0]): 83 | for y in range(0, self.activities.shape[1]): 84 | for z in range(0, self.activities.shape[2]): 85 | if self.activities[x,y,z] != 0: 86 | xs = self.i_xy_wrap[x:x+self.w_i_dim-1] 87 | ys = self.i_xy_wrap[y:y+self.w_i_dim-1] 88 | ths = self.i_th_wrap[z:z+self.w_i_dim-1] 89 | new_activity[xs,ys,th] += self.activities[x,y,s] * \ 90 | self.inhibitory_weights 91 | 92 | self.activities -= new_activity 93 | 94 | # global inhibition 95 | self.activities -= self.global_inhibition 96 | self.activities[self.activities < 0.0] = 0.0 97 | 98 | # normalization 99 | self.activities /= 
sum(self.activities.ravel()) 100 | 101 | # path integration 102 | # trans_est affects xy direction 103 | # rot_est affects th 104 | for dir_pc in range(0, self.activities.shape[2]): 105 | direction = dir_pc * self.c_size_th 106 | 107 | # TODO: left off at line 109 108 | if direction == 0: 109 | self.activities[:,:,dir_pc] *= (1 - trans_est) 110 | self.activities[:,:,dir_pc] += \ 111 | roll(self.activities[:,:,dir_pc],1,1)*trans_est 112 | elif direction == pi/2.: 113 | self.activities[:,:,dir_pc] *= (1 - trans_est) 114 | self.activities[:,:,dir_pc] += \ 115 | roll(self.activities[:,:,dir_pc],1,0)*trans_est 116 | elif direction == pi: 117 | self.activities[:,:,dir_pc] *= (1 - trans_est) 118 | self.activities[:,:,dir_pc] += \ 119 | roll(self.activities[:,:,dir_pc],-1,1)*trans_est 120 | elif direction == 3*pi/2.: 121 | self.activities[:,:,dir_pc] *= (1 - trans_est) 122 | self.activities[:,:,dir_pc] += \ 123 | roll(self.activities[:,:,dir_pc],-1,0)*trans_est 124 | else: 125 | # rotate activities instead of implementing for four quadrants 126 | activities90 = rot90(activities[:,:,dir_pc], 127 | floor(direction * 2. / pi)) 128 | dir90 = direction - floor(direction*2/pi) * pi / 2. 
129 | 130 | # extend pose cell one unit each direction 131 | # work out weigh contribution to the NE cell from the SW, 132 | # NW, SE cells 133 | # given vtrans and the direction 134 | # weight_sw = v * cos(th) * v * sin(th) 135 | # weight_se = (1 - v * cos(th)) * v * sin(th) 136 | # weight_nw = (1 - v * sin(th)) * v * sin(th) 137 | # weight_ne = 1 - weight_sw - weight_se - weight_nw 138 | # think in terms of NE divided into 4 rectangles with the sides 139 | # given by vtrans and the angle 140 | 141 | new_act = zeros(activities.shape[0]+2) 142 | new_act[1:-1,1:-1] = activities90 143 | weight_sw = trans_est**2 * cos(dir90) * sin(dir90) 144 | weight_se = trans_est*sin(dir90) - \ 145 | trans_est**2 * cos(dir90) * sin(dir90) 146 | weight_nw = trans_est*cos(dir90) - \ 147 | trans_est**2 * cos(dir90) * sin(dir90) 148 | weight_ne = 1.0 - weight_sw - weight_se - weight_nw 149 | 150 | # circular shift and multiple by the contributing weight 151 | # copy those shifted elements for the wrap around 152 | new_act = new_act*weight_ne + \ 153 | roll(new_act, 1, 1) * weight_nw + \ 154 | roll(new_act, 1, 0) * weight_se + \ 155 | roll(roll(new_act,1,0),1,1) * weight_sw 156 | activities90 = new_act[1:-1,1:-1] 157 | activities90[1:,0] += new_act[2:-1,-1] 158 | activities90[0,1:] += new_act[-1,2:-1] 159 | activities90[0,0] += new_act[-1,-1] 160 | 161 | # unrotate the pose cell xy layer 162 | activities[:,:,dir_pc] = rot90(activities90, 163 | 4 - floor(direction*2/pi)) 164 | # Path integration: Theta 165 | # shift pose cells +/- theta, given rot_est 166 | if rot_est is not 0: 167 | # mod to work out the partial shift amount 168 | weight = mod(abs(rot_est) / self.activities.shape[2],1) 169 | if weight == 0: 170 | weight = 1.0 171 | roll_amount1 = sign(rot_est) * \ 172 | floor(abs(rot_est)/self.activities[2]) 173 | roll_amount2 = sign(rot_est) * \ 174 | ceil(abs(rot_est)/self.activities[2]) 175 | 176 | self.activities = roll(self.activities,roll_amount1,2) \ 177 | * (1.-weight) + \ 178 | 
roll(self.activities,roll_amount2,2) \ 179 | * (weight) 180 | 181 | 182 | def activty_packet_center(self): 183 | """ Find the x,y,th center of the activity in the network 184 | 185 | (Matlab version: equivalent to rs_get_posecell_xyth) 186 | """ 187 | # find the most active cell 188 | (x,y,z) = unravel_index( activies.argmax(), activities.shape ) 189 | 190 | # take max +- avg cell in 3D space (whatever that means...) 191 | z_pose_cells = zeros(activities.shape) 192 | 193 | z_val = self.activities[self.avg_xy_wrap[x:x+self.n_cells_to_average*2], 194 | self.avg_xy_wrap[y:y+self.n_cells_to_average*2], 195 | self.avg_th_wrap[z:z+self.n_cells_to_average*2]] 196 | 197 | z[ self.avg_xy_wrap[x:x+self.n_cells_to_average*2], 198 | self.avg_xy_wrap[y:y+self.n_cells_to_average*2], 199 | self.avg_th_wrap[z:z+self.n_cells_to_average*2] ] = z_val 200 | 201 | # get the sums for each axis 202 | x_sums = sum( sum( z, 1), 2 ).T 203 | y_sums = sum( sum( z, 0), 2 ) 204 | th_sums = sum( sum( z, 0), 1).T 205 | 206 | # find the x,y,th using population vector decoding 207 | 208 | x = mod( arctan2( sum(self.xy_sum_sin_lookup * x_sums), 209 | sum(self.xy_sum_cos_lookup * x_sums) ) * \ 210 | self.activities.shape[0] / (2 * pi), 211 | self.activities.shape[0] ) 212 | 213 | y = mod( arctan2( sum(self.xy_sum_sin_lookup * y_sums), 214 | sum(self.xy_sum_cos_lookup * y_sums) ) * \ 215 | self.activities.shape[1] / (2 * pi), 216 | self.activities.shape[1] ) 217 | th = mod( arctan2( sum(self.th_sum_sin_lookup * th_sums), 218 | sum(self.th_sum_cos_lookup * th_sums) ) * \ 219 | self.activities.shape[2] / (2 * pi), 220 | self.activities.shape[2] ) 221 | return (x,y,th) 222 | -------------------------------------------------------------------------------- /old/ratslam.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | import pylab as plt 5 | 6 | from pose_cell_network import PoseCellNetwork 7 | from experience_map 
import ExperienceMap
from visual_odometer import SimpleVisualOdometer
from visual_templates import VisualTemplateCollection
from video_source import FFMpegVideoSource, DirVideoSource


class RatSLAM:
    """ A complete rat slam instance

    (Matlab version: equivalent to rs_main)
    """

    def __init__(self, **kwargs):
        pose_cell_shape = kwargs.pop("pose_cell_dimensions", (100, 100, 60))

        # the odometer and the template matcher share the shift-match window
        shift_match = kwargs.pop("odo_shift_match", 20)

        self.odometer = SimpleVisualOdometer(
            fov_deg=kwargs.pop("fov_deg", 50),
            im_trans_y_range=kwargs.pop("im_trans_y_range"),
            im_rot_y_range=kwargs.pop("im_rot_y_range"),
            im_odo_x_range=kwargs.pop("im_odo_x_range"),
            shift_match=shift_match,
            trans_scale=kwargs.pop("odo_trans_scale", 100.))

        self.view_templates = VisualTemplateCollection(
            template_decay=kwargs.pop('template_decay', 1.0),
            match_y_range=kwargs.pop('im_vt_y_range', None),
            match_x_range=kwargs.pop('im_vt_x_range', None),
            shift_match=shift_match,
            match_threshold=kwargs.pop('vt_match_threshold', None))

        self.pose_cell_network = PoseCellNetwork(pose_cell_shape)

        self.experience_map = ExperienceMap(
            self.pose_cell_network,
            self.view_templates,
            delta_pc_threshold=kwargs.pop("exp_delta_pc_threshold", 1.0),
            correction_factor=kwargs.pop("exp_correction_factor", 0.5),
            exp_loops=kwargs.pop("exp_loops", 100))
        # NOTE(review): `im_x_offset` and `pc_trans_scaling` are passed by
        # __main__ below but never popped here — confirm they are intended

    def update(self, video_frame):
        """Feed one (already grayscale) video frame through the pipeline."""
        # TODO save experience map

        # get odometry from the video
        speed, rotation, odo = self.odometer.estimate_odometry(video_frame)

        # get the most active visual template (based on current video_frame)
        vt_id = self.view_templates.match(video_frame, odo[0], odo[1], odo[2])

        # update pose cells
        self.pose_cell_network.update(self.view_templates.templates[vt_id],
                                      speed, rotation)

        # get the 'best' center of pose cell activity
        px, py, pr = self.pose_cell_network.activty_packet_center()

        # run an iteration of the experience map
        # BUG FIX: was `experience_map.update(...)` — missing `self.`
        self.experience_map.update(vt_id, speed, rotation, (px, py, pr))

        print(self.experience_map.get_current_experience())


if __name__ == "__main__":

    # instantiate a "RatSLAM" model
    rs = RatSLAM(fov_deg=50.,
                 odo_shift_match=20,
                 odo_trans_scale=100.,
                 im_x_offset=15,
                 vt_match_threshold=0.09,
                 im_vt_y_range=slice((480/2 - 80 - 40), (480/2 + 80 - 40)),
                 im_vt_x_range=slice((640/2 - 280 + 15), (640/2 + 280 + 15)),
                 im_trans_y_range=slice(270, 430),
                 im_rot_y_range=slice(75, 240),
                 im_odo_x_range=slice((180 + 15), (460 + 15)),
                 exp_delta_pc_threshold=1.0,
                 exp_correction_factor=0.5,
                 exp_loops=100,
                 pc_trans_scaling=1./10.)
96 | 97 | # get the video source 98 | block_read = 100 99 | render_rate = 10 100 | 101 | test_video = DirVideoSource('/Users/graham/Desktop/ratslam/videos/stlucia_0to21000',"%06i.png",1) 102 | n_frames = 10#2102 103 | # test_video = FFMpegVideoSource("stlucia_1to21000.mov") 104 | 105 | # feed each video frame into the model 106 | # frame = test_video.frame 107 | 108 | # while frame is not None: 109 | for i in xrange(n_frames): 110 | frame = test_video.get_frame() 111 | 112 | # convert frame into form the model wants 113 | frame = np.mean(frame, 2) 114 | 115 | # update the model with the new frame 116 | rs.update(frame) 117 | 118 | # Report some diagnostic info 119 | pass 120 | 121 | # fetch the next frame 122 | frame = test_video.advance() 123 | -------------------------------------------------------------------------------- /old/simple_vision.py: -------------------------------------------------------------------------------- 1 | def compare_segments(seg1, seg2, slen, cwl): 2 | """ Do a simple template match of two segments to find the best match 3 | 4 | (Matlab version: equivalent to rs_compare_segments) 5 | """ 6 | 7 | # assume a large difference 8 | min_diff = 1e6 9 | diffs = zeros(slen) 10 | min_offset = None 11 | 12 | for offset in range(0, slen-1): 13 | cdiff = abs( seg1[offset:cwl-1] - seg2[0: cwl-offset-1] ) 14 | cdiff = sum(cdiff) / (cwl - offset) 15 | diffs[slen - offset] = cdiff 16 | if(cdiff < min_diff): 17 | min_diff = cdiff 18 | min_offset = offset 19 | 20 | for offset in range(0, slen-1): 21 | cdiff = abs( seg1[0:cwl-offset-1] - seg2[offset:cwl] ) 22 | cdiff = sum(cdiff) / (cwl - offset) 23 | diffs[slen+offset] = cdiff 24 | if(cdiff < min_diff): 25 | min_diff = cdiff 26 | min_offset = -offset 27 | 28 | return (min_offset, min_diff) -------------------------------------------------------------------------------- /old/sketch.py: -------------------------------------------------------------------------------- 1 | class RatSLAM: 2 | """ A complete rat 
slam instance 3 | 4 | (Matlab version: equivalent to rs_main) 5 | """ 6 | 7 | def __init__(self, **kwargs): 8 | pose_cell_shape = kwargs.pop("pose_cell_dimensions", (100,100,60)) 9 | self.PoseCellNetwork(pose_cell_shape) 10 | pass 11 | 12 | 13 | class PoseCellNetwork: 14 | 15 | def __init__(self, shape): 16 | self.activities = array(shape) 17 | self.inhibitory_weights = None 18 | self.excitatory_weights = None 19 | 20 | def deafult_pose_cell_weight_distribution(self, dim, var): 21 | """ Create a 3D normalized distribution of size dim^3, 22 | with a variance of var 23 | 24 | (Matlab version: equivalent to rs_create_posecell_weights) 25 | """ 26 | 27 | dim_center = floor(dim/2.); 28 | weights = zeros((dim,dim,dim)) 29 | for x in range(0, dim): 30 | for y in range(0, dim): 31 | for z in range(0, dim): 32 | weights[x,y,z] = 33 | 1./(var*sqrt(2*pi))*exp((-(x-dim_center)**2 - \ 34 | (y-dim_center)**2-(z-dim_center)**2)/(2*var^2)); 35 | total = sum( weights.ravel() ) 36 | return weights / total 37 | 38 | def update(self, template_id, trans_est, rot_est): 39 | """ Evolve the network, given a visual template input and estimates of 40 | translation and rotation (e.g. from visual odometry) 41 | 42 | (Matlab version: equivalent to rs_posecell_iteration) 43 | """ 44 | 45 | pass 46 | 47 | 48 | def activty_packet_center(self): 49 | """ Find the x,y,th center of the activity in the network 50 | 51 | (Matlab version: equivalent to rs_get_posecell_xyth) 52 | """ 53 | x = 0 54 | y = 0 55 | th = 0 56 | return (x,y,th) 57 | 58 | 59 | class ExperienceMap: 60 | 61 | def __init__(self): 62 | self.experience_map = None 63 | 64 | def update(self, template_id, trans_est, rot_est, pose_cell_packet_center): 65 | """ Update the experience map 66 | 67 | (Matlab version: equivalent to rs_experience_map_iteration) 68 | """ 69 | 70 | 71 | class VisualOdometer: 72 | 73 | def __init__(self): 74 | self.last_image = None 75 | self.fov_deg = 50. 
76 | 77 | 78 | def estimate_odometry(self, im): 79 | """ Estimate the translation and rotation of the viewer, given a new 80 | image sample of the environment 81 | 82 | (Matlab version: equivalent to rs_visual_odometry) 83 | """ 84 | dpp = self.fov_deg / im.shape[1] 85 | 86 | trans = 0 87 | rot = 0 88 | return trans, rot 89 | 90 | def compare_segments(self, seg1, seg2, slen, cwl): 91 | """ Do a simple template match of two segments to find the best match 92 | 93 | (Matlab version: equivalent to rs_compare_segments) 94 | """ 95 | 96 | # assume a large difference 97 | mindiff = 1e6 98 | diffs = zeros(slen) 99 | 100 | return (offset, sdif) 101 | -------------------------------------------------------------------------------- /old/video_source.py: -------------------------------------------------------------------------------- 1 | import pyffmpeg 2 | from pylab import imread 3 | 4 | class VideoSource: 5 | """ Base VideoSource (yeah, I know: gratuitous in Python)""" 6 | 7 | # @property(get_frame) 8 | # frame 9 | 10 | def get_frame(self): 11 | pass 12 | 13 | def advance(self): 14 | pass 15 | 16 | class DirVideoSource(VideoSource): 17 | def __init__(self, directory, template, starting_frame=1): 18 | self.frame_index = starting_frame 19 | self.template = template 20 | self.directory = directory 21 | 22 | def get_frame(self): 23 | return imread('/'.join((self.directory, self.template % self.frame_index))) 24 | 25 | def advance(self): 26 | self.frame_index += 1 27 | return self.get_frame() 28 | 29 | class FFMpegVideoSource (VideoSource): 30 | 31 | def __init__(self, filename): 32 | self.stream = pyffmpeg.VideoStream() 33 | self.stream.open(filename) 34 | 35 | def get_frame(self): 36 | try: 37 | frame = stream.GetCurrentFrame() 38 | except Exception, e: 39 | return None 40 | 41 | return frame.asarray() 42 | 43 | def advance(self): 44 | try: 45 | frame = stream.GetNextFrame() 46 | except Exception, e: 47 | return None 48 | return frame.asarray() 
# ==== /old/visual_odometer.py ====

import numpy as np


def compare_segments(x_sums, prev_x_sums, shift, shape):
    """Find the horizontal offset that best aligns two intensity profiles.

    Scans candidate offsets in range(shift - shape, shape - shift) and
    returns (best_offset, mean_abs_difference_at_best_offset).  Returns
    (0, 0) when either profile is missing (e.g. on the first frame).
    """
    if (x_sums is None) or (prev_x_sums is None):
        return 0, 0
    minval = np.inf
    offset = 0
    for s in range(shift - shape, shape - shift):
        overlap = shape - abs(s)
        val = 0
        # BUG FIX: `xrange` is Python-2-only
        for n in range(overlap):
            val += abs(x_sums[n + max(s, 0)] - prev_x_sums[n - min(s, 0)])
        val /= overlap
        if val < minval:
            minval = val
            offset = s
    return offset, minval


class SimpleVisualOdometer:
    """ Very simple visual odometry machinery """

    def __init__(self, **kwargs):
        self.last_image = None
        self.fov_deg = kwargs.pop("fov_deg", 50.)
        # image rows used for the translation / rotation estimates
        self.im_trans_y_range = kwargs.pop("im_trans_y_range",
                                           slice(100, 120))
        self.im_rot_y_range = kwargs.pop("im_rot_y_range", slice(75, 240))
        self.im_odo_x_range = kwargs.pop("im_odo_x_range", slice(200, 460))

        self.shift_match = kwargs.pop("shift_match", 20)   # VISUAL_ODO_SHIFT_MATCH
        self.trans_scale = kwargs.pop("trans_scale", 100.) # VTRANS_SCALE
        self.prev_trans_x_sums = None  # prev_vtrans_image_x_sums
        self.prev_rot_x_sums = None    # prev_vrot_image_x_sums

        # accumulated odometry [x, y, heading]; initial heading is pi/2
        self.odo = np.array([0, 0, np.pi / 2.])

    def estimate_odometry(self, im):
        """ Estimate the translation and rotation of the viewer, given a
        new image sample of the environment

        (Matlab version: equivalent to rs_visual_odometry)

        Returns (trans, rot, odo) where odo is the accumulated [x, y, th].
        """
        degrees_per_pixel = self.fov_deg / im.shape[1]

        # Translation: take a slice of the image, sum it down to a single
        # strip, and normalise by the strip's mean intensity
        sub_im = im[self.im_trans_y_range, self.im_odo_x_range]
        im_x_sums = np.sum(sub_im, 0)
        avint = np.sum(im_x_sums) / im_x_sums.shape[0]
        # explicit (not in-place) divide so integer input cannot break it
        im_x_sums = im_x_sums / avint

        (min_offset, min_diff) = compare_segments(im_x_sums,
                                                  self.prev_trans_x_sums,
                                                  self.shift_match,
                                                  im_x_sums.shape[0])
        trans = min_diff * self.trans_scale

        # ignore implausibly large jumps (e.g. scene cuts)
        if trans > 10.0:
            trans = 0.0

        # cache for the next round
        self.prev_trans_x_sums = im_x_sums

        # Rotation: same normalised-strip match on a different row band;
        # here the best offset (in pixels) is what matters
        sub_im = im[self.im_rot_y_range, self.im_odo_x_range]
        im_x_sums = np.sum(sub_im, 0)
        avint = np.sum(im_x_sums) / im_x_sums.shape[0]
        im_x_sums = im_x_sums / avint

        (min_offset, min_diff) = compare_segments(im_x_sums,
                                                  self.prev_rot_x_sums,
                                                  self.shift_match,
                                                  im_x_sums.shape[0])

        rot = min_offset * degrees_per_pixel * np.pi / 180.
        self.prev_rot_x_sums = im_x_sums

        # update accumulated odometry (heading first, then position)
        self.odo[2] += rot
        self.odo[0] += trans * np.cos(self.odo[2])
        self.odo[1] += trans * np.sin(self.odo[2])

        return (trans, rot, self.odo)

# ==== /ratslam/README ====
#
# README 08.09.11
# @author: Christine Lee (cgl11@duke.edu)
# Rowland Institute at Harvard University - REU Summer 2011
#
# RatSLAM
#
# ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# vod2: Visual Odometer
#
# Functions -
#     rs_compare_segments(ndarray seg1, ndarray seg2, int slen, int cwl)
#
#         Takes in two scanline intensity profiles seg1 and seg2 and
#         returns the shift (offset) and minimum difference value (sdiff)
#         between the minimum difference points.
#
#         returns offset and sdiff
#
# Classes -
#     VisOdom
#         attributes
#
#             prev_vrot_image_x_sums: ndarray initially 281 zeros
#             prev_vtrans_image_x_sums: ndarray initially 281 zeros
#             IMAGE_ODO_X_RANGE: slice initially (194, 475)
#             IMAGE_VTRANS_Y_RANGE: slice initially (269, 430)
#             IMAGE_VROT_Y_RANGE: slice initially (74, 240)
#             VTRANS_SCALE: int initially 100
#             VISUAL_ODO_SHIFT_MATCH: int initially 140
#             odo: list initially [float 0, float 0, float pi/2] for
#                 accumulation of [x-direction translation, y-direction
#                 translation, heading]
#             delta: list initially [0, 0] for [translation change,
#                 rotation change]
#
#         update(String raw_image)
#
#             Takes in a string, loads the image it names, determines the
#             shift and minimum difference value, calculates the
#             incremental translation and rotation change, and updates the
#             odometry accumulation data, which is returned.
36 | 37 | returns odo 38 | ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- 39 | localView: Visual Template Collection 40 | 41 | Classes - 42 | template(ndarray curr_xsums) 43 | attributes 44 | 45 | template: ndarray curr_xsums which is the scanline intensity profile associated with the template 46 | activity: float64 initially 1.0 47 | x_pc: int initially 31 for x position in pose cell network 48 | y_pc: int initially 31 for y position in pose cell network 49 | th_pc: int initially 19 for theta position in pose cell network 50 | vc: int initially 0 for index number in templateCollection 51 | first: bool initially True for whether this template has been activated before 52 | exps: ndarray initially empty for indices of experiences associated with this visual template 53 | 54 | templateCollection 55 | attributes 56 | 57 | templates: ndarray initially empty for list of templates in templateCollection 58 | x_range: slice initially (54, 615) for scanline intensity profile calculation 59 | y_range: slice initially (119, 280) for scanline intensity profile calculation 60 | globalDecay: float initially 0.1 61 | decay: float initially 1.0 62 | offsetP: int initially 20 63 | match_threshold: float initially 0.09 64 | curr_vc: int initially none for current visual template index 65 | 66 | match(cv.iplimage im) 67 | 68 | Takes in loaded image, determines current image scanline intensity profile, compares with other scanline intensity profiles 69 | in templateCollection, either determines a match or creates a new visual template. 70 | 71 | returns vc of match or new visual template 72 | 73 | update(ndarray curr_xsums) 74 | 75 | Takes in scanline intensity profile, creates a new visual template, appends new visual template to templateCollection, returns 76 | index of new visual template in templateCollection. 
77 | 78 | returns vc 79 | 80 | get_template(int vc) 81 | 82 | Takes int value, and returns visual template with matching vc. 83 | 84 | returns template 85 | ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- 86 | pc_network: Pose Cell Network 87 | 88 | Classes - 89 | PoseCellNetwork(ndarray shape) 90 | attributes 91 | 92 | shape: ndarray initially [PC_DIM_XY, PC_DIM_XY, PC_DIM_TH] == [61, 61, 36] for shape of pose cell network matrix 93 | posecells: ndarray initially zero matrix with shape [PC_DIM_XY, PC_DIM_XY, PC_DIM_TH] 94 | vt_inject: float initially 0.1 95 | pc_vtrans_scaling: float64 initially 0.1 96 | e_xywrap: ndarray initially [58, 59, ...60, 0, 1... 60, 0, 1, 2] 97 | e_thwrap: ndarray initially [33, 34, ...35, 0, 1... 60, 0, 1, 2] 98 | e_wdim: int initially 7 99 | e_pcw: ndarray for excitation activity bubble shape 100 | i_xywrap: ndarray initially [59, 60, 0, 1, 2..60, 0, 1] 101 | i_thwrap: ndarray initially [34, 35, 0, 1, 2..35, 0, 1] 102 | i_wdim: int initially 5 103 | i_pcw: ndarray for inhibition activity bubble shape 104 | global_inhibition: float initially 0.00002 105 | c_size_th: float64 initially pi/18 106 | xy_sum_sin: ndarray initially with values sin(index*2*pi/61) 107 | xy_sum_cos: ndarray initially with values cos(index*2*pi/61) 108 | th_sum_sin: ndarray initially with values sin(index*2*pi/36) 109 | th_sum_cos: ndarray initially with values cos(index*2*pi/36) 110 | cells_avg: int initially 3 111 | avg_xywrap: ndarray initially [58, 59, ...60, 0, 1... 60, 0, 1, 2] 112 | avg_thwrap: ndarray initially [33, 34, ...35, 0, 1... 60, 0, 1, 2] 113 | max_pc: tuple initially (0, 0, 0) 114 | 115 | create_pc_weights(int dim, int var) 116 | 117 | Takes in two integer arguments dim and var, creates 3D matrix with shape [dim, dim, dim], stores weight values depending on 118 | distance from the matrix center, normalizes values in matrix, and returns matrix. 
119 | 120 | returns ndarray of weighted values 121 | 122 | compute_activity_matrix(ndarray xywrap, ndarray thwrap, int wdim, ndarray pcw) 123 | 124 | Creates an zero matrix with same shape as pose cell network, and finds indices of nonzero values in pose cell network. Then multiplies 125 | each nonzero value by the Gaussian weight distribution pcw which is added to the zero matrix indices that correspond with the nonzero value pose 126 | cell network indices. 127 | 128 | returns ndarray which is the updated pose cell network 129 | 130 | get_pc_max(ndarray xywrap, ndarray thwrap) 131 | 132 | Finds index of absolute maximum value of pose cell network, and uses population vectors to calculate estimation of real-life 133 | position in space. 134 | 135 | returns tuple with values (x position, y position, theta direction) 136 | 137 | update(tuple ododelta, localView template v_temp) 138 | 139 | Determine the effect of visual template v_temp on pose cell network by seeing whether it has been activated before. If it has been activated previously, 140 | adjust value of associated pose cells by a factor related to v_temp activity level, otherwise, pass. Pose cell network is excited, inhibited, globally inhibited, 141 | and normalized. Then path integration is done to shift pose cell activity bubble translationally then rotationally. 142 | 143 | returns ndarray of maximum value pose cell index 144 | ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- 145 | em: Experience Mapping 146 | 147 | Functions - 148 | get_minDelta(float d1, float d2, float max) 149 | 150 | Absolute value difference between d1 and d2 is calculated, and difference between float max and absolute value is calculated. 151 | 152 | returns float 153 | 154 | ClipRad180(float angle) 155 | 156 | Calculates angle value equivalent [-180, 180). 157 | 158 | returns float 159 | 160 | ClipRad360(float angle) 161 | 162 | Calculates angle value equivalent [0, 360). 
163 | 164 | returns float 165 | 166 | Get_Signed_Delta_Rad(float angle1, float angle2) 167 | 168 | Takes two angle values, finds absolute difference, and gives sign to absolute difference depending on direction. 169 | 170 | returns float of signed absolute difference between angles 171 | 172 | 173 | Classes - 174 | exp 175 | attributes 176 | 177 | x_pc: int initially 0 178 | y_pc: int initially 0 179 | th_pc: int initially 0 180 | vt_id: int initially 0 181 | exp_id: int initially 0 182 | links: list initially empty 183 | x_m: float initially 0 184 | y_m: float initially 0 185 | facing_rad: float initially 0 186 | 187 | link 188 | attributes 189 | 190 | exp_id: int initially 0 191 | facing_rad: float initially 0 192 | d: float initially 0 193 | heading_rad: float initially 0 194 | 195 | exp_map(pc_network PoseCellNetwork posecellnetwork, localView templateCollection tempCol) 196 | attributes 197 | 198 | exps: list initially empty 199 | exp_id: int initially None 200 | accum_delta_x: int initially 0 201 | accum_delta_y: int initially 0 202 | accum_delta_facing: float initially pi/2 203 | vt: list initially containing templates in templateCollection tempCol 204 | vt_id: int initially 0 for vc of currently activated visual template 205 | pcnet: pcNetwork initially posecellnetwork 206 | PC_DIM_XY: int initially 61 207 | PC_DIM_TH: int initially 36 208 | EXP_DELTA_PC_THRESHOLD: float initially 1.0 209 | exploops: int initially 100 210 | exp_correction: float initially 0.5 211 | exp_history: list initially empty 212 | 213 | get_exp(int exp_id) 214 | 215 | Access information about experience with index of exp_id. 216 | 217 | returns void 218 | 219 | link_exps(int curr_exp_id, int new_exp_id) 220 | 221 | Takes two integer values as current experience index and new experience index, calculates heading_rad and facing_rad, 222 | creates link between the two experience, appends newly created link to links list in both experiences. 
223 | 224 | returns void 225 | 226 | create_exp(int curr_exp_id, int new_exp_id, int nx_pc, int ny_pc, int nth_pc, int nvt_id) 227 | 228 | Creates link between current experience and experience being created, creates new experience with attributes specified in 229 | create_exp parameters, adds new experience to experience map list exps, and updates correlated visual template with experience index value. 230 | 231 | returns exp 232 | 233 | update(float vtrans, float vrot, int x_pc, int y_pc, int th_pc, int vt_id) 234 | 235 | Updates position (accum_delta_x, accum_delta_y, accum_delta_facing) using odometry information vtrans and vrot, determines whether 236 | current position according to pose cell network is distinct from previous position using EXP_DELTA_PC_THRESHOLD. If two positions are distinct 237 | according to comparison to threshold, new experience is created and appended to exp_map. If current visual template vc (vt_id) and previous 238 | visual template vc (stored in self.vt_id) are not the same, determine whether current visual template is associated with current experience, 239 | otherwise update so that current experience matches current visual template. Correct for loop closure. 240 | 241 | returns int of current experience index 242 | ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~- 243 | plot_graph: Main 244 | 245 | Functions - 246 | 247 | draw_position(float x, float y) 248 | 249 | Draws scatterplot point at position x, y. 250 | 251 | returns void 252 | 253 | draw_x_y_z(pcmax, subplot) 254 | 255 | Draws point at position x, y, z in 3D scatterplot.
256 | 257 | returns void 258 | 259 | get_image(int index) 260 | 261 | returns String of image directory 262 | 263 | initialize_em(localView templateCollection tempCol, vod2 VisOdom vod, pc_network PoseCellNetwork pcnet) 264 | 265 | returns exp_map 266 | 267 | initialize_pc(localView templateCollection tempCol, vod2 VisOdom vod) 268 | 269 | returns PoseCellNetwork 270 | 271 | initialize_lv 272 | 273 | returns templateCollection 274 | 275 | initialize_odo 276 | 277 | returns VisOdom 278 | 279 | -------------------------------------------------------------------------------- /ratslam/__init__.py: -------------------------------------------------------------------------------- 1 | from experience_map import * 2 | from view_templates import * 3 | from pose_cell_network import * 4 | from visual_odometer import * 5 | from simulation import * 6 | from video import * -------------------------------------------------------------------------------- /ratslam/experience_map.py: -------------------------------------------------------------------------------- 1 | from pylab import * 2 | import logging 3 | import line_profiler 4 | 5 | # Helper Functions 6 | 7 | def min_delta(d1, d2, max): 8 | delta = min([abs(d1-d2), max-abs(d1-d2)]) 9 | return delta 10 | 11 | def clip_rad_180(angle): 12 | while angle > pi: 13 | angle -= 2*pi 14 | while angle <= -pi: 15 | angle += 2*pi 16 | return angle 17 | 18 | def clip_rad_360(angle): 19 | while angle < 0: 20 | angle += 2*pi 21 | while angle >= 2*pi: 22 | angle -= 2*pi 23 | return angle 24 | 25 | def signed_delta_rad(angle1, angle2): 26 | dir = clip_rad_180(angle2-angle1) 27 | 28 | delta_angle = abs(clip_rad_360(angle1)-clip_rad_360(angle2)) 29 | 30 | if (delta_angle < (2*pi-delta_angle)): 31 | if (dir>0): 32 | angle = delta_angle 33 | else: 34 | angle = -delta_angle 35 | else: 36 | if (dir>0): 37 | angle = 2*pi - delta_angle 38 | else: 39 | angle = -(2*pi-delta_angle) 40 | return angle 41 | 42 | 43 | # Classes for implementing the RatSLAM 
experience map 44 | 45 | class Experience: 46 | "A point in the experience map" 47 | 48 | def __init__(self, parent, x_pc, y_pc, th_pc, vt, x_m, y_m, facing_rad): 49 | self.x_pc = x_pc 50 | self.y_pc = y_pc 51 | self.th_pc = th_pc 52 | 53 | self.facing_rad = facing_rad 54 | 55 | self.vt = vt 56 | 57 | self.links = [] 58 | self.x_m = x_m #x_m is xcoord in experience map 59 | self.y_m = y_m #y_m is ycoord in experience map 60 | 61 | self.parent_map = parent 62 | 63 | def link_to(self, target): 64 | pm = self.parent_map 65 | d = sqrt(pm.accum_delta_x**2 + pm.accum_delta_y**2) 66 | heading_rad = signed_delta_rad(self.facing_rad, 67 | arctan2(pm.accum_delta_y, 68 | pm.accum_delta_x)) 69 | facing_rad = signed_delta_rad(self.facing_rad, 70 | pm.accum_delta_facing) 71 | 72 | new_link = ExperienceLink(target, facing_rad, d, heading_rad) 73 | self.links.append(new_link) 74 | 75 | class ExperienceLink: 76 | "A directed link between two Experience objects" 77 | 78 | def __init__(self, target, facing_rad, d, heading_rad): 79 | self.target = target 80 | self.facing_rad = facing_rad 81 | self.d = d 82 | self.heading_rad = heading_rad 83 | 84 | 85 | class ExperienceMap: 86 | def __init__(self, **kwargs): 87 | 88 | self.exps = [] 89 | self.current_exp = None 90 | 91 | self.accum_delta_x = 0 92 | self.accum_delta_y = 0 93 | self.accum_delta_facing = pi/2 94 | 95 | self.current_vt = None 96 | 97 | self.PC_DIM_XY = kwargs.pop('PC_DIM_XY', 61) 98 | self.PC_DIM_TH = kwargs.pop('PC_DIM_TH', 36) 99 | self.EXP_DELTA_PC_THRESHOLD = kwargs.pop('EXP_DELTA_PC_THRESHOLD', 1.0) 100 | 101 | self.exploops = kwargs.pop('exp_loop_iter', 100) 102 | self.exp_correction = kwargs.pop('exp_correction', 0.5) 103 | 104 | self.history = [] 105 | 106 | def create_and_link_new_exp(self, nx_pc, ny_pc, nth_pc, vt): 107 | "Create a new experience and link current experience to it" 108 | 109 | new_x_m = self.accum_delta_x 110 | new_y_m = self.accum_delta_y 111 | new_facing_rad = 
clip_rad_180(self.accum_delta_facing) 112 | 113 | # add contributions from the current exp, if one is available 114 | if self.current_exp is not None: 115 | new_x_m += self.current_exp.x_m 116 | new_y_m += self.current_exp.y_m 117 | 118 | # TODO: no contribution from current_exp? 119 | new_facing_rad = clip_rad_180(self.accum_delta_facing) 120 | 121 | # create a new expereince 122 | new_exp = Experience(self, x_pc = nx_pc, y_pc = ny_pc, th_pc = nth_pc, 123 | vt = vt, x_m = new_x_m, y_m = new_y_m, 124 | facing_rad = new_facing_rad) 125 | 126 | 127 | if self.current_exp is not None: 128 | # link the current_exp for this one 129 | self.current_exp.link_to(new_exp) 130 | 131 | # add the new experience to the map 132 | self.exps.append(new_exp) 133 | 134 | # add this experience to the view template for efficient lookup 135 | vt.exps.append(new_exp) 136 | 137 | return new_exp 138 | 139 | @profile 140 | def update(self, vtrans, vrot, x_pc, y_pc, th_pc, vt): 141 | """ Update the experience map 142 | 143 | (Matlab version: equivalent to rs_experience_map_iteration) 144 | """ 145 | 146 | # update accumulators 147 | self.accum_delta_facing = clip_rad_180(self.accum_delta_facing+vrot) 148 | self.accum_delta_x += vtrans*cos(self.accum_delta_facing) 149 | self.accum_delta_y += vtrans*sin(self.accum_delta_facing) 150 | 151 | # check if this the first update 152 | if self.current_exp is None: 153 | # first experience 154 | delta_pc = 0 155 | else: 156 | # subsequent experience 157 | delta_pc = sqrt(min_delta(self.current_exp.x_pc, 158 | x_pc, self.PC_DIM_XY)**2 + \ 159 | min_delta(self.current_exp.y_pc, 160 | y_pc, self.PC_DIM_XY)**2 + \ 161 | min_delta(self.current_exp.th_pc, 162 | th_pc, self.PC_DIM_TH)**2) 163 | 164 | adjust_map = False 165 | 166 | 167 | # if this view template is not associated with any experiences yet, 168 | # or if the pc x,y,th has changed enough, create and link a new exp 169 | if len(vt.exps) == 0 or delta_pc > self.EXP_DELTA_PC_THRESHOLD: 170 | 171 | 
new_exp = self.create_and_link_new_exp(x_pc, y_pc, th_pc, vt) 172 | self.current_exp = new_exp 173 | 174 | # reset accumulators 175 | self.accum_delta_x = 0 176 | self.accum_delta_y = 0 177 | self.accum_delta_facing = self.current_exp.facing_rad 178 | 179 | # if the view template has changed (but isn't new) search for the 180 | # mathcing experience 181 | elif vt != self.current_exp.vt: 182 | 183 | # find the exp associated with the current vt and that is under the 184 | # threshold distance to the centre of pose cell activity 185 | # if multiple exps are under the threshold then don't match (to 186 | # reduce hash collisions) 187 | 188 | adjust_map = True 189 | matched_exp = None 190 | 191 | delta_pcs = [] 192 | n_candidate_matches = 0 193 | for (i, e) in enumerate(vt.exps): 194 | delta_pc = sqrt(min_delta(e.x_pc, x_pc, self.PC_DIM_XY)**2 + \ 195 | min_delta(e.y_pc, y_pc, self.PC_DIM_XY)**2 + \ 196 | min_delta(e.th_pc, th_pc, self.PC_DIM_TH)**2) 197 | delta_pcs.append(delta_pc) 198 | 199 | if delta_pc < self.EXP_DELTA_PC_THRESHOLD: 200 | n_candidate_matches += 1 201 | 202 | 203 | if n_candidate_matches > 1: 204 | # this means we aren't sure about which experience is a match 205 | # due to hash table collision instead of a false posivitive 206 | # which may create blunder links in the experience map keep 207 | # the previous experience matched_exp_count 208 | 209 | # TODO: raise? 
210 | # TODO: what is being accomplished here 211 | # check rs_experience_map_iteration.m, line 75-ish 212 | logging.warning("Too many candidate matches when searching" +\ 213 | " experience map") 214 | pass 215 | else: 216 | min_delta_val = min(delta_pcs) 217 | min_delta_id = argmin(delta_pcs) 218 | 219 | if min_delta_val < self.EXP_DELTA_PC_THRESHOLD: 220 | matched_exp = vt.exps[min_delta_id] 221 | 222 | # check if a link already exists 223 | link_exists = False 224 | 225 | for linked_exp in [l.target for l in self.current_exp.links]: 226 | if linked_exp == matched_exp: 227 | link_exists = True 228 | break 229 | 230 | if not link_exists: 231 | self.current_exp.link_to(matched_exp) 232 | 233 | 234 | #self.exp_id = len(self.exps)-1 235 | 236 | if matched_exp is None: 237 | matched_exp = self.create_and_link_new_exp(x_pc, y_pc, 238 | th_pc, vt) 239 | 240 | 241 | self.accum_delta_x = 0 242 | self.accum_delta_y = 0 243 | self.accum_delta_facing = self.current_exp.facing_rad 244 | self.current_exp = matched_exp 245 | 246 | self.history.append(self.current_exp) 247 | 248 | if not adjust_map: 249 | return 250 | 251 | # Iteratively update the experience map with the new information 252 | for i in range(0, self.exploops): 253 | for e0 in self.exps: 254 | for l in e0.links: 255 | # e0 is the experience under consideration 256 | # e1 is an experience linked from e0 257 | # l is the link object which contains additoinal heading 258 | # info 259 | 260 | e1 = l.target 261 | 262 | # correction factor 263 | cf = self.exp_correction 264 | 265 | # work out where exp0 thinks exp1 (x,y) should be based on 266 | # the stored link information 267 | lx = e0.x_m + l.d * cos(e0.facing_rad + l.heading_rad) 268 | ly = e0.y_m + l.d * sin(e0.facing_rad + l.heading_rad); 269 | 270 | # correct e0 and e1 (x,y) by equal but opposite amounts 271 | # a 0.5 correction parameter means that e0 and e1 will be 272 | # fully corrected based on e0's link information 273 | e0.x_m = e0.x_m + (e1.x_m - lx) * 
cf 274 | e0.y_m = e0.y_m + (e1.y_m - ly) * cf 275 | e1.x_m = e1.x_m - (e1.x_m - lx) * cf 276 | e1.y_m = e1.y_m - (e1.y_m - ly) * cf 277 | 278 | # determine the angle between where e0 thinks e1's facing 279 | # should be based on the link information 280 | df = signed_delta_rad(e0.facing_rad + l.facing_rad, 281 | e1.facing_rad) 282 | 283 | # correct e0 and e1 facing by equal but opposite amounts 284 | # a 0.5 correction parameter means that e0 and e1 will be 285 | # fully corrected based on e0's link information 286 | e0.facing_rad = clip_rad_180(e0.facing_rad + df * cf) 287 | e1.facing_rad = clip_rad_180(e1.facing_rad - df * cf) 288 | 289 | return 290 | 291 | -------------------------------------------------------------------------------- /ratslam/pose_cell_network.py: -------------------------------------------------------------------------------- 1 | import local_view_cell as lv 2 | from pylab import * 3 | import line_profiler 4 | 5 | class PoseCellNetwork: 6 | 7 | def __init__(self, shape, **kwargs): 8 | 9 | self.shape = kwargs.pop('pc_shape', shape) # [61, 61, 36] 10 | PC_DIM_XY = self.shape[0] 11 | PC_DIM_TH = self.shape[2] 12 | 13 | self.posecells = zeros(shape) 14 | 15 | self.vt_inject = kwargs.pop('vt_inject', 0.1) 16 | self.pc_vtrans_scaling = kwargs.pop('vtrans_scaling', float64(0.1)) 17 | 18 | # 19 | # Excitation constants 20 | # 21 | 22 | self.e_wdim = kwargs.pop('e_wdim', 7) 23 | e_dim_half = self.e_wdim/2 24 | 25 | # these act as lookups to wrap the pose cell excitation/inhibition weight steps 26 | self.e_xywrap = range(PC_DIM_XY-e_dim_half, PC_DIM_XY) + \ 27 | range(0, PC_DIM_XY) + range(0, PC_DIM_XY-e_dim_half) 28 | self.e_thwrap = range(PC_DIM_TH-e_dim_half, PC_DIM_TH) + \ 29 | range(0, PC_DIM_TH) + range(0, PC_DIM_TH-e_dim_half) 30 | self.e_pcw = self.create_pc_weights(self.e_wdim, 1) 31 | 32 | # 33 | # Inhibition constants 34 | # 35 | 36 | self.i_wdim = kwargs.pop('i_wdim', 5) 37 | i_dim_half = self.i_wdim/2 38 | 39 | # these act as lookups to 
wrap the pose cell excitation/inhibition weight steps 40 | self.i_xywrap = range(PC_DIM_XY-i_dim_half, PC_DIM_XY) + \ 41 | range(0, PC_DIM_XY) + range(0, PC_DIM_XY-i_dim_half) 42 | self.i_thwrap = range(PC_DIM_TH-i_dim_half, PC_DIM_TH) + \ 43 | range(0, PC_DIM_TH) + range(0, PC_DIM_TH-i_dim_half) 44 | self.i_pcw = self.create_pc_weights(self.i_wdim, 2) 45 | 46 | self.global_inhibition = kwargs.pop('global_pc_inhibition', 0.00002) 47 | self.c_size_th = kwargs.pop('c_size_th', 2*float64(pi)/36) 48 | 49 | #finding pose cell centre constants 50 | self.xy_sum_sin = sin(arange(PC_DIM_XY)* 2*float64(pi)/PC_DIM_XY) 51 | self.xy_sum_cos = cos(arange(PC_DIM_XY)* 2*float64(pi)/PC_DIM_XY) 52 | self.th_sum_sin = sin(arange(PC_DIM_TH)* 2*float64(pi)/PC_DIM_TH) 53 | self.th_sum_cos = cos(arange(PC_DIM_TH)* 2*float64(pi)/PC_DIM_TH) 54 | 55 | self.cells_avg = kwargs.pop('cell_avg', 3) 56 | self.avg_wdim = kwargs.pop('avg_wdim', 7) 57 | avg_dim_half = self.avg_wdim/2 58 | self.avg_xywrap = range(PC_DIM_XY-avg_dim_half, PC_DIM_XY) + \ 59 | range(0, PC_DIM_XY) + \ 60 | range(0, PC_DIM_XY-avg_dim_half) 61 | self.avg_thwrap = range(PC_DIM_TH-avg_dim_half, PC_DIM_TH) + \ 62 | range(0, PC_DIM_TH) + \ 63 | range(0, PC_DIM_TH-avg_dim_half) 64 | 65 | self.max_pc = [0, 0, 0] 66 | 67 | def create_pc_weights(self, dim, var): 68 | """ Create a 3D normalized distribution of size dim**3, 69 | with a variance of var. 
70 | 71 | (Matlab version: equivalent to rs_create_posecell_weights) 72 | """ 73 | 74 | weight = zeros([dim, dim, dim]) 75 | dim_center = math.floor(dim/2) 76 | 77 | for x in xrange(dim): 78 | for y in xrange(dim): 79 | for z in xrange(dim): 80 | weight[x,y,z] = 1.0/(var*math.sqrt(2*pi)) * \ 81 | math.exp(-((x-dim_center)**2 +\ 82 | (y-dim_center)**2 +\ 83 | (z-dim_center)**2) / (2*var**2)) 84 | 85 | weight /= abs(sum(weight.ravel())) 86 | 87 | return weight 88 | 89 | @profile 90 | def compute_activity_matrix(self, xywrap, thwrap, wdim, pcw): 91 | "Returns gaussian distribution of excitation/inhibition pc activity" 92 | 93 | # The goal is to return an update matrix that can be added/subtracted 94 | # from the posecell matrix 95 | pca_new = zeros(self.shape) 96 | 97 | # for nonzero posecell values 98 | indices = nonzero(self.posecells) 99 | 100 | for i,j,k in zip(*indices): 101 | pca_new[ix_(xywrap[i:i+wdim], 102 | xywrap[j:j+wdim], 103 | thwrap[k:k+wdim])] += self.posecells[i,j,k]*pcw 104 | 105 | # for index in xrange(len(indices[0])): 106 | # (i, j, k) = (indices[0][index], 107 | # indices[1][index], 108 | # indices[2][index]) 109 | # pca_new[ix_(xywrap[i:i+wdim], 110 | # xywrap[j:j+wdim], 111 | # thwrap[k:k+wdim])] += self.posecells[i,j,k]*pcw 112 | 113 | return pca_new 114 | 115 | def get_pc_max(self, xywrap, thwrap): 116 | """ Find the x,y,th center of the activity in the network 117 | 118 | (Matlab version: equivalent to rs_get_posecell_xyth) 119 | """ 120 | 121 | (x,y,z) = unravel_index(self.posecells.argmax(), self.posecells.shape) 122 | 123 | z_posecells=zeros(self.shape) 124 | 125 | zval = self.posecells[ix_(xywrap[x:x+7], xywrap[y:y+7], thwrap[z:z+7])] 126 | z_posecells[ix_(self.avg_xywrap[x:x+self.cells_avg*2+1], 127 | self.avg_xywrap[y:y+self.cells_avg*2+1], 128 | self.avg_thwrap[z:z+self.cells_avg*2+1])] = zval 129 | 130 | # get the sums for each axis 131 | x_sums = sum(sum(z_posecells, 2), 1) 132 | y_sums = sum(sum(z_posecells, 2), 0) 133 | th_sums = 
sum(sum(z_posecells, 1), 0) 134 | th_sums = th_sums[:] 135 | 136 | # now find the (x, y, th) using population vector decoding to handle 137 | # the wrap around 138 | x = (arctan2(sum(self.xy_sum_sin*x_sums), 139 | sum(self.xy_sum_cos*x_sums)) * \ 140 | self.shape[0]/(2*pi)) % (self.shape[0]) 141 | 142 | y = (arctan2(sum(self.xy_sum_sin*y_sums), 143 | sum(self.xy_sum_cos*y_sums)) * \ 144 | self.shape[0]/(2*pi)) % (self.shape[0]) 145 | 146 | th = (arctan2(sum(self.th_sum_sin*th_sums), 147 | sum(self.th_sum_cos*th_sums)) * \ 148 | self.shape[2]/(2*pi)) % (self.shape[2]) 149 | 150 | return (x, y, th) 151 | 152 | @profile 153 | def update(self, ododelta, v_temp): 154 | """ Evolve the network, given a visual template input and estimates of 155 | translation and rotation (e.g. from visual odometry) 156 | 157 | (Matlab version: equivalent to rs_posecell_iteration) 158 | """ 159 | 160 | vtrans = ododelta[0]*self.pc_vtrans_scaling 161 | vrot = ododelta[1] 162 | 163 | #if this visual template has been activated before, inject more 164 | # energy into the pc associated with it. 
165 | if (v_temp.first != True): 166 | act_x = min([max([round(v_temp.x_pc), 1]), self.shape[0]]) 167 | act_y = min([max([round(v_temp.y_pc), 1]), self.shape[0]]) 168 | act_th = min([max([round(v_temp.th_pc), 1]), self.shape[2]]) 169 | 170 | energy = self.vt_inject * 1.0/30.0 * \ 171 | (30.0 - math.exp(1.2 * v_temp.activity)) 172 | if energy > 0: 173 | self.posecells[act_x, act_y, act_th] += energy; 174 | 175 | # Excitation weighted matrix 176 | self.posecells = self.compute_activity_matrix(self.e_xywrap, 177 | self.e_thwrap, 178 | self.e_wdim, 179 | self.e_pcw) 180 | 181 | # Inhibition weighted matrix 182 | self.posecells -= self.compute_activity_matrix(self.i_xywrap, 183 | self.i_thwrap, 184 | self.i_wdim, 185 | self.i_pcw) 186 | 187 | # Global inhibition 188 | self.posecells[self.posecells < self.global_inhibition] = 0 189 | self.posecells[self.posecells >= self.global_inhibition] -= \ 190 | self.global_inhibition 191 | 192 | #normalization 193 | total = sum(self.posecells.ravel()) 194 | self.posecells /= total 195 | 196 | #path integration for trans 197 | for dir_pc in xrange(0, self.shape[2]): 198 | 199 | direction = float64(dir_pc-1) * self.c_size_th 200 | # N,E,S,W are straightforward 201 | if (direction == 0): 202 | self.posecells[:,:,dir_pc] = \ 203 | self.posecells[:,:,dir_pc] * (1.0 - vtrans) + \ 204 | roll(self.posecells[:,:,dir_pc], 1,1)*vtrans 205 | elif direction == pi/2: 206 | self.posecells[:,:,dir_pc] = \ 207 | self.posecells[:,:,dir_pc]*(1.0 - vtrans) + \ 208 | roll(self.posecells[:,:,dir_pc], 1,0)*vtrans 209 | elif direction == pi: 210 | self.posecells[:,:,dir_pc] = \ 211 | self.posecells[:,:,dir_pc]*(1.0 - vtrans) + \ 212 | roll(self.posecells[:,:,dir_pc], -1,1)*vtrans 213 | elif direction == 3*pi/2: 214 | self.posecells[:,:,dir_pc] = \ 215 | self.posecells[:,:,dir_pc]*(1.0 - vtrans) + \ 216 | roll(self.posecells[:,:,dir_pc], -1,0)*vtrans 217 | else: 218 | pca90 = rot90(self.posecells[:,:,dir_pc], 219 | floor(direction *2/pi)) 220 | dir90 = 
direction - floor(direction *2/pi) * pi/2 221 | pca_new=zeros([self.shape[0]+2, self.shape[0]+2]) 222 | pca_new[1:-1, 1:-1] = pca90 223 | 224 | weight_sw = (vtrans**2) *cos(dir90) * sin(dir90) 225 | weight_se = vtrans*sin(dir90) - \ 226 | (vtrans**2) * cos(dir90) * sin(dir90) 227 | weight_nw = vtrans*cos(dir90) - \ 228 | (vtrans**2) *cos(dir90) * sin(dir90) 229 | weight_ne = 1.0 - weight_sw - weight_se - weight_nw 230 | 231 | pca_new = pca_new*weight_ne + \ 232 | roll(pca_new, 1, 1) * weight_nw + \ 233 | roll(pca_new, 1, 0) * weight_se + \ 234 | roll(roll(pca_new, 1, 1), 1, 0) * weight_sw 235 | 236 | pca90 = pca_new[1:-1, 1:-1] 237 | pca90[1:, 0] = pca90[1:, 0] + pca_new[2:-1, -1] 238 | pca90[1, 1:] = pca90[1, 1:] + pca_new[-1, 2:-1] 239 | pca90[0,0] = pca90[0,0] + pca_new[-1,-1] 240 | 241 | #unrotate the pose cell xy layer 242 | self.posecells[:,:,dir_pc] = rot90(pca90, 243 | 4 - floor(direction * 2/pi)) 244 | 245 | #path integration for rot 246 | if vrot != 0: 247 | weight = (abs(vrot)/self.c_size_th)%1 248 | if weight == 0: 249 | weight = 1.0 250 | shift1 = int(sign(vrot) * floor(abs(vrot)/self.c_size_th)) 251 | shift2 = int(sign(vrot) * ceil(abs(vrot)/self.c_size_th)) 252 | self.posecells = roll(self.posecells, shift1, 2) * (1.0 - weight) + \ 253 | roll(self.posecells, shift2, 2) * (weight) 254 | 255 | self.max_pc = self.get_pc_max(self.avg_xywrap, self.avg_thwrap) 256 | 257 | return self.max_pc 258 | 259 | 260 | 261 | 262 | 263 | -------------------------------------------------------------------------------- /ratslam/simulation.py: -------------------------------------------------------------------------------- 1 | from ratslam import (ViewTemplateCollection, 2 | IntensityProfileTemplate, 3 | PoseCellNetwork, 4 | ExperienceMap, 5 | SimpleVisualOdometer) 6 | 7 | import os, os.path 8 | import csv 9 | import time 10 | from functools import partial 11 | 12 | from itertools import imap 13 | from pylab import * 14 | import numpy 15 | 16 | import line_profiler 17 | 
# `@profile` is injected into __builtins__ by kernprof/line_profiler when the
# script is run under the profiler.  Provide a no-op fallback so that evolve()
# does not raise NameError when the module is used without the profiler.
try:
    profile
except NameError:
    def profile(func):
        return func


class RatSLAM (object):
    """Top-level driver wiring together the four RatSLAM components:

    - a ViewTemplateCollection for visual place recognition,
    - a SimpleVisualOdometer for translation/rotation estimates,
    - a PoseCellNetwork (the CAN attractor network), and
    - an ExperienceMap (the topological map).

    One call to evolve() consumes one frame from image_source.
    """

    def __init__(self, image_source):
        """
        Arguments:
        image_source -- an indexable source of frames (e.g. VideoSource)
        """
        self.image_source = image_source

        # pose cell network dimensions: (x, y, theta)
        pc_shape = [61, 61, 36]

        self.view_templates = self.initialize_view_templates(pc_shape)
        self.visual_odometer = self.initialize_odo()
        self.pose_cell_network = self.initialize_pose_cell_network(pc_shape)
        self.experience_map = self.initialize_experience_map()

        self.time_step = 0

    def initialize_view_templates(self, pc_shape):
        "Create the view template collection, seeded with the first image."

        simple_template = partial(IntensityProfileTemplate,
                                  x_range=slice(54, 615),
                                  y_range=slice(119, 280),
                                  shift_match=20)

        templates = ViewTemplateCollection(simple_template,
                                           global_decay=0.1,
                                           match_threshold=0.09)

        im = self.image_source[0]

        # seed the collection: the very first match necessarily creates a
        # template anchored at the center of the pose cell network
        templates.curr_vc = templates.match(im, pc_shape[0]/2.0,
                                            pc_shape[1]/2.0,
                                            pc_shape[2]/2.0)

        return templates

    def initialize_odo(self):
        "Create the visual odometer and prime it with the first image."
        im0 = self.image_source[0]
        vod = SimpleVisualOdometer()
        vod.update(im0)
        return vod

    def initialize_pose_cell_network(self, pc_shape):
        "Create the pose cell network with an activity bubble at its center."

        pcnet = PoseCellNetwork(pc_shape)

        # integer center indices; `//` makes the Python-2 truncating division
        # explicit (and keeps the same value if ever run on Python 3)
        xy_pc = pc_shape[0] // 2 + 1
        th_pc = pc_shape[2] // 2 + 1

        pcnet.posecells[xy_pc, xy_pc, th_pc] = 1

        v_temp = self.view_templates[0]
        pcnet.update(self.visual_odometer.delta, v_temp)

        return pcnet

    def initialize_experience_map(self):
        "Create the experience map seeded from the initial pose and template."

        emap = ExperienceMap(exp_loop_iter=100)

        current_vt = self.view_templates.curr_vc

        emap.update(self.visual_odometer.delta[0],
                    self.visual_odometer.delta[1],
                    self.pose_cell_network.max_pc[0],
                    self.pose_cell_network.max_pc[1],
                    self.pose_cell_network.max_pc[2],
                    current_vt)

        return emap

    @property
    def current_exp(self):
        "The experience map's current experience node."
        return self.experience_map.current_exp

    @profile
    def evolve(self):
        """Advance the simulation by one frame.

        Reads the next image, matches it against the view templates,
        updates odometry, the pose cell network and the experience map,
        and caches the per-step results on self for plotting.
        """
        c = self.time_step
        self.time_step += 1

        self.current_image = self.image_source[c]

        # current activity peak, used to tag any newly created template
        avg_xywrap = self.pose_cell_network.avg_xywrap
        avg_thwrap = self.pose_cell_network.avg_thwrap
        pcmax = self.pose_cell_network.get_pc_max(avg_xywrap, avg_thwrap)

        # visual template lookup (may create a new template)
        v_temp = self.view_templates.match(self.current_image, pcmax[0],
                                           pcmax[1],
                                           pcmax[2])

        # visual odometry for this frame
        self.current_odo = self.visual_odometer.update(self.current_image)

        # inject odometry + template into the pose cells, then re-read
        # the activity peak
        self.pose_cell_network.update(self.visual_odometer.delta, v_temp)
        pcmax = self.pose_cell_network.get_pc_max(
            self.pose_cell_network.avg_xywrap,
            self.pose_cell_network.avg_thwrap)

        self.current_pose_cell = pcmax

        self.experience_map.update(self.visual_odometer.delta[0],
                                   self.visual_odometer.delta[1],
                                   pcmax[0], pcmax[1], pcmax[2],
                                   v_temp)


# --- ratslam/video.py -------------------------------------------------------

import pipeffmpeg as pff
from PIL import Image
import cStringIO as StringIO
from numpy import *


class VideoSource (object):
    """Random access to the frames of a video file via pipeffmpeg.

    Frames are returned as numpy arrays; with grayscale=True the color
    channels are averaged into a single 2-D intensity image.
    """

    def __init__(self, video_file, grayscale=False):
        self.video_stream = pff.InputVideoStream(video_file, cache_size=300)
        self.grayscale = grayscale

        # force-decode frame 0 so decoding errors surface at construction
        self.first_frame = self[0]

    def __getitem__(self, index):
        "Return frame `index` as a numpy array."
        fr = self.video_stream[index]

        im = asarray(Image.open(StringIO.StringIO(fr)))

        if self.grayscale:
            # average over the color axis -> 2-D intensity image
            return mean(im, 2)
        else:
            return im
/ratslam/view_templates.py: -------------------------------------------------------------------------------- 1 | from numpy import * 2 | from visual_odometer import compare_segments 3 | 4 | import numpy as np 5 | #import simple_vision 6 | 7 | from visual_odometer import compare_segments 8 | 9 | 10 | class ViewTemplate (object): 11 | 12 | def __init__(self, x_pc, y_pc, z_pc): 13 | self.x_pc = x_pc 14 | self.y_pc = y_pc 15 | self.z_pc = z_pc 16 | self.exps = [] 17 | 18 | def match(self, input_frame): 19 | raise NotImplemented 20 | 21 | 22 | class IntensityProfileTemplate(ViewTemplate): 23 | """A very simple view template as described in Milford and Wyeth's 24 | original algorithm. Basically, just compute an intensity profile 25 | over some region of the image. Simple, but suprisingly effective 26 | under certain conditions 27 | 28 | """ 29 | 30 | def __init__(self, input_frame, x_pc, y_pc, z_pc, 31 | y_range, x_range, shift_match): 32 | 33 | ViewTemplate.__init__(self, x_pc, y_pc, z_pc) 34 | 35 | self.x_range = x_range 36 | self.y_range = y_range 37 | self.shift_match = shift_match 38 | 39 | self.first = True 40 | 41 | # compute template from input_frame 42 | self.template = self.convert_frame(input_frame) 43 | 44 | def convert_frame(self, input_frame): 45 | "Convert an input frame into an intensity line-profile" 46 | 47 | sub_im = input_frame[self.y_range, self.x_range] 48 | 49 | # normalised intensity sums 50 | image_x_sums = sum(sub_im, 0) 51 | image_x_sums = image_x_sums / sum(image_x_sums) 52 | 53 | return image_x_sums 54 | 55 | def match(self, input_frame): 56 | "Return a score for how well the template matches an input frame" 57 | image_x_sums = self.convert_frame(input_frame) 58 | 59 | toffset, tdiff = compare_segments(image_x_sums, 60 | self.template, 61 | self.shift_match, 62 | image_x_sums.shape[0]) 63 | 64 | return tdiff * len(image_x_sums) 65 | 66 | 67 | class ViewTemplateCollection (object): 68 | """ A collection of simple visual templates against which 
an incoming 69 | frame can be compared. The entire collection of templates is matched 70 | against an incoming frame, and either the best match is returned, or 71 | a new template is created from the input image in the event that none 72 | of the templates match well enough 73 | 74 | (Matlab equivalent: rs_visual_template) 75 | 76 | """ 77 | 78 | 79 | def __init__(self, template_generator, match_threshold, global_decay): 80 | """ 81 | Arguments: 82 | template_generator -- a callable object that generates a ViewTemplate 83 | subclass 84 | match_threshold -- the threshold below which a subtemplate's match 85 | is considered "good enough". Failure to match at 86 | this threshold will result in the generation of a 87 | new template object 88 | global_decay -- not currently used 89 | 90 | """ 91 | self.template_generator = template_generator 92 | self.global_decay = global_decay 93 | self.match_threshold = match_threshold 94 | 95 | self.templates = [] 96 | self.current_template = None 97 | 98 | 99 | def __getitem__(self, index): 100 | return self.templates[index] 101 | 102 | def match(self, input_frame, x, y, th): 103 | 104 | match_scores = [ t.match(input_frame) for t in self.templates] 105 | 106 | if len(match_scores) == 0 or min(match_scores) > self.match_threshold: 107 | # no matches, so build a new one 108 | new_template = self.template_generator(input_frame, x, y, th) 109 | self.templates.append(new_template) 110 | return new_template 111 | 112 | best_template = self.templates[argmin(match_scores)] 113 | 114 | return best_template 115 | 116 | 117 | -------------------------------------------------------------------------------- /ratslam/visual_odometer.py: -------------------------------------------------------------------------------- 1 | from pylab import * 2 | import time 3 | from PIL.Image import Image 4 | 5 | def get_im_xSums(im, y_range_type, x_range): 6 | 7 | #gets the scanline intensity which is returned as im_xsums of the sub_image specified by the 
parameters initialized in the constructor 8 | 9 | sub_im = im[y_range_type, x_range] 10 | im_xsums = asarray(sum(sub_im, 0), dtype = 'float64') 11 | im_xsums = im_xsums/(sum(im_xsums)/ len(im_xsums)) 12 | return im_xsums 13 | 14 | def compare_segments(seg1, seg2, slen, cwl): 15 | 16 | # assume a large difference 17 | mindiff = inf 18 | 19 | # for each offset, sum the abs difference between the two segments 20 | for offset in xrange(slen+1): 21 | cdiff = abs(seg1[offset:cwl] - seg2[:cwl - offset]) 22 | cdiff = float(sum(cdiff, 0)) / float(cwl - offset) 23 | if (cdiff < mindiff): 24 | mindiff = cdiff 25 | minoffset = offset 26 | 27 | for offset in xrange(1, slen+1): 28 | cdiff = abs(seg1[:cwl - offset] - seg2[offset:cwl]) 29 | cdiff = sum(cdiff, 0) / float64(cwl - offset) 30 | if (cdiff < mindiff): 31 | mindiff = cdiff 32 | minoffset = -(offset) 33 | offset = minoffset 34 | sdif = mindiff 35 | 36 | return (offset, sdif) 37 | 38 | class SimpleVisualOdometer: 39 | "Very simple visual odometry machinery" 40 | 41 | def __init__(self, **kwargs): 42 | 43 | self.prev_vrot_image_x_sums = kwargs.pop('prev_vrot_image_x_sums', zeros(281)) 44 | self.prev_vtrans_image_x_sums = kwargs.pop('prev_vtrans_image_x_sums', zeros(281)) 45 | self.IMAGE_ODO_X_RANGE = kwargs.pop('IMAGE_ODO_X_RANGE', slice(194, 475)) 46 | self.IMAGE_VTRANS_Y_RANGE = kwargs.pop('IMAGE_VTRANS_RANGE', slice(269, 430)) 47 | self.IMAGE_VROT_Y_RANGE = kwargs.pop('IMAGE_VROT_Y_RANGE', slice(74, 240) ) 48 | self.VTRANS_SCALE = 100 49 | self.VISUAL_ODO_SHIFT_MATCH = 140 50 | 51 | self.odo = [0.0,0.0, pi/2] 52 | self.delta = [0,0] #[vtrans, vrot] 53 | 54 | def update(self, im): 55 | """ Estimate the translation and rotation of the viewer, given a new 56 | image sample of the environment 57 | 58 | (Matlab version: equivalent to rs_visual_odometry) 59 | """ 60 | 61 | FOV_DEG = 50.0 62 | dpp = float64(FOV_DEG) / im.shape[0] 63 | 64 | # vtrans 65 | sub_image = im[self.IMAGE_VTRANS_Y_RANGE, self.IMAGE_ODO_X_RANGE] 66 | 67 | 
image_x_sums = sum(sub_image, 0) 68 | avint = float64(sum(image_x_sums)) / len(image_x_sums) 69 | image_x_sums = image_x_sums/avint 70 | 71 | [minoffset, mindiff] = compare_segments(image_x_sums, 72 | self.prev_vtrans_image_x_sums, 73 | self.VISUAL_ODO_SHIFT_MATCH, 74 | len(image_x_sums)) 75 | vtrans = mindiff * self.VTRANS_SCALE 76 | 77 | # a hack to detect excessively large vtrans 78 | if vtrans > 10: 79 | vtrans = 0 80 | 81 | self.prev_vtrans_image_x_sums = image_x_sums 82 | 83 | # vrot 84 | sub_image = im[self.IMAGE_VROT_Y_RANGE, self.IMAGE_ODO_X_RANGE] 85 | image_x_sums = sum(sub_image, 0) 86 | avint = float64(sum(image_x_sums)) / len(image_x_sums) 87 | image_x_sums = image_x_sums/avint 88 | 89 | [minoffset, mindiff] = compare_segments(image_x_sums, 90 | self.prev_vrot_image_x_sums, 91 | self.VISUAL_ODO_SHIFT_MATCH, 92 | len(image_x_sums)) 93 | vrot = minoffset * dpp * pi / 180.0 94 | 95 | self.prev_vrot_image_x_sums = image_x_sums 96 | 97 | self.odo[2] += vrot 98 | self.odo[0] += vtrans* cos(self.odo[2]) #xcoord 99 | self.odo[1] += vtrans* sin(self.odo[2]) #ycoord 100 | 101 | self.delta = [vtrans, vrot] 102 | 103 | return self.odo 104 | 105 | -------------------------------------------------------------------------------- /scripts/ratslam: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import ratslam 3 | import sys 4 | import time 5 | import os, os.path 6 | from numpy import * 7 | from matplotlib.pylab import * 8 | #import matplotlib.pyplot as plt 9 | from mpl_toolkits.mplot3d import Axes3D 10 | import cPickle as pkl 11 | 12 | 13 | def draw_position(x, y): 14 | scatter(x,y) 15 | 16 | def draw_x_y_z(pcmax, xcoord, ycoord, thcoord, subplot): 17 | ax3 = Axes3D(gcf(), rect=subplot.get_position()) 18 | ax3.scatter(xcoord, ycoord, thcoord, 'z') 19 | ax3.set_xlim3d([0, 61]) 20 | ax3.set_ylim3d([0, 61]) 21 | ax3.set_zlim3d([0, 36]) 22 | 23 | 24 | def main(): 25 | # TODO use argparse 26 | video_path = 
sys.argv[1] 27 | output_path = sys.argv[2] 28 | 29 | pkl_state = False 30 | wait_for_key = False 31 | 32 | image_source = ratslam.VideoSource(video_path, grayscale=True) 33 | 34 | driver = ratslam.RatSLAM(image_source) 35 | 36 | xcoord = [] 37 | ycoord = [] 38 | thcoord = [] 39 | 40 | last_time = time.time() 41 | 42 | n_steps = 21000 43 | #n_steps = 500 44 | 45 | ######################################################################## 46 | for i in xrange(n_steps): 47 | 48 | # option to quit/break cleanly 49 | if wait_for_key: 50 | print "Press key to continue to next image or 'q' to quit" 51 | input = raw_input() 52 | if input == 'q': 53 | break 54 | 55 | # do a time step of the simulation 56 | driver.evolve() 57 | 58 | # query some values for plotting 59 | im = driver.current_image 60 | emap = driver.experience_map 61 | pcmax = driver.current_pose_cell 62 | odo = driver.current_odo 63 | current_exp = driver.current_exp 64 | 65 | xcoord.append(pcmax[0]) 66 | ycoord.append(pcmax[1]) 67 | thcoord.append(pcmax[2]) 68 | 69 | if pkl_state and i % 100 == 0: 70 | print "Pickling..." 71 | with open(os.path.join(output_path, "output%06i.pkl" % i), 'wb') as f: 72 | pkl.dump(driver, f) 73 | 74 | if i % 5 == 0: 75 | 76 | print "Plotting..." 
77 | subplot(2,2,1) 78 | imshow(im, cmap=cm.gray) 79 | a=gca() 80 | a.axis('off') 81 | title('Raw Image') 82 | 83 | subplot(2,2,2) 84 | draw_position(odo[0], odo[1]) 85 | b = gca() 86 | title('Raw Odometry') 87 | #b.set_xlim([-50, 100]) 88 | #b.set_ylim([0, 125]) 89 | 90 | pcdata = subplot(2,2,3) 91 | draw_x_y_z(pcmax, xcoord, ycoord, thcoord, pcdata) 92 | title('Pose Cell Activity') 93 | pcdata.axis('off') 94 | 95 | subplot(2,2,4) 96 | draw_position(current_exp.x_m, current_exp.y_m) 97 | title('Experience Map') 98 | d = gca() 99 | #d.set_xlim([-50, 100]) 100 | #d.set_ylim([0, 120]) 101 | savefig(os.path.join(output_path, 'output%06i.png' % i)) 102 | 103 | now = time.time() 104 | fps = 1.0 / (now - last_time) 105 | last_time = now 106 | print "Using frames %i and %i (%f fps ; %f spf)" % (i, i+1, fps, 1.0/fps) 107 | 108 | ############################################################################ 109 | 110 | if __name__ == "__main__": 111 | main() 112 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ distribute- and pip-enabled setup.py for ratslam """ 5 | 6 | from distribute_setup import use_setuptools 7 | use_setuptools() 8 | from setuptools import setup, Extension 9 | import os, sys 10 | 11 | import re 12 | 13 | 14 | 15 | setup( 16 | name='ratslam', 17 | 18 | version='dev', 19 | 20 | scripts=['scripts/ratslam'], 21 | 22 | include_package_data=True, 23 | 24 | #install_requires=parse_requirements('requirements.txt'), 25 | #dependency_links=parse_dependency_links('requirements.txt') 26 | ) 27 | --------------------------------------------------------------------------------