├── LICENSE.md ├── README.md ├── code.tgz ├── code ├── assignment2_sol.py ├── assignment3_params.py ├── assignment3_sol.py ├── assignment4_sol.py ├── assignment5_sol.py ├── assignment6.py ├── double_pendulum.py ├── ekeberg1.py ├── hopfield.tgz ├── jacobian_plots.py ├── lorenz1.py ├── lorenz2.py ├── lotkavolterra.py ├── mass_spring.py ├── minjerk.py ├── onejoint_lagrange.py ├── onejointarm_passive.py ├── onejointmuscle_1.py ├── onejointmuscle_2.py ├── onejointmuscle_3.py ├── onejointmuscle_4.py ├── optimizer_example.py ├── som1.m ├── traindata.pickle ├── twojointarm.py ├── twojointarm_game.py ├── twojointarm_lagrange.py ├── twojointarm_passive.py ├── xor.py ├── xor_aima.py ├── xor_cg.py └── xor_plot.py ├── figs ├── HH1.png ├── HH2.png ├── assignment5_figures.pdf ├── ekeberg1.png ├── ekeberg_fig1.png ├── elbow_dynamics.png ├── elbow_kinematics.png ├── elbow_movement_kinematics.png ├── forcelengthce.png ├── forcelengthse.png ├── forcevelocity.png ├── fullblownschematic.png ├── hillmuscle.png ├── jacobian_plots.png ├── lorenz1.png ├── lorenz2.png ├── lorenz3.png ├── lotkavolterra1.png ├── lotkavolterra2.png ├── mass-spring-sim.png ├── onejointanimation.png ├── onejointarm_muscle.png ├── onejointarm_muscle2.png ├── onejointarm_muscle3.png ├── onejointarm_muscle4.png ├── onejointarm_passive.png ├── sin.png ├── spring-mass.png ├── twojointarm_dynamics.png ├── twojointarm_kinematics.png ├── twojointarm_kinematics_workspace.png └── twojointarmgame.png ├── go.el ├── html ├── 0_Setup_Your_Computer.html ├── 1_Dynamical_Systems.html ├── 2_Modelling_Dynamical_Systems.html ├── 3_Modelling_Action_Potentials.html ├── 4_Computational_Motor_Control_Kinematics.html ├── 5_Computational_Motor_Control_Dynamics.html ├── 6_Computational_Motor_Control_Muscle_Models.html └── index.html └── org ├── 0_Setup_Your_Computer.org ├── 1_Dynamical_Systems.org ├── 2_Modelling_Dynamical_Systems.org ├── 3_Modelling_Action_Potentials.org ├── 4_Computational_Motor_Control_Kinematics.org ├── 
5_Computational_Motor_Control_Dynamics.org ├── 6_Computational_Motor_Control_Muscle_Models.org ├── index.org ├── mystyle.css ├── refs.bib ├── refs.html └── refs_bib.html /LICENSE.md: -------------------------------------------------------------------------------- 1 | This work is licensed under a [Creative Commons Attribution 4.0 2 | International License](http://creativecommons.org/licenses/by/4.0/) 3 | 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | CompNeuro 2 | ========= 3 | 4 | Neuroscience 9520 5 | Computational Modelling in Neuroscience 6 | 7 | Paul Gribble 8 | 9 | paul@gribblelab.org 10 | 11 | www.gribblelab.org 12 | 13 | -------------------------------------------------------------------------------- /code.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/code.tgz -------------------------------------------------------------------------------- /code/assignment2_sol.py: -------------------------------------------------------------------------------- 1 | # assignment 2 solution 2 | 3 | from scipy.integrate import odeint 4 | 5 | def baseball(state, t): 6 | k = 5.9e-4 7 | m = 0.15 8 | g = 9.81 9 | x = state[0] 10 | xd = state[1] 11 | y = state[2] 12 | yd = state[3] 13 | xdd = (-k/m)*xd*sqrt(xd*xd + yd*yd) 14 | ydd = (-k/m)*yd*sqrt(xd*xd + yd*yd) - g 15 | return [xd, xdd, yd, ydd] 16 | 17 | # set up a time range 18 | t = arange(0, 20, 0.01) 19 | 20 | # initial conditions 21 | state0 = [0, 30, 0, 50] 22 | 23 | # simulate! 
24 | state = odeint(baseball, state0, t) 25 | 26 | # find where y < 0 27 | i, = where(y<0) 28 | 29 | # find first time y < 0 30 | ifirst = i[0] 31 | time_ground = t[ifirst] 32 | x_ground = state[ifirst,0] 33 | 34 | # import optimizer 35 | from scipy import optimize 36 | 37 | def myErrFun(vels): 38 | xd = vels[0] 39 | yd = vels[1] 40 | state0 = [0, xd, 0, yd] 41 | t = arange(0, 20, 0.01) 42 | state = odeint(baseball, state0, t) 43 | i, = where(state[:,2]<0) 44 | ifirst = i[0] 45 | x_ground = state[ifirst,0] 46 | err = (x_ground-100.0)**2 47 | return err 48 | 49 | vels_best = optimize.fmin(myErrFun, [25.0, 25.0]) 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /code/assignment3_params.py: -------------------------------------------------------------------------------- 1 | # set up a dictionary of parameters 2 | 3 | E_params = { 4 | 'E_leak' : -7.0e-2, 5 | 'G_leak' : 3.0e-09, 6 | 'C_m' : 3.0e-11, 7 | 'I_ext' : 0*1.0e-10 8 | } 9 | 10 | Na_params = { 11 | 'Na_E' : 5.0e-2, 12 | 'Na_G' : 1.0e-6, 13 | 'k_Na_act' : 3.0e+0, 14 | 'A_alpha_m_act' : 2.0e+5, 15 | 'B_alpha_m_act' : -4.0e-2, 16 | 'C_alpha_m_act' : 1.0e-3, 17 | 'A_beta_m_act' : 6.0e+4, 18 | 'B_beta_m_act' : -4.9e-2, 19 | 'C_beta_m_act' : 2.0e-2, 20 | 'l_Na_inact' : 1.0e+0, 21 | 'A_alpha_m_inact' : 8.0e+4, 22 | 'B_alpha_m_inact' : -4.0e-2, 23 | 'C_alpha_m_inact' : 1.0e-3, 24 | 'A_beta_m_inact' : 4.0e+2, 25 | 'B_beta_m_inact' : -3.6e-2, 26 | 'C_beta_m_inact' : 2.0e-3 27 | } 28 | 29 | K_params = { 30 | 'k_E' : -9.0e-2, 31 | 'k_G' : 2.0e-7, 32 | 'k_K' : 4.0e+0, 33 | 'A_alpha_m_act' : 2.0e+4, 34 | 'B_alpha_m_act' : -3.1e-2, 35 | 'C_alpha_m_act' : 8.0e-4, 36 | 'A_beta_m_act' : 5.0e+3, 37 | 'B_beta_m_act' : -2.8e-2, 38 | 'C_beta_m_act' : 4.0e-4 39 | } 40 | 41 | Ca_params = { 42 | 'E_Ca' : 150e-03 , 43 | 'Ca_act_alpha_A' : 0.08e+06, 44 | 'Ca_act_alpha_B' : 
-10e-03, 45 | 'Ca_act_alpha_C' : 11e-03, 46 | 'Ca_act_beta_A' : 0.001e+06, 47 | 'Ca_act_beta_B' : -10e-03, 48 | 'Ca_act_beta_C' : 0.5e-03, 49 | 'G_Ca' : 1.0e-08, # G_Ca (uS) ***** this appears as zero in Ekeberg 1991 ***** 50 | 'Ca_rho' : 4.0e+03, 51 | 'Ca_delta' : 30.0, 52 | 'G_KCA' : 0.01e-06, 53 | } 54 | 55 | params = { 56 | 'E_params' : E_params, 57 | 'Na_params' : Na_params, 58 | 'K_params' : K_params, 59 | 'Ca_params' : Ca_params 60 | } 61 | -------------------------------------------------------------------------------- /code/assignment3_sol.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | # import some needed functions 4 | from scipy.integrate import odeint 5 | 6 | # set up a dictionary of parameters 7 | 8 | E_params = { 9 | 'E_leak' : -7.0e-2, 10 | 'G_leak' : 3.0e-09, 11 | 'C_m' : 3.0e-11, 12 | 'I_ext' : 0*1.0e-10 13 | } 14 | 15 | Na_params = { 16 | 'Na_E' : 5.0e-2, 17 | 'Na_G' : 1.0e-6, 18 | 'k_Na_act' : 3.0e+0, 19 | 'A_alpha_m_act' : 2.0e+5, 20 | 'B_alpha_m_act' : -4.0e-2, 21 | 'C_alpha_m_act' : 1.0e-3, 22 | 'A_beta_m_act' : 6.0e+4, 23 | 'B_beta_m_act' : -4.9e-2, 24 | 'C_beta_m_act' : 2.0e-2, 25 | 'l_Na_inact' : 1.0e+0, 26 | 'A_alpha_m_inact' : 8.0e+4, 27 | 'B_alpha_m_inact' : -4.0e-2, 28 | 'C_alpha_m_inact' : 1.0e-3, 29 | 'A_beta_m_inact' : 4.0e+2, 30 | 'B_beta_m_inact' : -3.6e-2, 31 | 'C_beta_m_inact' : 2.0e-3 32 | } 33 | 34 | K_params = { 35 | 'k_E' : -9.0e-2, 36 | 'k_G' : 2.0e-7, 37 | 'k_K' : 4.0e+0, 38 | 'A_alpha_m_act' : 2.0e+4, 39 | 'B_alpha_m_act' : -3.1e-2, 40 | 'C_alpha_m_act' : 8.0e-4, 41 | 'A_beta_m_act' : 5.0e+3, 42 | 'B_beta_m_act' : -2.8e-2, 43 | 'C_beta_m_act' : 4.0e-4 44 | } 45 | 46 | Ca_params = { 47 | 'E_Ca' : 150e-03 , 48 | 'Ca_act_alpha_A' : 0.08e+06, 49 | 'Ca_act_alpha_B' : -10e-03, 50 | 'Ca_act_alpha_C' : 11e-03, 51 | 'Ca_act_beta_A' : 0.001e+06, 52 | 'Ca_act_beta_B' : -10e-03, 53 | 'Ca_act_beta_C' : 0.5e-03, 54 | 'G_Ca' : 1.0e-08, # G_Ca (uS) ***** this appears as 
zero in Ekeberg 1991 ***** 55 | 'Ca_rho' : 4.0e+03, 56 | 'Ca_delta' : 30.0, 57 | 'G_KCA' : 0.01e-06 58 | } 59 | 60 | params = { 61 | 'E_params' : E_params, 62 | 'Na_params' : Na_params, 63 | 'K_params' : K_params, 64 | 'Ca_params' : Ca_params 65 | } 66 | 67 | # define our ODE function 68 | 69 | def neuron(state, t, params): 70 | """ 71 | Ekeberg 1991 72 | """ 73 | E = state[0] # soma potential 74 | m = state[1] # Na activation 75 | h = state[2] # Na inactivation 76 | n = state[3] # K activation 77 | q = state[4] # Ca activation 78 | CaAP = state[5] # Ca2+ dependent K channel 79 | 80 | Epar = params['E_params'] 81 | Na = params['Na_params'] 82 | K = params['K_params'] 83 | Ca = params['Ca_params'] 84 | 85 | # external current (from "voltage clamp", other compartments, other neurons, etc) 86 | I_ext = Epar['I_ext'] 87 | 88 | # calculate Na rate functions and I_Na 89 | # Na activation 90 | alpha_act = Na['A_alpha_m_act'] * (E-Na['B_alpha_m_act']) / (1.0 - exp((Na['B_alpha_m_act']-E) / Na['C_alpha_m_act'])) 91 | beta_act = Na['A_beta_m_act'] * (Na['B_beta_m_act']-E) / (1.0 - exp((E-Na['B_beta_m_act']) / Na['C_beta_m_act']) ) 92 | dmdt = ( alpha_act * (1.0 - m) ) - ( beta_act * m ) 93 | # Na inactivation 94 | alpha_inact = Na['A_alpha_m_inact'] * (Na['B_alpha_m_inact']-E) / (1.0 - exp((E-Na['B_alpha_m_inact']) / Na['C_alpha_m_inact'])) 95 | beta_inact = Na['A_beta_m_inact'] / (1.0 + (exp((Na['B_beta_m_inact']-E) / Na['C_beta_m_inact']))) 96 | dhdt = ( alpha_inact*(1.0 - h) ) - ( beta_inact*h ) 97 | # Na-current: 98 | I_Na =(Na['Na_E']-E) * Na['Na_G'] * (m**Na['k_Na_act']) * h 99 | 100 | # calculate K rate functions and I_K 101 | alpha_kal = K['A_alpha_m_act'] * (E-K['B_alpha_m_act']) / (1.0 - exp((K['B_alpha_m_act']-E) / K['C_alpha_m_act'])) 102 | beta_kal = K['A_beta_m_act'] * (K['B_beta_m_act']-E) / (1.0 - exp((E-K['B_beta_m_act']) / K['C_beta_m_act'])) 103 | dndt = ( alpha_kal*(1.0 - n) ) - ( beta_kal*n ) 104 | # K current 105 | I_K = (K['k_E']-E) * K['k_G'] * 
n**K['k_K'] 106 | 107 | # Ca rate functions and Ca current 108 | alpha_Ca_act = (Ca['Ca_act_alpha_A']*(E-Ca['Ca_act_alpha_B']))/(1-exp((Ca['Ca_act_alpha_B']-E)/Ca['Ca_act_alpha_C'])) 109 | beta_Ca_act = (Ca['Ca_act_beta_A']*(Ca['Ca_act_beta_B']-E))/(1-exp((E-Ca['Ca_act_beta_B'])/Ca['Ca_act_beta_C'])) 110 | dqdt = alpha_Ca_act*(1-q) - beta_Ca_act*q 111 | # Ca current 112 | I_Ca = (Ca['E_Ca'] - E)*Ca['G_Ca']*(q**5) 113 | 114 | # Ca2+ gated K channels 115 | dCaAPdt = (Ca['E_Ca'] - E)*Ca['Ca_rho']*(q**5) - Ca['Ca_delta']*CaAP 116 | E_K = K['k_E'] 117 | # Ca2+ gated K current 118 | I_KCA = (K['k_E'] - E)*Ca['G_KCA']*CaAP 119 | 120 | # leak current 121 | I_leak = (Epar['E_leak']-E) * Epar['G_leak'] 122 | 123 | # calculate derivative of E 124 | dEdt = (I_leak + I_K + I_Na + I_ext + I_Ca + I_KCA) / Epar['C_m'] 125 | statep = [dEdt, dmdt, dhdt, dndt, dqdt, dCaAPdt] 126 | 127 | return statep 128 | 129 | 130 | # simulate 131 | 132 | # external current 133 | params['E_params']['I_ext'] = 2.0e-09 134 | 135 | # set initial states and time vector 136 | state0 = [-70e-03, 0, 1, 0, 0, 0] 137 | t = arange(0, 0.2, 0.0001) 138 | 139 | # run simulation 140 | state = odeint(neuron, state0, t, args=(params,)) 141 | 142 | # plot soma potential over time 143 | plot(t, state[:,0]) 144 | 145 | # what is the inter-spike interval between spike 1-2, 2-3 and 3-4? 
146 | soma = state[:,0] 147 | vt = 0.02 148 | peaks = array([]) 149 | for i in arange(1,size(t)-1): 150 | v0 = soma[i-1] 151 | v1 = soma[i] 152 | v2 = soma[i+1] 153 | if ((v2 > vt) & (v0 < v1) & (v2 < v1)): 154 | peaks = append(peaks, i) 155 | 156 | # plot lines on figure to verify 157 | for i in peaks: 158 | plot([t[i],t[i]],[-0.08,0.06],'r-') 159 | 160 | # compute inter-spike intervals 161 | 162 | isi = array([]) 163 | for i in arange(size(peaks)-1): 164 | isi = append(isi, t[peaks[i+1]]-t[peaks[i]]) 165 | 166 | # repeat for I_Ext = 4.0e-09 and then 6.0e-09 167 | # to be efficient let's make a function that wraps the simulation and the isi calculation 168 | 169 | def find_isi(I_Ext): 170 | params['E_params']['I_ext'] = I_Ext 171 | state0 = [-70e-03, 0, 1, 0, 0, 0] 172 | t = arange(0, 0.2, 0.0001) 173 | state = odeint(neuron, state0, t, args=(params,)) 174 | soma = state[:,0] 175 | vt = 0.02 176 | peaks = array([]) 177 | for i in arange(1,size(t)-1): 178 | v0 = soma[i-1] 179 | v1 = soma[i] 180 | v2 = soma[i+1] 181 | if ((v2 > vt) & (v0 < v1) & (v2 < v1)): 182 | peaks = append(peaks, i) 183 | isi = array([]) 184 | for i in arange(size(peaks)-1): 185 | isi = append(isi, t[peaks[i+1]]-t[peaks[i]]) 186 | return isi,t,state[:,0] 187 | 188 | figure() 189 | isi1, t1, soma1 = find_isi(2.0e-09) 190 | subplot(3,1,1) 191 | plot(t1,soma1) 192 | title("I_Ext = 2.0e-09") 193 | isi2, t2, soma2 = find_isi(4.0e-09) 194 | subplot(3,1,2) 195 | plot(t2,soma2) 196 | title("I_Ext = 4.0e-09") 197 | isi3, t3, soma3 = find_isi(6.0e-09) 198 | subplot(3,1,3) 199 | plot(t3,soma3) 200 | title("I_Ext = 6.0e-09") 201 | 202 | print isi1 203 | print isi2 204 | print isi3 205 | 206 | 207 | -------------------------------------------------------------------------------- /code/assignment4_sol.py: -------------------------------------------------------------------------------- 1 | # assignment 4 solution 2 | 3 | def joints_to_hand(a1,a2,l1,l2): 4 | Ex = l1 * cos(a1) 5 | Ey = l1 * sin(a1) 6 | Hx = Ex 
+ (l2 * cos(a1+a2)) 7 | Hy = Ey + (l2 * sin(a1+a2)) 8 | return Ex,Ey,Hx,Hy 9 | 10 | def minjerk(H1x,H1y,H2x,H2y,t,n): 11 | """ 12 | Given hand initial position H1x,H1y, final position H2x,H2y and movement duration t, 13 | and the total number of desired sampled points n, 14 | Calculates the hand path H over time T that satisfies minimum-jerk. 15 | Flash, Tamar, and Neville Hogan. "The coordination of arm 16 | movements: an experimentally confirmed mathematical model." The 17 | journal of Neuroscience 5, no. 7 (1985): 1688-1703. 18 | """ 19 | T = linspace(0,t,n) 20 | Hx = zeros(n) 21 | Hy = zeros(n) 22 | for i in range(n): 23 | tau = T[i]/t 24 | Hx[i] = H1x + ((H1x-H2x)*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 25 | Hy[i] = H1y + ((H1y-H2y)*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 26 | return T,Hx,Hy 27 | 28 | 29 | # Question 1 30 | l1 = 0.34 31 | l2 = 0.46 32 | angs = array([30.0,60.0,90.0]) * pi/180 33 | figure(figsize=(5,10)) 34 | for i in range(3): 35 | for j in range(3): 36 | a1 = angs[i] 37 | a2 = angs[j] 38 | subplot(2,1,1) 39 | plot(a1*180/pi,a2*180/pi,'r+') 40 | ex,ey,hx,hy = joints_to_hand(a1,a2,l1,l2) 41 | subplot(2,1,2) 42 | plot(hx,hy,'r+') 43 | for k in range(20): 44 | a1n = a1 + randn()*(sqrt(3)*pi/180) 45 | a2n = a2 + randn()*(sqrt(3)*pi/180) 46 | subplot(2,1,1) 47 | plot(a1n*180/pi,a2n*180/pi,'b.') 48 | ex,ey,hx,hy = joints_to_hand(a1n,a2n,l1,l2) 49 | subplot(2,1,2) 50 | plot(hx,hy,'b.') 51 | subplot(2,1,1) 52 | axis('equal') 53 | xlabel('SHOULDER ANGLE (deg)') 54 | ylabel('ELBOW ANGLE (deg)') 55 | subplot(2,1,2) 56 | axis('equal') 57 | xlabel('HAND POSITION X (m)') 58 | ylabel('HAND POSITION Y (m)') 59 | 60 | 61 | # Question 2 62 | def hand_to_joints(hx,hy,l1,l2): 63 | """ 64 | Given hand position H=(hx,hy) and link lengths l1,l2, 65 | returns joint angles A=(a1,a2) 66 | """ 67 | a2 = arccos(((hx*hx)+(hy*hy)-(l1*l1)-(l2*l2))/(2.0*l1*l2)) 68 | a1 = arctan(hy/hx) - arctan((l2*sin(a2))/(l1+(l2*cos(a2)))) 69 | if a1 < 0: 70 | a1 = a1 + pi 71 | elif a1 
> pi: 72 | a1 = a1 - pi 73 | return a1,a2 74 | 75 | 76 | # Question 3 77 | l1 = 0.34 78 | l2 = 0.46 79 | angs = array([30.0,60.0,90.0]) * pi/180 80 | figure(figsize=(5,10)) 81 | for i in range(3): 82 | for j in range(3): 83 | a1 = angs[i] 84 | a2 = angs[j] 85 | subplot(2,1,1) 86 | plot(a1*180/pi,a2*180/pi,'r+') 87 | ex,ey,hx,hy = joints_to_hand(a1,a2,l1,l2) 88 | subplot(2,1,2) 89 | plot(hx,hy,'r+') 90 | for k in range(20): 91 | hxn = hx + randn()*(sqrt(2)/100) 92 | hyn = hy + randn()*(sqrt(2)/100) 93 | a1n,a2n = hand_to_joints(hxn,hyn,l1,l2) 94 | subplot(2,1,1) 95 | plot(a1n*180/pi,a2n*180/pi,'b.') 96 | subplot(2,1,2) 97 | plot(hxn,hyn,'b.') 98 | subplot(2,1,1) 99 | axis('equal') 100 | xlabel('SHOULDER ANGLE (deg)') 101 | ylabel('ELBOW ANGLE (deg)') 102 | title('JOINT SPACE') 103 | subplot(2,1,2) 104 | axis('equal') 105 | xlabel('HAND POSITION X (m)') 106 | ylabel('HAND POSITION Y (m)') 107 | title('HAND SPACE') 108 | 109 | 110 | # Question 4 111 | l1,l2 = 0.34, 0.46 112 | H1x,H1y = -0.20, -.55 113 | movdist = 0.10 114 | movtime = 0.50 115 | npts = 20 116 | ncirc = 8 117 | angs = linspace(0,360,ncirc+1)*pi/180 118 | angs = angs[0:-1] 119 | figure(figsize=(5,10)) 120 | for i in range(ncirc): 121 | H2x = H1x + movdist*cos(angs[i]) 122 | H2y = H1y + movdist*sin(angs[i]) 123 | T,Hx,Hy = minjerk(H1x,H1y,H2x,H2y,movtime,npts) 124 | subplot(2,1,2) 125 | plot(Hx,Hy,'.') 126 | axis('equal') 127 | A1 = zeros(npts) 128 | A2 = zeros(npts) 129 | for j in range(npts): 130 | A1[j],A2[j] = hand_to_joints(Hx[j],Hy[j],l1,l2) 131 | subplot(2,1,1) 132 | plot(A1*180/pi,A2*180/pi,'.') 133 | axis('equal') 134 | subplot(2,1,1) 135 | xlabel('SHOULDER ANGLE (deg)') 136 | ylabel('ELBOW ANGLE (deg)') 137 | title('JOINT SPACE') 138 | subplot(2,1,2) 139 | xlabel('HAND POS X (m)') 140 | ylabel('HAND POS Y (m)') 141 | title('HAND SPACE') 142 | 143 | # Question 5 144 | l1,l2 = 0.34, 0.46 145 | A1s,A1e = 45*pi/180, 90*pi/180 146 | movdist = 10*pi/180 147 | movtime = 0.50 148 | npts = 20 149 | ncirc 
= 8 150 | angs = linspace(0,360,ncirc+1)*pi/180 151 | angs = angs[0:-1] 152 | figure(figsize=(5,10)) 153 | for i in range(ncirc): 154 | A2s = A1s + movdist*cos(angs[i]) 155 | A2e = A1e + movdist*sin(angs[i]) 156 | T,As,Ae = minjerk(A1s,A1e,A2s,A2e,movtime,npts) 157 | subplot(2,1,1) 158 | plot(As*180/pi,Ae*180/pi,'.') 159 | axis('equal') 160 | Hx = zeros(npts) 161 | Hy = zeros(npts) 162 | for j in range(npts): 163 | ex,ey,Hx[j],Hy[j] = joints_to_hand(As[j],Ae[j],l1,l2) 164 | subplot(2,1,2) 165 | plot(Hx,Hy,'.') 166 | axis('equal') 167 | subplot(2,1,1) 168 | xlabel('SHOULDER ANGLE (deg)') 169 | ylabel('ELBOW ANGLE (deg)') 170 | title('JOINT SPACE') 171 | subplot(2,1,2) 172 | xlabel('HAND POS X (m)') 173 | ylabel('HAND POS Y (m)') 174 | title('HAND SPACE') 175 | -------------------------------------------------------------------------------- /code/assignment5_sol.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | # load in the functions and parameters from twojointarm.py 4 | %load twojointarm.py 5 | 6 | # Question 1 7 | H1 = [-0.20, -.55] 8 | movdist = 0.10 9 | movtime = 0.50 10 | npts = 20 11 | ncirc = 8 12 | angs = linspace(0,360,ncirc+1)*pi/180 13 | angs = angs[0:-1] 14 | figure(figsize=(6,9)) 15 | for i in range(ncirc): 16 | H2x = H1[0] + movdist*cos(angs[i]) 17 | H2y = H1[1] + movdist*sin(angs[i]) 18 | H2 = [H2x,H2y] 19 | t,H,A,Ad,Add = get_min_jerk_movement(H1,H2,mt) 20 | Q = inverse_dynamics(A,Ad,Add,aparams) 21 | subplot(2,1,1) 22 | plot(H[:,0],H[:,1],'b.-') 23 | subplot(2,1,2) 24 | plot(Q[:,0],Q[:,1],'r.-') 25 | draw() 26 | subplot(2,1,1) 27 | axis('equal') 28 | xlabel('HAND POS X (m)') 29 | ylabel('HAND POS Y (m)') 30 | title('HAND SPACE') 31 | subplot(2,1,2) 32 | axis('equal') 33 | xlabel('SHOULDER TORQUE (Nm)') 34 | ylabel('ELBOW TORQUE (Nm)') 35 | title('JOINT TORQUE SPACE') 36 | 37 | # Question 2 38 | A1 = [45*pi/180, 110*pi/180] # starting angles 39 | A2 = [45*pi/180, 70*pi/180] # ending 
angles 40 | movtime = 0.500 # 500 ms movement time 41 | npts = 100 # use 100 pts for our desired trajectory 42 | # get a min-jerk (joint-space) trajectory 43 | t,A,Ad,Add = minjerk(A1,A2,movtime,100) 44 | # compute required joint torques 45 | Q = inverse_dynamics(A,Ad,Add,aparams) 46 | # plot the junk 47 | figure(figsize=(6,9)) 48 | subplot(2,1,1) 49 | plot(t,A*180/pi) 50 | ylim([40,120]) 51 | legend(('shoulder','elbow')) 52 | ylabel('JOINT ANGLE (deg)') 53 | title('JOINT ANGLES') 54 | subplot(2,1,2) 55 | plot(t,Q) 56 | xlabel('TIME (sec)') 57 | ylabel('JOINT TORQUES (Nm)') 58 | legend(('shoulder','elbow'), loc='upper left') 59 | 60 | # Question 3 61 | A1 = [30*pi/180, 90*pi/180] # starting angles 62 | A2 = [60*pi/180, 90*pi/180] # ending angles 63 | movtime = 0.500 # 500 ms movement time 64 | npts = 100 # use 100 pts for our desired trajectory 65 | # get a min-jerk (joint-space) trajectory 66 | t,A,Ad,Add = minjerk(A1,A2,movtime,100) 67 | # compute required joint torques 68 | Q = inverse_dynamics(A,Ad,Add,aparams) 69 | # plot the junk 70 | figure(figsize=(6,9)) 71 | subplot(2,1,1) 72 | plot(t,A*180/pi) 73 | ylim([20,100]) 74 | legend(('shoulder','elbow'), loc='bottom right') 75 | ylabel('JOINT ANGLE (deg)') 76 | title('JOINT ANGLES') 77 | subplot(2,1,2) 78 | plot(t,Q) 79 | xlabel('TIME (sec)') 80 | ylabel('JOINT TORQUES (Nm)') 81 | legend(('shoulder','elbow'), loc='upper right') 82 | 83 | # Question 4 84 | H1 = [-0.2, 0.4] # hand start position 85 | H2 = [-0.2, 0.6] # hand end position 86 | # get min-jerk hand trajectory 87 | t,H,A,Ad,Add = get_min_jerk_movement(H1,H2,0.500) 88 | # compute torques 89 | Q = inverse_dynamics(A,Ad,Add,aparams) 90 | # initial state of arm for forward simulation 91 | state0 = [A[0,0],A[0,1],Ad[0,0],Ad[0,1]] 92 | # simulate forward dynamics equations of motion with driving torques Q 93 | state = odeint(forward_dynamics, state0, t, args=(aparams, Q, t,)) 94 | figure() 95 | plot(t,H) 96 | ylim([-0.25, 0.65]) 97 | xlabel('TIME (sec)') 98 | 
ylabel('HAND POS (m)') 99 | legend(('Hand X','Hand Y'), loc='top left') 100 | 101 | # Question 5 102 | figure(figsize=(6,9)) 103 | for i in range(25): 104 | # add noise to initial hand location 105 | H1n = [H1[0]+(randn()*sqrt(0.001)), H1[1]+(randn()*sqrt(0.001))] 106 | # convert noisy initial hand loction to joint angles 107 | A1n = hand_to_joints(matrix(H1n),aparams) 108 | # initial states for forward simulation 109 | state0 = [A1n[0,0], A1n[0,1], 0.0, 0.0] 110 | state = odeint(forward_dynamics, state0, t, args=(aparams, Q, t,)) 111 | Htraj, Etraj = joints_to_hand(state[:,[0,1]], aparams) 112 | subplot(2,1,1) 113 | plot(Htraj[:,0],Htraj[:,1],'b-') 114 | subplot(2,1,2) 115 | plot(Htraj[0,0],Htraj[0,1],'b.') # initial hand position 116 | plot(Htraj[-1,0],Htraj[-1,1],'r.') # final hand position 117 | draw() 118 | subplot(2,1,1) 119 | axis('equal') 120 | ylabel('HAND POS Y (m)') 121 | subplot(2,1,2) 122 | axis('equal') 123 | xlabel('HAND POS X (m)') 124 | ylabel('HAND POS Y (m)') 125 | 126 | # Question 6 127 | figure(figsize=(6,9)) 128 | xshifts = linspace(-0.1, 0.1, 11) 129 | xerr = zeros(len(xshifts)) 130 | yerr = zeros(len(xshifts)) 131 | for i in range(len(xshifts)): 132 | # shift initial hand x position by specified amount 133 | H1s = [H1[0]+xshifts[i], H1[1]] 134 | # convert new initial hand loction to joint angles 135 | A1s = hand_to_joints(matrix(H1s),aparams) 136 | # initial states for forward simulation 137 | state0 = [A1s[0,0], A1s[0,1], 0.0, 0.0] 138 | # simulate! 
139 | state = odeint(forward_dynamics, state0, t, args=(aparams, Q, t,)) 140 | # get hand trajectory 141 | Htraj, Etraj = joints_to_hand(state[:,[0,1]], aparams) 142 | subplot(2,1,1) 143 | # plot ideal hand trajectory 144 | plot((Htraj[0,0],H2[0]+xshifts[i]),(Htraj[0,1],H2[1]),'k--') 145 | # plot actual hand trajectory 146 | plot(Htraj[:,0],Htraj[:,1],'b-') 147 | draw() 148 | # compute endpoint error in x and y 149 | xerr[i] = Htraj[-1,0] - Htraj[0,0] # endpoint error in x position 150 | yerr[i] = Htraj[-1,1] - H2[1] # endpoint error in y position 151 | subplot(2,1,1) 152 | axis('equal') 153 | xlabel('HAND POS X (m)') 154 | ylabel('HAND POS Y (m)') 155 | subplot(2,1,2) 156 | plot((-.12,.12),(0,0),'k--') 157 | plot(xshifts,xerr,'b+-') 158 | plot(xshifts,yerr,'rs-') 159 | legend(('X error','Y error'), loc='upper left') 160 | xlabel('X POSITION SHIFT (m)') 161 | ylabel('ENDPOINT ERROR (m)') 162 | xlim([-.12, .12]) 163 | 164 | -------------------------------------------------------------------------------- /code/assignment6.py: -------------------------------------------------------------------------------- 1 | # assignment 6 2 | # due Sunday Dec 2, 11:59pm EST 3 | 4 | # 1. explore how (a) number of hidden units, and (b) the cost on the 5 | # sum of squared weights, affects the network's classification of 6 | # the input space 7 | # 8 | # 2. 
(bonus): implement an additional hidden layer and explore how 9 | # this affects the capability of the network to divide up the input 10 | # space into classes 11 | 12 | 13 | # choose a different random seed each time we run the code 14 | import time 15 | myseed = int(time.time()) 16 | random.seed(myseed) 17 | 18 | # we will need this for conjugate gradient descent 19 | from scipy.optimize import fmin_cg 20 | 21 | ############################################################ 22 | # IMPORT TRAINING DATA # 23 | ############################################################ 24 | 25 | # inputs: 100 x 2 matrix (100 examples, 2 inputs each) 26 | # outputs: 100 x 1 matrix (100 examples, 1 output each) 27 | # we will want to change outputs to 4: 28 | # outputs: 100 x 4 matrix (100 examples, 4 outputs each) 29 | # "1" = [1,0,0,0] 30 | # "2" = [0,1,0,0] 31 | # "3" = [0,0,1,0] 32 | # "4" = [0,0,0,1] 33 | # 34 | import pickle 35 | fid = open('traindata.pickle','r') 36 | traindata = pickle.load(fid) 37 | fid.close() 38 | train_in = traindata['inputs'] 39 | n_examples = shape(train_in)[0] 40 | out1 = traindata['outputs'] 41 | # convert one output value {1,2,3,4} into four binary outputs [o1,o2,o3,o4] {0,1} 42 | train_out = zeros((n_examples,4)) 43 | for i in range(n_examples): 44 | out_i = out1[i,0] 45 | train_out[i,out_i-1] = 1.0 46 | 47 | ############################################################ 48 | # UTILITY FUNCTIONS # 49 | ############################################################ 50 | 51 | # The output layer transfer function will be logsig [ 0, +1 ] 52 | 53 | def logsig(x): 54 | """ logsig activation function """ 55 | return 1.0 / (1.0 + exp(-x)) 56 | 57 | def dlogsig(x): 58 | """ derivative of logsig function """ 59 | return multiply(x,(1.0 - x)) 60 | 61 | # The hidden layer transfer function will be tansig [-1, +1 ] 62 | 63 | def tansig(x): 64 | """ tansig activation function """ 65 | return tanh(x) 66 | 67 | def dtansig(x): 68 | """ derivative of tansig function """ 
69 | return 1.0 - (multiply(x,x)) # element-wise multiplication 70 | 71 | def pack_weights(w_hid, b_hid, w_out, b_out, params): 72 | """ pack weight matrices into a single vector """ 73 | n_in, n_hid, n_out = params[0], params[1], params[2] 74 | g_j = hstack((reshape(w_hid,(1,n_in*n_hid)), 75 | reshape(b_hid,(1,n_hid)), 76 | reshape(w_out,(1,n_hid*n_out)), 77 | reshape(b_out,(1,n_out))))[0] 78 | g_j = array(g_j[0,:])[0] 79 | return g_j 80 | 81 | def unpack_weights(x, params): 82 | """ unpack weights from single vector into weight matrices """ 83 | n_in, n_hid, n_out = params[0], params[1], params[2] 84 | pat_in, pat_out = params[3], params[4] 85 | n_pat = shape(pat_in)[0] 86 | i1,i2 = 0,n_in*n_hid 87 | w_hid = reshape(x[i1:i2], (n_in,n_hid)) 88 | i1,i2 = i2,i2+n_hid 89 | b_hid = reshape(x[i1:i2],(1,n_hid)) 90 | i1,i2 = i2,i2+(n_hid*n_out) 91 | w_out = reshape(x[i1:i2], (n_hid,n_out)) 92 | i1,i2 = i2,i2+n_out 93 | b_out = reshape(x[i1:i2],(1,n_out)) 94 | return w_hid, b_hid, w_out, b_out 95 | 96 | def net_forward(x, params, ret_hids=False): 97 | """ propagate inputs through the network and return outputs """ 98 | w_hid,b_hid,w_out,b_out = unpack_weights(x, params) 99 | pat_in = params[3] 100 | hid_act = tansig((pat_in * w_hid) + b_hid) 101 | out_act = logsig((hid_act * w_out) + b_out) 102 | if ret_hids: 103 | return out_act,hid_act 104 | else: 105 | return out_act 106 | 107 | def f(x,params): 108 | """ returns the cost (SSE) of a given weight vector """ 109 | t = params[4] 110 | y = net_forward(x,params) 111 | sse = sum(square(t-y)) 112 | w_cost = params[5]*sum(square(x)) 113 | cost = sse + w_cost 114 | print "sse=%7.5f wcost=%7.5f" % (sse,w_cost) 115 | return cost 116 | 117 | def fd(x,params): 118 | """ returns the gradients (dW/dE) for the weight vector """ 119 | n_in, n_hid, n_out = params[0], params[1], params[2] 120 | pat_in, pat_out = params[3], params[4] 121 | w_cost = params[5] 122 | w_hid,b_hid,w_out,b_out = unpack_weights(x, params) 123 | act_hid = tansig( 
(pat_in * w_hid) + b_hid ) 124 | act_out = logsig( (act_hid * w_out) + b_out ) 125 | err_out = act_out - pat_out 126 | deltas_out = multiply(dlogsig(act_out), err_out) 127 | err_hid = deltas_out * transpose(w_out) 128 | deltas_hid = multiply(dtansig(act_hid), err_hid) 129 | grad_w_out = transpose(act_hid)*deltas_out 130 | grad_w_out = grad_w_out + (2*w_cost*grad_w_out) 131 | grad_b_out = sum(deltas_out,0) 132 | grad_b_out = grad_b_out + (2*w_cost*grad_b_out) 133 | grad_w_hid = transpose(pat_in)*deltas_hid 134 | grad_w_hid = grad_w_hid + (2*w_cost*grad_w_hid) 135 | grad_b_hid = sum(deltas_hid,0) 136 | grad_b_hid = grad_b_hid + (2*w_cost*grad_b_hid) 137 | return pack_weights(grad_w_hid, grad_b_hid, grad_w_out, grad_b_out, params) 138 | 139 | ############################################################ 140 | # TRAIN THE SUCKER # 141 | ############################################################ 142 | 143 | # network parameters 144 | n_in = shape(train_in)[1] 145 | n_hid = 4 146 | n_out = shape(train_out)[1] 147 | w_cost = 0.01 148 | params = [n_in, n_hid, n_out, train_in, train_out, w_cost] 149 | 150 | # initialize weights to small random (uniformly distributed) 151 | # values between -0.10 and +0.10 152 | nw = n_in*n_hid + n_hid + n_hid*n_out + n_out 153 | w0 = random.rand(nw)*0.1 - 0.05 154 | 155 | # optimize using conjugate gradient descent 156 | out = fmin_cg(f, w0, fprime=fd, args=(params,), 157 | full_output=True, retall=True, disp=True, 158 | gtol=1e-3, maxiter=1000) 159 | # unpack optimizer outputs 160 | wopt,fopt,func_calls,grad_calls,warnflag,allvecs = out 161 | 162 | # net performance 163 | netout = net_forward(wopt,params) 164 | pc = array(netout.argmax(1).T) == params[4].argmax(1) # I hate munging numpy matrices/arrays 165 | pc, = where(pc[0,:]) # hate hate hate 166 | pc = float(len(pc)) / float(shape(params[4])[0]) # more hate 167 | print "percent correct = %6.3f" % (pc) 168 | 169 | ############################################################ 170 | # 
PRETTY PLOTS # 171 | ############################################################ 172 | 173 | # test our network on the entire range of inputs 174 | # and visualize the results 175 | # 176 | n_grid = 100 177 | min_grid,max_grid = -10.0, 20.0 178 | g_grid = linspace(min_grid, max_grid, n_grid) 179 | g1,g2 = meshgrid(g_grid, g_grid) 180 | grid_inputs = matrix(hstack((reshape(g1,(n_grid*n_grid,1)), 181 | reshape(g2,(n_grid*n_grid,1))))) 182 | params_grid = list(params) 183 | params_grid[3] = grid_inputs 184 | act_grid,hid_grid = net_forward(wopt,params_grid,ret_hids=True) 185 | # choose which neuron has greatest activity 186 | cat_grid = reshape(act_grid.argmax(1),(n_grid,n_grid)) 187 | figure() 188 | # plot the network performance 189 | imshow(cat_grid,extent=[min_grid,max_grid,min_grid,max_grid]) 190 | # now overlay the training data 191 | i1 = where(traindata['outputs']==1)[0] 192 | i2 = where(traindata['outputs']==2)[0] 193 | i3 = where(traindata['outputs']==3)[0] 194 | i4 = where(traindata['outputs']==4)[0] 195 | plot(traindata['inputs'][i1,0],traindata['inputs'][i1,1],'ys',markeredgecolor='k') 196 | plot(traindata['inputs'][i2,0],traindata['inputs'][i2,1],'rs',markeredgecolor='k') 197 | plot(traindata['inputs'][i3,0],traindata['inputs'][i3,1],'bs',markeredgecolor='k') 198 | plot(traindata['inputs'][i4,0],traindata['inputs'][i4,1],'cs',markeredgecolor='k') 199 | axis([min_grid,max_grid,min_grid,max_grid]) 200 | xlabel('INPUT 1') 201 | ylabel('INPUT 2') 202 | 203 | # hidden neuron activations for entire range of inputs 204 | # 205 | figure() 206 | ncols = ceil(sqrt(n_hid)) 207 | nrows = ceil(float(n_hid)/float(ncols)) 208 | w_hid, b_hid, w_out, b_out = unpack_weights(wopt, params) 209 | for i in range(n_hid): 210 | cgi = reshape(hid_grid[:,i], (n_grid,n_grid)) 211 | subplot(nrows,ncols,i+1) 212 | imshow(cgi, extent=[min_grid,max_grid,min_grid,max_grid]) 213 | axis([min_grid,max_grid,min_grid,max_grid]) 214 | axis('off') 215 | title('HID_%d' % i) 216 | 217 | # 
output neuron activations for entire range of inputs 218 | # 219 | figure(figsize=(16,4)) 220 | for i in range(4): 221 | cgi = reshape(act_grid[:,i], (n_grid,n_grid)) 222 | subplot(1,4,i+1) 223 | imshow(cgi, extent=[min_grid,max_grid,min_grid,max_grid]) 224 | plot(traindata['inputs'][i1,0],traindata['inputs'][i1,1],'ys',markeredgecolor='k') 225 | plot(traindata['inputs'][i2,0],traindata['inputs'][i2,1],'rs',markeredgecolor='k') 226 | plot(traindata['inputs'][i3,0],traindata['inputs'][i3,1],'bs',markeredgecolor='k') 227 | plot(traindata['inputs'][i4,0],traindata['inputs'][i4,1],'cs',markeredgecolor='k') 228 | axis([min_grid,max_grid,min_grid,max_grid]) 229 | xlabel('INPUT 1') 230 | ylabel('INPUT 2') 231 | title('OUT_%d' % i) 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | -------------------------------------------------------------------------------- /code/double_pendulum.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | # here is our state function that implements the 6 | # differential equations defining the dynamics of 7 | # a double-pendulum 8 | def DoublePendulum(x, t): 9 | damping = 0.0 10 | g = 9.8 11 | # inertia matrix 12 | M = matrix([[3 + 2*cos(x[1]), 1+cos(x[1])], [1+cos(x[1]), 1]]) 13 | # coriolis, centripetal and gravitational forces 14 | c1 = x[3]*((2*x[2]) + x[3])*sin(x[1]) + 2*g*sin(x[0]) + g*sin(x[0]+x[1]) 15 | c2 = -(x[2]**2)*sin(x[1]) + g*sin(x[0]+x[1]) 16 | # passive dynamics 17 | cc = [c1-damping*x[2], c2-damping*x[3]] 18 | u = linalg.solve(M,cc) 19 | return [x[2], x[3], u[0], u[1]] 20 | 21 | # decide on a time range for simulation 22 | # and initial states 23 | t = arange(0, 10, 0.01) 24 | x0 = [pi, pi/2, 0.0, 0.0] # a0, a1, a0d, a1d 25 | 26 | # simulate! 
# run the simulation
x = odeint(DoublePendulum, x0, t)

# plot all four states against time
figure()
plot(t,x)
legend(('a0','a1','a0d','a1d'))
xlabel('TIME (sec)')
ylabel('ANGLE (rad)')
draw()

def a2h(a0,a1):
    """Convert joint angles (a0,a1) into the xy coordinates of the
    pendulum's three hinge points (base, middle hinge, endpoint).
    Used below when animating the linkage."""
    base   = [0, 0]
    middle = [np.sin(a0), np.cos(a0)]
    endpt  = [np.sin(a0)+np.sin(a0+a1), np.cos(a0)+np.cos(a0+a1)]
    return [base, middle, endpt]

def AnimatePendulum(states,t):
    """Animate the double pendulum: the left panel shows the two
    joint-angle traces with a moving time cursor, the right panel
    shows the linkage moving in the xy plane."""
    # unpack the angles and angular velocities column-wise
    ang0, ang1 = states[:,0], states[:,1]
    vel0, vel1 = states[:,2], states[:,3]

    fig = plt.figure(figsize=(10,5))

    # left panel: angle traces plus a vertical time cursor
    ax = fig.add_subplot(1,2,1)
    ax.plot(t,ang0,'b')
    ax.plot(t,ang1,'r')
    cursor, = ax.plot((t[0],t[0]),(-2,5),'k-')
    xlabel('TIME (sec)')
    ylabel('ANGLE (rad)')

    # right panel: draw the two links from the hinge positions at t[0]
    ax = fig.add_subplot(1,2,2)
    (hinge0,hinge1,hinge2) = a2h(ang0[0],ang1[0])
    link1, = plot([hinge0[0], hinge1[0]],[hinge0[1], hinge1[1]],'b')
    link2, = plot([hinge1[0], hinge2[0]],[hinge1[1], hinge2[1]],'r')
    dot0, = plot(hinge0[0], hinge0[1], 'k.')
    dot1, = plot(hinge1[0], hinge1[1], 'b.')
    dot2, = plot(hinge2[0], hinge2[1], 'r.')
    plt.xlim([-2.2,2.2])
    plt.ylim([-2.2,2.2])
    title1 = title("%3.2fs" % 0.0)
    xlabel('X POSITION (m)')
    ylabel('Y POSITION (m)')

    # step through the simulation, redrawing every 5th sample
    for k in arange(0,t.size,5):
        cursor.set_xdata((t[k],t[k]))
        (hinge0,hinge1,hinge2) = a2h(ang0[k],ang1[k])
        link1.set_xdata([hinge0[0], hinge1[0]])
        link1.set_ydata([hinge0[1], hinge1[1]])
        link2.set_xdata([hinge1[0], hinge2[0]])
        link2.set_ydata([hinge1[1], hinge2[1]])
        dot1.set_xdata(hinge1[0])
        dot1.set_ydata(hinge1[1])
        dot2.set_xdata(hinge2[0])
        dot2.set_ydata(hinge2[1])
        title1.set_text('time = %3.2fs' %t[k])
        draw()

# Go animation!
89 | AnimatePendulum(x,t) 90 | 91 | 92 | -------------------------------------------------------------------------------- /code/ekeberg1.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | # import some needed functions 4 | from scipy.integrate import odeint 5 | 6 | # set up a dictionary of parameters 7 | 8 | E_params = { 9 | 'E_leak' : -7.0e-2, 10 | 'G_leak' : 3.0e-09, 11 | 'C_m' : 3.0e-11, 12 | 'I_ext' : 0*1.0e-10 13 | } 14 | 15 | Na_params = { 16 | 'Na_E' : 5.0e-2, 17 | 'Na_G' : 1.0e-6, 18 | 'k_Na_act' : 3.0e+0, 19 | 'A_alpha_m_act' : 2.0e+5, 20 | 'B_alpha_m_act' : -4.0e-2, 21 | 'C_alpha_m_act' : 1.0e-3, 22 | 'A_beta_m_act' : 6.0e+4, 23 | 'B_beta_m_act' : -4.9e-2, 24 | 'C_beta_m_act' : 2.0e-2, 25 | 'l_Na_inact' : 1.0e+0, 26 | 'A_alpha_m_inact' : 8.0e+4, 27 | 'B_alpha_m_inact' : -4.0e-2, 28 | 'C_alpha_m_inact' : 1.0e-3, 29 | 'A_beta_m_inact' : 4.0e+2, 30 | 'B_beta_m_inact' : -3.6e-2, 31 | 'C_beta_m_inact' : 2.0e-3 32 | } 33 | 34 | K_params = { 35 | 'k_E' : -9.0e-2, 36 | 'k_G' : 2.0e-7, 37 | 'k_K' : 4.0e+0, 38 | 'A_alpha_m_act' : 2.0e+4, 39 | 'B_alpha_m_act' : -3.1e-2, 40 | 'C_alpha_m_act' : 8.0e-4, 41 | 'A_beta_m_act' : 5.0e+3, 42 | 'B_beta_m_act' : -2.8e-2, 43 | 'C_beta_m_act' : 4.0e-4 44 | } 45 | 46 | params = { 47 | 'E_params' : E_params, 48 | 'Na_params' : Na_params, 49 | 'K_params' : K_params 50 | } 51 | 52 | # define our ODE function 53 | 54 | def neuron(state, t, params): 55 | """ 56 | Purpose: simulate Hodgkin and Huxley model for the action potential using 57 | the equations from Ekeberg et al, Biol Cyb, 1991. 58 | Input: state ([E m h n] (ie [membrane potential; activation of 59 | Na++ channel; inactivation of Na++ channel; activation of K+ 60 | channel]), 61 | t (time), 62 | and the params (parameters of neuron; see Ekeberg et al). 63 | Output: statep (state derivatives). 
64 | """ 65 | 66 | E = state[0] 67 | m = state[1] 68 | h = state[2] 69 | n = state[3] 70 | 71 | Epar = params['E_params'] 72 | Na = params['Na_params'] 73 | K = params['K_params'] 74 | 75 | # external current (from "voltage clamp", other compartments, other neurons, etc) 76 | I_ext = Epar['I_ext'] 77 | 78 | # calculate Na rate functions and I_Na 79 | alpha_act = Na['A_alpha_m_act'] * (E-Na['B_alpha_m_act']) / (1.0 - exp((Na['B_alpha_m_act']-E) / Na['C_alpha_m_act'])) 80 | beta_act = Na['A_beta_m_act'] * (Na['B_beta_m_act']-E) / (1.0 - exp((E-Na['B_beta_m_act']) / Na['C_beta_m_act']) ) 81 | dmdt = ( alpha_act * (1.0 - m) ) - ( beta_act * m ) 82 | 83 | alpha_inact = Na['A_alpha_m_inact'] * (Na['B_alpha_m_inact']-E) / (1.0 - exp((E-Na['B_alpha_m_inact']) / Na['C_alpha_m_inact'])) 84 | beta_inact = Na['A_beta_m_inact'] / (1.0 + (exp((Na['B_beta_m_inact']-E) / Na['C_beta_m_inact']))) 85 | dhdt = ( alpha_inact*(1.0 - h) ) - ( beta_inact*h ) 86 | 87 | # Na-current: 88 | I_Na =(Na['Na_E']-E) * Na['Na_G'] * (m**Na['k_Na_act']) * h 89 | 90 | # calculate K rate functions and I_K 91 | alpha_kal = K['A_alpha_m_act'] * (E-K['B_alpha_m_act']) / (1.0 - exp((K['B_alpha_m_act']-E) / K['C_alpha_m_act'])) 92 | beta_kal = K['A_beta_m_act'] * (K['B_beta_m_act']-E) / (1.0 - exp((E-K['B_beta_m_act']) / K['C_beta_m_act'])) 93 | dndt = ( alpha_kal*(1.0 - n) ) - ( beta_kal*n ) 94 | I_K = (K['k_E']-E) * K['k_G'] * n**K['k_K'] 95 | 96 | # leak current 97 | I_leak = (Epar['E_leak']-E) * Epar['G_leak'] 98 | 99 | # calculate derivative of E 100 | dEdt = (I_leak + I_K + I_Na + I_ext) / Epar['C_m'] 101 | statep = [dEdt, dmdt, dhdt, dndt] 102 | 103 | return statep 104 | 105 | 106 | # simulate 107 | 108 | # set initial states and time vector 109 | state0 = [-70e-03, 0, 1, 0] 110 | t = arange(0, 0.2, 0.001) 111 | 112 | # let's inject some external current 113 | params['E_params']['I_ext'] = 1.0e-10 114 | 115 | # run simulation 116 | state = odeint(neuron, state0, t, args=(params,)) 117 | 118 | # plot 
the results 119 | 120 | figure(figsize=(8,12)) 121 | subplot(4,1,1) 122 | plot(t, state[:,0]) 123 | title('membrane potential') 124 | subplot(4,1,2) 125 | plot(t, state[:,1]) 126 | title('Na2+ channel activation') 127 | subplot(4,1,3) 128 | plot(t, state[:,2]) 129 | title('Na2+ channel inactivation') 130 | subplot(4,1,4) 131 | plot(t, state[:,3]) 132 | title('K+ channel activation') 133 | xlabel('TIME (sec)') 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /code/hopfield.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/code/hopfield.tgz -------------------------------------------------------------------------------- /code/jacobian_plots.py: -------------------------------------------------------------------------------- 1 | def joints_to_hand(A,aparams): 2 | """ 3 | Given joint angles A=(a1,a2) and anthropometric params aparams, 4 | returns hand position H=(hx,hy) and elbow position E=(ex,ey) 5 | """ 6 | l1 = aparams['l1'] 7 | l2 = aparams['l2'] 8 | n = shape(A)[0] 9 | E = zeros((n,2)) 10 | H = zeros((n,2)) 11 | for i in range(n): 12 | E[i,0] = l1 * cos(A[i,0]) 13 | E[i,1] = l1 * sin(A[i,0]) 14 | H[i,0] = E[i,0] + (l2 * cos(A[i,0]+A[i,1])) 15 | H[i,1] = E[i,1] + (l2 * sin(A[i,0]+A[i,1])) 16 | return H,E 17 | 18 | def jacobian(A,aparams): 19 | """ 20 | Given joint angles A=(a1,a2) 21 | returns the Jacobian matrix J(q) = dH/dA 22 | """ 23 | l1 = aparams['l1'] 24 | l2 = aparams['l2'] 25 | dHxdA1 = -l1*sin(A[0]) - l2*sin(A[0]+A[1]) 26 | dHxdA2 = -l2*sin(A[0]+A[1]) 27 | dHydA1 = l1*cos(A[0]) + l2*cos(A[0]+A[1]) 28 | dHydA2 = l2*cos(A[0]+A[1]) 29 | J = matrix([[dHxdA1,dHxdA2],[dHydA1,dHydA2]]) 30 | return J 31 | 32 | aparams = {'l1' : 0.3384, 'l2' : 0.4554} 33 | 34 | npts = 10 35 | angs = linspace(10.0,120.0,npts) *pi/180.0 36 | A1,A2 = meshgrid(angs,angs) 37 | 38 | figure() 39 
# visualize +ve shoulder angle velocity in joint-space and in hand-space
def _velocity_field(dA1, dA2):
    # Map a joint-velocity field through the Jacobian at every point of
    # the (A1,A2) grid: returns the resulting hand velocities (dHx,dHy)
    # and the hand positions (Hx,Hy) at which they apply.
    dHx = zeros((npts,npts))
    dHy = zeros((npts,npts))
    Hx = zeros((npts,npts))
    Hy = zeros((npts,npts))
    for r in range(npts):
        for c in range(npts):
            J = jacobian(array([A1[r,c],A2[r,c]]),aparams)
            dH = J * matrix([[dA1[r,c]],[dA2[r,c]]])
            dHx[r,c], dHy[r,c] = dH[0,0], dH[1,0]
            h,e = joints_to_hand(matrix([[A1[r,c],A2[r,c]]]),aparams)
            Hx[r,c], Hy[r,c] = h[0,0], h[0,1]
    return dHx, dHy, Hx, Hy

# pure shoulder rotation: 5 deg/s at the shoulder, 0 at the elbow
dA1 = ones((npts,npts)) * (5.0*pi/180.0)
dA2 = ones((npts,npts)) * (0.0*pi/180.0)
dHx, dHy, Hx, Hy = _velocity_field(dA1, dA2)
subplot(2,2,1)
quiver(A1*180/pi,A2*180/pi,dA1*180/pi,dA2*180/pi,color='b')
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
title('PURE SHOULDER VELOCITY')
subplot(2,2,3)
quiver(Hx,Hy,dHx,dHy,color='r')
xlabel('HAND POS X (m)')
ylabel('HAND POS Y (m)')

# visualize +ve elbow angle velocity in joint-space and in hand-space
dA1 = ones((npts,npts)) * (0.0*pi/180.0)
dA2 = ones((npts,npts)) * (5.0*pi/180.0)
dHx, dHy, Hx, Hy = _velocity_field(dA1, dA2)
subplot(2,2,2)
quiver(A1*180/pi,A2*180/pi,dA1*180/pi,dA2*180/pi,color='b')
xlabel('SHOULDER ANGLE (deg)')
ylabel('ELBOW ANGLE (deg)')
title('PURE ELBOW VELOCITY')
subplot(2,2,4)
quiver(Hx,Hy,dHx,dHy,color='r')
xlabel('HAND POS X (m)')
ylabel('HAND POS Y (m)')

# -------------------------------------------------------------------------- /code/lorenz1.py:
-------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def Lorenz(state,t): 6 | # unpack the state vector 7 | x = state[0] 8 | y = state[1] 9 | z = state[2] 10 | 11 | # these are our constants 12 | sigma = 10.0 13 | rho = 28.0 14 | beta = 8.0/3.0 15 | 16 | # compute state derivatives 17 | xd = sigma * (y-x) 18 | yd = (rho-z)*x - y 19 | zd = x*y - beta*z 20 | 21 | # return the state derivatives 22 | return [xd, yd, zd] 23 | 24 | state0 = [2.0, 3.0, 4.0] 25 | t = arange(0.0, 30.0, 0.01) 26 | 27 | state = odeint(Lorenz, state0, t) 28 | 29 | # do some fancy 3D plotting 30 | from mpl_toolkits.mplot3d import Axes3D 31 | fig = figure() 32 | ax = fig.gca(projection='3d') 33 | ax.plot(state[:,0],state[:,1],state[:,2]) 34 | ax.set_xlabel('x') 35 | ax.set_ylabel('y') 36 | ax.set_zlabel('z') 37 | show() 38 | 39 | -------------------------------------------------------------------------------- /code/lorenz2.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def Lorenz(state,t): 6 | # unpack the state vector 7 | x = state[0] 8 | y = state[1] 9 | z = state[2] 10 | 11 | # these are our constants 12 | sigma = 10.0 13 | rho = 28.0 14 | beta = 8.0/3.0 15 | 16 | # compute state derivatives 17 | xd = sigma * (y-x) 18 | yd = (rho-z)*x - y 19 | zd = x*y - beta*z 20 | 21 | # return the state derivatives 22 | return [xd, yd, zd] 23 | 24 | t = arange(0.0, 30, 0.01) 25 | 26 | # original initial conditions 27 | state1_0 = [2.0, 3.0, 4.0] 28 | state1 = odeint(Lorenz, state1_0, t) 29 | 30 | # rerun with very small change in initial conditions 31 | delta = 0.0001 32 | state2_0 = [2.0+delta, 3.0, 4.0] 33 | state2 = odeint(Lorenz, state2_0, t) 34 | 35 | # animation 36 | figure() 37 | pb, = plot(state1[:,0],state1[:,1],'b-',alpha=0.2) 38 | xlabel('x') 39 | ylabel('y') 40 | p, = 
plot(state1[0:10,0],state1[0:10,1],'b-') 41 | pp, = plot(state1[10,0],state1[10,1],'b.',markersize=10) 42 | p2, = plot(state2[0:10,0],state2[0:10,1],'r-') 43 | pp2, = plot(state2[10,0],state2[10,1],'r.',markersize=10) 44 | tt = title("%4.2f sec" % 0.00) 45 | # animate 46 | step = 3 47 | for i in xrange(1,shape(state1)[0]-10,step): 48 | p.set_xdata(state1[10+i:20+i,0]) 49 | p.set_ydata(state1[10+i:20+i,1]) 50 | pp.set_xdata(state1[19+i,0]) 51 | pp.set_ydata(state1[19+i,1]) 52 | p2.set_xdata(state2[10+i:20+i,0]) 53 | p2.set_ydata(state2[10+i:20+i,1]) 54 | pp2.set_xdata(state2[19+i,0]) 55 | pp2.set_ydata(state2[19+i,1]) 56 | tt.set_text("%4.2f sec" % (i*0.01)) 57 | draw() 58 | 59 | i = 1939 # the two simulations really diverge here! 60 | s1 = state1[i,:] 61 | s2 = state2[i,:] 62 | d12 = norm(s1-s2) # distance 63 | print ("distance = %f for a %f different in initial condition") % (d12, delta) 64 | 65 | -------------------------------------------------------------------------------- /code/lotkavolterra.py: -------------------------------------------------------------------------------- 1 | from scipy.integrate import odeint 2 | 3 | def LotkaVolterra(state,t): 4 | x = state[0] 5 | y = state[1] 6 | alpha = 0.1 7 | beta = 0.1 8 | sigma = 0.1 9 | gamma = 0.1 10 | xd = x*(alpha - beta*y) 11 | yd = -y*(gamma - sigma*x) 12 | return [xd,yd] 13 | 14 | t = arange(0,500,1) 15 | state0 = [0.5,0.5] 16 | state = odeint(LotkaVolterra,state0,t) 17 | figure() 18 | plot(t,state) 19 | ylim([0,8]) 20 | xlabel('Time') 21 | ylabel('Population Size') 22 | legend(('x (prey)','y (predator)')) 23 | title('Lotka-Volterra equations') 24 | 25 | # animation in state-space 26 | figure() 27 | pb, = plot(state[:,0],state[:,1],'b-',alpha=0.2) 28 | xlabel('x (prey population size)') 29 | ylabel('y (predator population size)') 30 | p, = plot(state[0:10,0],state[0:10,1],'b-') 31 | pp, = plot(state[10,0],state[10,1],'b.',markersize=10) 32 | tt = title("%4.2f sec" % 0.00) 33 | 34 | # animate 35 | step=2 36 | 
for i in xrange(1,shape(state)[0]-10,step): 37 | p.set_xdata(state[10+i:20+i,0]) 38 | p.set_ydata(state[10+i:20+i,1]) 39 | pp.set_xdata(state[19+i,0]) 40 | pp.set_ydata(state[19+i,1]) 41 | tt.set_text("%d steps" % (i)) 42 | draw() 43 | 44 | -------------------------------------------------------------------------------- /code/mass_spring.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def MassSpring(state,t): 6 | # unpack the state vector 7 | x = state[0] 8 | xd = state[1] 9 | 10 | # these are our constants 11 | k = 2.5 # Newtons per metre 12 | m = 1.5 # Kilograms 13 | g = 9.8 # metres per second 14 | 15 | # compute acceleration xdd 16 | xdd = ((-k*x)/m) + g 17 | 18 | # return the two state derivatives 19 | return [xd, xdd] 20 | 21 | state0 = [0.0, 0.0] 22 | t = arange(0.0, 10.0, 0.1) 23 | 24 | state = odeint(MassSpring, state0, t) 25 | 26 | plot(t, state) 27 | xlabel('TIME (sec)') 28 | ylabel('STATES') 29 | title('Mass-Spring System') 30 | legend(('$x$ (m)', '$\dot{x}$ (m/sec)')) 31 | 32 | -------------------------------------------------------------------------------- /code/minjerk.py: -------------------------------------------------------------------------------- 1 | def minjerk(H1x,H1y,H2x,H2y,t,n): 2 | """ 3 | Given hand initial position H1x,H1y, final position H2x,H2y and movement duration t, 4 | and the total number of desired sampled points n, 5 | Calculates the hand path H over time T that satisfies minimum-jerk. 6 | 7 | Flash, Tamar, and Neville Hogan. "The coordination of arm 8 | movements: an experimentally confirmed mathematical model." The 9 | journal of Neuroscience 5, no. 7 (1985): 1688-1703. 
10 | 11 | """ 12 | T = linspace(0,t,n) 13 | Hx = zeros(n) 14 | Hy = zeros(n) 15 | for i in range(n): 16 | tau = T[i]/t 17 | Hx[i] = H1x + ((H1x-H2x)*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 18 | Hy[i] = H1y + ((H1y-H2y)*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 19 | return T,Hx,Hy 20 | 21 | -------------------------------------------------------------------------------- /code/onejoint_lagrange.py: -------------------------------------------------------------------------------- 1 | from sympy import * 2 | 3 | m,r,i,l,a,t,g = symbols('m r i l a t g') 4 | x = r * sin(a(t)) 5 | y = -r * cos(a(t)) 6 | xd = diff(x,t) 7 | yd = diff(y,t) 8 | Tlin = 0.5 * m * ((xd*xd) + (yd*yd)) 9 | Tlin = simplify(Tlin) 10 | ad = diff(a(t),t) 11 | Trot = 0.5 * i * ad * ad 12 | T = Tlin + Trot 13 | T = simplify(T) 14 | U = m * g * r * (1-cos(a(t))) 15 | L = T - U 16 | L = simplify(L) 17 | Q = diff(diff(L,diff(a(t))),t) - diff(L,a(t)) 18 | pprint(Q) 19 | -------------------------------------------------------------------------------- /code/onejointarm_passive.py: -------------------------------------------------------------------------------- 1 | from scipy.integrate import odeint 2 | 3 | def onejointarm(state,t): 4 | theta = state[0] # joint angle (rad) 5 | theta_dot = state[1] # joint velocity (rad/s) 6 | l = 0.50 # link length (m) 7 | g = 9.81 # gravitational constant (m/s/s) 8 | theta_ddot = -g*sin(theta) / l 9 | return [theta_dot, theta_ddot] 10 | 11 | t = linspace(0.0,10.0,1001) # 10 seconds sampled at 1000 Hz 12 | state0 = [90.0*pi/180.0, 0.0] # 90 deg initial angle, 0 deg/sec initial velocity 13 | state = odeint(onejointarm, state0, t) 14 | 15 | figure() 16 | plot(t,state*180/pi) 17 | legend(('theta','thetadot')) 18 | xlabel('TIME (sec)') 19 | ylabel('THETA (deg) & THETA_DOT (deg/sec)') 20 | 21 | def animate_arm(state,t): 22 | l = 0.5 23 | figure(figsize=(12,6)) 24 | plot(0,0,'r.') 25 | p, = plot((0,l*sin(state[0,0])),(0,-l*cos(state[0,0])),'b-') 26 | tt = title("%4.2f sec" % 0.00) 
27 | xlim([-l-.05,l+.05]) 28 | ylim([-l,.10]) 29 | step = 3 30 | for i in xrange(1,shape(state)[0]-10,step): 31 | p.set_xdata((0,l*sin(state[i,0]))) 32 | p.set_ydata((0,-l*cos(state[i,0]))) 33 | tt.set_text("%4.2f sec" % (i*0.01)) 34 | draw() 35 | 36 | animate_arm(state,t) 37 | -------------------------------------------------------------------------------- /code/onejointmuscle_1.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def onejointmuscle(state,t,Tm): 6 | m = 1.65 # kg 7 | g = -9.81 # m/s/s 8 | l = 0.179 # metres 9 | I = 0.0779 # kg m**2 10 | a = state[0] 11 | ad = state[1] 12 | add = (m*g*lz*cos(a) + Tm) / I 13 | return [ad,add] 14 | 15 | state0 = [30*pi/180, 0] # 30 deg initial position and 0 deg/s initial velocity 16 | t = linspace(0,5,1001) # 0 to 5 seconds at 200 Hz 17 | Tm = 0.0 # muscle torque 18 | state = odeint(onejointmuscle, state0, t, args=(Tm,)) 19 | 20 | def animate_arm(state,t): 21 | l = 0.45 22 | figure() 23 | plot(0,0,'k.',markersize=10) 24 | plot((0,0),(0,.5),'k-',linewidth=2) 25 | plot((0,.5),(0,0),'k--') 26 | plot((-0.5,0.5,0.5,-0.5,-0.5),(-0.5,-0.5,0.5,0.5,-0.5),'k-',linewidth=0.5) 27 | p, = plot((0,l*cos(state[0,0])),(0,l*sin(state[0,0])),'b-') 28 | tt = title("%4.2f sec" % 0.00) 29 | xlim([-l-.05,l+.05]) 30 | ylim([-l-.05,l+.05]) 31 | axis('equal') 32 | step = 3 33 | tt = title("Click on this plot to continue...") 34 | ginput(1) 35 | for i in xrange(1,shape(state)[0]-10,step): 36 | p.set_xdata((0,l*cos(state[i,0]))) 37 | p.set_ydata((0,l*sin(state[i,0]))) 38 | tt.set_text("%4.2f sec" % (i*0.01)) 39 | draw() 40 | 41 | animate_arm(state,t) 42 | -------------------------------------------------------------------------------- /code/onejointmuscle_2.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def 
onejointmuscle(state,t,Tmf,Tme): 6 | m = 1.65 # kg 7 | g = -9.81 # m/s/s 8 | lz = 0.179 # metres 9 | I = 0.0779 # kg m**2 10 | a = state[0] 11 | ad = state[1] 12 | add = (m*g*lz*cos(a) + Tmf - Tme) / I 13 | return [ad,add] 14 | 15 | state0 = [30*pi/180, 0] # 30 deg initial position and 0 deg/s initial velocity 16 | t = linspace(0,5,1001) # 0 to 5 seconds at 200 Hz 17 | Tmf, Tme = 0.0, 0.0 # muscle torques from flexor and extensor muscles 18 | state = odeint(onejointmuscle, state0, t, args=(Tmf,Tme,)) 19 | 20 | def animate_arm(state,t): 21 | l = 0.45 22 | figure() 23 | plot(0,0,'k.',markersize=10) 24 | plot((0,0),(0,.5),'k-',linewidth=2) 25 | plot((0,.5),(0,0),'k--') 26 | plot((-0.5,0.5,0.5,-0.5,-0.5),(-0.5,-0.5,0.5,0.5,-0.5),'k-',linewidth=0.5) 27 | p, = plot((0,l*cos(state[0,0])),(0,l*sin(state[0,0])),'b-') 28 | tt = title("%4.2f sec" % 0.00) 29 | xlim([-l-.05,l+.05]) 30 | ylim([-l-.05,l+.05]) 31 | axis('equal') 32 | step = 3 33 | tt = title("Click on this plot to continue...") 34 | ginput(1) 35 | for i in xrange(1,shape(state)[0]-10,step): 36 | p.set_xdata((0,l*cos(state[i,0]))) 37 | p.set_ydata((0,l*sin(state[i,0]))) 38 | tt.set_text("%4.2f sec" % (i*0.01)) 39 | draw() 40 | 41 | animate_arm(state,t) 42 | -------------------------------------------------------------------------------- /code/onejointmuscle_3.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def onejointmuscle(state,t,a0): 6 | m = 1.65 # kg 7 | g = -9.81 # m/s/s 8 | lz = 0.179 # metres 9 | I = 0.0779 # kg m**2 10 | k = -10.0 # Nm/rad 11 | a = state[0] 12 | ad = state[1] 13 | Tmf = max(k*(a-a0),0) 14 | Tme = min(k*(a-a0),0) 15 | add = (m*g*lz*cos(a) + Tmf + Tme) / I 16 | return [ad,add] 17 | 18 | state0 = [30*pi/180, 0] # 30 deg initial position and 0 deg/s initial velocity 19 | t = linspace(0,5,1001) # 0 to 5 seconds at 200 Hz 20 | a0 = 30*pi/180 # rest angle for muscles 21 | state = 
odeint(onejointmuscle, state0, t, args=(a0,)) 22 | 23 | def animate_arm(state,t): 24 | l = 0.45 25 | figure() 26 | plot(0,0,'k.',markersize=10) 27 | plot((0,0),(0,.5),'k-',linewidth=2) 28 | plot((0,.5),(0,0),'k--') 29 | plot((-0.5,0.5,0.5,-0.5,-0.5),(-0.5,-0.5,0.5,0.5,-0.5),'k-',linewidth=0.5) 30 | p, = plot((0,l*cos(state[0,0])),(0,l*sin(state[0,0])),'b-') 31 | tt = title("%4.2f sec" % 0.00) 32 | xlim([-l-.05,l+.05]) 33 | ylim([-l-.05,l+.05]) 34 | axis('equal') 35 | step = 3 36 | tt = title("Click on this plot to continue...") 37 | ginput(1) 38 | for i in xrange(1,shape(state)[0]-10,step): 39 | p.set_xdata((0,l*cos(state[i,0]))) 40 | p.set_ydata((0,l*sin(state[i,0]))) 41 | tt.set_text("%4.2f sec" % (i*0.01)) 42 | draw() 43 | 44 | animate_arm(state,t) 45 | -------------------------------------------------------------------------------- /code/onejointmuscle_4.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | from scipy.integrate import odeint 4 | 5 | def onejointmuscle(state,t,a0): 6 | m = 1.65 # kg 7 | g = -9.81 # m/s/s 8 | lz = 0.179 # metres 9 | I = 0.0779 # kg m**2 10 | k = -10.0 # Nm/rad 11 | b = 0.5 # Nms/rad 12 | a = state[0] 13 | ad = state[1] 14 | Tmf = max((k*(a-a0)) - (b*ad),0) 15 | Tme = min((k*(a-a0)) - (b*ad),0) 16 | add = (m*g*lz*cos(a) + Tmf + Tme) / I 17 | return [ad,add] 18 | 19 | state0 = [30*pi/180, 0] # 30 deg initial position and 0 deg/s initial velocity 20 | t = linspace(0,5,1001) # 0 to 5 seconds at 200 Hz 21 | a0 = 30*pi/180 # rest angle for muscles 22 | state = odeint(onejointmuscle, state0, t, args=(a0,)) 23 | 24 | def animate_arm(state,t): 25 | l = 0.45 26 | figure() 27 | plot(0,0,'k.',markersize=10) 28 | plot((0,0),(0,.5),'k-',linewidth=2) 29 | plot((0,.5),(0,0),'k--') 30 | plot((-0.5,0.5,0.5,-0.5,-0.5),(-0.5,-0.5,0.5,0.5,-0.5),'k-',linewidth=0.5) 31 | p, = plot((0,l*cos(state[0,0])),(0,l*sin(state[0,0])),'b-') 32 | tt = title("%4.2f sec" % 0.00) 33 | xlim([-l-.05,l+.05]) 
34 | ylim([-l-.05,l+.05]) 35 | axis('equal') 36 | step = 3 37 | tt = title("Click on this plot to continue...") 38 | ginput(1) 39 | for i in xrange(1,shape(state)[0]-10,step): 40 | p.set_xdata((0,l*cos(state[i,0]))) 41 | p.set_ydata((0,l*sin(state[i,0]))) 42 | tt.set_text("%4.2f sec" % (i*0.01)) 43 | draw() 44 | 45 | animate_arm(state,t) 46 | -------------------------------------------------------------------------------- /code/optimizer_example.py: -------------------------------------------------------------------------------- 1 | from scipy.integrate import odeint 2 | 3 | # here's our error function 4 | # For a given input x, it returns the error as output 5 | def myFun(x): 6 | return (x-5.0)**2.0 + 3.0 7 | 8 | 9 | # here's a list of candidate values to try 10 | values = arange(-10, 20, 0.1) 11 | 12 | # initialize an array to store the results 13 | # fill it with zeros for now 14 | out = zeros(size(values)) 15 | 16 | # loop over the candidate inputs, computing error 17 | # store each one in our out array 18 | for i in arange(0,size(values),1): 19 | out[i] = myFun(values[i]) 20 | 21 | # plot the relationship between input and error 22 | plot(values, out) 23 | xlabel('INPUT VALUES') 24 | ylabel('OUTPUT VALUES') 25 | 26 | # now let's use an optimizer to find the best input automatically 27 | # it will find the input that minimizes the error function 28 | 29 | from scipy import optimize 30 | 31 | best_in = optimize.fminbound(myFun, -10.0, 10.0) 32 | 33 | ################################### 34 | 35 | # here is how you might structure your assignment, question #4 36 | 37 | # your ode function for baseball simulation 38 | def Baseball(state, t): 39 | # blha blah blah 40 | state_d = ... 41 | return state_d 42 | 43 | # your error function that relates initial state [x0,y0] to error 44 | def myErrFun(state0): 45 | t = ... 46 | state = odeint(Baseball, state0, t) 47 | xpos_at_y0 = .... 48 | error = xpos_at_y0 - 100.0 49 | return error 50 | 51 | state0_guess = [...] 
52 | best_state0 = optimize.fmin(myErrFun, state0_guess) 53 | 54 | 55 | -------------------------------------------------------------------------------- /code/som1.m: -------------------------------------------------------------------------------- 1 | clear; clf; 2 | 3 | % Self-organizing feature map in two dimensions 4 | n_out= 20; % number of nodes in the output layer 5 | k=.01; % learning rate 6 | sig=1.5; % width of neighborhood function 7 | sig_inv=1/(2*sig^2); 8 | [I,J] = meshgrid(1:n_out, 1:n_out); 9 | 10 | r=zeros(n_out); 11 | 12 | % design some random non-uniform clusters of data 13 | R = [randn(2,100)*.08 + .3, randn(2,100)*.1 + .6, ... 14 | randn(2,100)*.05 + repmat([.2; .7],1,100)]; 15 | plot(R(1,:),R(2,:),'r.'); 16 | axis([-.1 1.1 -.1 1.1]); 17 | hold on 18 | 19 | % initialize weights on a regular grid 20 | for i=1:n_out 21 | for j=1:n_out 22 | w1(i,j)=(i/n_out)*1; 23 | w2(i,j)=(j/n_out)*1; 24 | end; 25 | end; 26 | 27 | hold on; 28 | wp1=plot(w1,w2,'k'); 29 | wp2=plot(w1',w2','k'); 30 | axis([-.1 1.1 -.1 1.1]); xlabel('w1'); ylabel('w2'); 31 | drawnow 32 | pause 33 | 34 | % iterate and adjust weights 35 | for epochs=1:500; 36 | for i_example=1:size(R,2) 37 | r_in=R(:,i_example); 38 | % calculate winner 39 | r=exp(-(w1-r_in(1)).^2-(w2-r_in(2)).^2); 40 | [rmax,i_winner]=max(max(r)); 41 | [rmax,j_winner]=max(max(r')); 42 | % update weight vectors using neighborhood function (sig_inv) 43 | r=exp(-((I-i_winner).^2+(J-j_winner).^2)*sig_inv); 44 | w1=w1+k*r.*(r_in(1)-w1); 45 | w2=w2+k*r.*(r_in(2)-w2); 46 | end; 47 | delete(wp1); delete(wp2); 48 | wp1=plot(w1,w2,'k'); 49 | wp2=plot(w1',w2','k'); 50 | axis([-.1 1.1 -.1 1.1]); 51 | title(num2str(epochs)); 52 | drawnow 53 | pause(0.05); 54 | end; 55 | 56 | -------------------------------------------------------------------------------- /code/traindata.pickle: -------------------------------------------------------------------------------- 1 | (dp0 2 | S'inputs' 3 | p1 4 | cnumpy.core.multiarray 5 | _reconstruct 6 
| p2 7 | (cnumpy.matrixlib.defmatrix 8 | matrix 9 | p3 10 | (I0 11 | tp4 12 | S'b' 13 | p5 14 | tp6 15 | Rp7 16 | (I1 17 | (I100 18 | I2 19 | tp8 20 | cnumpy 21 | dtype 22 | p9 23 | (S'f8' 24 | p10 25 | I0 26 | I1 27 | tp11 28 | Rp12 29 | (I3 30 | S'<' 31 | p13 32 | NNNI-1 33 | I-1 34 | I0 35 | tp14 36 | bI00 37 | S'J\x9c\xbcq+\xb2\x15\xc0Y\xa2\xf8\x92\xda9\xf2?u\xee\xd7\x9f\xbe\xf4\xa6?k@\xf2&?\xf4\xe1?brsH\xed\xff\xf1\xbfT\x8c\xec\x1a.\x80\xfe\xbfk\xc3G;?\xa8\x15\xc0\xfa\xdb\x04\x00L\xf6\xd8?f\xfc\xe1\xe6\xd96\x11\xc0\x1c\xca3\xd4ed\xf0\xbf\xeac\x88\xb7\x08\x1f\xf1?\xab\xeb|n,_\xe5\xbf\xc7\xe12\xd4\xd0\xfc?4\xf3\xaf\x1eq\xa0*@!t\xbcX9I\x00\xc0\rs5;.s\x1b@\x1a\x98lc\x7f\xae\xfc?\xe3\xbf\xb0\xd8\xf9C%@&\xc6\xe4ia-\x04@\x10>e=N\x0c#@\xbdR\xef?2\xc6\xbf\xbf\xeeljz\x01\x98"@)\x89\xfa\xbc\xc4\xf6\xf9?\xf2\xb5\xce\xabU\xf7\x1c@2\x0en\x1dp\x88\x15\xc0&\x9a\x9590\xcd+@\xec\xf2\xea\xe2\xfaV\xfd?-\xd2\xf6B\x8e4$@\xf4=\xa1"\xbf\xa0\x15\xc0\x12\xce\x94\x946\xf6\x1e@1\xcd@\xc9A1\xe3?\xa1\x87\x8d\xbb\xa6\xc3 @4K\xc1yC\x96\x0b@\xf6(\xcc_R\xcc)@(,|R\xac\xad\xd8\xbfP\xb9f;\xa3\xdc$@G7\xf6\xbbo\x10\t@\xfc\xb7]\xebb\xd5\x14@(\xb3\xeb\xcb\x87\x9a\x1a@\x07\xea\x9e~/]%@{%\x8bs\xe5p\xf9?\x99%B2KD(@\x0c\xb0\x10\xa3\x07\x93\xe8?r\xe2Q\xe2\x01\xee,@\xf4\xa6lP\xaa\x18\x0b@\x9ab\xed\x0e\t\xae @\xa1\xe3%\x120\x1f\x08@\xd7f\xfa,\xa2\xf9-@\xed\xcc\x98GC\xdf\xfe\xbf\xe6\xd5\x13\x0b\x08\x03!@\xc4\xf6L\xe8\x90\x8a\xd3?v\x08\xf8\x1cC\x1d*@\x14\xday\xac\xc1\r\x07\xc0n\x93\xf39\xb4\xe7\x1d@W:\xc5\x02@\x0c\xfb?\x93t\x9c\xe6\x1dY @\xbcpv\x80\x95P\xce\xbf\x8c\x12=\xf9+\x96\x19@W\xf1\xfaZ\x0bX\xec\xbf8Z\x03\xb3T\x05\xff\xbfZ\xbb[\x9b\xa2{\x1d@\x8en\xbf_G\xd1\xdc?\xb5\x02\x1d\xe5^\xf1\x1d@\x86\x1f(\xb9\xee\xcc\x15@\xaa\xd7u\x87"G\x1a@z\xa3\x8a!\xfa\xce\xe4?\xe4S\xb0i\xben"@@\x9e:\xed\xef\x14\xd0\xbf\xcc\xff\xcb\xd7\x07\xda(@T\xcd@\xe0\xe3.\xcd\xbf\xcb\xdf\x03t^\x7f 
@\xd3\xc1\xfcy\xebH\xe4?\x0f\x15|gP\x03\x1f@\x94\x1f\x01\xdc\xb6\xa9\xf0\xbf]\xab\xde\x9f\xe9\x80\'@\x93\x1b\xde\xff6-\x02\xc0\x91:tTx(\'@\xfbp\x8a\x83\xf9Q\xe9\xbf\xec\x1d\x7f{\xd9\x91*@1b\xad\xff\xf3`\x05@\x9c\x17\xa68\xa1D$@\x11\xb1t\xdao\xcc\xf5\xbf%\xb5|\x1e#y"@\x8a>^\xc3\xf2\x1b\x0c@\xdf\xd8\xeb\x81\xdf\x14.@Y\x8c\xbeV\x12\xca\x13\xc0\xa1/>\x19v\xd0$@\xbe\xc7\x91z\xb3\x9a\x05\xc0\xdb`\xb8\x19<\x8b\x1f@\x192\xfd\x0e\x83f\xd1\xbf`Z2T\xe0\xb8"@\xca\xa6\xa5\xf6BS\xfd?\xa5\x17\x16\xfa@\x8e"@\xa9\xaa\x04\xaa\x93\x93\x0b@\xc9\xfa\x8b\xfcE\x81)@,;\x8a2B\r\xfd?\x97\x89D\xcb9\r&@\x1bF(s\x8d\x94\x1a\xc0\x13|b\x9bQ\x9b\x1f@2\xd5\x90c\xa6\xc9\x15\xc0`\xffT\t\xb9g#@\xd5Y\xaeGm\xe7\xf2?\x9a\\V\x1ei\x0b\'@\x11\xce\xc8\xdb6E\x07\xc06[\x82\xe7\x16\xa2%@\xef\x8c#\xa8+\x81\xf2?\x9b.\x7f\xed\xa0\x94$@\xd0+/+j\x1a\x1e\xc0B"ddo\x0f&@\xf9_\xfd\x11\xa0M,@=\xe5\xff\xeb?9!@\x99\xca\xa7\xc4\xa5*#@\xbf0\xb5\xd5|m\x1a@\xc0\x1b\x19\xc8\x1cD(@]\x04\x1fu\xb2\xc6\'@\x04\xcc\xe7\x85\xeaP%@(\t/1\x17o0@\xe8\x8d\x96\x1bC\xbd\x07@t\x89\xec\xcb\x96\xc5"@F\x96j\xd6:\xe4"@\xab\xfeQa\x86\xea,@\xc4WQ\x9alI-@\xb8\x9c\nV\xadu\x07@v\xa5vPfH(@2\t7\x19;\x16#@\xea\x7f\xe6$\xb7\xb8\x1c@\xfb\x8dN\x16G\xa6$@z\x1a\xb9s\x95\xa8\x15@\xfb\x1f\xaf\x07\xdb\x10$@E)\xf7\xd82\xc4\x10@\xed|\xdei\xd5?\'@}\xc0Gp \xa7%@\xa6i0W>\x14$@\xce\xb02\x87\x80\x86$@2\x03\xdf\xeeHc @\xe4\xd3\xcd\x92\x8d\x04\x19@(SV\xd3g\xa7\x1b@\xc2\xc2\x05sR\xee"@\x9c\x06\x9d\x87\x81\xbe\x1f@\x93\xa2s\xd5\x84\xdc @\xb4\t\xd58>0\x0f@>\xd9\xdc\xee\x917(@\xe6v|\xa7\xb4.\x13@\xca\x93\x10\xd1QE\x19@\xa4\x0b3\xc0\xc1d+@\x0b\x1a\xa4(\xb84"@r\xaf\x84N\xf7]\x1b@V\xd0Lb6y(@P&\xc7\xe1\xbf\x9b\x0f@\xf0&\x98\x86\xb2[%@\xab\xba\x19\xf4qS!@\xec$9N\x02K)@\xaf\xbc\xce\xed\x14\x02&@b\xd9OP\xc2\xaf\x18@b\xdf9\xd4\x8c\x19)@\xda\x10+i\xa5\xa3\x1c@\xc2\xe5\x9b\rC$ @\xb2\xe5\x80h\xdez1@j!\xe18d("@' 38 | p15 39 | tp16 40 | bsS'outputs' 41 | p17 42 | g2 43 | (g3 44 | (I0 45 | tp18 46 | g5 47 | tp19 48 | Rp20 49 | (I1 50 | (I100 51 | I1 52 | tp21 53 | g9 54 | (S'i8' 55 | p22 
56 | I0 57 | I1 58 | tp23 59 | Rp24 60 | (I3 61 | S'<' 62 | p25 63 | NNNI-1 64 | I-1 65 | I0 66 | tp26 67 | bI00 68 | S'\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00
\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00' 69 | p27 70 | tp28 71 | bs. -------------------------------------------------------------------------------- /code/twojointarm.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | # two joint arm in a horizontal plane, no gravity 4 | 5 | # compute a min-jerk trajectory 6 | def minjerk(H1,H2,t,n): 7 | """ 8 | Given hand initial position H1=(x1,y1), final position H2=(x2,y2) and movement duration t, 9 | and the total number of desired sampled points n, 10 | Calculates the hand path H over time T that satisfies minimum-jerk. 11 | Also returns derivatives Hd and Hdd 12 | 13 | Flash, Tamar, and Neville Hogan. 
"The coordination of arm 14 | movements: an experimentally confirmed mathematical model." The 15 | journal of Neuroscience 5, no. 7 (1985): 1688-1703. 16 | """ 17 | T = linspace(0,t,n) 18 | H = zeros((n,2)) 19 | Hd = zeros((n,2)) 20 | Hdd = zeros((n,2)) 21 | for i in range(n): 22 | tau = T[i]/t 23 | H[i,0] = H1[0] + ((H1[0]-H2[0])*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 24 | H[i,1] = H1[1] + ((H1[1]-H2[1])*(15*(tau**4) - (6*tau**5) - (10*tau**3))) 25 | Hd[i,0] = (H1[0] - H2[0])*(-30*T[i]**4/t**5 + 60*T[i]**3/t**4 - 30*T[i]**2/t**3) 26 | Hd[i,1] = (H1[1] - H2[1])*(-30*T[i]**4/t**5 + 60*T[i]**3/t**4 - 30*T[i]**2/t**3) 27 | Hdd[i,0] = (H1[0] - H2[0])*(-120*T[i]**3/t**5 + 180*T[i]**2/t**4 - 60*T[i]/t**3) 28 | Hdd[i,1] = (H1[1] - H2[1])*(-120*T[i]**3/t**5 + 180*T[i]**2/t**4 - 60*T[i]/t**3) 29 | return T,H,Hd,Hdd 30 | 31 | # forward kinematics 32 | def joints_to_hand(A,aparams): 33 | """ 34 | Given joint angles A=(a1,a2) and anthropometric params aparams, 35 | returns hand position H=(hx,hy) and elbow position E=(ex,ey) 36 | Note: A must be type matrix 37 | """ 38 | l1 = aparams['l1'] 39 | l2 = aparams['l2'] 40 | n = shape(A)[0] 41 | E = zeros((n,2)) 42 | H = zeros((n,2)) 43 | for i in range(n): 44 | E[i,0] = l1 * cos(A[i,0]) 45 | E[i,1] = l1 * sin(A[i,0]) 46 | H[i,0] = E[i,0] + (l2 * cos(A[i,0]+A[i,1])) 47 | H[i,1] = E[i,1] + (l2 * sin(A[i,0]+A[i,1])) 48 | return H,E 49 | 50 | # inverse kinematics 51 | def hand_to_joints(H,aparams): 52 | """ 53 | Given hand position H=(hx,hy) and anthropometric params aparams, 54 | returns joint angles A=(a1,a2) 55 | Note: H must be type matrix 56 | """ 57 | l1 = aparams['l1'] 58 | l2 = aparams['l2'] 59 | n = shape(H)[0] 60 | A = zeros((n,2)) 61 | for i in range(n): 62 | A[i,1] = arccos(((H[i,0]*H[i,0])+(H[i,1]*H[i,1])-(l1*l1)-(l2*l2))/(2.0*l1*l2)) 63 | A[i,0] = arctan(H[i,1]/H[i,0]) - arctan((l2*sin(A[i,1]))/(l1+(l2*cos(A[i,1])))) 64 | if A[i,0] < 0: 65 | A[i,0] = A[i,0] + pi 66 | elif A[i,0] > pi: 67 | A[i,0] = A[i,0] - pi 68 | 
return A 69 | 70 | # jacobian matrix J(q) = dx/da 71 | def jacobian(A,aparams): 72 | """ 73 | Given joint angles A=(a1,a2) 74 | returns the Jacobian matrix J(q) = dx/dA 75 | """ 76 | l1 = aparams['l1'] 77 | l2 = aparams['l2'] 78 | dx1dA1 = -l1*sin(A[0]) - l2*sin(A[0]+A[1]) 79 | dx1dA2 = -l2*sin(A[0]+A[1]) 80 | dx2dA1 = l1*cos(A[0]) + l2*cos(A[0]+A[1]) 81 | dx2dA2 = l2*cos(A[0]+A[1]) 82 | J = matrix([[dx1dA1,dx1dA2],[dx2dA1,dx2dA2]]) 83 | return J 84 | 85 | # jacobian matrix Jd(q) 86 | def jacobiand(A,Ad,aparams): 87 | """ 88 | Given joint angles A=(a1,a2) and velocities Ad=(a1d,a2d) 89 | returns the time derivative of the Jacobian matrix d/dt (J) 90 | """ 91 | l1 = aparams['l1'] 92 | l2 = aparams['l2'] 93 | Jd11 = -l1*cos(A[0])*Ad[0] - l2*(Ad[0] + Ad[1])*cos(A[0] + A[1]) 94 | Jd12 = -l2*(Ad[0] + Ad[1])*cos(A[0] + A[1]) 95 | Jd21 = -l1*sin(A[0])*Ad[0] - l2*(Ad[0] + Ad[1])*sin(A[0] + A[1]) 96 | Jd22 = -l2*(Ad[0] + Ad[1])*sin(A[0] + A[1]) 97 | Jd = matrix([[Jd11, Jd12],[Jd21, Jd22]]) 98 | return Jd 99 | 100 | # utility function for interpolating torque inputs 101 | def getTorque(TorquesIN, TorquesTIME, ti): 102 | """ 103 | Given a desired torque command (TorquesIN) defined over a time vector (TorquesTIME), 104 | returns an interpolated torque command at an intermediate time point ti 105 | Note: TorquesIN and TorquesTIME must be type matrix 106 | """ 107 | t1 = interp(ti, TorquesTIME, TorquesIN[:,0]) 108 | t2 = interp(ti, TorquesTIME, TorquesIN[:,1]) 109 | return matrix([[t1],[t2]]) 110 | 111 | # utility function for computing some limb dynamics terms 112 | def compute_dynamics_terms(A,Ad,aparams): 113 | """ 114 | Given a desired set of joint angles A=(a1,a2) and joint velocities Ad=(a1d,a2d), 115 | returns M and C matrices associated with inertial and centrifugal/coriolis terms 116 | """ 117 | a1,a2,a1d,a2d = A[0],A[1],Ad[0],Ad[1] 118 | l1,l2 = aparams['l1'], aparams['l2'] 119 | m1,m2 = aparams['m1'], aparams['m2'] 120 | i1,i2 = aparams['i1'], aparams['i2'] 121 | 
r1,r2 = aparams['r1'], aparams['r2'] 122 | M11 = i1 + i2 + (m1*r1*r1) + (m2*((l1*l1) + (r2*r2) + (2*l1*r2*cos(a2)))) 123 | M12 = i2 + (m2*((r2*r2) + (l1*r2*cos(a2)))) 124 | M21 = M12 125 | M22 = i2 + (m2*r2*r2) 126 | M = matrix([[M11,M12],[M21,M22]]) 127 | C1 = -(m2*l1*a2d*a2d*r2*sin(a2)) - (2*m2*l1*a1d*a2d*r2*sin(a2)) 128 | C2 = m2*l1*a1d*a1d*r2*sin(a2) 129 | C = matrix([[C1],[C2]]) 130 | return M,C 131 | 132 | # inverse dynamics 133 | def inverse_dynamics(A,Ad,Add,aparams): 134 | """ 135 | inverse dynamics of a two-link planar arm 136 | Given joint angles A=(a1,a2), velocities Ad=(a1d,a2d) and accelerations Add=(a1dd,a2dd), 137 | returns joint torques Q required to generate that movement 138 | Note: A, Ad and Add must be type matrix 139 | """ 140 | n = shape(A)[0] 141 | T = zeros((n,2)) 142 | for i in range(n): 143 | M,C = compute_dynamics_terms(A[i,:],Ad[i,:],aparams) 144 | ACC = matrix([[Add[i,0]],[Add[i,1]]]) 145 | Qi = M*ACC + C 146 | T[i,0],T[i,1] = Qi[0,0],Qi[1,0] 147 | return T 148 | 149 | # forward dynamics 150 | def forward_dynamics(state, t, aparams, TorquesIN, TorquesTIME): 151 | """ 152 | forward dynamics of a two-link planar arm 153 | note: TorquesIN and TorquesTIME must be type matrix 154 | """ 155 | a1, a2, a1d, a2d = state # unpack the four state variables 156 | Q = getTorque(TorquesIN, TorquesTIME, t) 157 | M,C = compute_dynamics_terms(state[0:2],state[2:4],aparams) 158 | # Q = M*ACC + C 159 | ACC = inv(M) * (Q-C) 160 | return [a1d, a2d, ACC[0,0], ACC[1,0]] 161 | 162 | # Utility function to return hand+joint kinematics for 163 | # a min-jerk trajectory between H1 and H2 in movtime with 164 | # time padding padtime at beginning and end of movement 165 | def get_min_jerk_movement(H1,H2,movtime,padtime=0.2): 166 | # create a desired min-jerk hand trajectory 167 | t,H,Hd,Hdd = minjerk(H1,H2,movtime,100) 168 | # pad it with some hold time on each end 169 | t = append(append(0.0, t+padtime), t[-1]+padtime+padtime) 170 | H = vstack((H[0,:],H,H[-1,:])) 
171 | Hd = vstack((Hd[0,:],Hd,Hd[-1,:])) 172 | Hdd = vstack((Hdd[0,:],Hdd,Hdd[-1,:])) 173 | # interpolate to get equal spacing over time 174 | ti = linspace(t[0],t[-1],100) 175 | hxi = interp(ti, t, H[:,0]) 176 | hyi = interp(ti, t, H[:,1]) 177 | H = zeros((len(ti),2)) 178 | H[:,0],H[:,1] = hxi,hyi 179 | hxdi = interp(ti, t, Hd[:,0]) 180 | hydi = interp(ti, t, Hd[:,1]) 181 | Hd = zeros((len(ti),2)) 182 | Hd[:,0],Hd[:,1] = hxdi,hydi 183 | hxddi = interp(ti, t, Hdd[:,0]) 184 | hyddi = interp(ti, t, Hdd[:,1]) 185 | Hdd = zeros((len(ti),2)) 186 | Hdd[:,0],Hdd[:,1] = hxddi,hyddi 187 | t = ti 188 | A = zeros((len(t),2)) 189 | Ad = zeros((len(t),2)) 190 | Add = zeros((len(t),2)) 191 | # use inverse kinematics to compute desired joint angles 192 | A = hand_to_joints(H,aparams) 193 | # use jacobian to transform hand vels & accels to joint vels & accels 194 | for i in range(len(t)): 195 | J = jacobian(A[i,:],aparams) 196 | Ad[i,:] = transpose(inv(J) * matrix([[Hd[i,0]],[Hd[i,1]]])) 197 | Jd = jacobiand(A[i,:],Ad[i,:],aparams) 198 | b = matrix([[Hdd[i,0]],[Hdd[i,1]]]) - Jd*matrix([[Ad[i,0]],[Ad[i,1]]]) 199 | Add[i,:] = transpose(inv(J) * b) 200 | return t,H,A,Ad,Add 201 | 202 | # utility function to plot a trajectory 203 | def plot_trajectory(t,H,A): 204 | """ 205 | Note: H and A must be of type matrix 206 | """ 207 | hx,hy = H[:,0],H[:,1] 208 | a1,a2 = A[:,0],A[:,1] 209 | figure() 210 | subplot(2,2,1) 211 | plot(t,hx,t,hy) 212 | ylim(min(min(hx),min(hy))-0.03, max(max(hx),max(hy))+0.03) 213 | xlabel('TIME (sec)') 214 | ylabel('HAND POS (m)') 215 | legend(('Hx','Hy')) 216 | subplot(2,2,2) 217 | plot(hx,hy,'.') 218 | axis('equal') 219 | plot(hx[0],hy[0],'go',markersize=8) 220 | plot(hx[-1],hy[-1],'ro',markersize=8) 221 | xlabel('HAND X POS (m)') 222 | ylabel('HAND Y POS (m)') 223 | subplot(2,2,3) 224 | plot(t,a1*180/pi,t,a2*180/pi) 225 | ylim(min(min(a1),min(a1))*180/pi - 5, max(max(a2),max(a2))*180/pi + 5) 226 | xlabel('TIME (sec)') 227 | ylabel('JOINT ANGLE (deg)') 228 | 
legend(('a1','a2')) 229 | subplot(2,2,4) 230 | plot(a1*180/pi,a2*180/pi,'.') 231 | plot(a1[0]*180/pi,a2[0]*180/pi,'go',markersize=8) 232 | plot(a1[-1]*180/pi,a2[-1]*180/pi,'ro',markersize=8) 233 | axis('equal') 234 | xlabel('SHOULDER ANGLE (deg)') 235 | ylabel('ELBOW ANGLE (deg)') 236 | 237 | def animatearm(state,t,aparams,step=3,crumbs=0): 238 | """ 239 | animate the twojointarm 240 | """ 241 | A = state[:,[0,1]] 242 | A[:,0] = A[:,0] 243 | H,E = joints_to_hand(A,aparams) 244 | l1,l2 = aparams['l1'], aparams['l2'] 245 | figure() 246 | plot(0,0,'b.') 247 | p1, = plot(E[0,0],E[0,1],'b.') 248 | p2, = plot(H[0,0],H[0,1],'b.') 249 | p3, = plot((0,E[0,0],H[0,0]),(0,E[0,1],H[0,1]),'b-') 250 | xlim([-l1-l2, l1+l2]) 251 | ylim([-l1-l2, l1+l2]) 252 | dt = t[1]-t[0] 253 | tt = title("Click on this plot to continue...") 254 | ginput(1) 255 | for i in xrange(0,shape(state)[0]-step,step): 256 | p1.set_xdata((E[i,0])) 257 | p1.set_ydata((E[i,1])) 258 | p2.set_xdata((H[i,0])) 259 | p2.set_ydata((H[i,1])) 260 | p3.set_xdata((0,E[i,0],H[i,0])) 261 | p3.set_ydata((0,E[i,1],H[i,1])) 262 | if crumbs==1: 263 | plot(H[i,0],H[i,1],'b.') 264 | tt.set_text("%4.2f sec" % (i*dt)) 265 | draw() 266 | 267 | 268 | ############################################################################## 269 | ############################# THE FUN PART ################################# 270 | ############################################################################## 271 | 272 | # anthropometric parameters of the arm 273 | aparams = { 274 | 'l1' : 0.3384, # metres 275 | 'l2' : 0.4554, 276 | 'r1' : 0.1692, 277 | 'r2' : 0.2277, 278 | 'm1' : 2.10, # kg 279 | 'm2' : 1.65, 280 | 'i1' : 0.025, # kg*m*m 281 | 'i2' : 0.075, 282 | } 283 | 284 | # Get a desired trajectory between two arm positions defined by 285 | # a min-jerk trajectory in Hand-space 286 | 287 | H1 = [-0.2, 0.4] # hand initial position 288 | H2 = [-0.2, 0.6] # hand final target 289 | mt = 0.500 # 500 milliseconds movement time 290 | 291 | # get 
min-jerk desired kinematic trajectory 292 | 293 | t,H,A,Ad,Add = get_min_jerk_movement(H1,H2,mt) 294 | plot_trajectory(t,H,A) 295 | 296 | # now compute required joint torques using inverse dynamics equations of motion 297 | 298 | TorquesIN = inverse_dynamics(A,Ad,Add,aparams) 299 | figure() 300 | plot(t,TorquesIN) 301 | legend(('torque1','torque2')) 302 | 303 | # now do a forward simulation using forward dynamics equations of motion 304 | # just to demonstrate that indeed the TorquesIN do in fact generate 305 | # the desired arm movement 306 | 307 | from scipy.integrate import odeint 308 | from scipy.interpolate import interp1d 309 | 310 | state0 = [A[0,0], A[0,1], Ad[0,0], Ad[0,1]] 311 | tt = linspace(t[0],t[-1],100) 312 | state = odeint(forward_dynamics, state0, tt, args=(aparams, TorquesIN, t,)) 313 | 314 | # run through forward kinematics equations to get hand trajectory and plot 315 | 316 | Hsim,Esim = joints_to_hand(state,aparams) 317 | plot_trajectory(tt,Hsim,state[:,[0,1]]) 318 | 319 | animatearm(state,tt,aparams) 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 340 | 341 | 342 | -------------------------------------------------------------------------------- /code/twojointarm_game.py: -------------------------------------------------------------------------------- 1 | # Paul Gribble 2 | # paul [at] gribblelab [dot] org 3 | # Oct 31, 2012 4 | 5 | import pygame 6 | import numpy 7 | import math 8 | 9 | numpy.random.seed(10) 10 | 11 | showarm = True 12 | 13 | # Define some colors 14 | black = ( 0, 0, 0) 15 | white = ( 255, 255, 255) 16 | green = ( 0, 255, 0) 17 | red = ( 255, 0, 0) 18 | 19 | targetsize = 30 # radius 20 | ballsize = 5 # radius 21 | 22 | # Function for computing distance to target 23 | def dist_to_target(xh,hy,tx,ty): 24 | return math.sqrt((hx-(tx+targetsize))**2 + (hy-(ty+targetsize))**2) 25 | 26 | # Function for displaying text 27 | def printText(txtText, Textfont, Textsize 
, Textx, Texty, Textcolor): 28 | # pick a font you have and set its size 29 | myfont = pygame.font.SysFont(Textfont, Textsize) 30 | # apply it to text on a label 31 | label = myfont.render(txtText, 1, Textcolor) 32 | # put the label object on the screen at point Textx, Texty 33 | screen.blit(label, (Textx, Texty)) 34 | 35 | # Function for computing hand pos from joint angles 36 | def joint_to_xy(sx,sy,s,e,l1,l2,ppm): 37 | exi = sx + (l1*ppm)*math.cos(s) 38 | eyi = sy - (l1*ppm)*math.sin(s) 39 | hxi = exi + (l2*ppm)*math.cos(s+e) 40 | hyi = eyi - (l2*ppm)*math.sin(s+e) 41 | return exi,eyi,hxi,hyi 42 | 43 | # Function to draw arm 44 | def draw_arm(screen,sx,sy,ex,ey,hx,hy,l1,l2): 45 | pygame.draw.lines(screen,black,False,[(sx,sy),(ex,ey),(hx,hy)],1) 46 | 47 | # Function to get a new random target 48 | def getnewtarget(sx,sy,l1,l2,ppm): 49 | slim = [15*math.pi/180, 90*math.pi/180] 50 | elim = [45*math.pi/180, 90*math.pi/180] 51 | stgt = (numpy.random.random() * (slim[1]-slim[0])) + slim[0] 52 | etgt = (numpy.random.random() * (elim[1]-elim[0])) + elim[0] 53 | ex,ey,tx,ty = joint_to_xy(sx,sy,stgt,etgt,l1,l2,ppm) 54 | return tx,ty 55 | 56 | # Setup 57 | pygame.init() 58 | 59 | # Set the width and height of the screen [width,height] 60 | ssize=[800,800] 61 | screen=pygame.display.set_mode(ssize) 62 | 63 | pygame.display.set_caption("hit the red targets with the black dot") 64 | 65 | #Loop until the user clicks the close button. 
66 | done=False 67 | 68 | # Used to manage how fast the screen updates 69 | clock=pygame.time.Clock() 70 | 71 | # Hide the mouse cursor 72 | pygame.mouse.set_visible(0) 73 | 74 | # time increment (frames per second) 75 | dt = 30.0 76 | dti = 1.0/dt 77 | 78 | # arm geometry 79 | sx = ssize[0] / 2 80 | sy = ssize[1] - (ssize[1]/4) 81 | l1 = 0.34 # metres 82 | l2 = 0.46 # metres 83 | ppm = 500 # pixels per metre 84 | m1 = 2.1 # kg 85 | m2 = 1.65 # kg 86 | i1 = 0.025 87 | i2 = 0.075 88 | # keypress torque [Nm/s * s/frame = Nm/frame] 89 | fsf = 0.10 90 | fse = 0.10 91 | fef = 0.10 92 | fee = 0.10 93 | sfmax = 0.5 94 | semax = 0.5 95 | efmax = 0.5 96 | eemax = 0.5 97 | fleak = 0.90 # "leakage" of muscle force 98 | 99 | maxvel = 3.0 100 | maxacc = 30.0 101 | 102 | # initial position (radians) 103 | s0 = 45*math.pi/180 104 | e0 = 90*math.pi/180 105 | s=s0 106 | e=e0 107 | 108 | # initial vel (rad/s) 109 | sd=0.0 110 | ed=0.0 111 | 112 | # initial acc (rad/s/s) 113 | sdd=0.0 114 | edd=0.0 115 | 116 | # initial stim values 117 | ksf = 0.0 118 | kse = 0.0 119 | kef = 0.0 120 | kee = 0.0 121 | 122 | # initial muscle forces 123 | sf = 0.0 124 | se = 0.0 125 | ef = 0.0 126 | ee = 0.0 127 | 128 | # map muscle forces [sf,se,ef,ee] to joint torques [st,et] 129 | M = numpy.matrix([[1,0],[-1,0],[0,1],[0,-1]]) 130 | 131 | ttotal = 0.0 132 | score = 0 133 | timelimit = 60.0 134 | 135 | # target 136 | tx,ty = getnewtarget(sx,sy,l1,l2,ppm) 137 | 138 | # -------- Main Program Loop ----------- 139 | while done==False: 140 | if ttotal >= timelimit: 141 | done=True 142 | # ALL EVENT PROCESSING SHOULD GO BELOW THIS COMMENT 143 | for event in pygame.event.get(): # User did something 144 | if event.type == pygame.QUIT: # If user clicked close 145 | done=True # Flag that we are done so we exit this loop 146 | # User pressed down on a key 147 | 148 | if event.type == pygame.KEYDOWN: 149 | # Figure out if it was an arrow key. If so 150 | # adjust speed. 
151 | if event.key == pygame.K_d: 152 | ksf = fsf 153 | if event.key == pygame.K_f: 154 | kse = fse 155 | if event.key == pygame.K_j: 156 | kef = fef 157 | if event.key == pygame.K_k: 158 | kee = fee 159 | if event.key == pygame.K_SPACE: 160 | s = s0 161 | e = e0 162 | sd = 0 163 | ed = 0 164 | sdd = 0 165 | edd = 0 166 | sf,se,ef,ee = 0,0,0,0 167 | score = score-1 168 | if (score<0): 169 | score = 0 170 | if event.key == pygame.K_TAB: 171 | showarm = not showarm 172 | 173 | # User let up on a key 174 | if event.type == pygame.KEYUP: 175 | # If it is an arrow key, reset vector back to zero 176 | if event.key == pygame.K_d: 177 | ksf = 0.0 178 | if event.key == pygame.K_f: 179 | kse = 0.0 180 | if event.key == pygame.K_j: 181 | kef = 0.0 182 | if event.key == pygame.K_k: 183 | kee = 0.0 184 | 185 | # ALL EVENT PROCESSING SHOULD GO ABOVE THIS COMMENT 186 | 187 | # ALL GAME LOGIC SHOULD GO BELOW THIS COMMENT 188 | 189 | # convert stim (keypresses) into muscle torque 190 | sf = min(max((sf + ksf) - (fsf*fleak), 0), sfmax) 191 | se = min(max((se + kse) - (fse*fleak), 0), semax) 192 | ef = min(max((ef + kef) - (fef*fleak), 0), efmax) 193 | ee = min(max((ee + kee) - (fee*fleak), 0), eemax) 194 | 195 | # convert muscle torque into shoulder and elbow torques 196 | muscles = numpy.matrix([sf,se,ef,ee]) 197 | torque = muscles * M 198 | 199 | # print some values 200 | # print '-------------------' 201 | # print ksf, kse, kef, kee 202 | # print round(sf,2),round(se,2),round(ef,2),round(ee,2) 203 | 204 | # forward dynamics: convert joint torques into joint accelerations 205 | tmp1 = m2*l1*l2*math.cos(e); 206 | tmp2 = m2*l2*l2; 207 | tmp3 = m2*l1*l2*math.sin(e); 208 | tmp4 = (m2*(l2**2))/4.0; 209 | A = i1+i2+tmp1+(((m1*l1*l1)+tmp2)/4.0)+(m2*l1*l1); 210 | B = i2+(tmp2/4.0)+(tmp1/2.0); 211 | C = tmp3*(ed**2)/2.0; 212 | D = tmp3*sd*ed; 213 | E = i2 + (tmp1/2.0)+tmp4; 214 | F = i2 + tmp4; 215 | G = (tmp3*(sd**2))/2.0; 216 | I = numpy.matrix([[A,B],[E,F]]) 217 | T = 
numpy.matrix([[torque[0,0]],[torque[0,1]]]) 218 | H = numpy.matrix([[-C-D],[G]]) 219 | acc = numpy.linalg.inv(I) * (T-H) 220 | sdd = acc[0,0] 221 | edd = acc[1,0] 222 | 223 | # integrate accelerations into vels 224 | sd = sd + (sdd*dti) 225 | ed = ed + (edd*dti) 226 | 227 | # integrate vels into positions 228 | s = s + (sd*dti) 229 | e = e + (ed*dti) 230 | 231 | ex,ey,hx,hy = joint_to_xy(sx,sy,s,e,l1,l2,ppm) 232 | 233 | # target hit detection 234 | tardist = dist_to_target(hx,hy,tx,ty) 235 | if tardist < targetsize: 236 | score = score + 1 237 | tx,ty = getnewtarget(sx,sy,l1,l2,ppm) 238 | 239 | # ALL GAME LOGIC SHOULD GO ABOVE THIS COMMENT 240 | 241 | # ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT 242 | 243 | # First, clear the screen to white. Don't put other drawing commands 244 | # above this, or they will be erased with this command. 245 | screen.fill(white) 246 | 247 | if showarm: 248 | draw_arm(screen,sx,sy,ex,ey,hx,hy,l1,l2) 249 | 250 | pygame.draw.ellipse(screen,red,[tx,ty,targetsize*2,targetsize*2],0) 251 | pygame.draw.ellipse(screen,black,[hx-ballsize,hy-ballsize,ballsize*2,ballsize*2],0) 252 | 253 | printText("Score:", "MS Comic Sans", 30, 10, 10, red) 254 | printText(repr(score), "MS Comic Sans", 30, 10, 35, red) 255 | 256 | printText("controls: d f j k", "MS Comic Sans", 30, ssize[0]/2 - 80, 10, red) 257 | printText("reset: SPACEBAR", "MS Comic Sans", 30, ssize[0]/2 - 80, 35, red) 258 | printText("toggle arm: TAB", "MS Comic Sans", 30, ssize[0]/2 - 80, 60, red) 259 | 260 | printText("Time:", "MS Comic Sans", 30, ssize[0]-100, 10, red) 261 | printText(repr(round(timelimit-ttotal,1)), "MS Comic Sans", 30, ssize[0]-100, 35, red) 262 | 263 | # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT 264 | 265 | # Go ahead and update the screen with what we've drawn. 266 | pygame.display.flip() 267 | 268 | # Limit to dt frames per second 269 | clock.tick(dt) 270 | ttotal = ttotal + dti 271 | 272 | # Close the window and quit. 
273 | # If you forget this line, the program will 'hang' 274 | # on exit if running from IDLE. 275 | pygame.quit () 276 | print "your score is: %d" % score 277 | 278 | -------------------------------------------------------------------------------- /code/twojointarm_lagrange.py: -------------------------------------------------------------------------------- 1 | from sympy import * 2 | 3 | m1,m2,r1,r2,i1,i2,l1,l2,a1,a2,t,g = symbols('m1 m2 r1 r2 i1 i2 l1 l2 a1 a2 t g') 4 | 5 | # positions 6 | x1 = r1*sin(a1(t)) 7 | y1 = -r1*cos(a1(t)) 8 | x2 = l1*sin(a1(t)) + r2*sin(a1(t)+a2(t)) 9 | y2 = -l1*cos(a1(t)) - r2*cos(a1(t)+a2(t)) 10 | 11 | # velocities 12 | x1d = diff(x1,t) 13 | y1d = diff(y1,t) 14 | x2d = diff(x2,t) 15 | y2d = diff(y2,t) 16 | a1d = diff(a1(t),t) 17 | a2d = diff(a2(t),t) 18 | 19 | # linear kinetic energy 20 | Tlin1 = 0.5 * m1 * ((x1d*x1d) + (y1d*y1d)) 21 | Tlin2 = 0.5 * m2 * ((x2d*x2d) + (y2d*y2d)) 22 | 23 | # rotational kinetic energy 24 | Trot1 = 0.5 * i1 * a1d * a1d 25 | Trot2 = 0.5 * i2 * (a1d+a2d) * (a1d+a2d) 26 | 27 | # total kinetic energy 28 | T = Tlin1 + Tlin2 + Trot1 + Trot2 29 | 30 | # potential energy 31 | U1 = m1 * g * ( r1*(1-cos(a1(t))) ) 32 | U2 = m2 * g * ( l1*(1-cos(a1(t))) + r2*(1-cos(a1(t)-a2(t))) ) 33 | U = U1 + U2 34 | 35 | # lagrangian L 36 | L = T - U 37 | L = nsimplify(L) 38 | 39 | # compute generalized forces (toruqes) Qj 40 | dldq1 = simplify(diff(L,a1(t))) 41 | dldqd1 = simplify(diff(L,diff(a1(t),t))) 42 | ddtdldqd1 = simplify(diff(dldqd1,t)) 43 | Q1 = ddtdldqd1 - dldq1 44 | 45 | dldq2 = simplify(diff(L,a2(t))) 46 | dldqd2 = simplify(diff(L,diff(a2(t),t))) 47 | ddtdldqd2 = simplify(diff(dldqd2,t)) 48 | Q2 = ddtdldqd2 - dldq2 49 | 50 | # simplify! 
51 | # converts floats that are really integers to integers, gets rid of "1.0" 52 | Q1 = simplify(nsimplify(Q1)) 53 | Q2 = simplify(nsimplify(Q2)) 54 | # magic sauce to further simplify with some trigonometric identities 55 | Q1 = Q1.rewrite(exp).expand().powsimp().rewrite(sin).expand() 56 | Q2 = Q2.rewrite(exp).expand().powsimp().rewrite(sin).expand() 57 | # collect derivative terms 58 | Q1 = collect(Q1, Derivative(Derivative(a1(t),t),t)) 59 | Q1 = collect(Q1, Derivative(Derivative(a2(t),t),t)) 60 | Q2 = collect(Q2, Derivative(Derivative(a1(t),t),t)) 61 | Q2 = collect(Q2, Derivative(Derivative(a2(t),t),t)) 62 | # collect sin() terms 63 | Q1 = collect(Q1, sin(a1(t))) 64 | Q2 = collect(Q2, sin(a1(t))) 65 | 66 | pprint(Q1) 67 | pprint(Q2) 68 | -------------------------------------------------------------------------------- /code/twojointarm_passive.py: -------------------------------------------------------------------------------- 1 | # ipython --pylab 2 | 3 | # two joint arm in a vertical plane, with gravity 4 | 5 | from scipy.integrate import odeint 6 | 7 | # forward dynamics equations of our passive two-joint arm 8 | def twojointarm(state,t,aparams): 9 | """ 10 | passive two-joint arm in a vertical plane 11 | X is fwd(+) and back(-) 12 | Y is up(+) and down(-) 13 | gravity acts down 14 | shoulder angle a1 relative to Y vert, +ve counter-clockwise 15 | elbow angle a2 relative to upper arm, +ve counter-clockwise 16 | """ 17 | a1,a2,a1d,a2d = state 18 | l1,l2 = aparams['l1'], aparams['l2'] 19 | m1,m2 = aparams['m1'], aparams['m2'] 20 | i1,i2 = aparams['i1'], aparams['i2'] 21 | r1,r2 = aparams['r1'], aparams['r2'] 22 | g = 9.81 23 | M11 = i1 + i2 + (m1*r1*r1) + (m2*((l1*l1) + (r2*r2) + (2*l1*r2*cos(a2)))) 24 | M12 = i2 + (m2*((r2*r2) + (l1*r2*cos(a2)))) 25 | M21 = M12 26 | M22 = i2 + (m2*r2*r2) 27 | M = matrix([[M11,M12],[M21,M22]]) 28 | C1 = -(m2*l1*a2d*a2d*r2*sin(a2)) - (2*m2*l1*a1d*a2d*r2*sin(a2)) 29 | C2 = m2*l1*a1d*a1d*r2*sin(a2) 30 | C = matrix([[C1],[C2]]) 31 
| G1 = (g*sin(a1)*((m2*l1)+(m1*r1))) + (g*m2*r2*sin(a1+a2)) 32 | G2 = g*m2*r2*sin(a1+a2) 33 | G = matrix([[G1],[G2]]) 34 | ACC = inv(M) * (-C-G) 35 | a1dd,a2dd = ACC[0,0],ACC[1,0] 36 | return [a1d, a2d, a1dd, a2dd] 37 | 38 | # anthropometric parameters of the arm 39 | aparams = { 40 | 'l1' : 0.3384, # metres 41 | 'l2' : 0.4554, 42 | 'r1' : 0.1692, 43 | 'r2' : 0.2277, 44 | 'm1' : 2.10, # kg 45 | 'm2' : 1.65, 46 | 'i1' : 0.025, # kg*m*m 47 | 'i2' : 0.075 48 | } 49 | 50 | # forward kinematics 51 | def joints_to_hand(A,aparams): 52 | """ 53 | Given joint angles A=(a1,a2) and anthropometric params aparams, 54 | returns hand position H=(hx,hy) and elbow position E=(ex,ey) 55 | """ 56 | l1 = aparams['l1'] 57 | l2 = aparams['l2'] 58 | n = shape(A)[0] 59 | E = zeros((n,2)) 60 | H = zeros((n,2)) 61 | for i in range(n): 62 | E[i,0] = l1 * cos(A[i,0]) 63 | E[i,1] = l1 * sin(A[i,0]) 64 | H[i,0] = E[i,0] + (l2 * cos(A[i,0]+A[i,1])) 65 | H[i,1] = E[i,1] + (l2 * sin(A[i,0]+A[i,1])) 66 | return H,E 67 | 68 | def animatearm(state,t,aparams,step=3): 69 | """ 70 | animate the twojointarm 71 | """ 72 | A = state[:,[0,1]] 73 | A[:,0] = A[:,0] - (pi/2) 74 | H,E = joints_to_hand(A,aparams) 75 | l1,l2 = aparams['l1'], aparams['l2'] 76 | figure() 77 | plot(0,0,'b.') 78 | p1, = plot(E[0,0],E[0,1],'b.') 79 | p2, = plot(H[0,0],H[0,1],'b.') 80 | p3, = plot((0,E[0,0],H[0,0]),(0,E[0,1],H[0,1]),'b-') 81 | xlim([-l1-l2, l1+l2]) 82 | ylim([-l1-l2, l1+l2]) 83 | dt = t[1]-t[0] 84 | tt = title("Click on this plot to continue...") 85 | ginput(1) 86 | for i in xrange(0,shape(state)[0]-step,step): 87 | p1.set_xdata((E[i,0])) 88 | p1.set_ydata((E[i,1])) 89 | p2.set_xdata((H[i,0])) 90 | p2.set_ydata((H[i,1])) 91 | p3.set_xdata((0,E[i,0],H[i,0])) 92 | p3.set_ydata((0,E[i,1],H[i,1])) 93 | tt.set_text("%4.2f sec" % (i*dt)) 94 | draw() 95 | 96 | 97 | state0 = [0*pi/180, 90*pi/180, 0, 0] # initial joint angles and vels 98 | t = arange(2001.)/200 # 10 seconds at 200 Hz 99 | state = odeint(twojointarm, state0, t, 
args=(aparams,)) 100 | 101 | animatearm(state,t,aparams) 102 | -------------------------------------------------------------------------------- /code/xor.py: -------------------------------------------------------------------------------- 1 | # feedforward neural network trained with backpropagation 2 | # input(2+bias) -> hidden(2+bias) -> output(1) 3 | # trained on the XOR problem 4 | # Paul Gribble, November 2012 5 | # paul [at] gribblelab [dot] org 6 | 7 | # ipython --pylab 8 | 9 | import time 10 | random.seed(int(time.time())) 11 | 12 | # sigmoid activation function 13 | def tansig(x): 14 | return tanh(x) 15 | 16 | # derivative of sigmoid function 17 | # (needed for calculating error gradients using backprop) 18 | def dtansig(x): 19 | # in case x is a vector, multiply() will do element-wise multiplication 20 | return 1.0 - (multiply(x,x)) 21 | 22 | # load up some training examples 23 | # we will try to get our nnet to learn 24 | # the XOR mapping 25 | # http://en.wikipedia.org/wiki/XOR_gate 26 | 27 | # numpy matrix of input examples 28 | # 4 training examples, each with 2 inputs 29 | xor_in = matrix([[0.0, 0.0], 30 | [0.0, 1.0], 31 | [1.0, 0.0], 32 | [1.0, 1.0] ]) 33 | 34 | # numpy matrix of corresponding outputs 35 | # 4 training examples, each with 1 output 36 | xor_out = matrix([[0.0], 37 | [1.0], 38 | [1.0], 39 | [0.0] ]) 40 | 41 | # out nnet: input(2+bias) -> hidden(2+bias) -> output(1) 42 | # initialize weights and biases to small random values 43 | sigw = 0.5 44 | w_hid = rand(2,2)*sigw # [inp1,inp2] x-> [hid1,hid2] 45 | b_hid = rand(1,2)*sigw # 1.0 -> [b_hid1,b_hid2] 46 | w_out = rand(2,1)*sigw # [hid1,hid2] x-> [out1] 47 | b_out = rand(1,1)*sigw # 1.0 -> [b_out1] 48 | w_out_prev_change = zeros(shape(w_out)) 49 | b_out_prev_change = zeros(shape(b_out)) 50 | w_hid_prev_change = zeros(shape(w_hid)) 51 | b_hid_prev_change = zeros(shape(b_hid)) 52 | 53 | maxepochs = 5000 54 | errors = zeros((maxepochs,1)) 55 | N = 0.01 # learning rate parameter 56 | M = 
0.05 # momentum parameter 57 | 58 | # train the sucker! 59 | for i in range(maxepochs): 60 | net_out = zeros(shape(xor_out)) 61 | for j in range(shape(xor_in)[0]): # for each training example 62 | # forward pass 63 | act_inp = xor_in[j,:] 64 | act_hid = tansig( (act_inp * w_hid) + b_hid ) 65 | act_out = tansig( (act_hid * w_out) + b_out ) 66 | net_out[j,:] = act_out[0,:] 67 | 68 | # error gradients starting at outputs and working backwards 69 | err_out = (act_out - xor_out[j,:]) 70 | deltas_out = multiply(dtansig(act_out), err_out) 71 | err_hid = deltas_out * transpose(w_out) 72 | deltas_hid = multiply(dtansig(act_hid), err_hid) 73 | 74 | # update the weights and bias units 75 | w_out_change = -2.0 * transpose(act_hid)*deltas_out 76 | w_out = w_out + (N * w_out_change) + (M * w_out_prev_change) 77 | w_out_prev_change = w_out_change 78 | b_out_change = -2.0 * deltas_out 79 | b_out = b_out + (N * b_out_change) + (M * b_out_prev_change) 80 | b_out_prev_change = b_out_change 81 | 82 | w_hid_change = -2.0 * transpose(act_inp)*deltas_hid 83 | w_hid = w_hid + (N * w_hid_change) + (M * w_hid_prev_change) 84 | w_hid_prev_change = w_hid_change 85 | b_hid_change = -2.0 * deltas_hid 86 | b_hid = b_hid + (N * b_hid_change) + (M * b_hid_prev_change) 87 | b_hid_prev_change = b_hid_change 88 | 89 | # compute errors across all targets 90 | errors[i] = 0.5 * sum(square(net_out - xor_out)) 91 | if ((i % 100)==0): 92 | print "*** EPOCH %4d/%4d : SSE = %6.5f" % (i,maxepochs,errors[i]) 93 | print net_out 94 | 95 | # plot SSE over time 96 | figure() 97 | subplot(2,1,1) 98 | plot(errors) 99 | xlabel('EPOCH') 100 | ylabel('SS_ERROR') 101 | subplot(2,1,2) 102 | plot(log(errors)) 103 | xlabel('EPOCH') 104 | ylabel('LOG (SS_ERROR)') 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /code/xor_aima.py: 
-------------------------------------------------------------------------------- 1 | # Figure 20.25 from AIMA by Russell and Norvig. Back-propagation Neural Net 2 | 3 | import math 4 | 5 | def g(x): # tanh faster than the standard 1/(1+e^-x) 6 | return math.tanh(x) 7 | 8 | def gp(y): # derivative of g 9 | return 1.0-y*y 10 | 11 | def BACK_PROP_LEARNING(examples, network) : 12 | alpha = 0.2 13 | (Wih, Who) = network 14 | for epoch in range(4000) : 15 | for (x,y) in examples : 16 | ai=x[:] # inputs/outputs 17 | deltao = [0.0]*len(y) 18 | deltah = [0.0]*len(Who[0]) 19 | ah = [0.0]*len(Who[0]) 20 | ao = [0.0]*len(y) 21 | 22 | for j in range(len(ah)) : # activate hidden layer 23 | ini = 0.0 24 | for k in range(len(ai)) : 25 | ini = ini+Wih[j][k]*ai[k] 26 | ah[j] = g(ini) # hidden activation 27 | 28 | for i in range(len(ao)) : # activate output layer 29 | ini = 0.0 30 | for j in range(len(ah)) : 31 | ini = ini+Who[i][j]*ah[j] 32 | ao[i] = g(ini) # output activation 33 | deltao[i] = gp(ao[i])*(y[i]-ao[i]) # output error gradient 34 | 35 | for k in range(len(ah)) : 36 | error = 0.0 37 | for j in range(len(y)) : # back propagate to hidden 38 | error = error + Who[j][k]*deltao[j] 39 | deltah[k] = gp(ah[k]) * error # hidden error gradient 40 | 41 | for j in range(len(ah)) : 42 | for i in range(len(ao)) : # update output weights 43 | Who[i][j] = Who[i][j] + (alpha * ah[j] * deltao[i]) 44 | 45 | for k in range(len(ai)) : 46 | for j in range(len(ah)) : # update hidden weights 47 | Wih[j][k] = Wih[j][k] + (alpha * ai[k] * deltah[j]) 48 | 49 | return network 50 | 51 | def BACK_PROP_TEST(examples, network) : 52 | result = [] 53 | (Wih, Who) = network 54 | for (x,y) in examples : 55 | eresult=[] 56 | ai=x[:] # inputs/outputs 57 | ah = [0.0]*len(Who[0]) 58 | 59 | for j in range(len(ah)) : # activate hidden layer 60 | ini = 0.0 61 | for i in range(len(ai)) : 62 | ini = ini+Wih[j][i]*ai[i] 63 | ah[j] = g(ini) # hidden activation 64 | 65 | for k in range(len(y)) : # activate output 
layer 66 | ini = 0.0 67 | for j in range(len(ah)) : 68 | ini = ini+Who[k][j]*ah[j] 69 | 70 | eresult.append(g(ini)) # output activation 71 | result.append(eresult) 72 | return result 73 | 74 | XOR = [ ([0,0], [0]), # Training examples 75 | ([0,1], [1]), 76 | ([1,0], [1]), 77 | ([1,1], [0])] 78 | 79 | NN = [[[0.1, -0.2], # input to hidden weights 80 | [-0.3, 0.4]], # 2 input and 2 hidden 81 | [[0.5, -0.6]] # hidden to output weights 82 | ] # 2 hidden and 1 output 83 | 84 | print 'Learning results: ', BACK_PROP_LEARNING(XOR, NN) 85 | print 'Training and test set: ', XOR 86 | print 'Test results: ', BACK_PROP_TEST(XOR, NN) 87 | -------------------------------------------------------------------------------- /code/xor_cg.py: -------------------------------------------------------------------------------- 1 | # feedforward neural network 2 | # input(2+bias) -> hidden(2+bias) -> output(1) 3 | # trained on the XOR problem 4 | # weights optimized using scipy.optimize.fmin_cg with 5 | # gradients computed using backpropagation 6 | # Paul Gribble, November 2012 7 | # paul [at] gribblelab [dot] org 8 | 9 | # ipython --pylab 10 | 11 | import time 12 | random.seed(int(time.time())) 13 | 14 | def tansig(x): 15 | """ sigmoid activation function """ 16 | return tanh(x) 17 | 18 | def dtansig(x): 19 | """ derivative of sigmoid function """ 20 | return 1.0 - (multiply(x,x)) # element-wise multiplication 21 | 22 | def pack_weights(w_hid, b_hid, w_out, b_out, params): 23 | """ pack weight matrices into a single vector """ 24 | n_in, n_hid, n_out = params[0], params[1], params[2] 25 | g_j = hstack((reshape(w_hid,(1,n_in*n_hid)), 26 | reshape(b_hid,(1,n_hid)), 27 | reshape(w_out,(1,n_hid*n_out)), 28 | reshape(b_out,(1,n_out))))[0] 29 | g_j = array(g_j[0,:])[0] 30 | return g_j 31 | 32 | def unpack_weights(x, params): 33 | """ unpack weights from single vector into weight matrices """ 34 | n_in, n_hid, n_out = params[0], params[1], params[2] 35 | pat_in, pat_out = params[3], params[4] 36 | 
n_pat = shape(pat_in)[0] 37 | i1,i2 = 0,n_in*n_hid 38 | w_hid = reshape(x[i1:i2], (n_in,n_hid)) 39 | i1,i2 = i2,i2+n_hid 40 | b_hid = reshape(x[i1:i2],(1,n_hid)) 41 | i1,i2 = i2,i2+(n_hid*n_out) 42 | w_out = reshape(x[i1:i2], (n_hid,n_out)) 43 | i1,i2 = i2,i2+n_out 44 | b_out = reshape(x[i1:i2],(1,n_out)) 45 | return w_hid, b_hid, w_out, b_out 46 | 47 | def net_forward(x, params): 48 | """ propagate inputs through the network and return outputs """ 49 | w_hid,b_hid,w_out,b_out = unpack_weights(x, params) 50 | pat_in = params[3] 51 | return tansig((tansig((pat_in * w_hid) + b_hid) * w_out) + b_out) 52 | 53 | def f(x,params): 54 | """ returns the cost (SSE) of a given weight vector """ 55 | pat_out = params[4] 56 | return sum(square(net_forward(x,params) - pat_out)) 57 | 58 | def fd(x,params): 59 | """ returns the gradients (dW/dE) for the weight vector """ 60 | n_in, n_hid, n_out = params[0], params[1], params[2] 61 | pat_in, pat_out = params[3], params[4] 62 | w_hid,b_hid,w_out,b_out = unpack_weights(x, params) 63 | act_hid = tansig( (pat_in * w_hid) + b_hid ) 64 | act_out = tansig( (act_hid * w_out) + b_out ) 65 | err_out = act_out - pat_out 66 | deltas_out = multiply(dtansig(act_out), err_out) 67 | err_hid = deltas_out * transpose(w_out) 68 | deltas_hid = multiply(dtansig(act_hid), err_hid) 69 | grad_w_out = transpose(act_hid)*deltas_out 70 | grad_b_out = sum(deltas_out,0) 71 | grad_w_hid = transpose(pat_in)*deltas_hid 72 | grad_b_hid = sum(deltas_hid,0) 73 | return pack_weights(grad_w_hid, grad_b_hid, grad_w_out, grad_b_out, params) 74 | 75 | ############################ 76 | # train on XOR mapping # 77 | ############################ 78 | 79 | from scipy.optimize import fmin_cg 80 | 81 | xor_in = matrix([[0.0, 0.0], 82 | [0.0, 1.0], 83 | [1.0, 0.0], 84 | [1.0, 1.0] ]) 85 | 86 | xor_out = matrix([[0.0], 87 | [1.0], 88 | [1.0], 89 | [0.0] ]) 90 | 91 | # network parameters 92 | n_in = shape(xor_in)[1] 93 | n_hid = 2 94 | n_out = shape(xor_out)[1] 95 | params = 
[n_in, n_hid, n_out, xor_in, xor_out] 96 | 97 | # initialize weights to small random values 98 | nw = n_in*n_hid + n_hid + n_hid*n_out + n_out 99 | w0 = rand(nw)*0.2 - 0.1 100 | 101 | # optimize using conjugate gradient descent 102 | out = fmin_cg(f, w0, fprime=fd, args=(params,), 103 | full_output=True, retall=True, disp=True) 104 | # unpack optimizer outputs 105 | wopt,fopt,func_calls,grad_calls,warnflag,allvecs = out 106 | 107 | # print net performance on optimal weights wopt 108 | net_out = net_forward(wopt,params) 109 | print net_out.round(3) 110 | 111 | -------------------------------------------------------------------------------- /code/xor_plot.py: -------------------------------------------------------------------------------- 1 | # feedforward neural network trained with backpropagation 2 | # input(2+bias) -> hidden(2+bias) -> output(1) 3 | # trained on the XOR problem 4 | # Paul Gribble, November 2012 5 | # paul [at] gribblelab [dot] org 6 | 7 | # ipython --pylab 8 | 9 | import time 10 | random.seed(int(time.time())) 11 | 12 | # sigmoid activation function 13 | def tansig(x): 14 | return tanh(x) 15 | 16 | # derivative of sigmoid function 17 | # (needed for calculating error gradients using backprop) 18 | def dtansig(x): 19 | # in case x is a vector, multiply() will do element-wise multiplication 20 | return 1.0 - (multiply(x,x)) 21 | 22 | # load up some training examples 23 | # we will try to get our nnet to learn 24 | # the XOR mapping 25 | # http://en.wikipedia.org/wiki/XOR_gate 26 | 27 | # numpy matrix of input examples 28 | # 4 training examples, each with 2 inputs 29 | xor_in = matrix([[0.0, 0.0], 30 | [0.0, 1.0], 31 | [1.0, 0.0], 32 | [1.0, 1.0] ]) 33 | 34 | # numpy matrix of corresponding outputs 35 | # 4 training examples, each with 1 output 36 | xor_out = matrix([[0.0], 37 | [1.0], 38 | [1.0], 39 | [0.0] ]) 40 | 41 | # out nnet: input(2+bias) -> hidden(2+bias) -> output(1) 42 | # initialize weights and biases to small random values 43 | sigw = 
0.5 44 | w_hid = rand(2,2)*sigw # [inp1,inp2] x-> [hid1,hid2] 45 | b_hid = rand(1,2)*sigw # 1.0 -> [b_hid1,b_hid2] 46 | w_out = rand(2,1)*sigw # [hid1,hid2] x-> [out1] 47 | b_out = rand(1,1)*sigw # 1.0 -> [b_out1] 48 | w_out_prev_change = zeros(shape(w_out)) 49 | b_out_prev_change = zeros(shape(b_out)) 50 | w_hid_prev_change = zeros(shape(w_hid)) 51 | b_hid_prev_change = zeros(shape(b_hid)) 52 | 53 | maxepochs = 1000 54 | errors = zeros((maxepochs,1)) 55 | N = 0.01 # learning rate parameter 56 | M = 0.10 # momentum parameter 57 | 58 | # we are going to plot the network's performance over the course of learning 59 | # inputs will be a regular grid of [inp1,inp2] points 60 | n_grid = 20 61 | g_grid = linspace(-1.0, 2.0, n_grid) 62 | g1,g2 = meshgrid(g_grid, g_grid) 63 | figure() 64 | 65 | # train the sucker! 66 | for i in range(maxepochs): 67 | net_out = zeros(shape(xor_out)) 68 | for j in range(shape(xor_in)[0]): # for each training example 69 | # forward pass 70 | act_inp = xor_in[j,:] 71 | act_hid = tansig( (act_inp * w_hid) + b_hid ) 72 | act_out = tansig( (act_hid * w_out) + b_out ) 73 | net_out[j,:] = act_out[0,:] 74 | 75 | # error gradients starting at outputs and working backwards 76 | err_out = (act_out - xor_out[j,:]) 77 | deltas_out = multiply(dtansig(act_out), err_out) 78 | err_hid = deltas_out * transpose(w_out) 79 | deltas_hid = multiply(dtansig(act_hid), err_hid) 80 | 81 | # update the weights and bias units 82 | w_out_change = -2.0 * transpose(act_hid)*deltas_out 83 | w_out = w_out + (N * w_out_change) + (M * w_out_prev_change) 84 | w_out_prev_change = w_out_change 85 | b_out_change = -2.0 * deltas_out 86 | b_out = b_out + (N * b_out_change) + (M * b_out_prev_change) 87 | b_out_prev_change = b_out_change 88 | 89 | w_hid_change = -2.0 * transpose(act_inp)*deltas_hid 90 | w_hid = w_hid + (N * w_hid_change) + (M * w_hid_prev_change) 91 | w_hid_prev_change = w_hid_change 92 | b_hid_change = -2.0 * deltas_hid 93 | b_hid = b_hid + (N * b_hid_change) + (M * 
b_hid_prev_change) 94 | b_hid_prev_change = b_hid_change 95 | 96 | # compute errors across all targets 97 | errors[i] = 0.5 * sum(square(net_out - xor_out)) 98 | if ((i % 2)==0): 99 | print "*** EPOCH %4d/%4d : SSE = %6.5f" % (i,maxepochs,errors[i]) 100 | print net_out 101 | # now do our plotting 102 | net_perf = zeros(shape(g1)) 103 | for i1 in range(n_grid): 104 | for i2 in range(n_grid): 105 | act_inp = matrix([g1[i1,i2],g2[i1,i2]]) 106 | o_grid = tansig( (tansig( (act_inp * w_hid) + b_hid ) * w_out) + b_out ) 107 | o_grid = int(o_grid >= 0.50) # hardlim 108 | net_perf[i1,i2] = o_grid 109 | cla() 110 | imshow(net_perf, extent=[-1,2,-1,2]) 111 | plot((0,0,1,1),(0,1,0,1),'ws',markersize=10) 112 | axis([-1, 2, -1, 2]) 113 | draw() 114 | 115 | 116 | # plot SSE over time 117 | figure() 118 | subplot(2,1,1) 119 | plot(errors) 120 | xlabel('EPOCH') 121 | ylabel('SS_ERROR') 122 | subplot(2,1,2) 123 | plot(log(errors)) 124 | xlabel('EPOCH') 125 | ylabel('LOG (SS_ERROR)') 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /figs/HH1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/HH1.png -------------------------------------------------------------------------------- /figs/HH2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/HH2.png -------------------------------------------------------------------------------- /figs/assignment5_figures.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/assignment5_figures.pdf 
-------------------------------------------------------------------------------- /figs/ekeberg1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/ekeberg1.png -------------------------------------------------------------------------------- /figs/ekeberg_fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/ekeberg_fig1.png -------------------------------------------------------------------------------- /figs/elbow_dynamics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/elbow_dynamics.png -------------------------------------------------------------------------------- /figs/elbow_kinematics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/elbow_kinematics.png -------------------------------------------------------------------------------- /figs/elbow_movement_kinematics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/elbow_movement_kinematics.png -------------------------------------------------------------------------------- /figs/forcelengthce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/forcelengthce.png -------------------------------------------------------------------------------- /figs/forcelengthse.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/forcelengthse.png -------------------------------------------------------------------------------- /figs/forcevelocity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/forcevelocity.png -------------------------------------------------------------------------------- /figs/fullblownschematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/fullblownschematic.png -------------------------------------------------------------------------------- /figs/hillmuscle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/hillmuscle.png -------------------------------------------------------------------------------- /figs/jacobian_plots.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/jacobian_plots.png -------------------------------------------------------------------------------- /figs/lorenz1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/lorenz1.png -------------------------------------------------------------------------------- /figs/lorenz2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/lorenz2.png -------------------------------------------------------------------------------- /figs/lorenz3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/lorenz3.png -------------------------------------------------------------------------------- /figs/lotkavolterra1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/lotkavolterra1.png -------------------------------------------------------------------------------- /figs/lotkavolterra2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/lotkavolterra2.png -------------------------------------------------------------------------------- /figs/mass-spring-sim.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/mass-spring-sim.png -------------------------------------------------------------------------------- /figs/onejointanimation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointanimation.png -------------------------------------------------------------------------------- /figs/onejointarm_muscle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointarm_muscle.png 
-------------------------------------------------------------------------------- /figs/onejointarm_muscle2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointarm_muscle2.png -------------------------------------------------------------------------------- /figs/onejointarm_muscle3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointarm_muscle3.png -------------------------------------------------------------------------------- /figs/onejointarm_muscle4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointarm_muscle4.png -------------------------------------------------------------------------------- /figs/onejointarm_passive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/onejointarm_passive.png -------------------------------------------------------------------------------- /figs/sin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/sin.png -------------------------------------------------------------------------------- /figs/spring-mass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/spring-mass.png -------------------------------------------------------------------------------- /figs/twojointarm_dynamics.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/twojointarm_dynamics.png -------------------------------------------------------------------------------- /figs/twojointarm_kinematics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/twojointarm_kinematics.png -------------------------------------------------------------------------------- /figs/twojointarm_kinematics_workspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/twojointarm_kinematics_workspace.png -------------------------------------------------------------------------------- /figs/twojointarmgame.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulgribble/CompNeuro/f586944c0c3254976203d29f096ecf80e175bff4/figs/twojointarmgame.png -------------------------------------------------------------------------------- /go.el: -------------------------------------------------------------------------------- 1 | (require 'ox-publish) 2 | (require 'ox-bibtex) 3 | (setq org-publish-project-alist 4 | '( 5 | ("CompNeuro" 6 | :base-directory "org/" 7 | :base-extension "org" 8 | :publishing-directory "~/github/CompNeuro/html/" 9 | :publishing-function org-html-publish-to-html 10 | :recursive t 11 | :section-numbers nil 12 | :html-postamble "
%a | %d
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
\"Creative
" 13 | :language en 14 | :link-home "index.html" 15 | :link-up "index.html" 16 | :html-head "" 17 | ) 18 | 19 | ("CompNeuro_html" 20 | :base-directory "html/" 21 | :base-extension "css\\|html" 22 | :publishing-directory "/ssh:plg@toro.ssc.uwo.ca:~/gribblelab.org/compneuro/" 23 | :publishing-function org-publish-attachment 24 | :recursive t 25 | ) 26 | 27 | ("CompNeuro_bibhtml" 28 | :base-directory "org/" 29 | :base-extension "html\\|css" 30 | :publishing-directory "/ssh:plg@toro.ssc.uwo.ca:~/gribblelab.org/compneuro/" 31 | :publishing-function org-publish-attachment 32 | :recursive t 33 | ) 34 | 35 | ("CompNeuro_figs" 36 | :base-directory "figs/" 37 | :base-extension "png\\|jpg\\|pdf" 38 | :publishing-directory "/ssh:plg@toro.ssc.uwo.ca:~/gribblelab.org/compneuro/figs/" 39 | :publishing-function org-publish-attachment 40 | :recursive t 41 | ) 42 | 43 | ("CompNeuro_code" 44 | :base-directory "code/" 45 | :base-extension "c\\|h\\|txt\\|csv\\|py\\|tgz\\|pickle\\|m\\|tgz" 46 | :publishing-directory "/ssh:plg@toro.ssc.uwo.ca:~/gribblelab.org/compneuro/code/" 47 | :publishing-function org-publish-attachment 48 | :recursive t 49 | ) 50 | 51 | ("org" :components ("CompNeuro" "CompNeuro_html" "CompNeuro_bibhtml" "CompNeuro_figs" "CompNeuro_code")))) 52 | 53 | (org-publish-project "CompNeuro") 54 | (org-publish-project "CompNeuro_html") 55 | (org-publish-project "CompNeuro_bibhtml") 56 | (org-publish-project "CompNeuro_figs") 57 | (org-publish-project "CompNeuro_code") 58 | -------------------------------------------------------------------------------- /html/0_Setup_Your_Computer.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 0. Setup Your Computer 10 | 11 | 12 | 97 | 98 | 144 | 145 | 146 |
147 | UP 148 | | 149 | HOME 150 |
151 |

0. Setup Your Computer

152 |
153 |

Table of Contents

154 | 175 |
176 | 177 | 178 |
179 |

Install Options

180 |
181 |
182 |

Option 1: Download and build source code from websites above

183 |
184 |
    185 |
  • good luck with that, there are many dependencies, it’s a bit of a mess
  • 186 |
187 |
188 |
189 | 190 |
191 |

Option 2: Install the Enthought Python Distribution (all platforms)

192 |
193 | 199 |
200 |
201 | 202 |
203 |

Option 3: Install a software virtual machine running Ubuntu GNU/Linux (all platforms)

204 |
205 |
    206 |
  • perhaps the easiest and most "self-contained" option - it won’t 207 | install stuff on your own machine but will install stuff within a 208 | virtual machine, leaving your machine untouched
  • 209 |
  • download and install VirtualBox, it’s free and runs on Windows & Mac 210 | (and GNU/Linux)
  • 211 |
  • download the pre-configured UbuntuVM.ova provided by me (beware, 212 | it’s a 3.8 GB file)
  • 213 |
  • in VirtualBox, "Import Appliance…" and point to UbuntuVM.ova
  • 214 |
  • Then start the virtual machine (username is compneuro and password is 215 | compneuro)
  • 216 |
  • you’re ready to rumble, I have installed all the software already
  • 217 |
218 |
219 |
220 | 221 |
222 |

Option 4 (Mac) : install Python + scientific libraries on your machine

223 |
224 |
    225 |
  • install Xcode from the mac app store (the download is LARGE, several 226 | GB)
  • 227 |
  • in Xcode: Preferences/Downloads/Components and Install the "Command 228 | Line Tools"
  • 229 |
  • download and run the SciPy Superpack install script
  • 230 |
  • note: you may have to download install python-setuptools first… if 231 | the superpack install script doesn’t work, try this
  • 232 |
  • you’re ready to rumble
  • 233 |
234 |
235 |
236 | 237 |
238 |

Option 5 (windows)

239 |
240 |
    241 |
  • <laughing>
  • 242 |
  • seriously though I have little to no idea about the windows universe
  • 243 |
  • your best bet may be the Enthought Python Distribution
  • 244 |
  • they have an Academic Version which is free, you just have to fill 245 | out a form and they send you an email with a download link
  • 246 |
  • here is a blog post detailing how to get the ipython notebook 247 | running on Windows 7
  • 248 |
249 |
250 |
251 |
252 | 253 |
254 |

Testing your installation

255 |
256 |
257 |

Launching iPython

258 |
259 |

260 | To launch iPython, open up a Terminal and type a command to launch: 261 |

262 | 263 |

264 | To make it so Figures appear in their own window on your desktop (like MATLAB): 265 |

266 |
267 | 268 |
ipython --pylab
269 | 
270 |
271 | 272 |

 273 | To make it so Figures appear in the console itself, right after the 274 | command(s) that produced them: 275 |

276 |
277 | 278 |
ipython qtconsole --pylab inline
279 | 
280 |
281 | 282 |

283 | To launch a browser-based "notebook" (this is really neat) 284 |

285 |
286 | 287 |
ipython notebook --pylab inline
288 | 
289 |
290 |
291 |
292 | 293 |
294 |

Making a plot

295 |
296 |

297 | Type the following: 298 |

299 | 300 |
301 | 302 |
t = arange(0, 1, 0.01)
303 | y = sin(2*pi*t*3)
304 | plot(t,y)
305 | 
306 |
307 | 308 |

309 | and you should see this plot: 310 |

311 | 312 | 313 |
314 |

sin.png 315 |

316 |
317 |
318 |
319 |
320 | 321 | 322 |
323 |

Next steps

324 |
325 |

326 | In the next topic we will talk about dynamical systems — what they 327 | are, and how they can be used to address scientific questions through 328 | computer simulation. 329 |

330 | 331 |

332 | [ next ] 333 |

334 | 335 | 336 | 337 |
338 |
339 |
340 | 341 |
342 |

Links

343 |
344 | 374 |
375 |
376 |
377 |
378 |
Paul Gribble | fall 2012
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
Creative Commons License
379 |
380 | 381 | 382 | -------------------------------------------------------------------------------- /html/1_Dynamical_Systems.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 1. Dynamical Systems 10 | 11 | 12 | 97 | 98 | 144 | 164 | 166 | 167 | 168 |
169 | UP 170 | | 171 | HOME 172 |
173 |

1. Dynamical Systems

174 |
175 |

Table of Contents

176 |
177 | 182 |
183 |
184 |
185 |

References

186 | 187 |
188 | 189 | 190 | 191 | 194 | 202 | 203 | 204 | 205 | 206 | 209 | 217 | 218 |
192 | [1] 193 | 195 | A. L. Hodgkin and A. F. Huxley. 196 | A quantitative description of membrane current and its application 197 | to conduction and excitation in nerve. 198 | J. Physiol. (Lond.), 117(4):500--544, Aug 1952. 199 | [ bib ] 200 | 201 |
207 | [2] 208 | 210 | A. L. Hodgkin, A. F. Huxley, A. L. Hodgkin, and A. F. Huxley. 211 | A quantitative description of membrane current and its application 212 | to conduction and excitation in nerve. 1952. 213 | Bull. Math. Biol., 52(1-2):25--71, 1990. 214 | [ bib ] 215 | 216 |
219 | 220 |
221 | 222 |
223 |

What is a dynamical system?

224 |
225 |

 226 | Systems can be characterized by the specific relation between their 227 | input(s) and output(s). A static system has an output that only 228 | depends on its input. A mechanical example of such a system is an 229 | idealized, massless (mass=0) spring. The length of the spring depends 230 | only on the force (the input) that acts upon it. Change the input 231 | force, and the length of the spring will change, and this will happen 232 | instantaneously (obviously a massless spring is a theoretical 233 | construct). A system becomes dynamical (it is said to have dynamics) 234 | when a mass is attached to the spring. Now the position of the mass 235 | (and equivalently, the length of the spring) is no longer directly 236 | dependent on the input force, but is also tied to the acceleration of 237 | the mass, which in turn depends on the sum of all forces acting upon 238 | it (the sum of the input force and the force due to the spring). The 239 | net force depends on the position of the mass, which depends on the 240 | length of the spring, which depends on the spring force. The property 241 | that acceleration of the mass depends on its position makes this a 242 | dynamical system. 243 |

244 | 245 | 246 |
247 |

spring-mass.png 248 |

249 |

Figure 1: A spring with a mass attached

250 |
251 | 252 |

253 | Dynamical systems can be characterized by differential equations that 254 | relate the state derivatives (e.g. velocity or acceleration) to the 255 | state variables (e.g. position). The differential equation for the 256 | spring-mass system depicted above is: 257 |

258 | 259 | \begin{equation} 260 | m\ddot{x} = -kx + mg 261 | \end{equation} 262 | 263 |

264 | Where \(x\) is the position of the mass \(m\) (the length of the spring), 265 | \(\ddot{x}\) is the second derivative of position (i.e. acceleration), 266 | \(k\) is a constant (related to the stiffness of the spring), and \(g\) is 267 | the gravitational constant. 268 |

269 | 270 |

271 | The system is said to be a second order system, as the highest 272 | derivative that appears in the differential equation describing the 273 | system, is two. The position \(x\) and its time derivative \(\dot{x}\) are 274 | called states of the system, and \(\dot{x}\) and \(\ddot{x}\) are called 275 | state derivatives. 276 |

277 | 278 |

279 | Most systems out there in nature are dynamical systems. For example 280 | most chemical reactions under natural circumstances are dynamical: the 281 | rate of change of a chemical reaction depends on the amount of 282 | chemical present, in other words the state derivative is proportional 283 | to the state. Dynamical systems exist in biology as well. For example 284 | the rate of change of a certain species depends on its population 285 | size. 286 |

287 | 288 |

289 | Dynamical equations are often described by a set of coupled 290 | differential equations. For example, the reproduction rate of 291 | rabbits (state derivative 1) depends on the population of rabbits 292 | (state 1) and on the population size of foxes (state 2). The 293 | reproduction rate of foxes (state derivative 2) depends on the 294 | population of foxes (state 2) and also on the population of rabbits 295 | (state 1). In this case we have two coupled first-order differential 296 | equations, and hence a system of order two. The so-called 297 | predator-prey model is also known as the Lotka-Volterra equations. 298 |

299 | 300 | \begin{eqnarray} 301 | \dot{x} &= x(\alpha - \beta y)\\ 302 | \dot{y} &= -y(\gamma - \delta x) 303 | \end{eqnarray} 304 |
305 |
306 | 307 |
308 |

Why make models?

309 |
310 |

311 | There are two main reasons: one being practical and one mostly 312 | theoretical. The practical use is prediction. A typical example of a 313 | dynamical system that is modelled for prediction is the weather. The 314 | weather is a very complex (high-order, nonlinear, coupled and 315 | chaotic) system. More theoretically, one reason to make models is to 316 | test the validity of a functional hypothesis of an observed 317 | phenomenon. A beautiful example is the model made by Hodgkin and 318 | Huxley to understand how action potentials arise and propagate in 319 | neurons [1,2]. They modelled the different 320 | (voltage-gated) ion channels in an axon membrane and showed using 321 | mathematical models that indeed the changes in ion concentrations were 322 | responsible for the electrical spikes observed experimentally 7 years 323 | earlier. 324 |

325 | 326 | 327 |
328 |

HH1.png 329 |

330 |

Figure 2: Hodgkin-Huxley model of voltage-gated ion channels

331 |
332 | 333 | 334 |
335 |

HH2.png 336 |

337 |

Figure 3: Action potentials across the membrane

338 |
339 | 340 |

341 | A second theoretical reason to make models is that it is sometimes 342 | very difficult, if not impossible, to answer a certain question 343 | empirically. As an example we take the following biomechanical 344 | question: Would you be able to jump higher if your biceps femoris 345 | (part of your hamstrings) were two separate muscles each crossing only 346 | one joint rather than being one muscle crossing both the hip and knee 347 | joint? Not a strange question as one could then independently control 348 | the torques around each joint. 349 |

350 | 351 |

352 | In order to answer this question empirically, one would like to do the 353 | following experiment: 354 |

355 | 356 |
    357 |
  • measure the maximal jump height of a subject
  • 358 |
  • change only the musculoskeletal properties in question
  • 359 |
  • measure the jump height again
  • 360 |
361 | 362 |

363 | Of course, such an experiment would yield several major ethical, 364 | practical and theoretical drawbacks. It is unlikely that an ethics 365 | committee would approve the transplantation of the origin and 366 | insertion of the hamstrings in order to examine its effect on jump 367 | height. And even so, one would have some difficulties finding 368 | subjects. Even with a volunteer for such a surgery it would not bring 369 | us any closer to an answer. After such a surgery, the subject would 370 | not be able to jump right away, but has to undergo severe revalidation 371 | and surely during such a period many factors will undesirably change 372 | like maximal contractile forces. And even if the subject would fully 373 | recover (apart from the hamstrings transplantation), his or her 374 | nervous system would have to find the new optimal muscle stimulation 375 | pattern. 376 |

377 | 378 |

379 | If one person jumps lower than another person, is that because she 380 | cannot jump as high with her particular muscles, or was it just that 381 | her CNS was not able to find the optimal muscle activation pattern? 382 | Ultimately, one wants to know through what mechanism the subject's 383 | jump performance changes. To investigate this, one would need to know, 384 | for example, the forces produced by the hamstrings as a function of 385 | time, something that is impossible to obtain experimentally. Of 386 | course, this example is somewhat ridiculous, but its message is 387 | hopefully clear that for several questions a strict empirical approach 388 | is not suitable. An alternative is provided by mathematical modelling. 389 |

390 |
391 |
392 | 393 |
394 |

Next steps

395 |
396 |

397 | In the next topic, we will be examining three systems — a 398 | mass-spring system, a system representing weather patterns, and a 399 | system characterizing predator-prey interactions. In each case we will 400 | see how to go from differential equations characterizing the dynamics 401 | of the system, to Python code, and run that code to simulate the 402 | behaviour of the system over time. We will see the great power of 403 | simulation, namely the ability to change aspects of the system at 404 | will, and simulate to explore the resulting change in system 405 | behaviour. 406 |

407 | 408 |

409 | [ next ] 410 |

411 | 412 |
413 |
414 |
415 |
416 |
417 |
Paul Gribble & Dinant Kistemaker | fall 2012
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
Creative Commons License
418 |
419 | 420 | 421 | -------------------------------------------------------------------------------- /org/0_Setup_Your_Computer.org: -------------------------------------------------------------------------------- 1 | #+STARTUP: showall 2 | 3 | #+TITLE: 0. Setup Your Computer 4 | #+AUTHOR: Paul Gribble 5 | #+EMAIL: paul@gribblelab.org 6 | #+DATE: fall 2012 7 | #+HTML_LINK_UP: http://www.gribblelab.org/compneuro/index.html 8 | #+HTML_LINK_HOME: http://www.gribblelab.org/compneuro/index.html 9 | 10 | 11 | * Install Options 12 | 13 | ** Option 1: Download and build source code from websites above 14 | - good luck with that, there are many dependencies, it’s a bit of a mess 15 | 16 | ** Option 2: Install the Enthought Python Distribution (all platforms) 17 | - your best bet may be the [[http://www.enthought.com/products/epd.php][Enthought Python Distribution]] 18 | - they have an [[http://www.enthought.com/products/edudownload.php][Academic Version]] which is free 19 | - also a totally free version here: [[http://www.enthought.com/products/epd_free.php][EPD Free]] 20 | - doesn't necessarily include latest versions of packages (e.g. iPython) 21 | 22 | ** Option 3: Install a software virtual machine running Ubuntu GNU/Linux (all platforms) 23 | - perhaps the easiest and most "self-contained" option - it won’t 24 | install stuff on your own machine but will install stuff within a 25 | virtual machine, leaving your machine untouched 26 | - download and install [[https://www.virtualbox.org/][VirtualBox]], it’s free and runs on Windows & Mac 27 | (and GNU/Linux) 28 | - download the pre-configured [[http://www.gribblelab.org/compneuro/installers/UbuntuVM.ova][UbuntuVM.ova]] provided by me (beware, 29 | it’s a 3.8 GB file) 30 | - in VirtualBox, "Import Appliance..." 
and point to UbuntuVM.ova 31 | - Then start the virtual machine (username is compneuro and password is 32 | compneuro) 33 | - you’re ready to rumble, I have installed all the software already 34 | 35 | ** Option 4 (Mac) : install Python + scientific libraries on your machine 36 | - install [[http://itunes.apple.com/ca/app/xcode/id497799835?mt=12][Xcode]] from the mac app store (the download is LARGE, several 37 | GB) 38 | - in Xcode: Preferences/Downloads/Components and Install the "Command 39 | Line Tools" 40 | - download and run the [[http://fonnesbeck.github.com/ScipySuperpack/][SciPy Superpack install script]] 41 | - note: you may have to download install [[http://pypi.python.org/pypi/setuptools][python-setuptools]] first... if 42 | the superpack install script doesn’t work, try this 43 | - you’re ready to rumble 44 | 45 | ** Option 5 (windows) 46 | - 47 | - seriously though I have little to no idea about the windows universe 48 | - your best bet may be the [[http://www.enthought.com/products/epd.php][Enthought Python Distribution]] 49 | - they have an [[http://www.enthought.com/products/edudownload.php][Academic Version]] which is free, you just have to fill 50 | out a form and they send you an email with a download link 51 | - here is a [[http://goo.gl/HSVPp][blog post]] detailing how to get the ipython notebook 52 | running on Windows 7 53 | 54 | * Testing your installation 55 | 56 | ** Launching iPython 57 | 58 | To launch iPython, open up a Terminal and type a command to launch: 59 | 60 | To make it so Figures appear in their own window on your desktop (like MATLAB): 61 | #+BEGIN_SRC sh 62 | ipython --pylab 63 | #+END_SRC 64 | 65 | To make it so Figures appear in the console itself, right after the 66 | commmand(s) that produced them: 67 | #+BEGIN_SRC sh 68 | ipython qtconsole --pylab inline 69 | #+END_SRC 70 | 71 | To launch a browser-based "notebook" (this is really neat) 72 | #+BEGIN_SRC sh 73 | ipython notebook --pylab inline 74 | #+END_SRC 75 | 76 | 
** Making a plot 77 | 78 | Type the following: 79 | 80 | #+BEGIN_SRC python 81 | t = arange(0, 1, 0.01) 82 | y = sin(2*pi*t*3) 83 | plot(t,y) 84 | #+END_SRC 85 | 86 | and you should see this plot: 87 | 88 | #+ATTR_HTML: height="200px" 89 | [[file:figs/sin.png]] 90 | 91 | 92 | * Next steps 93 | 94 | In the next topic we will talk about dynamical systems --- what they 95 | are, and how they can be used to address scientific questions through 96 | computer simulation. 97 | 98 | [ [[file:1_Dynamical_Systems.html][next]] ] 99 | 100 | 101 | 102 | ----- 103 | 104 | * Links 105 | - python : http://www.python.org/ 106 | - numpy : http://numpy.scipy.org/ 107 | - scipy : http://www.scipy.org/ 108 | - matplotlib : http://matplotlib.sourceforge.net/ 109 | - ipython : http://ipython.org/ 110 | - Free Virtual Machine software virtualbox (mac, windows, linux) : 111 | [[https://www.virtualbox.org/]] 112 | - Commercial Virtual Machine software 113 | - vmware (mac) : 114 | https://www.vmware.com/products/fusion/overview.html 115 | - vmware (windows) : 116 | https://www.vmware.com/products/workstation/overview.html 117 | - parallels desktop (mac) : 118 | http://www.parallels.com/products/desktop/ 119 | - parallels workstation (windows, linux) : http://www.parallels.com/products/workstation/ 120 | - Free Ubuntu GNU/Linux distributions 121 | - ubuntu : http://www.ubuntu.com/download/desktop 122 | - Ubuntu Shell scripts to install python + scientific stuff and LaTeX 123 | - python gist : https://gist.github.com/3692447 124 | - LaTeX gist : https://gist.github.com/3692459 125 | - [[http://fperez.org/py4science/starter_kit.html][Py4Science]] a Starter Kit 126 | - [[http://neuro.debian.net/][NeuroDebian]] linux-based turnkey software platform for neuroscience 127 | -------------------------------------------------------------------------------- /org/1_Dynamical_Systems.org: -------------------------------------------------------------------------------- 1 | #+STARTUP: showall 2 | 3 | 
#+TITLE: 1. Dynamical Systems 4 | #+AUTHOR: Paul Gribble & Dinant Kistemaker 5 | #+EMAIL: paul@gribblelab.org 6 | #+DATE: fall 2012 7 | #+HTML_LINK_UP: http://www.gribblelab.org/compneuro/0_Setup_Your_Computer.html 8 | #+HTML_LINK_HOME: http://www.gribblelab.org/compneuro/index.html 9 | #+BIBLIOGRAPHY: refs plain option:-d limit:t 10 | 11 | ----- 12 | 13 | * What is a dynamical system? 14 | 15 | Systems can be characterized by the specific relation between their 16 | input(s) and output(s). A static system has an output that only 17 | depends on its input. A mechanical example of such a system is an 18 | idealized, massless (mass=0) spring. The length of the spring depends 19 | only on the force (the input) that acts upon it. Change the input 20 | force, and the length of the spring will change, and this will happen 21 | instantaneously (obviously a massless spring is a theoretical 22 | construct). A system becomes dynamical (it is said to have /dynamics/) 23 | when a mass is attached to the spring. Now the position of the mass 24 | (and equivalently, the length of the spring) is no longer directly 25 | dependent on the input force, but is also tied to the acceleration of 26 | the mass, which in turn depends on the sum of all forces acting upon 27 | it (the sum of the input force and the force due to the spring). The 28 | net force depends on the position of the mass, which depends on the 29 | length of the spring, which depends on the spring force. The property 30 | that acceleration of the mass depends on its position makes this a 31 | dynamical system. 32 | 33 | #+ATTR_HTML: :height 200px :align center 34 | #+CAPTION: A spring with a mass attached 35 | [[file:figs/spring-mass.png]] 36 | 37 | Dynamical systems can be characterized by differential equations that 38 | relate the state derivatives (e.g. velocity or acceleration) to the 39 | state variables (e.g. position). 
The differential equation for the 40 | spring-mass system depicted above is: 41 | 42 | \begin{equation} 43 | m\ddot{x} = -kx + mg 44 | \end{equation} 45 | 46 | Where $x$ is the position of the mass $m$ (the length of the spring), 47 | $\ddot{x}$ is the second derivative of position (i.e. acceleration), 48 | $k$ is a constant (related to the stiffness of the spring), and $g$ is 49 | the gravitational constant. 50 | 51 | The system is said to be a /second order/ system, as the highest 52 | derivative that appears in the differential equation describing the 53 | system, is two. The position $x$ and its time derivative $\dot{x}$ are 54 | called /states/ of the system, and $\dot{x}$ and $\ddot{x}$ are called 55 | /state derivatives/. 56 | 57 | Most systems out there in nature are dynamical systems. For example 58 | most chemical reactions under natural circumstances are dynamical: the 59 | rate of change of a chemical reaction depends on the amount of 60 | chemical present, in other words the state derivative is proportional 61 | to the state. Dynamical systems exist in biology as well. For example 62 | the rate of change of a certain species depends on its population 63 | size. 64 | 65 | Dynamical equations are often described by a set of /coupled 66 | differential equations/. For example, the reproduction rate of 67 | rabbits (state derivative 1) depends on the population of rabbits 68 | (state 1) and on the population size of foxes (state 2). The 69 | reproduction rate of foxes (state derivative 2) depends on the 70 | population of foxes (state 2) and also on the population of rabbits 71 | (state 1). In this case we have two coupled first-order differential 72 | equations, and hence a system of order two. The so-called 73 | predator-prey model is also known as the [[http://en.wikipedia.org/wiki/Lotka_Volterra_equation][Lotka-Volterra equations]]. 
74 | 75 | \begin{eqnarray} 76 | \dot{x} &= x(\alpha - \beta y)\\ 77 | \dot{y} &= -y(\gamma - \delta x) 78 | \end{eqnarray} 79 | 80 | * Why make models? 81 | 82 | There are two main reasons: one being practical and one mostly 83 | theoretical. The practical use is prediction. A typical example of a 84 | dynamical system that is modelled for prediction is the weather. The 85 | weather is a very complex (high-order, nonlinear, coupled and 86 | chaotic) system. More theoretically, one reason to make models is to 87 | test the validity of a functional hypothesis of an observed 88 | phenomenon. A beautiful example is the model made by [[http://en.wikipedia.org/wiki/Hodgkin-Huxley_model][Hodgkin and 89 | Huxley]] to understand how action potentials arise and propagate in 90 | neurons \cite{HH1952,HH1990}. They modelled the different 91 | (voltage-gated) ion channels in an axon membrane and showed using 92 | mathematical models that indeed the changes in ion concentrations were 93 | responsible for the electrical spikes observed experimentally 7 years 94 | earlier. 95 | 96 | #+CAPTION: Hodgkin-Huxley model of voltage-gated ion channels 97 | #+ATTR_HTML: :height 200px 98 | [[file:figs/HH1.png]] 99 | 100 | #+CAPTION: Action potentials across the membrane 101 | #+ATTR_HTML: :height 200px 102 | [[file:figs/HH2.png]] 103 | 104 | A second theoretical reason to make models is that it is sometimes 105 | very difficult, if not impossible, to answer a certain question 106 | empirically. As an example we take the following biomechanical 107 | question: Would you be able to jump higher if your biceps femoris 108 | (part of your hamstrings) were two separate muscles each crossing only 109 | one joint rather than being one muscle crossing both the hip and knee 110 | joint? Not a strange question as one could then independently control 111 | the torques around each joint. 
112 | 113 | In order to answer this question empirically, one would like to do the 114 | following experiment: 115 | 116 | - measure the maximal jump height of a subject 117 | - change only the musculoskeletal properties in question 118 | - measure the jump height again 119 | 120 | Of course, such an experiment would yield several major ethical, 121 | practical and theoretical drawbacks. It is unlikely that an ethics 122 | committee would approve the transplantation of the origin and 123 | insertion of the hamstrings in order to examine its effect on jump 124 | height. And even so, one would have some difficulties finding 125 | subjects. Even with a volunteer for such a surgery it would not bring 126 | us any closer to an answer. After such a surgery, the subject would 127 | not be able to jump right away, but has to undergo severe revalidation 128 | and surely during such a period many factors will undesirably change 129 | like maximal contractile forces. And even if the subject would fully 130 | recover (apart from the hamstrings transplantation), his or her 131 | nervous system would have to find the new optimal muscle stimulation 132 | pattern. 133 | 134 | If one person jumps lower than another person, is that because she 135 | cannot jump as high with her particular muscles, or was it just that 136 | her CNS was not able to find the optimal muscle activation pattern? 137 | Ultimately, one wants to know through what mechanism the subject's 138 | jump performance changes. To investigate this, one would need to know, 139 | for example, the forces produced by the hamstrings as a function of 140 | time, something that is impossible to obtain experimentally. Of 141 | course, this example is somewhat ridiculous, but its message is 142 | hopefully clear that for several questions a strict empirical approach 143 | is not suitable. An alternative is provided by mathematical modelling. 
144 | 145 | * Next steps 146 | 147 | In the next topic, we will be examining three systems --- a 148 | mass-spring system, a system representing weather patterns, and a 149 | system characterizing predator-prey interactions. In each case we will 150 | see how to go from differential equations characterizing the dynamics 151 | of the system, to Python code, and run that code to simulate the 152 | behaviour of the system over time. We will see the great power of 153 | simulation, namely the ability to change aspects of the system at 154 | will, and simulate to explore the resulting change in system 155 | behaviour. 156 | 157 | [ [[file:2_Modelling_Dynamical_Systems.html][next]] ] 158 | 159 | ----- 160 | -------------------------------------------------------------------------------- /org/2_Modelling_Dynamical_Systems.org: -------------------------------------------------------------------------------- 1 | #+STARTUP: showall 2 | 3 | #+TITLE: 2. Modelling Dynamical Systems 4 | #+AUTHOR: Paul Gribble & Dinant Kistemaker 5 | #+EMAIL: paul@gribblelab.org 6 | #+DATE: fall 2012 7 | #+HTML_LINK_UP: http://www.gribblelab.org/compneuro/1_Dynamical_Systems.html 8 | #+HTML_LINK_HOME: http://www.gribblelab.org/compneuro/index.html 9 | 10 | ----- 11 | 12 | * Characterizing a System Using Differential Equations 13 | 14 | A dynamical system such as the mass-spring system we saw before, can 15 | be characterized by the relationship between state variables $s$ and 16 | their (time) derivatives $\dot{s}$. How do we arrive at the correct 17 | characterization of this relationship? The short answer is, we figure 18 | it out using our knowledge of physics, or we are simply given the 19 | equations by someone else. Let's look at a simple mass-spring system 20 | again. 21 | 22 | #+ATTR_HTML: :height 200px :align center 23 | #+CAPTION: A spring with a mass attached 24 | [[file:figs/spring-mass.png]] 25 | 26 | We know a couple of things about this system. 
We know from [[http://en.wikipedia.org/wiki/Hooke's_law][Hooke's law]] 27 | of elasticity that the extension of a spring is directly and linearly 28 | proportional to the load applied to it. More precisely, the force that 29 | a spring applies in response to a perturbation from it's /resting 30 | length/ (the length at which it doesn't generate any force), is 31 | linearly proportional, through a constant $k$, to the difference in 32 | length between its current length and its resting length (let's call 33 | this distance $x$). For convention let's assume positive values of $x$ 34 | correspond to lengthening the spring beyond its resting length, and 35 | negative values of $x$ correspond to shortening the spring from its 36 | resting length. 37 | 38 | \begin{equation} 39 | F = -kx 40 | \end{equation} 41 | 42 | Let's decide that the /state variable/ that we are interested in for 43 | our system is $x$. We will refer to $x$ instead of $s$ from now on to 44 | denote our state variable. 45 | 46 | We also know from [[http://en.wikipedia.org/wiki/Newton's_laws_of_motion][Newton's laws of motion]] (specifically [[http://en.wikipedia.org/wiki/Newton's_laws_of_motion#Newton.27s_second_law][Newton's 47 | second law]]) that the net force on an object is equal to its mass $m$ 48 | multiplied by its acceleration $a$ (the second derivative of 49 | position). 50 | 51 | \begin{equation} 52 | F = ma 53 | \end{equation} 54 | 55 | Instead of using $a$ to denote acceleration let's use a different 56 | notation, in terms of the spring's perturbed length $x$. The rate of 57 | change (velocity) is denoted $\dot{x}$ and the rate of change of the 58 | velocity (i.e. the acceleration) is denoted $\ddot{x}$. 59 | 60 | \begin{equation} 61 | F = m \ddot{x} 62 | \end{equation} 63 | 64 | We also know that the mass is affected by two forces: the force due to 65 | the spring ($-kx$) and also the gravitational force $g$. 
So the 66 | equation characterizing the /net forces/ on the mass is 67 | 68 | \begin{equation} 69 | \sum{F} = m\ddot{x} = -kx + mg 70 | \end{equation} 71 | 72 | or just 73 | 74 | \begin{equation} 75 | m\ddot{x} = -kx + mg 76 | \end{equation} 77 | 78 | This equation is a /second-order/ differential equation, because the 79 | highest state derivative is a /second derivative/ (i.e. $\ddot{x}$, 80 | the second derivative, i.e. the acceleration, of $x$). The equation 81 | specifies the relationship between the state variables (in this case a 82 | single state variable $x$) and its derivatives (in this case a single 83 | derivative, $\ddot{x}$). 84 | 85 | The reason we want an equation like this, from a practical point of 86 | view, is that we will be using numerical solvers in Python/Scipy to 87 | /integrate/ this differential equation over time, so that we can 88 | /simulate/ the behaviour of the system. What these solvers need is a 89 | Python function that returns state derivatives, given current 90 | states. We can re-arrange the equation above so that it specifies how 91 | to compute the state derivative $\ddot{x}$ given the current state 92 | $x$. 93 | 94 | \begin{equation} 95 | \ddot{x} = \frac{-kx}{m} + g 96 | \end{equation} 97 | 98 | Now we have what we need in order to simulate this system in 99 | Python/Scipy. At any time point, we can compute the acceleration of 100 | the mass by the formula above. 101 | 102 | * Integrating Differential Equations in Python/SciPy 103 | 104 | Here is a Python function that we will be using to simulate the 105 | mass-spring system. All it does, really, is compute the equation 106 | above: what is the value of $\ddot{x}$, given $x$? The one addition we 107 | have is that we are going to keep track not just of one state variable 108 | $x$ but also its first derivative $\dot{x}$ (the rate of change of 109 | $x$, i.e. velocity). 
110 | 111 | #+BEGIN_SRC python 112 | def MassSpring(state,t): 113 | # unpack the state vector 114 | x = state[0] 115 | xd = state[1] 116 | 117 | # these are our constants 118 | k = 2.5 # Newtons per metre 119 | m = 1.5 # Kilograms 120 | g = 9.8 # metres per second 121 | 122 | # compute acceleration xdd 123 | xdd = ((-k*x)/m) + g 124 | 125 | # return the two state derivatives 126 | return [xd, xdd] 127 | #+END_SRC 128 | 129 | Note that the function we wrote takes two arguments as inputs: =state= 130 | and =t=, which corresponds to time. This is necessary for the 131 | numerical solver that we will use in Python/Scipy. The =state= 132 | variable is actually an /array/ of two values corresponding to $x$ and 133 | $\dot{x}$. 134 | 135 | How does numerical integration (simulation) work? Here is a summary of the steps that a numerical solver takes. First, you have to provide the system with two things: 136 | 137 | 1. initial conditions (what are the initial states of the system?) 138 | 2. a time vector over which to simulate 139 | 140 | Given this, the numerical solver will go through the following steps to simulate the system: 141 | 142 | - calculate state derivatives $\ddot{x}$ at the initial time ($t=0$) 143 | given the initial states $(x,\dot{x})$ 144 | - estimate $x(t+ \Delta t)$ using $x(t=0)$, $\dot{x}(t=0)$ and 145 | $\ddot{x}(t=0)$ 146 | - calculate $\ddot{x}(t=t + \Delta t)$ from $x(t=t + \Delta t)$ and 147 | $\dot{x}(t=t + \Delta t)$ 148 | - estimate $x(t + 2 \Delta t)$ and $\dot{x}(t + 2 \Delta t)$ using 149 | $x(t=t + \Delta t)$, $\dot{x}(t=t + \Delta t)$ and $\ddot{x}(t=t + 150 | \Delta t)$ 151 | - calculate $\ddot{x}(t=t + 2\Delta t)$ from $x(t=t + 2\Delta t)$ and 152 | $\dot{x}(t=t + 2\Delta t)$ 153 | - ... etc 154 | 155 | In this way the numerical solver can esimate how the system states 156 | $(x,\dot{x})$ unfold over time, given the initial conditions, and the 157 | known relationship between state derivatives and system states. 
The 158 | details of the "estimate" steps above are not something we are going 159 | to dive into now. Suffice it to say that current estimation algorithms 160 | are based on the work of two German mathematicians named [[http://en.wikipedia.org/wiki/Runge–Kutta_methods][Runge and 161 | Kutta]] in the beginning of the 20th century. These numerical recipies 162 | are readily available in Scipy ([[http://docs.scipy.org/doc/scipy/reference/integrate.html][docs here]] (and in MATLAB, and other 163 | numerical software) and are known as ODE solvers (ODE stands for 164 | /ordinary differential equation/). 165 | 166 | Here's how we would simulate the mass-spring system above. Launch 167 | iPython with the =--pylab= argument (this automatically imports a 168 | bunch of libraries that we will use, including plotting libraries). 169 | 170 | #+BEGIN_SRC python 171 | from scipy.integrate import odeint 172 | 173 | def MassSpring(state,t): 174 | # unpack the state vector 175 | x = state[0] 176 | xd = state[1] 177 | 178 | # these are our constants 179 | k = -2.5 # Newtons per metre 180 | m = 1.5 # Kilograms 181 | g = 9.8 # metres per second 182 | 183 | # compute acceleration xdd 184 | xdd = ((k*x)/m) + g 185 | 186 | # return the two state derivatives 187 | return [xd, xdd] 188 | 189 | state0 = [0.0, 0.0] 190 | t = arange(0.0, 10.0, 0.1) 191 | 192 | state = odeint(MassSpring, state0, t) 193 | 194 | plot(t, state) 195 | xlabel('TIME (sec)') 196 | ylabel('STATES') 197 | title('Mass-Spring System') 198 | legend(('$x$ (m)', '$\dot{x}$ (m/sec)')) 199 | #+END_SRC 200 | 201 | [[file:code/mass_spring.py][mass\_spring.py]] 202 | 203 | A couple of notes about the code. I have simply chosen, out of the 204 | blue, values for the constants $k$ and $m$. The [[http://en.wikipedia.org/wiki/Gravitational_constant][gravitational constant]] 205 | $g$ is of course known. 
I have also chosen to simulate the system for 206 | 10 seconds, and I have chosen a time /resolution/ of 100 milliseconds 207 | (0.1 seconds). We will talk later about the issue of what is an 208 | appropriate time resolution for simulation. 209 | 210 | You should see a plot like this: 211 | 212 | #+ATTR_HTML: :height 400px :align center 213 | #+CAPTION: Mass-Spring Simulation 214 | [[file:figs/mass-spring-sim.png]] 215 | 216 | The blue line shows the position $x$ of the mass (the length of the 217 | spring) over time, and the green line shows the rate of change of $x$, 218 | in other words the velocity $\dot{x}$, over time. These are the two 219 | states of the system, simulated over time. 220 | 221 | The way to interpret this simulation is, if we start the system at 222 | $x=0$ and $\dot{x}=0$, and simulate for 10 seconds, this is how the 223 | system would behave. 224 | 225 | ** The power of modelling and simulation 226 | 227 | Now you can appreciate the power of mathematical models and 228 | simulation: given a model that characterizes (to some degree of 229 | accuracy) the behaviour of a system we are interested in, we can use 230 | simulation to perform experiments /in simulation/ instead of in 231 | reality. This can be very powerful. We can ask questions of the model, 232 | in simulation, that may be too difficult, or expensive, or time 233 | consuming, or just plain impossible, to do in real-life empirical 234 | studies. The degree to which we regard the results of simulations as 235 | interpretable, is a direct reflection of the degree to which we 236 | believe that our mathematical model is a reasonable characterization 237 | of the behaviour of the real system. 238 | 239 | ** Exercises 240 | 241 | 1. We have started the system at $x=0$ which means that the spring is 242 | not stretched beyond its resting length (so spring force due to 243 | stretch should equal zero), and $\dot{x}=0$, which means the 244 | spring's velocity is zero, i.e. 
it is not moving. Why does the 245 | simulation predict that the spring will begin stretching, then 246 | bouncing back and forth? 247 | 248 | 2. What is the influence of the sign and magnitude of the stiffness 249 | parameter $k$? 250 | 251 | 3. In physics, [[http://en.wikipedia.org/wiki/Damping][damping]] can be used to reduce the magnitude of 252 | oscillations. Damping generates a force that is directly 253 | proportional to velocity ($F = -b\dot{x}$). Add damping to the 254 | mass-spring system and re-run the simulation. Specify the value of 255 | the damping constant $b=-2.0$. What happens? 256 | 257 | 4. What is the influence of the sign and magnitude of the damping 258 | coefficient $b$? 259 | 260 | 5. Double the mass, and re-run the simulaton. What happens? 261 | 262 | 6. How would you add an input force to the system? 263 | 264 | 265 | * Lorenz Attractor 266 | 267 | The [[http://en.wikipedia.org/wiki/Lorenz_system][Lorenz system]] is a dynamical system that we will look at briefly, 268 | as it will allow us to discuss several interesting issues around 269 | dynamical systems. It is a system often used to illustrate [[http://en.wikipedia.org/wiki/Nonlinear_system][non-linear 270 | systems]] theory and [[http://en.wikipedia.org/wiki/Chaos_theory][chaos theory]]. It's sometimes used as a simple 271 | demonstration of the [[http://en.wikipedia.org/wiki/Butterfly_effect][butterfly effect]] (sensitivity to initial 272 | conditions). 273 | 274 | The Lorenz system is a simplified mathematical model for atmospheric 275 | convection. Let's not worry about the details of what it represents, 276 | for now the important things to note are that it is a system of three 277 | /coupled/ differential equations, and characterizes a system with 278 | three state variables $(x,y,z$). 
279 | 280 | \begin{eqnarray} 281 | \dot{x} &= &\sigma(y-x)\\ 282 | \dot{y} &= &(\rho-z)x - y\\ 283 | \dot{z} &= &xy-\beta z 284 | \end{eqnarray} 285 | 286 | If you set the three constants $(\sigma,\rho,\beta)$ to specific 287 | values, the system exhibits /chaotic behaviour/. 288 | 289 | \begin{eqnarray} 290 | \sigma &= &10\\ 291 | \rho &= &28\\ 292 | \beta &= &\frac{8}{3} 293 | \end{eqnarray} 294 | 295 | Let's implement this system in Python/Scipy. We have been given above 296 | the three equations that characterize how the state derivatives 297 | $(\dot{x},\dot{y},\dot{z})$ depend on $(x,y,z)$ and the constants 298 | $(\sigma,\rho,\beta)$. All we have to do is write a function that 299 | implements this, set some initial conditions, decide on a time array 300 | to simulate over, and run the simulation using =odeint()=. 301 | 302 | #+BEGIN_SRC python 303 | from scipy.integrate import odeint 304 | 305 | def Lorenz(state,t): 306 | # unpack the state vector 307 | x = state[0] 308 | y = state[1] 309 | z = state[2] 310 | 311 | # these are our constants 312 | sigma = 10.0 313 | rho = 28.0 314 | beta = 8.0/3.0 315 | 316 | # compute state derivatives 317 | xd = sigma * (y-x) 318 | yd = (rho-z)*x - y 319 | zd = x*y - beta*z 320 | 321 | # return the state derivatives 322 | return [xd, yd, zd] 323 | 324 | state0 = [2.0, 3.0, 4.0] 325 | t = arange(0.0, 30.0, 0.01) 326 | 327 | state = odeint(Lorenz, state0, t) 328 | 329 | # do some fancy 3D plotting 330 | from mpl_toolkits.mplot3d import Axes3D 331 | fig = figure() 332 | ax = fig.gca(projection='3d') 333 | ax.plot(state[:,0],state[:,1],state[:,2]) 334 | ax.set_xlabel('x') 335 | ax.set_ylabel('y') 336 | ax.set_zlabel('z') 337 | show() 338 | #+END_SRC 339 | 340 | [[file:code/lorenz1.py][lorenz1.py]] 341 | 342 | You should see something like this: 343 | 344 | #+ATTR_HTML: :height 400px :align center 345 | #+CAPTION: Lorenz Attractor 346 | [[file:figs/lorenz1.png]] 347 | 348 | The three axes on the plot represent the three states 
$(x,y,z)$ 349 | plotted over the 30 seconds of simulated time. We started the system 350 | with three particular values of $(x,y,z)$ (I chose them arbitrarily), 351 | and we set the simulation in motion. This is the trajectory, in 352 | /state-space/, of the Lorenz system. 353 | 354 | You can see an interesting thing... the system seems to have two 355 | stable equilibrium states, or attractors: those circular paths. The 356 | system circles around in one "neighborhood" in state-space, and then 357 | flips over and circles around the second neighborhood. The number of 358 | times it circles in a given neighborhood, and the time at which it 359 | switches, displays chaotic behaviour, in the sense that they are 360 | exquisitly sensitive to initial conditions. 361 | 362 | For example let's re-run the simulation but change the initial 363 | conditions. Let's change them by a very small amount, say 364 | 0.0001... and let's only change the $x$ initial state by that very 365 | small amount. We will simulate for 30 seconds. 
366 | 367 | #+BEGIN_SRC python 368 | t = arange(0.0, 30, 0.01) 369 | 370 | # original initial conditions 371 | state1_0 = [2.0, 3.0, 4.0] 372 | state1 = odeint(Lorenz, state1_0, t) 373 | 374 | # rerun with very small change in initial conditions 375 | delta = 0.0001 376 | state2_0 = [2.0+delta, 3.0, 4.0] 377 | state2 = odeint(Lorenz, state2_0, t) 378 | 379 | # animation 380 | figure() 381 | pb, = plot(state1[:,0],state1[:,1],'b-',alpha=0.2) 382 | xlabel('x') 383 | ylabel('y') 384 | p, = plot(state1[0:10,0],state1[0:10,1],'b-') 385 | pp, = plot(state1[10,0],state1[10,1],'b.',markersize=10) 386 | p2, = plot(state2[0:10,0],state2[0:10,1],'r-') 387 | pp2, = plot(state2[10,0],state2[10,1],'r.',markersize=10) 388 | tt = title("%4.2f sec" % 0.00) 389 | # animate 390 | step = 3 391 | for i in xrange(1,shape(state1)[0]-10,step): 392 | p.set_xdata(state1[10+i:20+i,0]) 393 | p.set_ydata(state1[10+i:20+i,1]) 394 | pp.set_xdata(state1[19+i,0]) 395 | pp.set_ydata(state1[19+i,1]) 396 | p2.set_xdata(state2[10+i:20+i,0]) 397 | p2.set_ydata(state2[10+i:20+i,1]) 398 | pp2.set_xdata(state2[19+i,0]) 399 | pp2.set_ydata(state2[19+i,1]) 400 | tt.set_text("%4.2f sec" % (i*0.01)) 401 | draw() 402 | 403 | i = 1939 # the two simulations really diverge here! 404 | s1 = state1[i,:] 405 | s2 = state2[i,:] 406 | d12 = norm(s1-s2) # distance 407 | print ("distance = %f for a %f different in initial condition") % (d12, delta) 408 | #+END_SRC 409 | 410 | [[file:code/lorenz2.py][lorenz2.py]] 411 | 412 | #+BEGIN_EXAMPLE 413 | distance = 32.757253 for a 0.000100 different in initial condition 414 | #+END_EXAMPLE 415 | 416 | You should see an animation of the two state-space trajectories. For 417 | convenience we are only plotting $x$ vs $y$ and ignoring $z$. It turns 418 | out that 3D animations are not trivial in matplotlib (there is a 419 | library called mayavi that is excellent for 3D stuff). 
420 | 421 | The original simulation is shown in blue and the new one (in which the 422 | initial condition of $x$ was increased by 0.0001) in red. The two 423 | follow each other quite closely for a long time, and then begin to 424 | diverge at about the 16 second mark. At the end of the animation it 425 | looks like this: 426 | 427 | #+ATTR_HTML: :height 400px :align center 428 | #+CAPTION: Lorenz Attractor 429 | [[file:figs/lorenz2.png]] 430 | 431 | At 19.39 seconds it looks like this: 432 | 433 | #+ATTR_HTML: :height 400px :align center 434 | #+CAPTION: Lorenz Attractor 435 | [[file:figs/lorenz3.png]] 436 | 437 | Note how the two systems are in different "neighborhoods" entirely! 438 | 439 | At the end of the code above we compute the distance between the two 440 | systems (the 3D distance between their respective $(x,y,z)$ positions 441 | in state-space), and the distance is a whopping 32.76 units, for a 442 | 0.0001 difference in initial conditions. 443 | 444 | This illustrates how systems with relatively simple differential 445 | equations characterizing their behaviour, can turn out to be 446 | exquisitely sensitive to initial conditions. Just imagine if the 447 | initial conditions of your simulation were gathered from empirical 448 | observations (like the weather, for example). Now imagine you use a 449 | model simulation to predict whether it will be sunny (left-hand 450 | "neighborhood" of the plot above) or thunderstorms (right-hand 451 | "neighborhood"), 30 days from now. If the answer can flip between one 452 | prediction and the other, based on a 1/10,000 different in 453 | measurement, you had better be sure of your empirical measurement 454 | instruments, when you make a prediction 30 days out! Actually this 455 | won't even solve the problem, no matter how precise your 456 | measurements. The point is that the system as a whole is very 457 | sensitive to even tiny changes in initial conditions. 
This is why 458 | short-term weather forecasts are relatively accurate, but forecasts 459 | past a couple of days can turn out to be dead wrong. 460 | 461 | 462 | * Predator-Prey model 463 | 464 | The [[http://en.wikipedia.org/wiki/Lotka%E2%80%93Volterra_equation][Lotka-Volterra equations]] are two coupled first-order nonlinear 465 | differential equations that are used to characterize the dynamics of 466 | biological systems in which a predator population and a prey popuation 467 | interact. The two populations develop over time according to these equations: 468 | 469 | \begin{eqnarray} 470 | \dot{x} &= &x(\alpha-\beta y)\\ 471 | \dot{y} &= &-y(\gamma - \sigma x) 472 | \end{eqnarray} 473 | 474 | where $x$ is the number of prey (for example, rabbits), $y$ is the 475 | number of predators (e.g. foxes), and $\dot{x}$ and $\dot{y}$ 476 | represent the growth rates (the rates of change over time) of the two 477 | populations. The values $(\alpha,\beta,\gamma,\sigma)$ are parameters 478 | (constants) that characterize different aspects of the two 479 | populations. 480 | 481 | Assumptions of this simple form of the model are: 482 | 483 | 1. prey find ample food at all times 484 | 2. food supply of predators depends entirely on prey population 485 | 3. rate of change of population is proportional to its size 486 | 4. the environment does not change 487 | 488 | The parameters can be interepreted as: 489 | 490 | - $\alpha$ is the natural growth rate of prey in the absence of predation 491 | - $\beta$ is the death rate per encounter of prey due to predation 492 | - $\sigma$ is related to the growth rate of predators 493 | - $\gamma$ is the natural death rate of predators in the absence of food (prey) 494 | 495 | Here is some example code showing how to simulate this system. Just as 496 | before, we need to complete a few steps: 497 | 498 | 1. 
write a Python function that characterizes how the system's state 499 | derivatives are related to the system's states (this is given by 500 | the equations above) 501 | 2. decide on values of the system parameters 502 | 3. decide on values of the initial conditions of the system (the 503 | initial values of the states) 504 | 4. decide on a time span and time resolution for simulating the system 505 | 5. simulate! (i.e. use an ODE solver to integrate the differential 506 | equations over time) 507 | 6. examine the states, typically by plotting them 508 | 509 | Here is some code: 510 | 511 | #+BEGIN_SRC python 512 | from scipy.integrate import odeint 513 | 514 | def LotkaVolterra(state,t): 515 | x = state[0] 516 | y = state[1] 517 | alpha = 0.1 518 | beta = 0.1 519 | sigma = 0.1 520 | gamma = 0.1 521 | xd = x*(alpha - beta*y) 522 | yd = -y*(gamma - sigma*x) 523 | return [xd,yd] 524 | 525 | t = arange(0,500,1) 526 | state0 = [0.5,0.5] 527 | state = odeint(LotkaVolterra,state0,t) 528 | figure() 529 | plot(t,state) 530 | ylim([0,8]) 531 | xlabel('Time') 532 | ylabel('Population Size') 533 | legend(('x (prey)','y (predator)')) 534 | title('Lotka-Volterra equations') 535 | #+END_SRC 536 | 537 | You should see a plot like this: 538 | 539 | #+ATTR_HTML: :height 400px :align center 540 | #+CAPTION: Lotka-Volterra Simulation 541 | [[file:figs/lotkavolterra1.png]] 542 | 543 | We can also plot the trajectory of the system in /state-space/ (much 544 | like we did for the Lorenz system above): 545 | 546 | #+BEGIN_SRC python 547 | # animation in state-space 548 | figure() 549 | pb, = plot(state[:,0],state[:,1],'b-',alpha=0.2) 550 | xlabel('x (prey population size)') 551 | ylabel('y (predator population size)') 552 | p, = plot(state[0:10,0],state[0:10,1],'b-') 553 | pp, = plot(state[10,0],state[10,1],'b.',markersize=10) 554 | tt = title("%4.2f sec" % 0.00) 555 | 556 | # animate 557 | step=2 558 | for i in xrange(1,shape(state)[0]-10,step): 559 | p.set_xdata(state[10+i:20+i,0]) 560 
| p.set_ydata(state[10+i:20+i,1]) 561 | pp.set_xdata(state[19+i,0]) 562 | pp.set_ydata(state[19+i,1]) 563 | tt.set_text("%d steps" % (i)) 564 | draw() 565 | #+END_SRC 566 | 567 | [[file:code/lotkavolterra.py][lotkavolterra.py]] 568 | 569 | You should see a plot like this: 570 | 571 | #+ATTR_HTML: :height 400px :align center 572 | #+CAPTION: Lotka-Volterra State-space plot 573 | [[file:figs/lotkavolterra2.png]] 574 | 575 | ** Exercises 576 | 577 | 1. Increase the $\alpha$ parameter and re-run the simulation. What 578 | happens and why? 579 | 2. Set all parameters to 0.2. What happens and why? 580 | 3. Try the following: $(\alpha,\beta,\gamma,\sigma)$ = 581 | (0.20, 0.20, 0.02, 0.0). What happens and why? 582 | 583 | * Next steps 584 | 585 | We have seen how to take a set of differential equations that 586 | characterize the dynamics of a system, and implement them in Python, 587 | and run a simulation of the behaviour of that system over time. In the 588 | next topic, we will be applying this to models of single neurons, and 589 | simulating the dynamics of voltage-gated ion channels, and examining 590 | how these models predict spiking behaviour. 591 | 592 | [ [[file:3_Modelling_Action_Potentials.html][next]] ] 593 | -------------------------------------------------------------------------------- /org/3_Modelling_Action_Potentials.org: -------------------------------------------------------------------------------- 1 | #+STARTUP: showall 2 | 3 | #+TITLE: 3. Modelling Action Potentials 4 | #+AUTHOR: Paul Gribble & Dinant Kistemaker 5 | #+EMAIL: paul@gribblelab.org 6 | #+DATE: fall 2012 7 | #+HTML_LINK_UP: http://www.gribblelab.org/compneuro/2_Modelling_Dynamical_Systems.html 8 | #+HTML_LINK_HOME: http://www.gribblelab.org/compneuro/index.html 9 | #+BIBLIOGRAPHY: refs plain option:-d limit:t 10 | 11 | ----- 12 | 13 | * Introduction 14 | 15 | In this section we will use a model of voltage-gated ion channels in a 16 | single neuron to simulate action potentials. 
The model is based on the 17 | work by Hodgkin & Huxley in the 1940s and 1950s 18 | \cite{HH1952,HH1990}. A good reference to refresh your memory about 19 | how ion channels in a neuron work is the Kandel, Schwartz & Jessel 20 | book "Principles of Neural Science" \cite{kandel2000principles}. 21 | 22 | To model the action potential we will use an article by Ekeberg et 23 | al. (1991) published in Biological Cybernetics 24 | \cite{ekeberg1991}. When reading the article you can focus on the 25 | first three pages (up to paragraph 2.3) and try to find answers to the 26 | following questions: 27 | 28 | - How many differential equations are there? 29 | - What is the order of the system described in equations 1-9? 30 | - What are the states and state derivatives of the system? 31 | 32 | * Simulating the Hodgkin & Huxley model 33 | 34 | Before we begin coding up the model, it may be useful to remind you of 35 | a fundamental law of electricity, one that relates electrical 36 | potential $V$ to electric current $I$ and resistance $R$ (or 37 | conductance $G$, the reciprocal of resistance). This of course is 38 | known as [[http://en.wikipedia.org/wiki/Ohm's_law][Ohm's law]]: 39 | 40 | \begin{equation} 41 | V = IR 42 | \end{equation} 43 | 44 | or 45 | 46 | \begin{equation} 47 | V = \frac{I}{G} 48 | \end{equation} 49 | 50 | Our goal here is to code up a dynamical model of the membrane's 51 | electric circuit including two types of ion channels: sodium and 52 | potassium channels. We will use this model to better understand the 53 | process underlying the origin of an action potential. 54 | 55 | ** The neuron model 56 | 57 | #+ATTR_HTML: :width 400px :align center 58 | #+CAPTION: Schematic of Ekeberg et al. 1991 neuron model 59 | [[file:figs/ekeberg_fig1.png]] 60 | 61 | The figure above, adapted from Ekeberg et al., 1991 62 | \cite{ekeberg1991}, schematically illustrates the model of a 63 | neuron. In panel A we see a soma, and multiple dendrites. 
Each of 64 | these can be modelled by an electrical "compartment" (Panel B) and the 65 | passive interactions between them can be modelled as a pretty standard 66 | electrical circuit (see [[http://en.wikipedia.org/wiki/Biological_neuron_model][Biological Neuron Model]] for more details about 67 | compartmental models of neurons). In panel C, we see an expanded model 68 | of the Soma from panel A. Here, a number of active ion channels are 69 | included in the model of the soma. 70 | 71 | For our purposes here, we will focus on the soma, and we will not 72 | include any additional dendrites in our implementation of the 73 | model. Thus essentially we will be modelling what appears in panel C, 74 | and at that, only a subset. 75 | 76 | In panel C we see that the soma can be modelled as an electrical 77 | circuit with a sodium ion channel (Na), a potassium ion channel (K), a 78 | calcium ion channel (Ca), and a calcium-dependent potassium channel 79 | (K(Ca)). What we will be concerned with simulating, ultimately, is the 80 | intracellular potential E. 81 | 82 | *** Passive Properties 83 | 84 | Equation (1) of Ekeberg is a differential equation describing the 85 | relation between the time derivative of the membrane potential $E$ as 86 | a function of the passive leak current through the membrane, and the 87 | current through the ion channels. Note that Ekeberg uses $E$ instead 88 | of the typical $V$ symbol to represent electrical potential. 89 | 90 | \begin{equation} 91 | \frac{dE}{dt} = \frac{(E_{leak}-E)G_{m} + \sum{\left(E_{comp}-E\right)}G_{core} + I_{channels}}{C_{m}} 92 | \end{equation} 93 | 94 | Don't panic, it's not actually that complicated. What this equation is 95 | saying is that the rate of change of electrical potential across the 96 | current (the left hand side of the equation, $\frac{dE}{dt}$) is equal 97 | to the sum of a bunch of other terms, divided by membrane capacitance 98 | $C_{m}$ (the right hand side of the equation). 
Recall from basic 99 | physics that [[http://en.wikipedia.org/wiki/Capacitance][capacitance]] is a measure of the ability of something to 100 | store an electrical charge. 101 | 102 | The "bunch of other things" is a sum of three things, actually, (from 103 | left to right): a passive leakage current, plus a term characterizing 104 | the electrical coupling of different compartments, plus the currents 105 | of the various ion channels. Since we are not going to be modelling 106 | dendrites here, we can ignore the middle term on the right hand side 107 | of the equation $\sum{\left(E_{comp}-E\right)}G_{core}$ which 108 | represents the sum of currents from adjacent compartments (we have 109 | none). 110 | 111 | We are also going to include in our model an external current 112 | $I_{ext}$. This can essentially represent the sum of currents coming 113 | in from the dendrites (which we are not explicitly modelling). It can 114 | also represent external current injected in a [[http://en.wikipedia.org/wiki/Patch_clamp][patch-clamp]] 115 | experiment. This is what we as experimenters can manipulate, for 116 | example, to see how neuron spiking behaviour changes. So what we will 117 | actually be working with is this: 118 | 119 | \begin{equation} 120 | \frac{dE}{dt} = \frac{(E_{leak}-E)G_{m} + I_{channels} + I_{ext}}{C_{m}} 121 | \end{equation} 122 | 123 | What we need to do now is unpack the $I_{channels}$ term representing 124 | the currents from all of the ion channels in the model. Initially we 125 | will only be including two, the potassium channel (K) and the sodium 126 | channel (Na). 127 | 128 | *** Sodium channels (Na) 129 | 130 | The current through sodium channels that enter the soma are 131 | represented by equation (2) in Ekeberg et al. 
(1991): 132 | 133 | \begin{equation} 134 | I_{Na} = (E_{Na} - E_{soma})G_{Na}m^{3}h 135 | \end{equation} 136 | 137 | where $m$ is the activation of the sodium channel and $h$ is the 138 | inactivation of the sodium channel, and the other terms are constant 139 | parameters: $E_{Na}$ is the reversal potential, $G_{Na}$ is the 140 | maximum sodium conductance throught the membrane, and $E_{soma}$ is 141 | the membrane potential of the soma. 142 | 143 | The activation $m$ of the sodium channels is described by the 144 | differential equation (3) in Ekeberg et al. (1991): 145 | 146 | \begin{equation} 147 | \frac{dm}{dt} = \alpha_{m}(1-m) - \beta_{m}m 148 | \end{equation} 149 | 150 | where $\alpha_{m}$ represents the rate at which the channel switches 151 | from a closed to an open state, and $\beta_{m}$ is rate for the 152 | reverse. These two parameters $\alpha$ and $\beta$ depend on the 153 | membrane potential in the soma. In other words the sodium channel is 154 | voltage-gated. Equation (4) in Ekeberg et al. (1991) gives these 155 | relationships: 156 | 157 | \begin{eqnarray} 158 | \alpha_{m} &= &\frac{A(E_{soma}-B)}{1-e^{(B-E_{soma})/C}}\\ 159 | \beta_{m} &= &\frac{A(B-E_{soma})}{1-e^{(E_{soma}-B)/C}} 160 | \end{eqnarray} 161 | 162 | A tricky bit in the Ekeberg et al. (1991) paper is that the $A$, $B$ 163 | and $C$ parameters above are different for $\alpha$ and $\beta$ even 164 | though there is no difference in the symbols used in the equations. 165 | 166 | The inactivation of the sodium channels is described by a similar set of equations: a differential equation giving the rate of change of the sodium channel deactivation, from Ekeberg et al. (1991) equation (5): 167 | 168 | \begin{equation} 169 | \frac{dh}{dt} = \alpha_{h}(1-h) - \beta_{h}h 170 | \end{equation} 171 | 172 | and equations specifying how $\alpha_{h}$ and $\beta_{h}$ are 173 | voltage-dependent, given in Ekeberg et al. 
(1991) equation (6): 174 | 175 | \begin{eqnarray} 176 | \alpha_{h} &= &\frac{A(B-E_{soma})}{1-e^{(E_{soma}-B)/C}}\\ 177 | \beta_{h} &= &\frac{A}{1-e^{(B-E_{soma})/C}} 178 | \end{eqnarray} 179 | 180 | Note again that although the terms $A$, $B$ and $C$ are different for 181 | $\alpha_{h}$ and $\beta_{h}$ even though they are represented by the 182 | same symbols in the equations. 183 | 184 | So in summary, for the sodium channels, we have two state variables: 185 | $(m,h)$ representing the activation ($m$) and deactivation ($h$) of 186 | the sodium channels. We have a differential equation for each, 187 | describing how the rate of change (the first derivative) of these 188 | states can be calculated: Ekeberg equations (3) and (5). Those 189 | differential equations involve parameters $(\alpha,\beta)$, one set 190 | for $m$ and a second set for $h$. Those $(\alpha,\beta)$ parameters 191 | are computed from Ekeberg equations (4) (for $m$) and (6) (for 192 | $h$). Those equations involve parameters $(A,B,C)$ that have parameter 193 | values specific to $\alpha$ and $\beta$ and $m$ and $h$ (see Table 1 194 | of Ekeberg et al., 1991). 195 | 196 | *** Potassium channels (K) 197 | 198 | The potassium channels are represted in a similar way, although in 199 | this case there is only channel activation, and no inactivation. In 200 | Ekeberg et al. (1991) the three equations (7), (8) and (9) represent 201 | the potassium channels: 202 | 203 | \begin{equation} 204 | I_{k} = (E_{k}-E_{soma})G_{k}n^{4} 205 | \end{equation} 206 | 207 | \begin{equation} 208 | \frac{dn}{dt} = \alpha_{n}(1-n) - \beta_{n}n 209 | \end{equation} 210 | 211 | where $n$ is the state variable representing the activation of 212 | potassium channels. 
As before we have expressions for $(\alpha,\beta)$ 213 | which represent the fact that the potassium channel is also 214 | voltage-gated: 215 | 216 | \begin{eqnarray} 217 | \alpha_{n} &= &\frac{A(E_{soma}-B)}{1-e^{(B-E_{soma})/C}}\\ 218 | \beta_{n} &= &\frac{A(B-E_{soma})}{1-e^{(E_{soma}-B)/C}} 219 | \end{eqnarray} 220 | 221 | Again, the parameter values for $(A,B,C)$ can be found in Ekeberg et 222 | al., (1991) Table 1. 223 | 224 | To summarize, the potassium channel has a single state variable $n$ 225 | representing the activation of the potassium channel. 226 | 227 | *** Summary 228 | 229 | We have a model now that includes four state variables: 230 | 231 | 1. $E$ representing the potential in the soma, given by differential equation (1) in Ekeberg et al., (1991) 232 | 2. $m$ representing the activation of sodium channels, Ekeberg equation (3) 233 | 3. $h$ representing the inactivation of sodium channels, Ekeberg equation (5) 234 | 4. $n$ representing the activation of potassium channels, Ekeberg equation (8) 235 | 236 | Each of the differential equations that define how to compute state 237 | derivatives, involve $(\alpha,\beta)$ terms that are given by Ekeberg 238 | equations (4) (for $m$), (6) (for $h$) and (9) (for $n$). 239 | 240 | So what we have to do in order to simulate the dynamic behaviour of 241 | this neuron over time, is simply to implement these equations in 242 | Python code, give the system some reasonable initial conditions, and 243 | simulate it over time using the =odeint()= function. 244 | 245 | ** Python code 246 | 247 | A full code listing of a model including sodium and potassium channels 248 | can be found here: [[file:code/ekeberg1.py][ekeberg1.py]]. Admittedly, this system involves more 249 | equations, and more parameters, than the other simple "toy" systems 250 | that we saw in the previous section. The fundamental ideas are the 251 | same however, so let's step through things bit by bit. 
252 | 253 | We begin by setting up all of the necessary model parameters (there 254 | are many). They are found in Ekeberg et al. (1991) Tables 1 and 2. I 255 | have chosen to do this using a Python data type called a 256 | [[http://docs.python.org/tutorial/datastructures.html#dictionaries][dictionary]]. This is a useful data type to parcel all of our parameters 257 | together. Unlike an array or list, which we would have to index using 258 | integer values (and then keep track of which one corresponded to which 259 | parameter), with a dictionary, we can index into it using string 260 | labels. 261 | 262 | #+BEGIN_SRC python 263 | # ipython --pylab 264 | 265 | # import some needed functions 266 | from scipy.integrate import odeint 267 | 268 | # set up a dictionary of parameters 269 | 270 | E_params = { 271 | 'E_leak' : -7.0e-2, 272 | 'G_leak' : 3.0e-09, 273 | 'C_m' : 3.0e-11, 274 | 'I_ext' : 0*1.0e-10 275 | } 276 | 277 | Na_params = { 278 | 'Na_E' : 5.0e-2, 279 | 'Na_G' : 1.0e-6, 280 | 'k_Na_act' : 3.0e+0, 281 | 'A_alpha_m_act' : 2.0e+5, 282 | 'B_alpha_m_act' : -4.0e-2, 283 | 'C_alpha_m_act' : 1.0e-3, 284 | 'A_beta_m_act' : 6.0e+4, 285 | 'B_beta_m_act' : -4.9e-2, 286 | 'C_beta_m_act' : 2.0e-2, 287 | 'l_Na_inact' : 1.0e+0, 288 | 'A_alpha_m_inact' : 8.0e+4, 289 | 'B_alpha_m_inact' : -4.0e-2, 290 | 'C_alpha_m_inact' : 1.0e-3, 291 | 'A_beta_m_inact' : 4.0e+2, 292 | 'B_beta_m_inact' : -3.6e-2, 293 | 'C_beta_m_inact' : 2.0e-3 294 | } 295 | 296 | K_params = { 297 | 'k_E' : -9.0e-2, 298 | 'k_G' : 2.0e-7, 299 | 'k_K' : 4.0e+0, 300 | 'A_alpha_m_act' : 2.0e+4, 301 | 'B_alpha_m_act' : -3.1e-2, 302 | 'C_alpha_m_act' : 8.0e-4, 303 | 'A_beta_m_act' : 5.0e+3, 304 | 'B_beta_m_act' : -2.8e-2, 305 | 'C_beta_m_act' : 4.0e-4 306 | } 307 | 308 | params = { 309 | 'E_params' : E_params, 310 | 'Na_params' : Na_params, 311 | 'K_params' : K_params 312 | } 313 | #+END_SRC 314 | 315 | We could have stored the four values in =E_params= in an array like this: 316 | 317 | #+BEGIN_SRC python 318 
E_params = array([-7.0e-2, 3.0e-09, 3.0e-11, 0*1.0e-10])
363 | """ 364 | 365 | E = state[0] 366 | m = state[1] 367 | h = state[2] 368 | n = state[3] 369 | 370 | Epar = params['E_params'] 371 | Na = params['Na_params'] 372 | K = params['K_params'] 373 | 374 | # external current (from "voltage clamp", other compartments, other neurons, etc) 375 | I_ext = Epar['I_ext'] 376 | 377 | # calculate Na rate functions and I_Na 378 | alpha_act = Na['A_alpha_m_act'] * (E-Na['B_alpha_m_act']) / (1.0 - exp((Na['B_alpha_m_act']-E) / Na['C_alpha_m_act'])) 379 | beta_act = Na['A_beta_m_act'] * (Na['B_beta_m_act']-E) / (1.0 - exp((E-Na['B_beta_m_act']) / Na['C_beta_m_act']) ) 380 | dmdt = ( alpha_act * (1.0 - m) ) - ( beta_act * m ) 381 | 382 | alpha_inact = Na['A_alpha_m_inact'] * (Na['B_alpha_m_inact']-E) / (1.0 - exp((E-Na['B_alpha_m_inact']) / Na['C_alpha_m_inact'])) 383 | beta_inact = Na['A_beta_m_inact'] / (1.0 + (exp((Na['B_beta_m_inact']-E) / Na['C_beta_m_inact']))) 384 | dhdt = ( alpha_inact*(1.0 - h) ) - ( beta_inact*h ) 385 | 386 | # Na-current: 387 | I_Na =(Na['Na_E']-E) * Na['Na_G'] * (m**Na['k_Na_act']) * h 388 | 389 | # calculate K rate functions and I_K 390 | alpha_kal = K['A_alpha_m_act'] * (E-K['B_alpha_m_act']) / (1.0 - exp((K['B_alpha_m_act']-E) / K['C_alpha_m_act'])) 391 | beta_kal = K['A_beta_m_act'] * (K['B_beta_m_act']-E) / (1.0 - exp((E-K['B_beta_m_act']) / K['C_beta_m_act'])) 392 | dndt = ( alpha_kal*(1.0 - n) ) - ( beta_kal*n ) 393 | I_K = (K['k_E']-E) * K['k_G'] * n**K['k_K'] 394 | 395 | # leak current 396 | I_leak = (Epar['E_leak']-E) * Epar['G_leak'] 397 | 398 | # calculate derivative of E 399 | dEdt = (I_leak + I_K + I_Na + I_ext) / Epar['C_m'] 400 | statep = [dEdt, dmdt, dhdt, dndt] 401 | 402 | return statep 403 | #+END_SRC 404 | 405 | Next we run a simulation by setting up our initial states, and a time 406 | array, and then calling =odeint()=. Note that we are injecting some 407 | external current by changing the value of the 408 | =params['E_params']['I_ext']= entry in the =params= dictionary. 
409 | 410 | #+BEGIN_SRC python 411 | # simulate 412 | 413 | # set initial states and time vector 414 | state0 = [-70e-03, 0, 1, 0] 415 | t = arange(0, 0.2, 0.001) 416 | 417 | # let's inject some external current 418 | params['E_params']['I_ext'] = 1.0e-10 419 | 420 | # run simulation 421 | state = odeint(neuron, state0, t, args=(params,)) 422 | #+END_SRC 423 | 424 | Finally, we plot the results: 425 | 426 | #+BEGIN_SRC python 427 | # plot the results 428 | 429 | figure(figsize=(8,12)) 430 | subplot(4,1,1) 431 | plot(t, state[:,0]) 432 | title('membrane potential') 433 | subplot(4,1,2) 434 | plot(t, state[:,1]) 435 | title('Na2+ channel activation') 436 | subplot(4,1,3) 437 | plot(t, state[:,2]) 438 | title('Na2+ channel inactivation') 439 | subplot(4,1,4) 440 | plot(t, state[:,3]) 441 | title('K+ channel activation') 442 | xlabel('TIME (sec)') 443 | #+END_SRC 444 | 445 | Here is what you should see: 446 | 447 | #+ATTR_HTML: :width 400px :align center 448 | #+CAPTION: Spiking neuron simulation based on Ekeberg et al., 1991 449 | [[file:figs/ekeberg1.png]] 450 | 451 | * Things to try 452 | 453 | 1. alter the [[file:code/ekeberg1.py][ekeberg1.py]] code so that the modelled neuron only has the 454 | leakage current and external current. In other words, comment out 455 | the terms related to sodium and potassium channels. Run a 456 | simulation with an initial membrane potential of -70mv and an 457 | external current of 0.0mv. What happens and why? 458 | 2. Change the external current to 1.0e-10 and re-run the 459 | simulation. What happens and why? 460 | 3. Add in the terms related to the sodium channel (activation and 461 | deactivation). Run a simulation with external current of 1.0e-10 462 | and initial states =[-70e-03, 0, 1]=. What happens and why? 463 | 4. Add in the terms related to the potassium channel. Run a simulation 464 | with external current of 1.0e-10 and initial states =[-70e-03, 0, 465 | 1, 0]=. What happens and why? 466 | 5. 
Play with the external current level (increase it slightly, 467 | decrease it slightly, etc). What is the effect on the behaviour of 468 | the neuron? 469 | 6. What is the minimum amount of external current necessary to 470 | generate an action potential? Why? 471 | 472 | * Next steps 473 | 474 | Next we will be looking at models of motor control. We will be using 475 | human arm movement as the model system. We will first look at 476 | kinematic models of one and two-joint arms, so we can talk about the 477 | problem of coordinate transformations between hand-space and 478 | joint-space, and the non-linear geometrical transformations that must 479 | take place. After that we will move on to talking about models of 480 | muscle, force production, and limb dynamics, with an eye towards a 481 | modelling the neural control of arm movements such as reaching and 482 | pointing. 483 | 484 | [ [[file:4_Computational_Motor_Control_Kinematics.html][Computational Motor Control: Kinematics]] ] 485 | -------------------------------------------------------------------------------- /org/index.org: -------------------------------------------------------------------------------- 1 | #+STARTUP: showall 2 | 3 | #+TITLE: Computational Modelling in Neuroscience 4 | #+AUTHOR: Paul Gribble 5 | #+EMAIL: paul@gribblelab.org 6 | #+DATE: Fall 2012 7 | #+OPTIONS: toc:nil 8 | #+LINK_UP: http://www.gribblelab.org/teaching.html 9 | #+LINK_HOME: http://www.gribblelab.org/ 10 | 11 | ----- 12 | * Administrivia 13 | - This is the homepage for Neuroscience 9520: Computational Modelling in Neuroscience 14 | - Class will be Mondays, 2:00pm - 3:30pm, and Thursdays, 11:30am - 15 | 1:00pm, in NSC 245A 16 | - The instructor is Paul Gribble (email: paul [at] gribblelab [dot] org) 17 | - course [[file:syllabus.pdf][syllabus.pdf]] 18 | - We will use several chapters from a book on computational motor 19 | control: "The Computational Neurobiology of Reaching and Pointing" 20 | by Reza Shadmehr and Steven P. 
Wise, MIT Press, 2005. [ [[http://goo.gl/QKykK][google books 21 | link]] ] [ [[file:readings/SW_00_cover_info.pdf][cover info]] ] 22 | 23 | ----- 24 | * Code 25 | 26 | When there is a python script linked in the notes, you will get a 27 | permissions error when clicking on it. I haven't figured out how to 28 | avoid this yet. In the meantime, all code can be downloaded here in 29 | this tarred gzipped archive: [[file:code.tgz][code.tgz]] 30 | 31 | ----- 32 | * Course Notes 33 | 34 | 0. [@0] [[file:0_Setup_Your_Computer.html][Setup Your Computer]] 35 | 1. [[file:1_Dynamical_Systems.html][Dynamical Systems]] 36 | 2. [[file:2_Modelling_Dynamical_Systems.html][Modelling Dynamical Systems]] 37 | 3. [[file:3_Modelling_Action_Potentials.html][Modelling Action Potentials]] 38 | 4. [[file:4_Computational_Motor_Control_Kinematics.html][Computational Motor Control: Kinematics]] 39 | 5. [[file:5_Computational_Motor_Control_Dynamics.html][Computational Motor Control: Dynamics]] 40 | 6. [[file:6_Computational_Motor_Control_Muscle_Models.html][Computational Motor Control: Muscle Models]] 41 | 42 | ----- 43 | * Schedule & Topics 44 | 45 | ----- 46 | ** Sep 10: Introductions & course schedule 47 | - lecture slides: [[file:lecture1.pdf][lecture1.pdf]] 48 | - please read this: [[file:readings/Trappenberg1.pdf][Trappenberg1.pdf]] 49 | - please read this: [[file:readings/OM1.pdf][OM1.pdf]] (1st Ed.) 
or [[http://grey.colorado.edu/CompCogNeuro/index.php?title=CCNBook/Intro][CCNBook/Intro]] 50 | - your first assignment: [[file:assignment1.pdf][assignment1.pdf]] (due Sep 23) 51 | 52 | ** Sep 13: Getting your computer set up with Python & scientific libraries 53 | - course notes [[file:0_Setup_Your_Computer.html][0: Setup Your Computer]] 54 | 55 | ----- 56 | ** Sep 17 : Modelling Dynamical Systems I 57 | - course notes [[file:1_Dynamical_Systems.html][1: Dynamical Systems]] 58 | - course notes [[file:2_Modelling_Dynamical_Systems.html][2: Modelling Dynamical Systems]] 59 | 60 | ** Sep 20: Modelling Dynamical Systems II 61 | - more on dynamical systems 62 | - [[file:assignment2.pdf][assignment2.pdf]] (due Sep 30) 63 | - code example for using optimization: [[file:code/optimizer_example.py][optimizer\_example.py]] 64 | - cool demo of [[http://www.youtube.com/watch?v=Klw7L0OZbFQ][synchronization of metronomes]] (and [[http://www.youtube.com/watch?v=kqFc4wriBvE][japanese version]]), 65 | plus [[https://github.com/paulgribble/metronomes][python code]] for simulating it 66 | 67 | ----- 68 | ** Sep 24, 27 : no class (Paul away) 69 | 70 | ----- 71 | ** Oct 1, 4 : Modelling Action Potentials - Hodgkin-Huxley models 72 | - [[file:readings/ekeberg1991.pdf][Ekeberg et al. 
(1991)]] (please read this) 73 | - optional: see the original Hodgkin & Huxley 1952 paper reprinted in 74 | 1990: [[file:readings/HH1990.pdf][Hodgkin & Huxley 1952 (1990)]] 75 | - optional: a chapter on [[file:readings/spiking_neuron_models.pdf][Spiking Neuron Models]] for a general overview 76 | of the field 77 | - course notes [[file:3_Modelling_Action_Potentials.html][3: Modelling Action Potentials]] 78 | - refresher slides on [[file:readings/action_potentials.pdf][action potentials]] 79 | - YouTube videos on [[http://www.youtube.com/watch?v=7EyhsOewnH4][The Action Potential]] and [[http://www.youtube.com/watch?v=LXdTg9jZYvs][Voltage Gated Channels 80 | and the Action Potential]] 81 | - [[file:assignment3.pdf][assignment3.pdf]] (due Oct 7) [[file:code/assignment3_params.py][assignment3\_params.py]] 82 | - [[file:code/assignment2_sol.py][assignment2\_sol.py]] 83 | - [[file:code/assignment3_sol.py][assignment3\_sol.py]] 84 | 85 | ----- 86 | ** Oct 8, 11 : no class (thanksgiving, SFN) 87 | 88 | ----- 89 | ** Oct 15, 18 : no class (SFN) 90 | 91 | ----- 92 | ** Oct 22, 25 : Computational Motor Control: Kinematics 93 | - course notes: [[file:4_Computational_Motor_Control_Kinematics.html][4: Computational Motor Control: Kinematics]] 94 | - read *at least two* of the papers listed in the course notes 95 | - read Shadmehr & Wise book, [[file:readings/SW_18.pdf][Chapter 18]] and [[file:readings/SW_19.pdf][Chapter 19]] 96 | - [[file:assignment4.pdf][assignment4.pdf]] 97 | - [[file:code/minjerk.py][minjerk.py]] 98 | 99 | ----- 100 | ** Oct 29, Nov 1 : Computational Motor Control: Dynamics 101 | - [[file:code/assignment4_sol.py][assignment4\_sol.py]] 102 | - course notes: [[file:5_Computational_Motor_Control_Dynamics.html][5: Computational Motor Control: Dynamics]] 103 | - read *at least two* of the papers listed in the course notes 104 | - read Shadmehr & Wise book, [[file:readings/SW_20.pdf][Chapter 20]] and [[file:readings/SW_21.pdf][Chapter 21]] (and 
[[file:readings/SW_22.pdf][Chapter 22]] 105 | if you are interested in the topic) 106 | - [[file:code/twojointarm.py][twojointarm.py]] utility functions and Python code for doing inverse 107 | and forward dynamics of a two-joint arm in a horizontal plane (no 108 | gravity) with external driving torques, and animating the resulting 109 | arm motion 110 | - [[file:code/twojointarm_game.py][twojointarm\_game.py]] : try your hand at this game in which you have 111 | to control a two-joint arm to hit as many targets as you can before 112 | time runs out. Use the [d,f,j,k] keys to control [sf,se,ef,ee] 113 | joint torques (s=shoulder, e=elbow, f=flexor, e=extensor). Spacebar 114 | will "reset" the arm to its home position, handy if your arm starts 115 | spinning out of control (though each time you use spacebar your 116 | score will be decremented by one). Start the game by typing =python 117 | twojointarm_game.py= at the command line. At the end of the game 118 | your score will be printed out on the command line. 119 | - [[file:assignment5.pdf][assignment5.pdf]] 120 | ----- 121 | 122 | ** Nov 5, 8 : Computational Motor Control: Muscle Models 123 | - [[file:code/assignment5_sol.py][assignment5\_sol.py]] and [[file:figs/assignment5_figures.pdf][assignment5\_figures.pdf]] : coming soon ... 124 | - read Shadmehr & Wise book, [[file:readings/SW_07.pdf][Chapter 7]] and [[file:readings/SW_08.pdf][Chapter 8]] and supplementary 125 | documents: [[http://www.shadmehrlab.org/book/musclemodel.pdf][musclemodel.pdf]] 126 | - course notes: [[file:6_Computational_Motor_Control_Muscle_Models.html][6: Computational Motor Control: Muscle Models]] 127 | - assignment: catch up on readings. 128 | - *note* no class on Thurs Nov 8. 
129 | 130 | ----- 131 | ** Nov 12, 15 : Computational Models of Learning part 1 132 | - some lecture slides: [[file:readings/nn_slides.pdf][nn\_slides.pdf]] 133 | - Readings: 134 | - [[file:readings/Jain_1996_NNetTutorial.pdf][Artificial Neural Networks: A Tutorial]] Jain & Mao, 1996 135 | - [[file:readings/Trappenberg5.pdf][Trappenberg5.pdf]], [[file:readings/Trappenberg6.pdf][Trappenberg6.pdf]], [[file:readings/Robinson92.pdf][Robinson92.pdf]], [[file:readings/Mitchell4.pdf][Mitchell4.pdf]] 136 | (4.8 optional) 137 | - Optional: [[file:readings/Haykin0.pdf][Haykin0.pdf]], [[file:readings/Haykin1.pdf][Haykin1.pdf]], [[file:readings/Haykin4.pdf][Haykin4.pdf]] 138 | - tutorial: [[http://galaxy.agh.edu.pl/~vlsi/AI/backp_t_en/backprop.html][Principles of training multi-layer neural network using 139 | backpropagation]] 140 | - A classic reference: McClelland & Rumelhart PDP books [[file:readings/PDP.pdf][PDP.pdf]], 141 | [[file:readings/PDP_Handbook.pdf][PDP\_Handbook.pdf]] 142 | - [[http://www.cs.toronto.edu/~hinton/absps/sciam93.pdf][Simulating Brain Damage]] 143 | - for a really nice overview of all sorts of NNs, see: [[https://www.coursera.org/course/neuralnets][Neural Networks 144 | for Machine Learning]] (Geoff Hinton, Univ Toronto, Coursera online 145 | course) 146 | - for thoughts about motor learning, read Shadmehr & Wise book, 147 | [[file:readings/SW_24.pdf][Chapter 24]] 148 | - Software: [[http://pybrain.org/][PyBrain]] 149 | - there is also this: [[http://leenissen.dk/fann/wp/help/installing-fann/][FANN: Fast Artificial Neural Network Library]] 150 | - code examples: 151 | - [[file:code/xor_aima.py][xor\_aima.py]] from [[http://aima.cs.berkeley.edu/][Norvig & Russell's book]] 152 | - [[file:code/xor.py][xor.py]] my code, vectorized numpy matrices 153 | - [[file:code/xor_plot.py][xor\_plot.py]] same as above, plots during training to visualize network performance 154 | - [[file:code/xor_cg.py][xor\_cg.py]] my code, uses backprop to compute gradients and 
conjugate gradient descent to optimize weights 155 | - [[http://yann.lecun.com/exdb/mnist/][MNIST Database of handwritten digits]] 156 | - [[http://arxiv.org/abs/1003.0358][Deep Big Simple Neural Nets Excel on Handwritten Digit Recognition]] 157 | - [[file:readings/NeuralNetworks2.pdf][NeuralNetworks2.pdf]] slides 158 | 159 | ----- 160 | ** Nov 19, 22 : Computational Models of Learning part 2 161 | - [[file:code/assignment6.py][assignment6.py]] and [[file:code/traindata.pickle][traindata.pickle]] 162 | - [[file:readings/NeuralNetworks3.pdf][NeuralNetworks3.pdf]] slides 163 | - more code demos of feedforward networks 164 | - handwritten digit example [[file:readings/mnist.tgz][mnist.tgz]] 165 | - vowel classification example [[file:readings/PetersonBarneyVowels.tgz][PetersonBarneyVowels.tgz]] 166 | - facial expression example [[file:readings/RadboudFaces.tgz][RadboudFaces.tgz]] 167 | - recurrent neural networks ([[http://en.wikipedia.org/wiki/Recurrent_neural_network][wiki]]) 168 | - [[http://130.102.79.1/~mikael/papers/rn_dallas.pdf][A guide to recurrent neural networks and backpropagation]] (M. Boden) 169 | - [[http://www.bcl.hamilton.ie/~barak/papers/CMU-CS-90-196.pdf][Dynamic Recurrent Neural Networks]] (B.A. Pearlmutter) 170 | - [[http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf][A tutorial on training recurrent neural networks]] (H. Jaeger) 171 | - echo state networks [[http://www.scholarpedia.org/article/Echo_state_network][scholarpedia]] 172 | - Buonomano D. (2009) Harnessing Chaos in Recurrent Neural 173 | Networks. Neuron 63(4):423-425. 174 | - Sussillo, D., & Abbott, L. F. (2009). Generating coherent 175 | patterns of activity from chaotic neural networks. Neuron, 63(4), 176 | 544-557. 
177 | - papers: 178 | - [[file:readings/Wada_1993_NeuralNetworks.pdf][Wada, 1993]] A Neural Network Model for Arm Trajectory Formation 179 | Using Forward and Inverse Dynamics Models 180 | - [[file:readings/Lukashin_1993_BiolCybern.pdf][Lukashin, 1993]] A dynamical neural network model for motor 181 | cortical activity during movement: population coding of movement 182 | trajectories 183 | - [[file:readings/Pearlmutter_1989_NeuralComputation.pdf][Pearlmutter, 1989]] Learning State Space Trajectories in Recurrent 184 | Neural Networks 185 | - intro to unsupervised learning 186 | - autoencoders [[http://en.wikipedia.org/wiki/Autoencoder][wiki]] 187 | - Hopfield nets [[http://en.wikipedia.org/wiki/Hopfield_net][wiki]] 188 | - Boltzmann machines [[http://en.wikipedia.org/wiki/Boltzmann_machine][wiki]] 189 | - Restricted Boltzmann machines [[http://en.wikipedia.org/wiki/Restricted_Boltzmann_machine][wiki]] 190 | - multi-layer generative networks 191 | - [[file:readings/Hinton2006Montreal.pdf][Hinton2006Montreal.pdf]] 192 | - [[file:readings/Hinton2007tics.pdf][Hinton2007tics.pdf]] 193 | - video: [[http://www.youtube.com/watch?v%3DAyzOUbkUf3M][The Next Generation of Neural Networks]] 194 | - [[http://www.cs.toronto.edu/~hinton/adi/index.htm][deep network digit demo]] 195 | - Mark Schmidt's [[http://www.di.ens.fr/~mschmidt/Software/minFunc.html][minFunc]] MATLAB routines for unconstrained optimization 196 | 197 | ----- 198 | ** Nov 26, 29 : Computational Models of Learning part 3 199 | - [[file:readings/NeuralNetworks4.pdf][NeuralNetworks4.pdf]] slides 200 | - self-organizing maps [[http://en.wikipedia.org/wiki/Kohonen_map][wiki]], [[file:readings/AflaloGraziano2006.pdf][AflaloGraziano2006.pdf]] 201 | - [[file:code/hopfield.tgz][hopfield.tgz]] MATLAB demo code 202 | - [[file:code/som1.m][som1.m]] MATLAB demo code 203 | - autoencoders & deep belief nets 204 | - [[https://code.google.com/p/matrbm/][RBM & DBN MATLAB code]] 205 | - reinforcement learning 
[[http://en.wikipedia.org/wiki/Reinforcement_learning][wiki]], [[http://webdocs.cs.ualberta.ca/~sutton/book/ebook/the-book.html][Sutton & Barto book]] 206 | 207 | ----- 208 | ** Dec 3 : student presentations 209 | - each of the 12 students registered in the course will present one 210 | paper from the literature in their research area in which a 211 | computational modelling approach was used to address a question 212 | about how the brain works. 213 | - presentations are limited to *7 minutes each*! Note: this is 214 | difficult to pull off, you will have to practice your talk out 215 | loud. Also be careful to choose your slides carefully. There will be 216 | a timer and a loud gong. 217 | - Question period will be limited to 1 to 2 minutes per talk. 218 | - The order of talks will be alphabetical by your last name. A first, 219 | Z last. We will need to start at 2pm sharp. 220 | 221 | - Each student giving a talk must also submit a short essay on their 222 | chosen paper. Your essay should follow the "Content and Format" 223 | style of the [[http://www.jneurosci.org/site/misc/ifa_features.xhtml]["Journal Club"]] feature in the Journal of 224 | Neuroscience. You can choose any paper you want, it doesn't have to 225 | be a J. Neurosci. paper and it doesn't have to have been published 226 | within the past 2 months. 227 | - Essays are due Sunday Dec 9th, 2012, no later than 11:59pm 228 | EST. Please send your essay to me by email, as a single .pdf 229 | file. The filename should be =_essay.pdf= 230 | (e.g. =gribble\_essay.pdf=). 
231 | 232 | ----- 233 | * Links 234 | 235 | ** Python Introductory Tutorials 236 | 237 | - [[http://openbookproject.net/thinkcs/python/english2e/][How to Think Like a Computer Scientist: Learning with Python]] 238 | - [[http://learnpythonthehardway.org/book/][Learn Python The Hard Way]] 239 | - [[http://www.diveintopython.net/][Dive Into Python]] 240 | - [[file:readings/SciCompPython.pdf][Introduction to Scientific Computing with Python]] 241 | - [[http://www.pythontutor.com/][Online Python Tutor]] 242 | - [[https://github.com/profjsb/python-bootcamp][Python Bootcamp]] (github) 243 | - [[http://www.youtube.com/playlist?list=PLRdRinj2mDqsnazUsGeFq8Fi-2lL77vFF][Python Bootcamp August 2012]] (YouTube playlist) 244 | - [[http://register.pythonbootcamp.info/agenda][Python Bootcamp August 2012]] (list of topics & downloads) 245 | 246 | ** Numpy / SciPy / Matplotlib 247 | 248 | - [[http://youtu.be/vWkb7VahaXQ][Using Numpy Arrays to Perform Mathematical Operations in Python]] 249 | (youtube video) 250 | - [[http://scipy-lectures.github.com/][Python Scientific Lecture Notes]] 251 | - [[http://www.scipy.org/Plotting_Tutorial][SciPy Plotting Tutorial]] 252 | - [[http://docs.scipy.org/doc/][Numpy and Scipy Documentation]] 253 | - [[http://www.scipy.org/Tentative_Numpy_Tutorial][Numpy Tutorial]] 254 | - [[http://scipy.org/Cookbook][SciPy Cookbook]] 255 | - [[http://scipy.org/Getting_Started][SciPy Getting Started]] 256 | - [[http://matplotlib.org/gallery.html][matplotlib gallery]] 257 | 258 | ** iPython 259 | 260 | - [[http://ipython.org/videos.html][iPython videos]] 261 | - [[http://youtu.be/2G5YTlheCbw][iPython in-depth: high productivity interactive and parallel python]] 262 | (youtube video) iPython Notebook stuff starts at about 1:15:40, and 263 | parallel programming stuff starts at around 2:13:00 264 | - [[http://nbviewer.ipython.org/][IPython Notebook Viewer]] 265 | 266 | ** Machine Learning Resources 267 | - [[http://scikit-learn.org/][scikit-learn: machine learning in 
Python]] 268 | - [[http://yann.lecun.com/exdb/mnist/][The MNIST Database of handwritten digits]] 269 | - [[http://archive.ics.uci.edu/ml/][UCI Machine Learning Repository]] 270 | - [[http://cs.nyu.edu/~roweis/data.html][Some datasets for machine learning: digits, faces, text, speech]] 271 | - [[http://www.dacya.ucm.es/jam/download.htm][Software tools for reinforcement learning, neural networks and robotics]] 272 | - [[http://kasrl.org/jaffe.html][The Japanese Female Facial Expression (JAFFE) Database]] 273 | - [[http://www.socsci.ru.nl:8180/RaFD2/RaFD?p=main][Radboud Faces Database]] 274 | - [[http://mplab.ucsd.edu/wordpress/?page_id=48][Machine Perception Laboratory Demos]] 275 | - [[http://mplab.ucsd.edu/grants/project1/free-software/MPTWebSite/introduction.html][Machine Perception Toolbox]] 276 | - [[http://www.cs.toronto.edu/~hinton/][Geoff Hinton's Webpage]] (with lots of demos, tutorials, talks and 277 | papers on Neural Networks) 278 | - [[http://www.cs.toronto.edu/~hinton/csc321/][Introduction to Neural Networks and Machine Learning]] (U of T course 279 | by Geoff Hinton) 280 | - [[http://www.cnbc.cmu.edu/~mharm/research/tools/mikenet/][MikeNet Neural Network Simulator]] (C library) 281 | - [[http://deeplearning.net/][Deep Learning]] resource site for deep belief nets etc 282 | - [[http://www.iro.umontreal.ca/~bengioy/papers/ftml_book.pdf][Learning Deep Architectures for AI]] (book) by Yoshua Bengio 283 | - [[http://www.cs.cmu.edu/afs/cs/academic/class/15782-f06/matlab/][MATLAB neural network code]] demos by Dave Touretzky 284 | 285 | ----- 286 | 287 | * These notes 288 | 289 | These notes can be viewed (and downloaded) in their entirety from a 290 | [[https://github.com][github]] repository here: [[https://github.com/paulgribble/CompNeuro][CompNeuro]] 291 | 292 | -------------------------------------------------------------------------------- /org/mystyle.css: -------------------------------------------------------------------------------- 1 | html { 2 | 
font-family: sans-serif; 3 | font-weight:100; 4 | font-size: 11pt; 5 | height: 100%; 6 | width: 100%; 7 | margin: 0; 8 | padding: 0; 9 | } 10 | 11 | body { 12 | text-align: justify; 13 | padding-left: 8%; 14 | padding-right: 8%; 15 | padding-bottom: 5%; 16 | padding-top: 2%; 17 | line-height: 150%; 18 | } 19 | 20 | body a { 21 | color: #2580a2; 22 | text-decoration: none; 23 | } 24 | 25 | h1 { 26 | font-weight: normal; 27 | } 28 | 29 | h2 { 30 | font-weight: normal; 31 | } 32 | 33 | h3 { 34 | font-weight: normal; 35 | } 36 | 37 | pre { 38 | line-height: 110%; 39 | } 40 | 41 | div#table-of-contents { 42 | line-height: 120%; 43 | } 44 | 45 | div#postamble { 46 | line-height: 120%; 47 | } 48 | 49 | div#bibliography { 50 | line-height: 120%; 51 | } 52 | 53 | ol li { 54 | line-height: 120%; 55 | padding-bottom: 5pt; 56 | } 57 | 58 | ul li { 59 | line-height: 120%; 60 | padding-bottom: 3pt; 61 | } 62 | -------------------------------------------------------------------------------- /org/refs.bib: -------------------------------------------------------------------------------- 1 | @Article{HH1990, 2 | Author="Hodgkin, A. L. and Huxley, A. F. and Hodgkin, A. L. and Huxley, A. F. ", 3 | Title="{{A} quantitative description of membrane current and its application to conduction and excitation in nerve. 1952}", 4 | Journal="Bull. Math. Biol.", 5 | Year="1990", 6 | Volume="52", 7 | Number="1-2", 8 | Pages="25--71", 9 | } 10 | 11 | % 12991237 12 | @Article{HH1952, 13 | Author="Hodgkin, A. L. and Huxley, A. F. ", 14 | Title="{{A} quantitative description of membrane current and its application to conduction and excitation in nerve}", 15 | Journal="J. Physiol. (Lond.)", 16 | Year="1952", 17 | Volume="117", 18 | Number="4", 19 | Pages="500--544", 20 | Month="Aug", 21 | } 22 | 23 | @Article{ekeberg1991, 24 | Author="Ekeberg, O. and Wallen, P. and Lansner, A. and Traven, H. and Brodin, L. and Grillner, S. 
", 25 | Title="{{A} computer based model for realistic simulations of neural networks. {I}. {T}he single neuron and synaptic interaction}", 26 | Journal="Biol Cybern", 27 | Year="1991", 28 | Volume="65", 29 | Number="2", 30 | Pages="81--90" 31 | } 32 | 33 | @book{kandel2000principles, 34 | title={Principles of neural science}, 35 | author={Kandel, E.R. and Schwartz, J.H. and Jessell, T.M. and others}, 36 | volume={4}, 37 | year={2000}, 38 | publisher={McGraw-Hill New York} 39 | } 40 | 41 | -------------------------------------------------------------------------------- /org/refs.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 16 | 17 | 18 | 19 | 20 | 23 | 31 | 32 | 33 | 34 | 35 | 38 | 46 | 47 | 48 | 49 | 50 | 53 | 60 | 61 |
6 | [1] 7 | 9 | A. L. Hodgkin and A. F. Huxley. 10 | A quantitative description of membrane current and its application 11 | to conduction and excitation in nerve. 12 | J. Physiol. (Lond.), 117(4):500--544, Aug 1952. 13 | [ bib ] 14 | 15 |
21 | [2] 22 | 24 | A. L. Hodgkin and A. F. Huxley. 25 | A quantitative description of membrane current and its application 26 | to conduction and excitation in nerve. 1952. 27 | Bull. Math. Biol., 52(1-2):25--71, 1990. 28 | [ bib ] 29 | 30 | 
36 | [3] 37 | 39 | O. Ekeberg, P. Wallen, A. Lansner, H. Traven, L. Brodin, and S. Grillner. 40 | A computer based model for realistic simulations of neural 41 | networks. I. The single neuron and synaptic interaction. 42 | Biol Cybern, 65(2):81--90, 1991. 43 | [ bib ] 44 | 45 |
51 | [4] 52 | 54 | E.R. Kandel, J.H. Schwartz, T.M. Jessell, et al. 55 | Principles of neural science, volume 4. 56 | McGraw-Hill New York, 2000. 57 | [ bib ] 58 | 59 |
-------------------------------------------------------------------------------- /org/refs_bib.html: -------------------------------------------------------------------------------- 1 |

refs.bib

 2 | @article{ekeberg1991,
 3 |   author = {Ekeberg, O.  and Wallen, P.  and Lansner, A.  and Traven, H.  and Brodin, L.  and Grillner, S. },
 4 |   title = {{{A} computer based model for realistic simulations of neural networks. {I}. {T}he single neuron and synaptic interaction}},
 5 |   journal = {Biol Cybern},
 6 |   year = {1991},
 7 |   volume = {65},
 8 |   number = {2},
 9 |   pages = {81--90}
10 | }
11 | 
12 | 13 |
14 | @book{kandel2000principles,
15 |   title = {Principles of neural science},
16 |   author = {Kandel, E.R. and Schwartz, J.H. and Jessell, T.M. and others},
17 |   volume = {4},
18 |   year = {2000},
19 |   publisher = {McGraw-Hill New York}
20 | }
21 | 
22 | 23 | --------------------------------------------------------------------------------