├── matlab ├── sahani_latent_factor │ ├── Code │ │ ├── startup.m │ │ ├── util │ │ │ ├── logdet.m │ │ │ ├── optlistwarn.m │ │ │ ├── plottile.m │ │ │ ├── fig.m │ │ │ ├── nestplotpos.m │ │ │ └── nestable.m │ │ ├── gpfa │ │ │ ├── util │ │ │ │ ├── precomp │ │ │ │ │ ├── makePautoSumFast.mexa64 │ │ │ │ │ ├── makePautoSumFast.mexglx │ │ │ │ │ ├── makePautoSumFast.mexmaci │ │ │ │ │ ├── makePautoSumFast.mexw32 │ │ │ │ │ └── makePautoSumFast.c │ │ │ │ ├── invToeplitz │ │ │ │ │ ├── invToeplitzFastZohar.mexa64 │ │ │ │ │ ├── invToeplitzFastZohar.mexglx │ │ │ │ │ ├── invToeplitzFastZohar.mexw32 │ │ │ │ │ ├── invToeplitzFastZohar.mexmaci │ │ │ │ │ └── invToeplitzFast.m │ │ │ │ ├── logdet.m │ │ │ │ ├── segmentByTrial.m │ │ │ │ ├── parseFilename.m │ │ │ │ ├── orthogonalize.m │ │ │ │ ├── fillPerSymm.m │ │ │ │ ├── smoother.m │ │ │ │ ├── getSeq.m │ │ │ │ ├── invPerSymm.m │ │ │ │ ├── cutTrials.m │ │ │ │ ├── getTrajNewTrials.m │ │ │ │ ├── assignopts.m │ │ │ │ └── postprocess.m │ │ │ ├── core_twostage │ │ │ │ ├── cosmoother_pca.m │ │ │ │ ├── fastfa_estep.m │ │ │ │ ├── cosmoother_fa.m │ │ │ │ ├── fastfa.m │ │ │ │ └── twoStageEngine.m │ │ │ ├── core_gpfa │ │ │ │ ├── grad_betgam.m │ │ │ │ ├── cosmoother_gpfa_viaOrth.m │ │ │ │ ├── make_K_big.m │ │ │ │ ├── learnGPparams.m │ │ │ │ └── gpfaEngine.m │ │ │ ├── plotting │ │ │ │ ├── plot3D.m │ │ │ │ └── plotEachDimVsTime.m │ │ │ └── startup.m │ │ ├── fa_infer.m │ │ ├── GenerateData.m │ │ ├── fa_em.m │ │ ├── ssm_em.m │ │ └── ssm_kalman.m │ └── SOLN │ │ ├── startup.m │ │ ├── util │ │ ├── logdet.m │ │ ├── optlistwarn.m │ │ ├── plottile.m │ │ ├── fig.m │ │ ├── nestplotpos.m │ │ └── nestable.m │ │ ├── gpfa │ │ ├── util │ │ │ ├── precomp │ │ │ │ ├── makePautoSumFast.mexa64 │ │ │ │ ├── makePautoSumFast.mexglx │ │ │ │ ├── makePautoSumFast.mexmaci │ │ │ │ ├── makePautoSumFast.mexw32 │ │ │ │ └── makePautoSumFast.c │ │ │ ├── invToeplitz │ │ │ │ ├── invToeplitzFastZohar.mexa64 │ │ │ │ ├── invToeplitzFastZohar.mexglx │ │ │ │ ├── invToeplitzFastZohar.mexw32 │ │ │ │ ├── invToeplitzFastZohar.mexmaci │ │ │ │ └── invToeplitzFast.m │ │ │ ├── logdet.m │ │ │ ├── segmentByTrial.m │ │ │ ├── parseFilename.m │ │ │ ├── orthogonalize.m │ │ │ ├── fillPerSymm.m │ │ │ ├── smoother.m │ │ │ ├── getSeq.m │ │ │ ├── invPerSymm.m │ │ │ ├── cutTrials.m │ │ │ ├── getTrajNewTrials.m │ │ │ ├── assignopts.m │ │ │ └── postprocess.m │ │ ├── core_twostage │ │ │ ├── cosmoother_pca.m │ │ │ ├── fastfa_estep.m │ │ │ ├── cosmoother_fa.m │ │ │ ├── fastfa.m │ │ │ └── twoStageEngine.m │ │ ├── core_gpfa │ │ │ ├── grad_betgam.m │ │ │ ├── cosmoother_gpfa_viaOrth.m │ │ │ ├── make_K_big.m │ │ │ ├── learnGPparams.m │ │ │ └── gpfaEngine.m │ │ ├── plotting │ │ │ ├── plot3D.m │ │ │ └── plotEachDimVsTime.m │ │ └── startup.m │ │ ├── fa_infer.m │ │ ├── GenerateData.m │ │ ├── fa_em.m │ │ ├── ssm_em.m │ │ └── ssm_kalman.m ├── konrad_tuning_curves │ ├── poplatent_soln.m │ ├── Solutions │ │ ├── evaluateScoreCosExp.m │ │ └── FullKonradTutorial.m │ ├── MyTuningCurve.m │ ├── myEvaluateScoreCosExp.m │ ├── MyBootstrapErrorBar.m │ ├── MyFitTuningCurve.m │ └── MyMachineLearningPredictions.m ├── kass_statistical_paradigm │ ├── firstlab_with_answers.mlx │ └── firstlab_without_answers.mlx ├── basset_connectivity │ ├── GenLouvain2 │ │ ├── private │ │ │ ├── group_handler.mexa64 │ │ │ ├── group_handler.mexw64 │ │ │ ├── group_handler.mexmaci64 │ │ │ ├── metanetwork_reduce.mexa64 │ │ │ ├── metanetwork_reduce.mexw64 │ │ │ └── metanetwork_reduce.mexmaci64 │ │ └── MEX_SRC │ │ │ ├── compile_mex.m │ │ │ ├── group_handler.h │ │ │ ├── group_index.h │ │ │ ├── 
matlab_matrix │ │ │ └── matlab_matrix.h │ │ │ ├── metanetwork_reduce.cpp │ │ │ └── group_index.cpp │ ├── fcn_order_partition.m │ ├── fcn_grid_communities.m │ ├── clustering_coef_wu.m │ └── fcn_plot_blocks.m └── add_all_paths.m ├── python ├── src │ └── opencourse │ │ ├── __init__.py │ │ ├── modeling_data_funcs.py │ │ └── io.py └── setup.ipynb ├── .gitignore └── README.md /matlab/sahani_latent_factor/Code/startup.m: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/startup.m: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/poplatent_soln.m: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/src/opencourse/__init__.py: -------------------------------------------------------------------------------- 1 | from .io import *  # package-relative import; a bare 'from io import *' would pull in the stdlib io module instead of opencourse/io.py 2 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/logdet.m: -------------------------------------------------------------------------------- 1 | function D = logdet(A) 2 | % logdet - fast robust log(det()) for positive definite A, via Cholesky 3 | D = 2*sum(log(diag(chol(A)))); 4 | 5 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/logdet.m: -------------------------------------------------------------------------------- 1 | function D = logdet(A) 2 | % logdet - fast robust log(det()) for positive definite A, via Cholesky 3 | D = 2*sum(log(diag(chol(A)))); 4 | 5 | -------------------------------------------------------------------------------- /matlab/kass_statistical_paradigm/firstlab_with_answers.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/kass_statistical_paradigm/firstlab_with_answers.mlx -------------------------------------------------------------------------------- /matlab/kass_statistical_paradigm/firstlab_without_answers.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/kass_statistical_paradigm/firstlab_without_answers.mlx -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/group_handler.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/group_handler.mexa64 -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/group_handler.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/group_handler.mexw64 -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/group_handler.mexmaci64: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/group_handler.mexmaci64 -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexa64 -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexw64 -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/basset_connectivity/GenLouvain2/private/metanetwork_reduce.mexmaci64 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexa64 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexglx -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexmaci: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexmaci -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.mexw32 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexa64 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexglx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexglx -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexmaci: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexmaci -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.mexw32 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexa64 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexglx -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexw32 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexa64 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexglx -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexw32 -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexmaci: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFastZohar.mexmaci -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexmaci: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuro-data-science/neuro_data_science/HEAD/matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFastZohar.mexmaci -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/Solutions/evaluateScoreCosExp.m: -------------------------------------------------------------------------------- 1 | function [ score ] = evaluateScoreCosExp(paras, spikes, angle) 2 | predictedF=exp(paras(1)+paras(2)*cos(angle-paras(3))); 3 | logP=spikes.*log(predictedF)-predictedF-log(factorial(spikes)); 4 | score=-sum(logP); 5 | end 6 | 7 | -------------------------------------------------------------------------------- /matlab/add_all_paths.m: -------------------------------------------------------------------------------- 1 | %generate a super path that contains all folders and subfolders 2 | %add it to your path 3 | 4 | %change this line so that it has your full directory 5 | %for instance, my directory is: 6 | directory='/Users/madsarv/Desktop/sfn_course/datasets and code share SFN data science/'; 7 | addpath(genpath(directory)); -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/logdet.m: -------------------------------------------------------------------------------- 1 | function y = logdet(A) 2 | % log(det(A)) where A is positive-definite. 3 | % This is faster and more stable than using log(det(A)). 4 | 5 | % Written by Tom Minka 6 | % (c) Microsoft Corporation. All rights reserved. 7 | 8 | U = chol(A); 9 | y = 2*sum(log(diag(U))); 10 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/logdet.m: -------------------------------------------------------------------------------- 1 | function y = logdet(A) 2 | % log(det(A)) where A is positive-definite. 3 | % This is faster and more stable than using log(det(A)). 4 | 5 | % Written by Tom Minka 6 | % (c) Microsoft Corporation. All rights reserved. 7 | 8 | U = chol(A); 9 | y = 2*sum(log(diag(U))); 10 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/optlistwarn.m: -------------------------------------------------------------------------------- 1 | function unassigned = optlistwarn(unassigned) 2 | % OPTLISTWARN - warn about unassigned options. rem=optlistwarn(assignopts(...)) 3 | % 4 | % OPTLISTWARN('var1', val1, 'var2', val2, ...) prints a warning 5 | % stating that 'var1', 'var2', ... are not recognised options. 6 | % This is useful in combination with OPTLISTASSIGN. 
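%
% Example (a sketch of the usual calling pattern; 'binWidth' is a
% hypothetical option name, and optlistassign/assignopts is the companion
% option parser used elsewhere in this repo):
% >> binWidth = 20;                        % default value
% >> rem = assignopts(who, varargin);      % consume recognised options
% >> optlistwarn(rem);                     % warn about anything left over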
7 | 8 | if (length(unassigned)) 9 | warning(['unrecognized options:', sprintf(' %s', unassigned{1:2:end})]) 10 | end 11 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/optlistwarn.m: -------------------------------------------------------------------------------- 1 | function unassigned = optlistwarn(unassigned) 2 | % OPTLISTWARN - warn about unassigned options. rem=optlistwarn(assignopts(...)) 3 | % 4 | % OPTLISTWARN('var1', val1, 'var2', val2, ...) prints a warning 5 | % stating that 'var1', 'var2', ... are not recognised options. 6 | % This is useful in combination with OPTLISTASSIGN. 7 | 8 | if (length(unassigned)) 9 | warning(['unrecognized options:', sprintf(' %s', unassigned{1:2:end})]) 10 | end 11 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/fa_infer.m: -------------------------------------------------------------------------------- 1 | function [YY] = fa_infer(XX, LL, UU) 2 | % fa_infer - FA posterior inference: YY = fa_infer(XX, LL, UU) 3 | % 4 | % YY = FA_INFER(XX, LL, UU) computes the posterior means of the latents 5 | % under a factor analysis model with loadings LL [nObservables x K] and 6 | % unique (noise) variances UU [nObservables x 1], given zero-mean data 7 | % XX [nObservables x nObservations]. It returns the latent mean 8 | % estimates YY [K x nObservations]; the shared posterior covariance is 9 | % YYcov = inv(LL'*diag(1./UU)*LL + I). 10 | 11 | 12 | % discover dimensions 13 | [DD,KK] = size(LL); 14 | II = eye(KK); 15 | 16 | UUinvLL = bsxfun(@rdivide,LL,UU); 17 | YYcov = inv(LL'*UUinvLL + II); 18 | YY = YYcov*UUinvLL'*XX; 19 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/fa_infer.m: -------------------------------------------------------------------------------- 1 | function [YY] = fa_infer(XX, LL, UU) 2 | % fa_infer - FA posterior inference: YY = fa_infer(XX, LL, UU) 3 | % 4 | % YY = FA_INFER(XX, LL, UU) computes the posterior means of the latents 5 | % under a factor analysis model with loadings LL [nObservables x K] and 6 | % unique (noise) variances UU [nObservables x 1], given zero-mean data 7 | % XX [nObservables x nObservations]. It returns the latent mean 8 | % estimates YY [K x nObservations]; the shared posterior covariance is 9 | % YYcov = inv(LL'*diag(1./UU)*LL + I). 10 | 11 | 12 | % discover dimensions 13 | [DD,KK] = size(LL); 14 | II = eye(KK); 15 | 16 | UUinvLL = bsxfun(@rdivide,LL,UU); 17 | YYcov = inv(LL'*UUinvLL + II); 18 | YY = YYcov*UUinvLL'*XX; 19 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/MyTuningCurve.m: -------------------------------------------------------------------------------- 1 | %%load data 2 | load stevensonV2 3 | %% Remove all times where speeds are very slow 4 | isGood=find(handVel(1,:).^2+handVel(2,:).^2>.015) 5 | handVel=handVel(1:2,isGood); 6 | handPos=handPos(1:2,isGood); 7 | spikes=spikes(:,isGood); 8 | time=time(isGood); 9 | angle=atan2(handVel(1,:),handVel(2,:)); 10 | 11 | %% Plot Raw Data - PASCAL?
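% (Hint for the '%% Now plot a tuning curve' cell below; this is the same
% binned-average computation that appears in MyBootstrapErrorBar.m, so it
% gives nothing away that is not already in this folder:
%   angIndices=find(and(angle>angles(i),angle<=angles(i+1)));
%   nSpikes(i)=mean(spikes(nNeuron,angIndices));
% followed by plot(angles(1:end-1)+pi/16,nSpikes) after the loop.)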
%% 12 | nNeuron=193%193 13 | clf 14 | hold on % plotted points will stay on the screen 15 | plot(angle,spikes(nNeuron,:)+0.2*randn(size(spikes(nNeuron,:))),'r.') 16 | %% Now plot a tuning curve 17 | angles=-pi:pi/8:pi; 18 | for i=1:length(angles)-1 19 | % calculate the average firing rate for when the angle is within the 20 | % corresponding angle bin 21 | end 22 | %make sure to plot the results -------------------------------------------------------------------------------- /python/src/opencourse/modeling_data_funcs.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import numpy as np 4 | 5 | 6 | def evaluate_score_ExpCos(params, spikes, angle): 7 | predictedF = np.exp(params[0] + params[1] * np.cos(angle - params[2])) 8 | logP = spikes * np.log(predictedF) - predictedF 9 | # math.factorial (np.math was only an alias to the math module and is removed in NumPy >= 2.0) 10 | spikes_factorial = np.array([math.factorial(int(ii)) for ii in spikes]) 11 | logP -= np.log(spikes_factorial) 12 | score = -1. * np.sum(logP) 13 | return score 14 | 15 | def format_plot(ax, y_max=6): 16 | ax.set_xlabel('angle (rad)') 17 | ax.set_ylabel('spike counts') 18 | ax.set_ylim(0, y_max) 19 | ax.set_xticks(np.linspace(-np.pi, np.pi, 5)) 20 | ax.set_xticklabels([r'$-\pi$', r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$', r'$\pi$']) 21 | ax.set_xlim(-np.pi, np.pi) 22 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/myEvaluateScoreCosExp.m: -------------------------------------------------------------------------------- 1 | function [ score ] = evaluateScoreCosExp(paras, spikes, angle) 2 | %function [ score ] = evaluateScoreCosExp(paras, spikes, angle) 3 | % returns a score (which will be minimized); takes as input the parameters, and also the spikes and angles 4 | 5 | % for making the predictions, keep in mind that we can change the baseline 6 | % (add a constant), scale the cosine, and shift the cosine, 7 | % so the predictions are some function of the parameters and the 8 | % angle 9 | predictedF=?; 10 | % to score we can just calculate the log probability, 11 | % so we now need to calculate logP (Poisson 12 | % equation) 13 | 14 | % we want to maximize the log probability 15 | score=-sum(logP); % by default matlab will minimize 16 | end 17 | 18 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/segmentByTrial.m: -------------------------------------------------------------------------------- 1 | function seq = segmentByTrial(seq, X, fn) 2 | % 3 | % seq = segmentByTrial(seq, X, fn) 4 | % 5 | % Segment and store data by trial.
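%
% Example (a sketch; the field name 'xsm' is hypothetical, and Xall stands
% for any matrix whose columns span all trials, e.g. smoothed neural data
% of size anyDim x sum([seq.T])):
% >> seq = segmentByTrial(seq, Xall, 'xsm');   % adds seq(n).xsm per trial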
6 | % 7 | % INPUT: 8 | % 9 | % seq - data structure that has field T, the number of timesteps 10 | % X - data to be segmented 11 | % (any dimensionality x total number of timesteps) 12 | % fn - new field name of seq where segments of X are stored 13 | % 14 | % OUTPUT: 15 | % 16 | % seq - data structure with new field 'fn' 17 | % 18 | % @ 2009 Byron Yu -- byronyu@stanford.edu 19 | 20 | if sum([seq.T]) ~= size(X, 2) 21 | fprintf('Error: size of X incorrect.\n'); 22 | end 23 | 24 | ctr = 0; 25 | for n = 1:length(seq) 26 | T = seq(n).T; 27 | idx = (ctr+1) : (ctr+T); 28 | seq(n).(fn) = X(:, idx); 29 | 30 | ctr = ctr + T; 31 | end 32 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/segmentByTrial.m: -------------------------------------------------------------------------------- 1 | function seq = segmentByTrial(seq, X, fn) 2 | % 3 | % seq = segmentByTrial(seq, X, fn) 4 | % 5 | % Segment and store data by trial. 6 | % 7 | % INPUT: 8 | % 9 | % seq - data structure that has field T, the number of timesteps 10 | % X - data to be segmented 11 | % (any dimensionality x total number of timesteps) 12 | % fn - new field name of seq where segments of X are stored 13 | % 14 | % OUTPUT: 15 | % 16 | % seq - data structure with new field 'fn' 17 | % 18 | % @ 2009 Byron Yu -- byronyu@stanford.edu 19 | 20 | if sum([seq.T]) ~= size(X, 2) 21 | fprintf('Error: size of X incorrect.\n'); 22 | end 23 | 24 | ctr = 0; 25 | for n = 1:length(seq) 26 | T = seq(n).T; 27 | idx = (ctr+1) : (ctr+T); 28 | seq(n).(fn) = X(:, idx); 29 | 30 | ctr = ctr + T; 31 | end 32 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_twostage/cosmoother_pca.m: -------------------------------------------------------------------------------- 1 | function Ycs = cosmoother_pca(Y, params) 2 | % 3 | % Ycs = cosmoother_pca(Y, params) 4 | % 5 | % Performs leave-neuron-out prediction for PCA. 6 | % 7 | % INPUTS: 8 | % 9 | % Y - test data (# neurons x # data points) 10 | % params - PCA parameters fit to training data 11 | % 12 | % OUTPUTS: 13 | % 14 | % Ycs - leave-neuron-out prediction (# neurons x # data points) 15 | % 16 | % @ 2009 Byron Yu -- byronyu@stanford.edu 17 | 18 | L = params.L; 19 | d = params.d; 20 | 21 | [yDim, xDim] = size(L); 22 | 23 | Ycs = zeros(size(Y)); 24 | 25 | for i = 1:yDim 26 | % Indices 1:yDim with i removed 27 | mi = [1:(i-1) (i+1):yDim]; 28 | 29 | Xmi = inv(L(mi,:)' * L(mi,:)) * L(mi,:)' *... 30 | bsxfun(@minus, Y(mi,:), d(mi)); 31 | 32 | Ycs(i,:) = L(i,:) * Xmi + d(i); 33 | end 34 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_twostage/cosmoother_pca.m: -------------------------------------------------------------------------------- 1 | function Ycs = cosmoother_pca(Y, params) 2 | % 3 | % Ycs = cosmoother_pca(Y, params) 4 | % 5 | % Performs leave-neuron-out prediction for PCA. 
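%
% Example (a sketch; params is assumed to hold a PCA fit from the
% training data, with fields L and d as used below):
% >> Ycs = cosmoother_pca(Ytest, params);
% >> mse = mean((Ytest(:) - Ycs(:)).^2);   % leave-neuron-out error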
6 | % 7 | % INPUTS: 8 | % 9 | % Y - test data (# neurons x # data points) 10 | % params - PCA parameters fit to training data 11 | % 12 | % OUTPUTS: 13 | % 14 | % Ycs - leave-neuron-out prediction (# neurons x # data points) 15 | % 16 | % @ 2009 Byron Yu -- byronyu@stanford.edu 17 | 18 | L = params.L; 19 | d = params.d; 20 | 21 | [yDim, xDim] = size(L); 22 | 23 | Ycs = zeros(size(Y)); 24 | 25 | for i = 1:yDim 26 | % Indices 1:yDim with i removed 27 | mi = [1:(i-1) (i+1):yDim]; 28 | 29 | Xmi = inv(L(mi,:)' * L(mi,:)) * L(mi,:)' *... 30 | bsxfun(@minus, Y(mi,:), d(mi)); 31 | 32 | Ycs(i,:) = L(i,:) * Xmi + d(i); 33 | end 34 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/fcn_order_partition.m: -------------------------------------------------------------------------------- 1 | function [I,CI] = fcn_order_partition(x,ci,isrand) 2 | % clear all 3 | % close all 4 | % 5 | % load A 6 | % x = A; 7 | % ci = modularity_louvain_und(x,2.5); 8 | nc = max(ci); 9 | n = length(ci); 10 | h = hist(ci,1:nc); 11 | 12 | % [~,indsort] = sort(h,'descend'); 13 | I = zeros(n,1); 14 | CI = zeros(n,1); 15 | count = 0; 16 | for i = 1:nc 17 | ind = ci == i; 18 | y = x(ind,ind); 19 | z = mean(y,2) + mean(y)'; 20 | yy = x(ind,~ind); 21 | yy2 = x(~ind,ind); 22 | zz = mean(yy,2) + mean(yy2)'; 23 | z = z - zz; 24 | vtx = find(ind); 25 | [~,jnd] = sort(z,'descend'); 26 | if exist('isrand','var') 27 | jnd = jnd(randperm(length(jnd))) 28 | end 29 | vtx = vtx(jnd); 30 | 31 | I((count + 1):(count + length(jnd))) = vtx; 32 | CI((count + 1):(count + length(jnd))) = i; 33 | count = count + length(jnd); 34 | end -------------------------------------------------------------------------------- /python/src/opencourse/io.py: -------------------------------------------------------------------------------- 1 | import os.path as op 2 | import os 3 | import mne 4 | 5 | datasets = {'stevenson_v2': 'https://www.dropbox.com/s/a1ado3q51maxvct/StevensonV2.mat', 6 | 'stevenson_v4': 'https://www.dropbox.com/s/s3y05ozij1qwof9/StevensonV4.mat', 7 | 'connectivity': 'https://www.dropbox.com/s/ce9di12cibi85og/matrices_connectivity.mat'} 8 | 9 | 10 | def download_file(key): 11 | if key not in datasets.keys(): 12 | raise ValueError('key must be one of {}'.format(datasets.keys())) 13 | if not op.exists('../../data'): 14 | os.makedirs('../../data') 15 | url = datasets[key] 16 | name = op.basename(url) 17 | file_path = '../../data/{}'.format(name) 18 | _ = mne.utils._fetch_file(url, file_path) 19 | abs_path = op.abspath(file_path) 20 | print('Saved to: {}'.format(abs_path)) 21 | 22 | 23 | def download_all_files(): 24 | for key in datasets.keys(): 25 | download_file(key) 26 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/compile_mex.m: -------------------------------------------------------------------------------- 1 | % Compile mex 2 | ext=mexext; 3 | mkdir('../private'); 4 | 5 | if exist('OCTAVE_VERSION','builtin') 6 | setenv('CXXFLAGS',[getenv('CXXFLAGS'),' -std=c++0x']); 7 | 8 | mex -DOCTAVE -Imatlab_matrix metanetwork_reduce.cpp matlab_matrix/full.cpp matlab_matrix/sparse.cpp group_index.cpp 9 | 10 | mex -DOCTAVE -Imatlab_matrix group_handler.cpp matlab_matrix/full.cpp matlab_matrix/sparse.cpp group_index.cpp 11 | else 12 | mex -largeArrayDims CXXFLAGS="\$CXXFLAGS -std=c++0x" -Imatlab_matrix metanetwork_reduce.cpp matlab_matrix/full.cpp matlab_matrix/sparse.cpp group_index.cpp 13 | 14 | mex -largeArrayDims 
CXXFLAGS="\$CXXFLAGS -std=c++0x" -Imatlab_matrix group_handler.cpp matlab_matrix/full.cpp matlab_matrix/sparse.cpp group_index.cpp 15 | end 16 | 17 | movefile(['metanetwork_reduce.',ext],['../private/metanetwork_reduce.',ext]); 18 | movefile(['group_handler.',ext],['../private/group_handler.',ext]); -------------------------------------------------------------------------------- /matlab/basset_connectivity/fcn_grid_communities.m: -------------------------------------------------------------------------------- 1 | function [X Y indsort] = fcn_grid_communities(c) 2 | %FCN_GRID_COMMUNITIES outline communities along diagonal 3 | % 4 | % [X Y INDSORT] = FCN_GRID_COMMUNITIES(C) returns {X,Y} coordinates for 5 | % highlighting modules located along the diagonal. 6 | % INDSORT are the indices to sort the matrix. 7 | % 8 | % Inputs: C, community assignments 9 | % 10 | % Outputs: X, x coor 11 | % Y, y coor 12 | % INDSORT, indices 13 | % 14 | % Richard Betzel, Indiana University, 2012 15 | % 16 | 17 | nc = max(c); 18 | [c,indsort] = sort(c); 19 | 20 | X = []; 21 | Y = []; 22 | for i = 1:nc 23 | ind = find(c == i); 24 | if ~isempty(ind) 25 | mn = min(ind) - 0.5; 26 | mx = max(ind) + 0.5; 27 | x = [mn mn mx mx mn NaN]; 28 | y = [mn mx mx mn mn NaN]; 29 | X = [X, x]; 30 | Y = [Y, y]; 31 | end 32 | end -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/MyBootstrapErrorBar.m: -------------------------------------------------------------------------------- 1 | %%load data 2 | load stevensonV2 3 | %% Remove all times where speeds are very slow 4 | isGood=find(handVel(1,:).^2+handVel(2,:).^2>.015) 5 | handVel=handVel(1:2,isGood); 6 | handPos=handPos(1:2,isGood); 7 | spikes=spikes(:,isGood); 8 | time=time(isGood); 9 | angle=atan2(handVel(1,:),handVel(2,:)); 10 | 11 | %% Plot Raw Data - PASCAL? %% 12 | nNeuron=193%193 13 | clf 14 | hold on 15 | plot(angle,spikes(nNeuron,:)+0.2*randn(size(spikes(nNeuron,:))),'r.') 16 | 17 | %% Make a simple tuning curve 18 | angles=-pi:pi/8:pi; 19 | for i=1:length(angles)-1 20 | angIndices=find(and(angle>angles(i),angle<=angles(i+1))); 21 | nSpikes(i)=mean(spikes(nNeuron,angIndices)); 22 | end 23 | plot(angles(1:end-1)+pi/16,nSpikes) 24 | 25 | %% PART I: KONRAD 26 | %% bootstrap error bars 27 | %hint: you can sample with replacement using 28 | inds=1+floor(rand(size(angle))*length(angle)); 29 | %another hint. Use matlab sort function 30 | %last hint. matlab errorbar wants means, upper range (not value), lower 31 | %range (not value) as parameters 32 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/parseFilename.m: -------------------------------------------------------------------------------- 1 | function [result, err] = parseFilename(str) 2 | % 3 | % [result, err] = parseFilename(str) 4 | % 5 | % Extracts method, xDim and cross-validation fold from results filename, 6 | % where filename has format [method]_xDim[xDim]_cv[cvf].mat. 
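%
% Example (the filename itself is hypothetical):
% >> result = parseFilename('gpfa_xDim08_cv03.mat')
% % gives result.method = 'gpfa', result.xDim = 8, result.cvf = 3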
7 | % 8 | % INPUTS: 9 | % 10 | % str - filename string to parse 11 | % 12 | % OUTPUTS: 13 | % 14 | % result - structure with fields method, xDim, and cvf 15 | % err - boolean that indicates if input string is invalid filename 16 | % 17 | % @ 2009 Byron Yu -- byronyu@stanford.edu 18 | 19 | result = []; 20 | err = false; 21 | 22 | undi = find(str == '_'); 23 | if isempty(undi) 24 | err = true; 25 | return 26 | end 27 | 28 | result.method = str(1:undi(1)-1); 29 | 30 | [A, count, errmsg] = sscanf(str(undi(1)+1:end), 'xDim%d_cv%d.mat'); 31 | 32 | if (count < 1) || (count > 2) 33 | err = true; 34 | return 35 | end 36 | 37 | result.xDim = A(1); 38 | if count == 1 39 | result.cvf = 0; 40 | else 41 | result.cvf = A(2); 42 | end 43 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/parseFilename.m: -------------------------------------------------------------------------------- 1 | function [result, err] = parseFilename(str) 2 | % 3 | % [result, err] = parseFilename(str) 4 | % 5 | % Extracts method, xDim and cross-validation fold from results filename, 6 | % where filename has format [method]_xDim[xDim]_cv[cvf].mat. 7 | % 8 | % INPUTS: 9 | % 10 | % str - filename string to parse 11 | % 12 | % OUTPUTS: 13 | % 14 | % result - structure with fields method, xDim, and cvf 15 | % err - boolean that indicates if input string is invalid filename 16 | % 17 | % @ 2009 Byron Yu -- byronyu@stanford.edu 18 | 19 | result = []; 20 | err = false; 21 | 22 | undi = find(str == '_'); 23 | if isempty(undi) 24 | err = true; 25 | return 26 | end 27 | 28 | result.method = str(1:undi(1)-1); 29 | 30 | [A, count, errmsg] = sscanf(str(undi(1)+1:end), 'xDim%d_cv%d.mat'); 31 | 32 | if (count < 1) || (count > 2) 33 | err = true; 34 | return 35 | end 36 | 37 | result.xDim = A(1); 38 | if count == 1 39 | result.cvf = 0; 40 | else 41 | result.cvf = A(2); 42 | end 43 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/orthogonalize.m: -------------------------------------------------------------------------------- 1 | function [Xorth, Lorth, TT] = orthogonalize(X, L) 2 | % 3 | % [Xorth, Lorth, TT] = orthogonalize(X, L) 4 | % 5 | % Orthonormalize the columns of the loading matrix and 6 | % apply the corresponding linear transform to the latent variables. 
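%
% Because L = UU*DD*VV' and DD has at most xDim nonzero singular values,
% L*X = Lorth*Xorth exactly. Sanity check (a sketch):
% >> [Xorth, Lorth] = orthogonalize(X, L);
% >> norm(L*X - Lorth*Xorth)   % ~0 up to roundoff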
7 | % 8 | % yDim: data dimensionality 9 | % xDim: latent dimensionality 10 | % 11 | % INPUTS: 12 | % 13 | % X - latent variables (xDim x T) 14 | % L - loading matrix (yDim x xDim) 15 | % 16 | % OUTPUTS: 17 | % 18 | % Xorth - orthonormalized latent variables (xDim x T) 19 | % Lorth - orthonormalized loading matrix (yDim x xDim) 20 | % TT - linear transform applied to latent variables (xDim x xDim) 21 | % 22 | % @ 2009 Byron Yu -- byronyu@stanford.edu 23 | 24 | xDim = size(L, 2); 25 | 26 | if xDim == 1 27 | mag = sqrt(L' * L); 28 | Lorth = L / mag; 29 | Xorth = mag * X; 30 | else 31 | [UU, DD, VV] = svd(L); 32 | % TT is transform matrix 33 | TT = diag(diag(DD)) * VV'; 34 | 35 | Lorth = UU(:, 1:xDim); 36 | Xorth = TT * X; 37 | end 38 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/orthogonalize.m: -------------------------------------------------------------------------------- 1 | function [Xorth, Lorth, TT] = orthogonalize(X, L) 2 | % 3 | % [Xorth, Lorth, TT] = orthogonalize(X, L) 4 | % 5 | % Orthonormalize the columns of the loading matrix and 6 | % apply the corresponding linear transform to the latent variables. 7 | % 8 | % yDim: data dimensionality 9 | % xDim: latent dimensionality 10 | % 11 | % INPUTS: 12 | % 13 | % X - latent variables (xDim x T) 14 | % L - loading matrix (yDim x xDim) 15 | % 16 | % OUTPUTS: 17 | % 18 | % Xorth - orthonormalized latent variables (xDim x T) 19 | % Lorth - orthonormalized loading matrix (yDim x xDim) 20 | % TT - linear transform applied to latent variables (xDim x xDim) 21 | % 22 | % @ 2009 Byron Yu -- byronyu@stanford.edu 23 | 24 | xDim = size(L, 2); 25 | 26 | if xDim == 1 27 | mag = sqrt(L' * L); 28 | Lorth = L / mag; 29 | Xorth = mag * X; 30 | else 31 | [UU, DD, VV] = svd(L); 32 | % TT is transform matrix 33 | TT = diag(diag(DD)) * VV'; 34 | 35 | Lorth = UU(:, 1:xDim); 36 | Xorth = TT * X; 37 | end 38 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/clustering_coef_wu.m: -------------------------------------------------------------------------------- 1 | function C=clustering_coef_wu(W) 2 | %CLUSTERING_COEF_WU Clustering coefficient 3 | % 4 | % C = clustering_coef_wu(W); 5 | % 6 | % The weighted clustering coefficient is the average "intensity" 7 | % (geometric mean) of all triangles associated with each node. 8 | % 9 | % Input: W, weighted undirected connection matrix 10 | % (all weights must be between 0 and 1) 11 | % 12 | % Output: C, clustering coefficient vector 13 | % 14 | % Note: All weights must be between 0 and 1. 15 | % This may be achieved using the weight_conversion.m function, 16 | % W_nrm = weight_conversion(W, 'normalize'); 17 | % 18 | % Reference: Onnela et al. (2005) Phys Rev E 71:065103 19 | % 20 | % 21 | % Mika Rubinov, UNSW/U Cambridge, 2007-2015 22 | 23 | % Modification history: 24 | % 2007: original 25 | % 2015: expanded documentation 26 | 27 | K=sum(W~=0,2); 28 | cyc3=diag((W.^(1/3))^3); 29 | K(cyc3==0)=inf; %if no 3-cycles exist, make C=0 (via K=inf) 30 | C=cyc3./(K.*(K-1)); %clustering coefficient 31 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/group_handler.h: -------------------------------------------------------------------------------- 1 | // 2 | // group_handler.h 3 | // group_handler 4 | // 5 | // Created by Lucas Jeub on 21/11/2012. 
6 | // 7 | // Last modified by Lucas Jeub on 25/07/2014 8 | 9 | #ifndef __group_handler__group_handler__ 10 | #define __group_handler__group_handler__ 11 | 12 | #include "mex.h" 13 | 14 | #ifndef OCTAVE 15 | #include "matrix.h" 16 | #endif 17 | 18 | #include "matlab_matrix.h" 19 | #include "group_index.h" 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | 28 | 29 | 30 | //move node i to group with most improvement in modularity 31 | double move(group_index & g, mwIndex node, const mxArray * mod); 32 | 33 | //move node i to random group that increases modularity 34 | double moverand(group_index & g, mwIndex node, const mxArray * mod); 35 | 36 | std::map mod_change(group_index &g, sparse &mod,std::set & unique_groups,mwIndex current_node); 37 | 38 | std::map mod_change(group_index &g, full & mod, std::set & unique_groups, mwIndex current_node); 39 | 40 | 41 | #endif /* defined(__group_handler__group_handler__) */ 42 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/fillPerSymm.m: -------------------------------------------------------------------------------- 1 | function Pout = fillPerSymm(Pin, blkSize, T, varargin) 2 | % 3 | % Pout = fillPerSymm(Pin, blkSize, T,...) 4 | % 5 | % Fills in the bottom half of a block persymmetric matrix, given the 6 | % top half. 7 | % 8 | % INPUTS: 9 | % 10 | % Pin - top half of block persymmetric matrix 11 | % (xDim*Thalf) x (xDim*T), where Thalf = ceil(T/2) 12 | % blkSize - edge length of one block 13 | % T - number of blocks making up a row of Pin 14 | % 15 | % OUTPUTS: 16 | % 17 | % Pout - full block persymmetric matrix 18 | % (xDim*T) x (xDim*T) 19 | % 20 | % OPTIONAL ARGUMENTS: 21 | % 22 | % blkSizeVert - vertical block edge length if blocks are not square. 23 | % 'blkSize' is assumed to be the horizontal block edge 24 | % length. 25 | % 26 | % @ 2009 Byron Yu byronyu@stanford.edu 27 | % John Cunningham jcunnin@stanford.edu 28 | 29 | blkSizeVert = blkSize; 30 | assignopts(who, varargin); 31 | 32 | % Fill in bottom half by doing blockwise fliplr and flipud 33 | Thalf = floor(T/2); 34 | idxHalf = bsxfun(@plus, (1:blkSizeVert)', ((Thalf-1):-1:0)*blkSizeVert); 35 | idxFull = bsxfun(@plus, (1:blkSize)', ((T-1):-1:0)*blkSize); 36 | 37 | Pout = [Pin; Pin(idxHalf(:), idxFull(:))]; 38 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/fillPerSymm.m: -------------------------------------------------------------------------------- 1 | function Pout = fillPerSymm(Pin, blkSize, T, varargin) 2 | % 3 | % Pout = fillPerSymm(Pin, blkSize, T,...) 4 | % 5 | % Fills in the bottom half of a block persymmetric matrix, given the 6 | % top half. 7 | % 8 | % INPUTS: 9 | % 10 | % Pin - top half of block persymmetric matrix 11 | % (xDim*Thalf) x (xDim*T), where Thalf = ceil(T/2) 12 | % blkSize - edge length of one block 13 | % T - number of blocks making up a row of Pin 14 | % 15 | % OUTPUTS: 16 | % 17 | % Pout - full block persymmetric matrix 18 | % (xDim*T) x (xDim*T) 19 | % 20 | % OPTIONAL ARGUMENTS: 21 | % 22 | % blkSizeVert - vertical block edge length if blocks are not square. 23 | % 'blkSize' is assumed to be the horizontal block edge 24 | % length. 
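%
% Tiny worked example (a sketch with scalar blocks, blkSize = 1, T = 4):
% >> Pin = [1 2 3 4; 5 6 7 8];   % top half, Thalf = 2
% >> Pout = fillPerSymm(Pin, 1, 4)
% % Pout = [1 2 3 4; 5 6 7 8; 8 7 6 5; 4 3 2 1]: the bottom half is the
% % top half flipped both left-right and up-down.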
25 | % 26 | % @ 2009 Byron Yu byronyu@stanford.edu 27 | % John Cunningham jcunnin@stanford.edu 28 | 29 | blkSizeVert = blkSize; 30 | assignopts(who, varargin); 31 | 32 | % Fill in bottom half by doing blockwise fliplr and flipud 33 | Thalf = floor(T/2); 34 | idxHalf = bsxfun(@plus, (1:blkSizeVert)', ((Thalf-1):-1:0)*blkSizeVert); 35 | idxFull = bsxfun(@plus, (1:blkSize)', ((T-1):-1:0)*blkSize); 36 | 37 | Pout = [Pin; Pin(idxHalf(:), idxFull(:))]; 38 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/smoother.m: -------------------------------------------------------------------------------- 1 | function yOut = smoother(yIn, kernSD, stepSize) 2 | % 3 | % yOut = smoother(yIn, kernSD, stepSize) 4 | % 5 | % Gaussian kernel smoothing of data across time. 6 | % 7 | % INPUTS: 8 | % 9 | % yIn - input data (yDim x T) 10 | % kernSD - standard deviation of Gaussian kernel, in msec 11 | % stepSize - time between 2 consecutive datapoints in yIn, in msec 12 | % 13 | % OUTPUTS: 14 | % 15 | % yOut - smoothed version of yIn (yDim x T) 16 | % 17 | % @ 2009 Byron Yu -- byronyu@stanford.edu 18 | 19 | if (kernSD == 0) || (size(yIn, 2)==1) 20 | yOut = yIn; 21 | return 22 | end 23 | 24 | % Filter half length 25 | % Go 3 standard deviations out 26 | fltHL = ceil(3 * kernSD / stepSize); 27 | 28 | % Length of flt is 2*fltHL + 1 29 | flt = normpdf(-fltHL*stepSize : stepSize : fltHL*stepSize, 0, kernSD); 30 | 31 | % Ensure that filter taps sum to 1 32 | flt = flt / sum(flt); 33 | 34 | [yDim, T] = size(yIn); 35 | yOut = nan(yDim, T); 36 | 37 | % Want to normalize by sum of filter taps actually used 38 | nm = conv(flt, ones(1, T)); 39 | 40 | for i = 1:yDim 41 | ys = conv(flt, yIn(i,:)) ./ nm; 42 | % Cut off edges so that result of convolution is same length 43 | % as original data 44 | yOut(i,:) = ys(fltHL+1:end-fltHL); 45 | end 46 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/smoother.m: -------------------------------------------------------------------------------- 1 | function yOut = smoother(yIn, kernSD, stepSize) 2 | % 3 | % yOut = smoother(yIn, kernSD, stepSize) 4 | % 5 | % Gaussian kernel smoothing of data across time. 
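%
% Example (a sketch; 20 ms bins smoothed with a 50 ms s.d. kernel):
% >> ySm = smoother(y, 50, 20);   % y is yDim x T; ySm has the same size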
6 | % 7 | % INPUTS: 8 | % 9 | % yIn - input data (yDim x T) 10 | % kernSD - standard deviation of Gaussian kernel, in msec 11 | % stepSize - time between 2 consecutive datapoints in yIn, in msec 12 | % 13 | % OUTPUTS: 14 | % 15 | % yOut - smoothed version of yIn (yDim x T) 16 | % 17 | % @ 2009 Byron Yu -- byronyu@stanford.edu 18 | 19 | if (kernSD == 0) || (size(yIn, 2)==1) 20 | yOut = yIn; 21 | return 22 | end 23 | 24 | % Filter half length 25 | % Go 3 standard deviations out 26 | fltHL = ceil(3 * kernSD / stepSize); 27 | 28 | % Length of flt is 2*fltHL + 1 29 | flt = normpdf(-fltHL*stepSize : stepSize : fltHL*stepSize, 0, kernSD); 30 | 31 | % Ensure that filter taps sum to 1 32 | flt = flt / sum(flt); 33 | 34 | [yDim, T] = size(yIn); 35 | yOut = nan(yDim, T); 36 | 37 | % Want to normalize by sum of filter taps actually used 38 | nm = conv(flt, ones(1, T)); 39 | 40 | for i = 1:yDim 41 | ys = conv(flt, yIn(i,:)) ./ nm; 42 | % Cut off edges so that result of convolution is same length 43 | % as original data 44 | yOut(i,:) = ys(fltHL+1:end-fltHL); 45 | end 46 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/fcn_plot_blocks.m: -------------------------------------------------------------------------------- 1 | function [X,Y,ind] = fcn_plot_blocks(g) 2 | %FCN_PLOT_BLOCKS visualize block structure 3 | % 4 | % [X,Y,IND] = FCN_PLOT_BLOCKS is a visualization tool for plotting block 5 | % model boundaries. As input it takes a vec G of size N x 1 6 | % (N is the number of nodes) where each cell is the group 7 | % assignment for the corresponding node. As an output, it 8 | % returns X and Y, which are vectors of boundaries and also 9 | % IND, which is the ordering of the adjacency matrix to fit 10 | % with the block structure. 11 | % 12 | % Example: >> [ci,q] = modularity_louvain_und(A); % find modules 13 | % >> [x,y,ind] = fcn_plot_blocks(ci); % get vectors 14 | % >> imagesc(A(ind,ind)); hold on; plot(x,y,'w'); 15 | % 16 | % Inputs: G, vector of group assignments 17 | % 18 | % Outputs: X, x-coordinates 19 | % Y, y-coordinates 20 | % IND, reordering for adjacency matrix 21 | % 22 | % Richard Betzel, Indiana University, 2013 23 | % 24 | 25 | nc = max(g); 26 | n = length(g); 27 | x = 0.5; 28 | y = n + 0.5; 29 | [gsort,ind] = sort(g); 30 | X = []; 31 | Y = []; 32 | for i = 2:(nc) 33 | aa = find(gsort == i); 34 | xx = [x; y]; 35 | yy = (aa(1) - 0.5)*ones(2,1); 36 | X = [X; xx; NaN; yy; NaN]; 37 | Y = [Y; yy; NaN; xx; NaN]; 38 | end -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | # Data folder 92 | data/ 93 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/GenerateData.m: -------------------------------------------------------------------------------- 1 | function [xx,yy,zz,CC] = GenerateData(varargin); 2 | 3 | %% optional settings: 4 | nTime = 50; 5 | nTrial = 100; 6 | aligned = 1; 7 | 8 | optlistassign(who, varargin); 9 | 10 | %% these can't be changed 11 | nLatent = 4; 12 | nNeuron = 100; 13 | 14 | 15 | %% make the data predictable 16 | rng default; 17 | rng(0); 18 | 19 | yy = zeros(nLatent, nTime, nTrial); 20 | 21 | 22 | tt = [0:nTime-1]./nTime; 23 | 24 | %% Latents 25 | 26 | if (aligned) 27 | lat12phase = zeros([1, 1, nTrial]); 28 | lat34phase = zeros([1, 1, nTrial]); 29 | else 30 | lat12phase = rand([1, 1, nTrial]); 31 | lat34phase = rand([1, 1, nTrial]); 32 | end 33 | 34 | yy(1, :, :) = sin(2*pi*2*(bsxfun(@plus, tt, lat12phase))); 35 | yy(2, :, :) = cos(2*pi*3*(bsxfun(@plus, tt, lat12phase))); 36 | yy(3, :, :) = sin(2*pi*4*(bsxfun(@plus, tt, lat34phase))); 37 | yy(4, :, :) = cos(2*pi*5*(bsxfun(@plus, tt, lat34phase))); 38 | 39 | 40 | %% Loadings 41 | 42 | CC = zeros(nNeuron, nLatent); 43 | CC(1:30, 1:2) = [linspace(0,10,30); linspace(10,0,30)]'; 44 | CC(31:70, :) = [linspace(0,5,40); linspace(5,0,40); ... 
45 | linspace(0,5,40); linspace(5,0,40)]'; 46 | CC(71:100, 3:4) = [linspace(0,10,30); linspace(10,0,30)]'; 47 | 48 | %% Mean 49 | 50 | mu(1:25,1) = 30; 51 | mu(26:50,1) = 10; 52 | mu(51:75,1) = 0; 53 | mu(76:100,1) = 5; 54 | 55 | zz = bsxfun(@plus, mu, CC*reshape(yy, nLatent, [])); 56 | 57 | xx = reshape(poissrnd(log(1+exp(zz))/50), [nNeuron, nTime, nTrial]); 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/GenerateData.m: -------------------------------------------------------------------------------- 1 | function [xx,yy,zz,CC] = GenerateData(varargin); 2 | 3 | %% optional settings: 4 | nTime = 50; 5 | nTrial = 100; 6 | aligned = 1; 7 | 8 | optlistassign(who, varargin); 9 | 10 | %% these can't be changed 11 | nLatent = 4; 12 | nNeuron = 100; 13 | 14 | 15 | %% make the data predictable 16 | rng default; 17 | rng(0); 18 | 19 | yy = zeros(nLatent, nTime, nTrial); 20 | 21 | 22 | tt = [0:nTime-1]./nTime; 23 | 24 | %% Latents 25 | 26 | if (aligned) 27 | lat12phase = zeros([1, 1, nTrial]); 28 | lat34phase = zeros([1, 1, nTrial]); 29 | else 30 | lat12phase = rand([1, 1, nTrial]); 31 | lat34phase = rand([1, 1, nTrial]); 32 | end 33 | 34 | yy(1, :, :) = sin(2*pi*2*(bsxfun(@plus, tt, lat12phase))); 35 | yy(2, :, :) = cos(2*pi*3*(bsxfun(@plus, tt, lat12phase))); 36 | yy(3, :, :) = sin(2*pi*4*(bsxfun(@plus, tt, lat34phase))); 37 | yy(4, :, :) = cos(2*pi*5*(bsxfun(@plus, tt, lat34phase))); 38 | 39 | 40 | %% Loadings 41 | 42 | CC = zeros(nNeuron, nLatent); 43 | CC(1:30, 1:2) = [linspace(0,10,30); linspace(10,0,30)]'; 44 | CC(31:70, :) = [linspace(0,5,40); linspace(5,0,40); ... 45 | linspace(0,5,40); linspace(5,0,40)]'; 46 | CC(71:100, 3:4) = [linspace(0,10,30); linspace(10,0,30)]'; 47 | 48 | %% Mean 49 | 50 | mu(1:25,1) = 30; 51 | mu(26:50,1) = 10; 52 | mu(51:75,1) = 0; 53 | mu(76:100,1) = 5; 54 | 55 | zz = bsxfun(@plus, mu, CC*reshape(yy, nLatent, [])); 56 | 57 | xx = reshape(poissrnd(log(1+exp(zz))/50), [nNeuron, nTime, nTrial]); 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_twostage/fastfa_estep.m: -------------------------------------------------------------------------------- 1 | function [Z, LL] = fastfa_estep(X, params) 2 | % 3 | % [Z, LL] = fastfa_estep(X, params) 4 | % 5 | % Compute the low-dimensional points and data likelihoods using a 6 | % previously learned FA or PPCA model. 7 | % 8 | % xDim: data dimensionality 9 | % zDim: latent dimensionality 10 | % N: number of data points 11 | % 12 | % INPUTS: 13 | % 14 | % X - data matrix (xDim x N) 15 | % params - learned FA or PPCA parameters (structure with fields L, Ph, d) 16 | % 17 | % OUTPUTS: 18 | % 19 | % Z.mean - posterior mean (zDim x N) 20 | % Z.cov - posterior covariance (zDim x zDim), which is the same for all data 21 | % LL - log-likelihood of data 22 | % 23 | % Note: the choice of FA vs. PPCA does not need to be specified because 24 | % the choice is reflected in params.Ph. 25 | % 26 | % Code adapted from ffa.m by Zoubin Ghaharamani. 
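%
% The E-step below uses the Woodbury identity: with R = diag(Ph),
% MM = inv(L*L' + R) = inv(R) - inv(R)*L * inv(I + L'*inv(R)*L) * L'*inv(R),
% so only a zDim x zDim matrix is ever inverted. The posterior then
% follows as Z.mean = L'*MM*Xc and Z.cov = I - L'*MM*L.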
27 | % 28 | % @ 2009 Byron Yu -- byronyu@stanford.edu 29 | 30 | [xDim, N] = size(X); 31 | zDim = size(params.L, 2); 32 | 33 | L = params.L; 34 | Ph = params.Ph; 35 | d = params.d; 36 | 37 | Xc = bsxfun(@minus, X, d); 38 | XcXc = Xc * Xc'; 39 | 40 | I=eye(zDim); 41 | 42 | const=-xDim/2*log(2*pi); 43 | 44 | iPh = diag(1./Ph); 45 | iPhL = iPh * L; 46 | MM = iPh - iPhL / (I + L' * iPhL) * iPhL'; 47 | beta = L' * MM; % zDim x xDim 48 | 49 | Z.mean = beta * Xc; % zDim x N 50 | Z.cov = I - beta * L; % zDim x zDim; same for all observations 51 | 52 | LL = N*const + 0.5*N*logdet(MM) - 0.5 * sum(sum(MM .* XcXc)); 53 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_twostage/fastfa_estep.m: -------------------------------------------------------------------------------- 1 | function [Z, LL] = fastfa_estep(X, params) 2 | % 3 | % [Z, LL] = fastfa_estep(X, params) 4 | % 5 | % Compute the low-dimensional points and data likelihoods using a 6 | % previously learned FA or PPCA model. 7 | % 8 | % xDim: data dimensionality 9 | % zDim: latent dimensionality 10 | % N: number of data points 11 | % 12 | % INPUTS: 13 | % 14 | % X - data matrix (xDim x N) 15 | % params - learned FA or PPCA parameters (structure with fields L, Ph, d) 16 | % 17 | % OUTPUTS: 18 | % 19 | % Z.mean - posterior mean (zDim x N) 20 | % Z.cov - posterior covariance (zDim x zDim), which is the same for all data 21 | % LL - log-likelihood of data 22 | % 23 | % Note: the choice of FA vs. PPCA does not need to be specified because 24 | % the choice is reflected in params.Ph. 25 | % 26 | % Code adapted from ffa.m by Zoubin Ghaharamani. 27 | % 28 | % @ 2009 Byron Yu -- byronyu@stanford.edu 29 | 30 | [xDim, N] = size(X); 31 | zDim = size(params.L, 2); 32 | 33 | L = params.L; 34 | Ph = params.Ph; 35 | d = params.d; 36 | 37 | Xc = bsxfun(@minus, X, d); 38 | XcXc = Xc * Xc'; 39 | 40 | I=eye(zDim); 41 | 42 | const=-xDim/2*log(2*pi); 43 | 44 | iPh = diag(1./Ph); 45 | iPhL = iPh * L; 46 | MM = iPh - iPhL / (I + L' * iPhL) * iPhL'; 47 | beta = L' * MM; % zDim x xDim 48 | 49 | Z.mean = beta * Xc; % zDim x N 50 | Z.cov = I - beta * L; % zDim x zDim; same for all observations 51 | 52 | LL = N*const + 0.5*N*logdet(MM) - 0.5 * sum(sum(MM .* XcXc)); 53 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/MyFitTuningCurve.m: -------------------------------------------------------------------------------- 1 | %%load data 2 | load stevensonV2 3 | %% Remove all times where speeds are very slow 4 | isGood=find(handVel(1,:).^2+handVel(2,:).^2>.015) 5 | handVel=handVel(1:2,isGood); 6 | handPos=handPos(1:2,isGood); 7 | spikes=spikes(:,isGood); 8 | time=time(isGood); 9 | angle=atan2(handVel(1,:),handVel(2,:)); 10 | 11 | %% Plot Raw Data - PASCAL? 
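% (Note for '%% PART II' below: the fminsearch template there uses the old
% extra-argument calling form. In current MATLAB the same fit can be
% written with an anonymous function, e.g. using the completed scorer in
% Solutions/evaluateScoreCosExp.m; the initial guess [1 1 0] is just one
% plausible choice:
%   [bestParas,fval]=fminsearch(@(p) evaluateScoreCosExp(p,spikes(nNeuron,:),angle),[1 1 0]);
% )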
%% 12 | nNeuron=193%193 13 | clf 14 | hold on 15 | plot(angle,spikes(nNeuron,:)+0.2*randn(size(spikes(nNeuron,:))),'r.') 16 | 17 | %% Make a simple tuning curve 18 | angles=-pi:pi/8:pi; 19 | for i=1:length(angles)-1 20 | angIndices=find(and(angle>angles(i),angle<=angles(i+1))); 21 | nSpikes(i)=mean(spikes(nNeuron,angIndices)); 22 | end 23 | plot(angles(1:end-1)+pi/16,nSpikes) 24 | 25 | %% PART I: KONRAD 26 | %% bootstrap error bars 27 | angles=-pi:pi/8:pi; 28 | for k=1:1000 29 | inds=1+floor(rand(size(angle))*length(angle)); 30 | for i=1:length(angles)-1 31 | angIndices=inds(and(angle(inds)>angles(i),angle(inds)<=angles(i+1))); 32 | nS(i,k)=mean(spikes(nNeuron,angIndices)); 33 | end 34 | end 35 | nSS=sort(nS'); 36 | U=nSS(975,:); %97.5th percentile (upper bound) 37 | L=nSS(25,:); %2.5th percentile (lower bound) 38 | M=mean(nS'); 39 | errorbar(angles(1:end-1)+pi/16,M,M-L,U-M) 40 | %advanced exercise: do this for all neurons. Do they actually have cosine 41 | %tuning as indicated by the research? 42 | 43 | %% PART II: KONRAD 44 | %% fit arbitrary functions 45 | %fit a model 46 | [bestParas,fvalCosExp(i)]=fminsearch(@whichFunction, YourInitialGuess,[],spikes(nNeuron,:),angle); 47 | 48 | %Now plot it. 49 | %Here you need the function that you are actually fitting to see how the 50 | %fit relates to the spikes 51 | % plot(-pi:pi/80:pi, ...))) -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_twostage/cosmoother_fa.m: -------------------------------------------------------------------------------- 1 | function [Ycs, Vcs] = cosmoother_fa(Y, params) 2 | % 3 | % [Ycs, Vcs] = cosmoother_fa(Y, params) 4 | % 5 | % Performs leave-neuron-out prediction for FA or PPCA. 6 | % 7 | % INPUTS: 8 | % 9 | % Y - test data (# neurons x # data points) 10 | % params - model parameters fit to training data using fastfa.m 11 | % 12 | % OUTPUTS: 13 | % 14 | % Ycs - leave-neuron-out prediction mean (# neurons x # data points) 15 | % Vcs - leave-neuron-out prediction variance (# neurons x 1) 16 | % 17 | % Note: the choice of FA vs. PPCA does not need to be specified because 18 | % the choice is reflected in params.Ph. 19 | % 20 | % @ 2009 Byron Yu -- byronyu@stanford.edu 21 | 22 | L = params.L; 23 | Ph = params.Ph; 24 | d = params.d; 25 | 26 | [yDim, xDim] = size(L); 27 | I = eye(xDim); 28 | 29 | Ycs = zeros(size(Y)); 30 | if nargout == 2 31 | % One variance for each observed dimension 32 | % Doesn't depend on observed data 33 | Vcs = zeros(yDim, 1); 34 | end 35 | 36 | for i = 1:yDim 37 | % Indices 1:yDim with i removed 38 | mi = [1:(i-1) (i+1):yDim]; 39 | 40 | Phinv = 1./Ph(mi); % (yDim-1) x 1 41 | LRinv = (L(mi,:) .* repmat(Phinv, 1, xDim))'; % xDim x (yDim - 1) 42 | LRinvL = LRinv * L(mi,:); % xDim x xDim 43 | 44 | term2 = L(i,:) * (I - LRinvL / (I + LRinvL)); % 1 x xDim 45 | 46 | dif = bsxfun(@minus, Y(mi,:), d(mi)); 47 | Ycs(i,:) = d(i) + term2 * LRinv * dif; 48 | 49 | if nargout == 2 50 | Vcs(i) = L(i,:)*L(i,:)' + Ph(i) - term2 * LRinvL * L(i,:)'; 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_twostage/cosmoother_fa.m: -------------------------------------------------------------------------------- 1 | function [Ycs, Vcs] = cosmoother_fa(Y, params) 2 | % 3 | % [Ycs, Vcs] = cosmoother_fa(Y, params) 4 | % 5 | % Performs leave-neuron-out prediction for FA or PPCA.
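%
% Example (a sketch; params is assumed to come from a fastfa.m fit on
% training data):
% >> [Ycs, Vcs] = cosmoother_fa(Ytest, params);
% >> res = mean((Ytest - Ycs).^2, 2);   % per-neuron error, compare to Vcs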
6 | % 7 | % INPUTS: 8 | % 9 | % Y - test data (# neurons x # data points) 10 | % params - model parameters fit to training data using fastfa.m 11 | % 12 | % OUTPUTS: 13 | % 14 | % Ycs - leave-neuron-out prediction mean (# neurons x # data points) 15 | % Vcs - leave-neuron-out prediction variance (# neurons x 1) 16 | % 17 | % Note: the choice of FA vs. PPCA does not need to be specified because 18 | % the choice is reflected in params.Ph. 19 | % 20 | % @ 2009 Byron Yu -- byronyu@stanford.edu 21 | 22 | L = params.L; 23 | Ph = params.Ph; 24 | d = params.d; 25 | 26 | [yDim, xDim] = size(L); 27 | I = eye(xDim); 28 | 29 | Ycs = zeros(size(Y)); 30 | if nargout == 2 31 | % One variance for each observed dimension 32 | % Doesn't depend on observed data 33 | Vcs = zeros(yDim, 1); 34 | end 35 | 36 | for i = 1:yDim 37 | % Indices 1:yDim with i removed 38 | mi = [1:(i-1) (i+1):yDim]; 39 | 40 | Phinv = 1./Ph(mi); % (yDim-1) x 1 41 | LRinv = (L(mi,:) .* repmat(Phinv, 1, xDim))'; % xDim x (yDim - 1) 42 | LRinvL = LRinv * L(mi,:); % xDim x xDim 43 | 44 | term2 = L(i,:) * (I - LRinvL / (I + LRinvL)); % 1 x xDim 45 | 46 | dif = bsxfun(@minus, Y(mi,:), d(mi)); 47 | Ycs(i,:) = d(i) + term2 * LRinv * dif; 48 | 49 | if nargout == 2 50 | Vcs(i) = L(i,:)*L(i,:)' + Ph(i) - term2 * LRinvL * L(i,:)'; 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/getSeq.m: -------------------------------------------------------------------------------- 1 | function seq = getSeq(dat, binWidth, varargin) 2 | % 3 | % seq = getSeq(dat, binWidth, ...) 4 | % 5 | % Converts 0/1 spike trains into spike counts. 6 | % 7 | % INPUTS: 8 | % 9 | % dat - structure whose nth entry (corresponding to the nth experimental 10 | % trial) has fields 11 | % trialId -- unique trial identifier 12 | % spikes -- 0/1 matrix of the raw spiking activity across 13 | % all neurons. Each row corresponds to a neuron. 14 | % Each column corresponds to a 1 msec timestep. 15 | % binWidth - spike bin width in msec 16 | % 17 | % OUTPUTS: 18 | % 19 | % seq - data structure, whose nth entry (corresponding to 20 | % the nth experimental trial) has fields 21 | % trialId -- unique trial identifier 22 | % T (1 x 1) -- number of timesteps 23 | % y (yDim x T) -- neural data 24 | % 25 | % OPTIONAL ARGUMENTS: 26 | % 27 | % useSqrt - logical specifying whether or not to use square-root transform 28 | % on spike counts (default: true) 29 | % 30 | % @ 2009 Byron Yu -- byronyu@stanford.edu 31 | 32 | useSqrt = true; 33 | assignopts(who, varargin); 34 | 35 | seq = []; 36 | for n = 1:length(dat) 37 | yDim = size(dat(n).spikes, 1); 38 | T = floor(size(dat(n).spikes, 2) / binWidth); 39 | 40 | seq(n).trialId = dat(n).trialId; 41 | seq(n).T = T; 42 | seq(n).y = nan(yDim, T); 43 | 44 | for t = 1:T 45 | iStart = binWidth * (t-1) + 1; 46 | iEnd = binWidth * t; 47 | 48 | seq(n).y(:,t) = sum(dat(n).spikes(:, iStart:iEnd), 2); 49 | end 50 | 51 | if useSqrt 52 | seq(n).y = sqrt(seq(n).y); 53 | end 54 | end 55 | 56 | % Remove trials that are shorter than one bin width 57 | if ~isempty(seq) 58 | trialsToKeep = ([seq.T] > 0); 59 | seq = seq(trialsToKeep); 60 | end 61 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/getSeq.m: -------------------------------------------------------------------------------- 1 | function seq = getSeq(dat, binWidth, varargin) 2 | % 3 | % seq = getSeq(dat, binWidth, ...) 
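% Example (hypothetical 'dat' structured as described below): 20 ms bins
% with the default square-root transform, or raw counts:
%   seq = getSeq(dat, 20);
%   seq = getSeq(dat, 20, 'useSqrt', false);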
4 | % 5 | % Converts 0/1 spike trains into spike counts. 6 | % 7 | % INPUTS: 8 | % 9 | % dat - structure whose nth entry (corresponding to the nth experimental 10 | % trial) has fields 11 | % trialId -- unique trial identifier 12 | % spikes -- 0/1 matrix of the raw spiking activity across 13 | % all neurons. Each row corresponds to a neuron. 14 | % Each column corresponds to a 1 msec timestep. 15 | % binWidth - spike bin width in msec 16 | % 17 | % OUTPUTS: 18 | % 19 | % seq - data structure, whose nth entry (corresponding to 20 | % the nth experimental trial) has fields 21 | % trialId -- unique trial identifier 22 | % T (1 x 1) -- number of timesteps 23 | % y (yDim x T) -- neural data 24 | % 25 | % OPTIONAL ARGUMENTS: 26 | % 27 | % useSqrt - logical specifying whether or not to use square-root transform 28 | % on spike counts (default: true) 29 | % 30 | % @ 2009 Byron Yu -- byronyu@stanford.edu 31 | 32 | useSqrt = true; 33 | assignopts(who, varargin); 34 | 35 | seq = []; 36 | for n = 1:length(dat) 37 | yDim = size(dat(n).spikes, 1); 38 | T = floor(size(dat(n).spikes, 2) / binWidth); 39 | 40 | seq(n).trialId = dat(n).trialId; 41 | seq(n).T = T; 42 | seq(n).y = nan(yDim, T); 43 | 44 | for t = 1:T 45 | iStart = binWidth * (t-1) + 1; 46 | iEnd = binWidth * t; 47 | 48 | seq(n).y(:,t) = sum(dat(n).spikes(:, iStart:iEnd), 2); 49 | end 50 | 51 | if useSqrt 52 | seq(n).y = sqrt(seq(n).y); 53 | end 54 | end 55 | 56 | % Remove trials that are shorter than one bin width 57 | if ~isempty(seq) 58 | trialsToKeep = ([seq.T] > 0); 59 | seq = seq(trialsToKeep); 60 | end 61 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/group_index.h: -------------------------------------------------------------------------------- 1 | // 2 | // group_index.h 3 | // group_index 4 | // 5 | // Created by Lucas Jeub on 24/10/2012. 
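// Usage sketch (hypothetical; 'prhs[0]' stands in for a MATLAB group vector
// passed into a mexFunction -- mirrors the declarations below):
//   group_index g(prhs[0]);   // build the index from a MATLAB group vector
//   g.move(3, 1);             // reassign node 3 to group 1 in constant time
//   mxArray *out = NULL;
//   g.export_matlab(out);     // write the updated grouping back to MATLAB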
6 | // 7 | // Implements the group_index data structure: 8 | // 9 | // nodes: vector storing the group membership for each node 10 | // 11 | // groups: vector of lists, each list stores the nodes assigned to the group 12 | // 13 | // nodes_iterator: vector storing the position of each node in the list corresponding 14 | // to the group it is assigned to (allows constant time moving of nodes) 15 | // 16 | // 17 | // index(group): return matlab indices of nodes in group 18 | // 19 | // move(node,group): move node to group 20 | // 21 | // export_matlab(matlab_array): output group vector to matlab_array 22 | // 23 | // 24 | // Last modified by Lucas Jeub on 25/07/2014 25 26 | #ifndef GROUP_INDEX_H 27 | #define GROUP_INDEX_H 28 29 | #include <vector> 30 | #include <list> 31 | #include <limits> 32 33 | //interface with matlab 34 | #include "mex.h" 35 36 | #ifndef OCTAVE 37 | #include "matrix.h" 38 | #endif 39 40 | #include "matlab_matrix.h" 41 42 43 44 | struct group_index{ 45 | group_index(); 46 | group_index(const mxArray *matrix); //assign group index from matlab 47 48 | group_index & operator = (const mxArray * group_vec); //assign group index from matlab 49 50 | full index(mwIndex group); //index of all nodes in group 51 52 | void move(mwIndex node, mwIndex group); //move node to group 53 54 | void export_matlab(mxArray * & out); //output group vector to matlab 55 56 | mwSize n_nodes; 57 | mwSize n_groups; 58 59 | std::vector<std::list<mwIndex> > groups; //the index of each node in a group is stored in a linked list 60 | std::vector<std::list<mwIndex>::iterator> nodes_iterator; //stores the position of the node in the list for the group it belongs to 61 | std::vector<mwIndex> nodes; //stores the group a node belongs to 62 63 64 | }; 65 66 | #endif 67 68 69 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_gpfa/grad_betgam.m: -------------------------------------------------------------------------------- 1 | function [f, df] = grad_betgam(p, precomp, const) 2 | % 3 | % [f, df] = grad_betgam(p, precomp, const) 4 | % 5 | % Gradient computation for GP timescale optimization. 6 | % This function is called by minimize.m. 7 | % 8 | % INPUTS: 9 | % 10 | % p - variable with respect to which optimization is performed, 11 | % where p = log(1 / timescale ^2) 12 | % precomp - structure containing precomputations 13 | % 14 | % OUTPUTS: 15 | % 16 | % f - value of objective function E[log P({x},{y})] at p 17 | % df - gradient at p 18 | % 19 | % @ 2009 Byron Yu byronyu@stanford.edu 20 | % John Cunningham jcunnin@stanford.edu 21 22 | Tall = precomp.Tall; 23 | Tmax = max(Tall); 24 25 | temp = (1-const.eps) * exp(-exp(p(1)) / 2 * precomp.difSq); % Tmax x Tmax 26 | Kmax = temp + const.eps * eye(Tmax); 27 | dKdgamma_max = -0.5 * temp .* precomp.difSq; 28 29 | dEdgamma = 0; 30 | f = 0; 31 | for j = 1:length(precomp.Tu) 32 | T = precomp.Tu(j).T; 33 | Thalf = ceil(T/2); 34 35 | [Kinv, logdet_K] = invToeplitz(Kmax(1:T, 1:T)); 36 37 | KinvM = Kinv(1:Thalf,:) * dKdgamma_max(1:T,1:T); % Thalf x T 38 | KinvMKinv = (KinvM * Kinv)'; % Thalf x T 39 40 | dg_KinvM = diag(KinvM); 41 | tr_KinvM = 2 * sum(dg_KinvM) - rem(T, 2) * dg_KinvM(end); 42 43 | mkr = ceil(0.5 * T^2); 44 45 | dEdgamma = dEdgamma - 0.5 * precomp.Tu(j).numTrials * tr_KinvM... 46 | + 0.5 * precomp.Tu(j).PautoSUM(1:mkr) * KinvMKinv(1:mkr)'... 47 | + 0.5 * precomp.Tu(j).PautoSUM(end:-1:mkr+1) * KinvMKinv(1:(T^2-mkr))'; 48 49 | f = f - 0.5 * precomp.Tu(j).numTrials * logdet_K...
50 | - 0.5 * precomp.Tu(j).PautoSUM(:)' * Kinv(:); 51 | end 52 | 53 | f = -f; 54 | % exp(p) is needed because we're computing gradients with 55 | % respect to log(gamma), rather than gamma 56 | df = -dEdgamma * exp(p(1)); 57 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_gpfa/grad_betgam.m: -------------------------------------------------------------------------------- 1 | function [f, df] = grad_betgam(p, precomp, const) 2 | % 3 | % [f, df] = grad_betgam(p, precomp, const) 4 | % 5 | % Gradient computation for GP timescale optimization. 6 | % This function is called by minimize.m. 7 | % 8 | % INPUTS: 9 | % 10 | % p - variable with respect to which optimization is performed, 11 | % where p = log(1 / timescale ^2) 12 | % precomp - structure containing precomputations 13 | % 14 | % OUTPUTS: 15 | % 16 | % f - value of objective function E[log P({x},{y})] at p 17 | % df - gradient at p 18 | % 19 | % @ 2009 Byron Yu byronyu@stanford.edu 20 | % John Cunningham jcunnin@stanford.edu 21 | 22 | Tall = precomp.Tall; 23 | Tmax = max(Tall); 24 | 25 | temp = (1-const.eps) * exp(-exp(p(1)) / 2 * precomp.difSq); % Tmax x Tmax 26 | Kmax = temp + const.eps * eye(Tmax); 27 | dKdgamma_max = -0.5 * temp .* precomp.difSq; 28 | 29 | dEdgamma = 0; 30 | f = 0; 31 | for j = 1:length(precomp.Tu) 32 | T = precomp.Tu(j).T; 33 | Thalf = ceil(T/2); 34 | 35 | [Kinv, logdet_K] = invToeplitz(Kmax(1:T, 1:T)); 36 | 37 | KinvM = Kinv(1:Thalf,:) * dKdgamma_max(1:T,1:T); % Thalf x T 38 | KinvMKinv = (KinvM * Kinv)'; % Thalf x T 39 | 40 | dg_KinvM = diag(KinvM); 41 | tr_KinvM = 2 * sum(dg_KinvM) - rem(T, 2) * dg_KinvM(end); 42 | 43 | mkr = ceil(0.5 * T^2); 44 | 45 | dEdgamma = dEdgamma - 0.5 * precomp.Tu(j).numTrials * tr_KinvM... 46 | + 0.5 * precomp.Tu(j).PautoSUM(1:mkr) * KinvMKinv(1:mkr)'... 47 | + 0.5 * precomp.Tu(j).PautoSUM(end:-1:mkr+1) * KinvMKinv(1:(T^2-mkr))'; 48 | 49 | f = f - 0.5 * precomp.Tu(j).numTrials * logdet_K... 50 | - 0.5 * precomp.Tu(j).PautoSUM(:)' * Kinv(:); 51 | end 52 | 53 | f = -f; 54 | % exp(p) is needed because we're computing gradients with 55 | % respect to log(gamma), rather than gamma 56 | df = -dEdgamma * exp(p(1)); 57 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_gpfa/cosmoother_gpfa_viaOrth.m: -------------------------------------------------------------------------------- 1 | function seq = cosmoother_gpfa_viaOrth(seq, params, mList) 2 | % 3 | % seq = cosmoother_gpfa_viaOrth(seq, params, mList) 4 | % 5 | % Performs leave-neuron-out prediction for GPFA. 6 | % 7 | % INPUTS: 8 | % 9 | % seq - test data structure 10 | % params - GPFA model parameters fit to training data 11 | % mList - number of top orthonormal latent coordinates to use for 12 | % prediction (e.g., 1:5) 13 | % 14 | % OUTPUTS: 15 | % 16 | % seq - test data structure with new fields ycsOrthXX, where XX are 17 | % elements of mList. seq(n).ycsOrthXX has the same dimensions 18 | % as seq(n).y. 
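% Example (hypothetical): fill in predictions using the top 1..5 orthonormal
% latent dimensions, then compare, say, seq(n).ycsOrth03 against the
% held-out seq(n).y:
%   seqTest = cosmoother_gpfa_viaOrth(seqTest, params, 1:5);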
19 | % 20 | % @ 2009 Byron Yu byronyu@stanford.edu 21 | % John Cunningham jcunnin@stanford.edu 22 | 23 | [yDim, xDim] = size(params.C); 24 | 25 | for n = 1:length(seq) 26 | for m = mList 27 | fn = sprintf('ycsOrth%02d', m); 28 | seq(n).(fn) = nan(yDim, seq(n).T); 29 | end 30 | end 31 | 32 | for i = 1:yDim 33 | % Indices 1:yDim with i removed 34 | mi = [1:(i-1) (i+1):yDim]; 35 | 36 | for n = 1:length(seq) 37 | seqCs(n).T = seq(n).T; 38 | seqCs(n).y = seq(n).y(mi,:); 39 | end 40 | paramsCs = params; 41 | paramsCs.C = params.C(mi,:); 42 | paramsCs.d = params.d(mi); 43 | paramsCs.R = params.R(mi,mi); 44 | 45 | seqCs = exactInferenceWithLL(seqCs, paramsCs, 'getLL', false); 46 | 47 | % Note: it is critical to use params.C here and not paramsCs.C 48 | [Xorth, Corth] = orthogonalize([seqCs.xsm], params.C); 49 | seqCs = segmentByTrial(seqCs, Xorth, 'xorth'); 50 | 51 | for n = 1:length(seq) 52 | for m = mList 53 | fn = sprintf('ycsOrth%02d', m); 54 | seq(n).(fn)(i,:) = Corth(i,1:m) * seqCs(n).xorth(1:m,:) + params.d(i); 55 | end 56 | end 57 | fprintf('Cross-validation complete for %3d of %d neurons\r', i, yDim); 58 | end 59 | fprintf('\n'); 60 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_gpfa/cosmoother_gpfa_viaOrth.m: -------------------------------------------------------------------------------- 1 | function seq = cosmoother_gpfa_viaOrth(seq, params, mList) 2 | % 3 | % seq = cosmoother_gpfa_viaOrth(seq, params, mList) 4 | % 5 | % Performs leave-neuron-out prediction for GPFA. 6 | % 7 | % INPUTS: 8 | % 9 | % seq - test data structure 10 | % params - GPFA model parameters fit to training data 11 | % mList - number of top orthonormal latent coordinates to use for 12 | % prediction (e.g., 1:5) 13 | % 14 | % OUTPUTS: 15 | % 16 | % seq - test data structure with new fields ycsOrthXX, where XX are 17 | % elements of mList. seq(n).ycsOrthXX has the same dimensions 18 | % as seq(n).y. 19 | % 20 | % @ 2009 Byron Yu byronyu@stanford.edu 21 | % John Cunningham jcunnin@stanford.edu 22 | 23 | [yDim, xDim] = size(params.C); 24 | 25 | for n = 1:length(seq) 26 | for m = mList 27 | fn = sprintf('ycsOrth%02d', m); 28 | seq(n).(fn) = nan(yDim, seq(n).T); 29 | end 30 | end 31 | 32 | for i = 1:yDim 33 | % Indices 1:yDim with i removed 34 | mi = [1:(i-1) (i+1):yDim]; 35 | 36 | for n = 1:length(seq) 37 | seqCs(n).T = seq(n).T; 38 | seqCs(n).y = seq(n).y(mi,:); 39 | end 40 | paramsCs = params; 41 | paramsCs.C = params.C(mi,:); 42 | paramsCs.d = params.d(mi); 43 | paramsCs.R = params.R(mi,mi); 44 | 45 | seqCs = exactInferenceWithLL(seqCs, paramsCs, 'getLL', false); 46 | 47 | % Note: it is critical to use params.C here and not paramsCs.C 48 | [Xorth, Corth] = orthogonalize([seqCs.xsm], params.C); 49 | seqCs = segmentByTrial(seqCs, Xorth, 'xorth'); 50 | 51 | for n = 1:length(seq) 52 | for m = mList 53 | fn = sprintf('ycsOrth%02d', m); 54 | seq(n).(fn)(i,:) = Corth(i,1:m) * seqCs(n).xorth(1:m,:) + params.d(i); 55 | end 56 | end 57 | fprintf('Cross-validation complete for %3d of %d neurons\r', i, yDim); 58 | end 59 | fprintf('\n'); 60 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invPerSymm.m: -------------------------------------------------------------------------------- 1 | function [invM, logdet_M] = invPerSymm(M, blkSize, varargin) 2 | % 3 | % [invM, logdet_M] = invPerSymm(M, blkSize,...) 4 | % 5 | % Inverts a matrix that is block persymmetric. 
This function is 6 | % faster than calling inv(M) directly because it only computes the 7 | % top half of inv(M). The bottom half of inv(M) is made up of 8 | % elements from the top half of inv(M). 9 | % 10 | % WARNING: If the input matrix M is not block persymmetric, no 11 | % error message will be produced and the output of this function will 12 | % not be meaningful. 13 | % 14 | % INPUTS: 15 | % 16 | % M - the block persymmetric matrix to be inverted 17 | % ((blkSize*T) x (blkSize*T)). Each block is 18 | % blkSize x blkSize, arranged in a T x T grid. 19 | % blkSize - edge length of one block 20 | % 21 | % OUTPUTS: 22 | % 23 | % invM - inverse of M ((blkSize*T) x (blkSize*T)) 24 | % logdet_M - log determinant of M 25 | % 26 | % OPTIONAL ARGUMENTS: 27 | % 28 | % offDiagSparse - logical that specifies whether off-diagonal blocks are 29 | % sparse (default: false) 30 | % 31 | % @ 2009 Byron Yu byronyu@stanford.edu 32 | % John Cunningham jcunnin@stanford.edu 33 34 | offDiagSparse = false; % specify if A12 is sparse 35 | assignopts(who, varargin); 36 37 | T = size(M, 1) / blkSize; 38 | Thalf = ceil(T/2); 39 | mkr = blkSize * Thalf; 40 41 | invA11 = inv(M(1:mkr, 1:mkr)); 42 | invA11 = (invA11 + invA11') / 2; 43 44 | if offDiagSparse 45 | A12 = sparse(M(1:mkr, (mkr+1):end)); 46 | else 47 | A12 = M(1:mkr, (mkr+1):end); 48 | end 49 50 | term = invA11 * A12; 51 | F22 = M(mkr+1:end, mkr+1:end) - A12' * term; 52 53 | res12 = -term / F22; 54 | res11 = invA11 - res12 * term'; 55 | res11 = (res11 + res11') / 2; 56 57 | % Fill in bottom half of invM by picking elements from res11 and res12 58 | invM = fillPerSymm([res11 res12], blkSize, T); 59 60 | if nargout == 2 61 | logdet_M = -logdet(invA11) + logdet(F22); 62 | end 63 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invPerSymm.m: -------------------------------------------------------------------------------- 1 | function [invM, logdet_M] = invPerSymm(M, blkSize, varargin) 2 | % 3 | % [invM, logdet_M] = invPerSymm(M, blkSize,...) 4 | % 5 | % Inverts a matrix that is block persymmetric. This function is 6 | % faster than calling inv(M) directly because it only computes the 7 | % top half of inv(M). The bottom half of inv(M) is made up of 8 | % elements from the top half of inv(M). 9 | % 10 | % WARNING: If the input matrix M is not block persymmetric, no 11 | % error message will be produced and the output of this function will 12 | % not be meaningful. 13 | % 14 | % INPUTS: 15 | % 16 | % M - the block persymmetric matrix to be inverted 17 | % ((blkSize*T) x (blkSize*T)). Each block is 18 | % blkSize x blkSize, arranged in a T x T grid.
19 | % blkSize - edge length of one block 20 | % 21 | % OUTPUTS: 22 | % 23 | % invM - inverse of M ((blkSize*T) x (blkSize*T)) 24 | % logdet_M - log determinant of M 25 | % 26 | % OPTIONAL ARGUMENTS: 27 | % 28 | % offDiagSparse - logical that specifies whether off-diagonal blocks are 29 | % sparse (default: false) 30 | % 31 | % @ 2009 Byron Yu byronyu@stanford.edu 32 | % John Cunningham jcunnin@stanford.edu 33 34 | offDiagSparse = false; % specify if A12 is sparse 35 | assignopts(who, varargin); 36 37 | T = size(M, 1) / blkSize; 38 | Thalf = ceil(T/2); 39 | mkr = blkSize * Thalf; 40 41 | invA11 = inv(M(1:mkr, 1:mkr)); 42 | invA11 = (invA11 + invA11') / 2; 43 44 | if offDiagSparse 45 | A12 = sparse(M(1:mkr, (mkr+1):end)); 46 | else 47 | A12 = M(1:mkr, (mkr+1):end); 48 | end 49 50 | term = invA11 * A12; 51 | F22 = M(mkr+1:end, mkr+1:end) - A12' * term; 52 53 | res12 = -term / F22; 54 | res11 = invA11 - res12 * term'; 55 | res11 = (res11 + res11') / 2; 56 57 | % Fill in bottom half of invM by picking elements from res11 and res12 58 | invM = fillPerSymm([res11 res12], blkSize, T); 59 60 | if nargout == 2 61 | logdet_M = -logdet(invA11) + logdet(F22); 62 | end 63 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_gpfa/make_K_big.m: -------------------------------------------------------------------------------- 1 | function [K_big, K_big_inv, logdet_K_big] = make_K_big(params, T) 2 | % 3 | % [K_big, K_big_inv] = make_K_big(params, T) 4 | % 5 | % Constructs full GP covariance matrix across all state dimensions and timesteps. 6 | % 7 | % INPUTS: 8 | % 9 | % params - GPFA model parameters 10 | % T - number of timesteps 11 | % 12 | % OUTPUTS: 13 | % 14 | % K_big - GP covariance matrix with dimensions (xDim * T) x (xDim * T). 15 | % The (t1, t2) block is diagonal, has dimensions xDim x xDim, and 16 | % represents the covariance between the state vectors at 17 | % timesteps t1 and t2. K_big is sparse and striped. 18 | % K_big_inv - inverse of K_big 19 | % logdet_K_big - log determinant of K_big 20 | % 21 | % @ 2009 Byron Yu byronyu@stanford.edu 22 | % John Cunningham jcunnin@stanford.edu 23 24 | xDim = size(params.C, 2); 25 26 | idx = 0 : xDim : (xDim*(T-1)); 27 28 | K_big = zeros(xDim*T); 29 | K_big_inv = zeros(xDim*T); 30 | Tdif = repmat((1:T)', 1, T) - repmat(1:T, T, 1); 31 | logdet_K_big = 0; 32 33 | for i = 1:xDim 34 | switch params.covType 35 | case 'rbf' 36 | K = (1 - params.eps(i)) * ... 37 | exp(-params.gamma(i) / 2 * Tdif.^2) +... 38 | params.eps(i) * eye(T); 39 | case 'tri' 40 | K = max(1 - params.eps(i) - params.a(i) * abs(Tdif), 0) +... 41 | params.eps(i) * eye(T); 42 | case 'logexp' 43 | z = params.gamma *...
44 | (1 - params.eps(i) - params.a(i) * abs(Tdif)); 45 | outUL = (z>36); 46 | outLL = (z<-19); 47 | inLim = ~outUL & ~outLL; 48 | 49 | hz = nan(size(z)); 50 | hz(outUL) = z(outUL); 51 | hz(outLL) = exp(z(outLL)); 52 | hz(inLim) = log(1 + exp(z(inLim))); 53 | 54 | K = hz / params.gamma + params.eps(i) * eye(T); 55 | end 56 | K_big(idx+i, idx+i) = K; 57 | [K_big_inv(idx+i, idx+i), logdet_K] = invToeplitz(K); 58 | 59 | logdet_K_big = logdet_K_big + logdet_K; 60 | end 61 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_gpfa/make_K_big.m: -------------------------------------------------------------------------------- 1 | function [K_big, K_big_inv, logdet_K_big] = make_K_big(params, T) 2 | % 3 | % [K_big, K_big_inv] = make_K_big(params, T) 4 | % 5 | % Constructs full GP covariance matrix across all state dimensions and timesteps. 6 | % 7 | % INPUTS: 8 | % 9 | % params - GPFA model parameters 10 | % T - number of timesteps 11 | % 12 | % OUTPUTS: 13 | % 14 | % K_big - GP covariance matrix with dimensions (xDim * T) x (xDim * T). 15 | % The (t1, t2) block is diagonal, has dimensions xDim x xDim, and 16 | % represents the covariance between the state vectors at 17 | % timesteps t1 and t2. K_big is sparse and striped. 18 | % K_big_inv - inverse of K_big 19 | % logdet_K_big - log determinant of K_big 20 | % 21 | % @ 2009 Byron Yu byronyu@stanford.edu 22 | % John Cunningham jcunnin@stanford.edu 23 | 24 | xDim = size(params.C, 2); 25 | 26 | idx = 0 : xDim : (xDim*(T-1)); 27 | 28 | K_big = zeros(xDim*T); 29 | K_big_inv = zeros(xDim*T); 30 | Tdif = repmat((1:T)', 1, T) - repmat(1:T, T, 1); 31 | logdet_K_big = 0; 32 | 33 | for i = 1:xDim 34 | switch params.covType 35 | case 'rbf' 36 | K = (1 - params.eps(i)) * ... 37 | exp(-params.gamma(i) / 2 * Tdif.^2) +... 38 | params.eps(i) * eye(T); 39 | case 'tri' 40 | K = max(1 - params.eps(i) - params.a(i) * abs(Tdif), 0) +... 41 | params.eps(i) * eye(T); 42 | case 'logexp' 43 | z = params.gamma *... 44 | (1 - params.eps(i) - params.a(i) * abs(Tdif)); 45 | outUL = (z>36); 46 | outLL = (z<-19); 47 | inLim = ~outUL & ~outLL; 48 | 49 | hz = nan(size(z)); 50 | hz(outUL) = z(outUL); 51 | hz(outLL) = exp(z(outLL)); 52 | hz(inLim) = log(1 + exp(z(inLim))); 53 | 54 | K = hz / params.gamma + params.eps(i) * eye(T); 55 | end 56 | K_big(idx+i, idx+i) = K; 57 | [K_big_inv(idx+i, idx+i), logdet_K] = invToeplitz(K); 58 | 59 | logdet_K_big = logdet_K_big + logdet_K; 60 | end 61 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/plottile.m: -------------------------------------------------------------------------------- 1 | function hhout = plottile(varargin) 2 | % plottile - plot N-D array in multiple subplots: plottile([x],y,...) 3 | % PLOTTILE(Y) plots each of the columns of the N-D array Y (N <=3) in 4 | % an individual subplot on the current figure. If Y is LxMxN, the 5 | % function uses a grid of MxN subplots, each containing a line 6 | % with L points. 7 | % PLOTTILE(X,Y) uses the vector X to set the X-axis. 8 | % PLOTTILE(X,Y,'prop','value',...) passes the property-value pairs 9 | % to the underlying plots. This can be used to set the line style 10 | % or color. 11 | % H = PLOTTILE(...) returns a vector of handles to the line 12 | % objects. 13 | 14 | % maneesh. 
15 | % pre-20030205: created 16 | 17 | 18 | % OPTIONS: 19 | slice = [1]; 20 | xy = 'False'; 21 | Plot = @plot; 22 | axesOpts = {}; 23 | % OPTIONS DONE 24 | 25 | if nargin > 1 && isvector(varargin{1}) && ~ischar(varargin{2}) 26 | xx = varargin{1}; 27 | yy = varargin{2}; 28 | varargin(1:2) = []; 29 | else 30 | yy = varargin{1}; 31 | xx = 1:size(yy,1); 32 | varargin(1) = []; 33 | end 34 | 35 | plargs = optlistassign(who, varargin); 36 | 37 | if ndims(yy)-length(slice) > 2 38 | error ('cannot create tile arrays with more than two dimensions'); 39 | end 40 | 41 | 42 | 43 | % bring the dimensions for each slice to the front 44 | yy = permute(yy, [slice, setdiff(1:ndims(yy), slice)]); 45 | ysiz = size(yy); 46 | ssiz = ysiz(1:length(slice)); 47 | ssiz(end+1:2) = 1; 48 | 49 | dims = ysiz; 50 | dims(end+1:3) = 1; % fill out the dim vector 51 | 52 | if length(slice) > 1 53 | % reshape 54 | yy = reshape(yy, [prod(ssiz), ysiz(length(slice)+1:end)]); 55 | dims = size(yy); 56 | dims(length(dims)+1:3) = 1; % fill out the dim vector 57 | end 58 | 59 | %% create X axis 60 | xx = 1:ssiz(1); 61 | 62 | 63 | hh = []; 64 | 65 | Nrows = dims(end-1); 66 | Ncols = dims(end); 67 | 68 | for ii = 1:Nrows 69 | for jj = 1:Ncols 70 | nestplot(Nrows, Ncols, {ii,jj}); 71 | if ~isempty(axesOpts) 72 | set(gca, axesOpts{:}); 73 | end 74 | hh = [hh, Plot(xx, reshape(yy(:,ii,jj), ssiz), plargs{:})]; 75 | end 76 | end 77 | 78 | if (nargout) 79 | hhout = hh; 80 | end 81 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/plottile.m: -------------------------------------------------------------------------------- 1 | function hhout = plottile(varargin) 2 | % plottile - plot N-D array in multiple subplots: plottile([x],y,...) 3 | % PLOTTILE(Y) plots each of the columns of the N-D array Y (N <=3) in 4 | % an individual subplot on the current figure. If Y is LxMxN, the 5 | % function uses a grid of MxN subplots, each containing a line 6 | % with L points. 7 | % PLOTTILE(X,Y) uses the vector X to set the X-axis. 8 | % PLOTTILE(X,Y,'prop','value',...) passes the property-value pairs 9 | % to the underlying plots. This can be used to set the line style 10 | % or color. 11 | % H = PLOTTILE(...) returns a vector of handles to the line 12 | % objects. 13 | 14 | % maneesh. 
15 | % pre-20030205: created 16 | 17 | 18 | % OPTIONS: 19 | slice = [1]; 20 | xy = 'False'; 21 | Plot = @plot; 22 | axesOpts = {}; 23 | % OPTIONS DONE 24 | 25 | if nargin > 1 && isvector(varargin{1}) && ~ischar(varargin{2}) 26 | xx = varargin{1}; 27 | yy = varargin{2}; 28 | varargin(1:2) = []; 29 | else 30 | yy = varargin{1}; 31 | xx = 1:size(yy,1); 32 | varargin(1) = []; 33 | end 34 | 35 | plargs = optlistassign(who, varargin); 36 | 37 | if ndims(yy)-length(slice) > 2 38 | error ('cannot create tile arrays with more than two dimensions'); 39 | end 40 | 41 | 42 | 43 | % bring the dimensions for each slice to the front 44 | yy = permute(yy, [slice, setdiff(1:ndims(yy), slice)]); 45 | ysiz = size(yy); 46 | ssiz = ysiz(1:length(slice)); 47 | ssiz(end+1:2) = 1; 48 | 49 | dims = ysiz; 50 | dims(end+1:3) = 1; % fill out the dim vector 51 | 52 | if length(slice) > 1 53 | % reshape 54 | yy = reshape(yy, [prod(ssiz), ysiz(length(slice)+1:end)]); 55 | dims = size(yy); 56 | dims(length(dims)+1:3) = 1; % fill out the dim vector 57 | end 58 | 59 | %% create X axis 60 | xx = 1:ssiz(1); 61 | 62 | 63 | hh = []; 64 | 65 | Nrows = dims(end-1); 66 | Ncols = dims(end); 67 | 68 | for ii = 1:Nrows 69 | for jj = 1:Ncols 70 | nestplot(Nrows, Ncols, {ii,jj}); 71 | if ~isempty(axesOpts) 72 | set(gca, axesOpts{:}); 73 | end 74 | hh = [hh, Plot(xx, reshape(yy(:,ii,jj), ssiz), plargs{:})]; 75 | end 76 | end 77 | 78 | if (nargout) 79 | hhout = hh; 80 | end 81 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/plotting/plot3D.m: -------------------------------------------------------------------------------- 1 | function plot3D(seq, xspec, varargin) 2 | % 3 | % plot3D(seq, xspec, ...) 4 | % 5 | % Plot neural trajectories in a three-dimensional space. 
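% Example (hypothetical): plot the orthonormalized trajectories, highlighting
% trials 1 and 5 in red:
%   plot3D(seq, 'xorth', 'redTrials', [1 5]);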
6 | % 7 | % INPUTS: 8 | % 9 | % seq - data structure containing extracted trajectories 10 | % xspec - field name of trajectories in 'seq' to be plotted 11 | % (e.g., 'xorth' or 'xsm') 12 | % 13 | % OPTIONAL ARGUMENTS: 14 | % 15 | % dimsToPlot - selects three dimensions in seq.(xspec) to plot 16 | % (default: 1:3) 17 | % nPlotMax - maximum number of trials to plot (default: 20) 18 | % redTrials - vector of trialIds whose trajectories are plotted in red 19 | % (default: []) 20 | % 21 | % @ 2009 Byron Yu -- byronyu@stanford.edu 22 | 23 | dimsToPlot = 1:3; 24 | nPlotMax = 20; 25 | redTrials = []; 26 | assignopts(who, varargin); 27 | 28 | if size(seq(1).(xspec), 1) < 3 29 | fprintf('ERROR: Trajectories have less than 3 dimensions.\n'); 30 | return 31 | end 32 | 33 | f = figure; 34 | pos = get(gcf, 'position'); 35 | set(f, 'position', [pos(1) pos(2) 1.3*pos(3) 1.3*pos(4)]); 36 | 37 | for n = 1:min(length(seq), nPlotMax) 38 | dat = seq(n).(xspec)(dimsToPlot,:); 39 | T = seq(n).T; 40 | 41 | if ismember(seq(n).trialId, redTrials) 42 | col = [1 0 0]; % red 43 | lw = 3; 44 | else 45 | col = 0.2 * [1 1 1]; % gray 46 | lw = 0.5; 47 | end 48 | plot3(dat(1,:), dat(2,:), dat(3,:), '.-', 'linewidth', lw, 'color', col); 49 | hold on; 50 | end 51 | 52 | axis equal; 53 | if isequal(xspec, 'xorth') 54 | str1 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(1)); 55 | str2 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(2)); 56 | str3 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(3)); 57 | else 58 | str1 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(1)); 59 | str2 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(2)); 60 | str3 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(3)); 61 | end 62 | xlabel(str1, 'interpreter', 'latex', 'fontsize', 24); 63 | ylabel(str2, 'interpreter', 'latex', 'fontsize', 24); 64 | zlabel(str3, 'interpreter', 'latex', 'fontsize', 24); 65 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/startup.m: -------------------------------------------------------------------------------- 1 | addpath core_gpfa 2 | addpath core_twostage 3 | addpath plotting 4 | addpath util 5 | addpath util/precomp 6 | 7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 8 | % The following code checks for the relevant MEX files (such as .mexa64 9 | % or .mexglx, depending on the machine architecture), and it creates the 10 | % mex file if it can not find the right one. 11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 12 | % Toeplitz Inversion 13 | path(path,'util/invToeplitz'); 14 | % Create the mex file if necessary. 15 | if ~exist(sprintf('util/invToeplitz/invToeplitzFastZohar.%s',mexext),'file') 16 | try 17 | eval(sprintf('mex -outdir util/invToeplitz util/invToeplitz/invToeplitzFastZohar.c')); 18 | fprintf('NOTE: the relevant invToeplitz mex files were not found. They have been created.\n'); 19 | catch 20 | fprintf('NOTE: the relevant invToeplitz mex files were not found, and your machine failed to create them.\n'); 21 | fprintf(' This usually means that you do not have the proper C/MEX compiler setup.\n'); 22 | fprintf(' The code will still run identically, albeit slower (perhaps considerably).\n'); 23 | fprintf(' Please read the README file, section Notes on the Use of C/MEX.\n'); 24 | end 25 | end 26 | 27 | % Posterior Covariance Precomputation 28 | path(path,'util/precomp'); 29 | % Create the mex file if necessary. 
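% (mexext returns this platform's MEX suffix, e.g. 'mexa64' on 64-bit Linux,
% so the exist check below looks for the matching compiled binary.)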
30 | if ~exist(sprintf('util/precomp/makePautoSumFast.%s',mexext),'file') 31 | try 32 | eval(sprintf('mex -outdir util/precomp util/precomp/makePautoSumFast.c')); 33 | fprintf('NOTE: the relevant precomp mex files were not found. They have been created.\n'); 34 | catch 35 | fprintf('NOTE: the relevant precomp mex files were not found, and your machine failed to create them.\n'); 36 | fprintf(' This usually means that you do not have the proper C/MEX compiler setup.\n'); 37 | fprintf(' The code will still run identically, albeit slower (perhaps considerably).\n'); 38 | fprintf(' Please read the README file, section Notes on the Use of C/MEX.\n'); 39 | end 40 | end 41 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 42 | 43 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/plotting/plot3D.m: -------------------------------------------------------------------------------- 1 | function plot3D(seq, xspec, varargin) 2 | % 3 | % plot3D(seq, xspec, ...) 4 | % 5 | % Plot neural trajectories in a three-dimensional space. 6 | % 7 | % INPUTS: 8 | % 9 | % seq - data structure containing extracted trajectories 10 | % xspec - field name of trajectories in 'seq' to be plotted 11 | % (e.g., 'xorth' or 'xsm') 12 | % 13 | % OPTIONAL ARGUMENTS: 14 | % 15 | % dimsToPlot - selects three dimensions in seq.(xspec) to plot 16 | % (default: 1:3) 17 | % nPlotMax - maximum number of trials to plot (default: 20) 18 | % redTrials - vector of trialIds whose trajectories are plotted in red 19 | % (default: []) 20 | % 21 | % @ 2009 Byron Yu -- byronyu@stanford.edu 22 | 23 | dimsToPlot = 1:3; 24 | nPlotMax = 20; 25 | redTrials = []; 26 | assignopts(who, varargin); 27 | 28 | if size(seq(1).(xspec), 1) < 3 29 | fprintf('ERROR: Trajectories have less than 3 dimensions.\n'); 30 | return 31 | end 32 | 33 | f = figure; 34 | pos = get(gcf, 'position'); 35 | set(f, 'position', [pos(1) pos(2) 1.3*pos(3) 1.3*pos(4)]); 36 | 37 | for n = 1:min(length(seq), nPlotMax) 38 | dat = seq(n).(xspec)(dimsToPlot,:); 39 | T = seq(n).T; 40 | 41 | if ismember(seq(n).trialId, redTrials) 42 | col = [1 0 0]; % red 43 | lw = 3; 44 | else 45 | col = 0.2 * [1 1 1]; % gray 46 | lw = 0.5; 47 | end 48 | plot3(dat(1,:), dat(2,:), dat(3,:), '.-', 'linewidth', lw, 'color', col); 49 | hold on; 50 | end 51 | 52 | axis equal; 53 | if isequal(xspec, 'xorth') 54 | str1 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(1)); 55 | str2 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(2)); 56 | str3 = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$', dimsToPlot(3)); 57 | else 58 | str1 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(1)); 59 | str2 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(2)); 60 | str3 = sprintf('$${\\mathbf x}_{%d,:}$$', dimsToPlot(3)); 61 | end 62 | xlabel(str1, 'interpreter', 'latex', 'fontsize', 24); 63 | ylabel(str2, 'interpreter', 'latex', 'fontsize', 24); 64 | zlabel(str3, 'interpreter', 'latex', 'fontsize', 24); 65 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/startup.m: -------------------------------------------------------------------------------- 1 | addpath core_gpfa 2 | addpath core_twostage 3 | addpath plotting 4 | addpath util 5 | addpath util/precomp 6 | 7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 8 | % The following code checks for the relevant MEX files (such as .mexa64 9 | % or .mexglx, depending on the machine 
architecture), and it creates the 10 | % mex file if it can not find the right one. 11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 12 | % Toeplitz Inversion 13 | path(path,'util/invToeplitz'); 14 | % Create the mex file if necessary. 15 | if ~exist(sprintf('util/invToeplitz/invToeplitzFastZohar.%s',mexext),'file') 16 | try 17 | eval(sprintf('mex -outdir util/invToeplitz util/invToeplitz/invToeplitzFastZohar.c')); 18 | fprintf('NOTE: the relevant invToeplitz mex files were not found. They have been created.\n'); 19 | catch 20 | fprintf('NOTE: the relevant invToeplitz mex files were not found, and your machine failed to create them.\n'); 21 | fprintf(' This usually means that you do not have the proper C/MEX compiler setup.\n'); 22 | fprintf(' The code will still run identically, albeit slower (perhaps considerably).\n'); 23 | fprintf(' Please read the README file, section Notes on the Use of C/MEX.\n'); 24 | end 25 | end 26 27 | % Posterior Covariance Precomputation 28 | path(path,'util/precomp'); 29 | % Create the mex file if necessary. 30 | if ~exist(sprintf('util/precomp/makePautoSumFast.%s',mexext),'file') 31 | try 32 | eval(sprintf('mex -outdir util/precomp util/precomp/makePautoSumFast.c')); 33 | fprintf('NOTE: the relevant precomp mex files were not found. They have been created.\n'); 34 | catch 35 | fprintf('NOTE: the relevant precomp mex files were not found, and your machine failed to create them.\n'); 36 | fprintf(' This usually means that you do not have the proper C/MEX compiler setup.\n'); 37 | fprintf(' The code will still run identically, albeit slower (perhaps considerably).\n'); 38 | fprintf(' Please read the README file, section Notes on the Use of C/MEX.\n'); 39 | end 40 | end 41 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 42 43 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/matlab_matrix/matlab_matrix.h: -------------------------------------------------------------------------------- 1 | // 2 | // matlab_matrix.h 3 | // matlab_matrix 4 | // 5 | // Created by Lucas Jeub on 24/10/2012 6 | // 7 | // Implements thin wrapper classes for full and sparse matlab matrices 8 | // 9 | // 10 | // Last modified by Lucas Jeub on 25/07/2014 11 12 13 14 15 16 17 | #ifndef MATLAB_MATRIX_H 18 | #define MATLAB_MATRIX_H 19 20 | #define inf std::numeric_limits<double>::infinity(); 21 22 23 | #include <limits> 24 25 | #include "mex.h" 26 27 | #ifndef OCTAVE 28 | #include "matrix.h" 29 | #endif 30 31 | struct full; 32 33 | struct sparse{ 34 | sparse(); 35 | sparse(mwSize m, mwSize n, mwSize nmax); 36 | sparse(const sparse &matrix); 37 | sparse(const mxArray *matrix); 38 39 | ~sparse(); 40 41 42 43 | sparse & operator = (const sparse & matrix); 44 45 | sparse & operator = (const full & matrix); 46 47 | sparse & operator = (const mxArray *matrix); 48 49 50 | /*operations*/ 51 | /*pointwise division*/ 52 53 | sparse operator / (const sparse & B); 54 | sparse operator / (const full & B); 55 56 | mwSize nzero() const { return col[n];} 57 58 | double get(mwIndex i, mwIndex j); 59 60 | void export_matlab(mxArray * & out); 61 62 | mwSize m; 63 | mwSize n; 64 | mwSize nmax; 65 | mwIndex *row; 66 | mwIndex *col; 67 | double *val; 68 69 | private: 70 71 | bool export_flag; 72 | }; 73 74 75 | struct full{ 76 | full(); 77 | full(mwSize m, mwSize n); 78 | full(const full &matrix); 79 | full(const mxArray * matrix); 80 |
81 | ~full(); 82 | 83 | void export_matlab(mxArray * & out); 84 | 85 | full & operator = (const full & matrix); 86 | 87 | full & operator = (const sparse & matrix); 88 | 89 | full & operator = (const mxArray * matrix); 90 | 91 | double & get(mwIndex i, mwIndex j); 92 | double get(mwIndex i,mwIndex j) const; 93 | double & get(mwIndex i); 94 | double get(mwIndex i) const; 95 | 96 | 97 | 98 | full operator / (const sparse &B); 99 | full operator / (const full &B); 100 | 101 | mwSize m; 102 | mwSize n; 103 | 104 | double *val; 105 | 106 | private: 107 | 108 | bool export_flag; 109 | }; 110 | 111 | 112 | #endif 113 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/plotting/plotEachDimVsTime.m: -------------------------------------------------------------------------------- 1 | function plotEachDimVsTime(seq, xspec, binWidth, varargin) 2 | % 3 | % plotEachDimVsTime(seq, xspec, binWidth, ...) 4 | % 5 | % Plot each state dimension versus time in a separate panel. 6 | % 7 | % INPUTS: 8 | % 9 | % seq - data structure containing extracted trajectories 10 | % xspec - field name of trajectories in 'seq' to be plotted 11 | % (e.g., 'xorth' or 'xsm') 12 | % binWidth - spike bin width used when fitting model 13 | % 14 | % OPTIONAL ARGUMENTS: 15 | % 16 | % nPlotMax - maximum number of trials to plot (default: 20) 17 | % redTrials - vector of trialIds whose trajectories are plotted in red 18 | % (default: []) 19 | % nCols - number of subplot columns (default: 4) 20 | % 21 | % @ 2009 Byron Yu -- byronyu@stanford.edu 22 | 23 | nPlotMax = 20; 24 | redTrials = []; 25 | nCols = 4; 26 | assignopts(who, varargin); 27 | 28 | f = figure; 29 | pos = get(gcf, 'position'); 30 | set(f, 'position', [pos(1) pos(2) 2*pos(3) pos(4)]); 31 | 32 | Xall = [seq.(xspec)]; 33 | xMax = ceil(10 * max(abs(Xall(:)))) / 10; % round max value to next highest 1e-1 34 | 35 | Tmax = max([seq.T]); 36 | xtkStep = ceil(Tmax/25)*5; 37 | xtk = 1:xtkStep:Tmax; 38 | xtkl = 0:(xtkStep*binWidth):(Tmax-1)*binWidth; 39 | ytk = [-xMax 0 xMax]; 40 | 41 | nRows = ceil(size(Xall, 1) / nCols); 42 | 43 | for n = 1:min(length(seq), nPlotMax) 44 | dat = seq(n).(xspec); 45 | T = seq(n).T; 46 | 47 | for k = 1:size(dat,1) 48 | subplot(nRows, nCols, k); 49 | hold on; 50 | 51 | if ismember(seq(n).trialId, redTrials) 52 | col = [1 0 0]; % red 53 | lw = 3; 54 | else 55 | col = 0.2 * [1 1 1]; % gray 56 | lw = 0.05; 57 | end 58 | plot(1:T, dat(k,:), 'linewidth', lw, 'color', col); 59 | end 60 | end 61 | 62 | for k = 1:size(dat,1) 63 | h = subplot(nRows, nCols, k); 64 | axis([1 Tmax 1.1*min(ytk) 1.1*max(ytk)]); 65 | 66 | if isequal(xspec, 'xorth') 67 | str = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$',k); 68 | else 69 | str = sprintf('$${\\mathbf x}_{%d,:}$$',k); 70 | end 71 | title(str, 'interpreter', 'latex', 'fontsize', 16); 72 | 73 | set(h, 'xtick', xtk, 'xticklabel', xtkl); 74 | set(h, 'ytick', ytk, 'yticklabel', ytk); 75 | xlabel('Time (ms)'); 76 | end 77 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/plotting/plotEachDimVsTime.m: -------------------------------------------------------------------------------- 1 | function plotEachDimVsTime(seq, xspec, binWidth, varargin) 2 | % 3 | % plotEachDimVsTime(seq, xspec, binWidth, ...) 4 | % 5 | % Plot each state dimension versus time in a separate panel. 
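% Example (hypothetical): binWidth should match the bin width passed to
% getSeq when the model was fit, e.g.
%   plotEachDimVsTime(seq, 'xorth', 20, 'nCols', 4);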
6 | % 7 | % INPUTS: 8 | % 9 | % seq - data structure containing extracted trajectories 10 | % xspec - field name of trajectories in 'seq' to be plotted 11 | % (e.g., 'xorth' or 'xsm') 12 | % binWidth - spike bin width used when fitting model 13 | % 14 | % OPTIONAL ARGUMENTS: 15 | % 16 | % nPlotMax - maximum number of trials to plot (default: 20) 17 | % redTrials - vector of trialIds whose trajectories are plotted in red 18 | % (default: []) 19 | % nCols - number of subplot columns (default: 4) 20 | % 21 | % @ 2009 Byron Yu -- byronyu@stanford.edu 22 | 23 | nPlotMax = 20; 24 | redTrials = []; 25 | nCols = 4; 26 | assignopts(who, varargin); 27 | 28 | f = figure; 29 | pos = get(gcf, 'position'); 30 | set(f, 'position', [pos(1) pos(2) 2*pos(3) pos(4)]); 31 | 32 | Xall = [seq.(xspec)]; 33 | xMax = ceil(10 * max(abs(Xall(:)))) / 10; % round max value to next highest 1e-1 34 | 35 | Tmax = max([seq.T]); 36 | xtkStep = ceil(Tmax/25)*5; 37 | xtk = 1:xtkStep:Tmax; 38 | xtkl = 0:(xtkStep*binWidth):(Tmax-1)*binWidth; 39 | ytk = [-xMax 0 xMax]; 40 | 41 | nRows = ceil(size(Xall, 1) / nCols); 42 | 43 | for n = 1:min(length(seq), nPlotMax) 44 | dat = seq(n).(xspec); 45 | T = seq(n).T; 46 | 47 | for k = 1:size(dat,1) 48 | subplot(nRows, nCols, k); 49 | hold on; 50 | 51 | if ismember(seq(n).trialId, redTrials) 52 | col = [1 0 0]; % red 53 | lw = 3; 54 | else 55 | col = 0.2 * [1 1 1]; % gray 56 | lw = 0.05; 57 | end 58 | plot(1:T, dat(k,:), 'linewidth', lw, 'color', col); 59 | end 60 | end 61 | 62 | for k = 1:size(dat,1) 63 | h = subplot(nRows, nCols, k); 64 | axis([1 Tmax 1.1*min(ytk) 1.1*max(ytk)]); 65 | 66 | if isequal(xspec, 'xorth') 67 | str = sprintf('$$\\tilde{\\mathbf x}_{%d,:}$$',k); 68 | else 69 | str = sprintf('$${\\mathbf x}_{%d,:}$$',k); 70 | end 71 | title(str, 'interpreter', 'latex', 'fontsize', 16); 72 | 73 | set(h, 'xtick', xtk, 'xticklabel', xtkl); 74 | set(h, 'ytick', ytk, 'yticklabel', ytk); 75 | xlabel('Time (ms)'); 76 | end 77 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/fa_em.m: -------------------------------------------------------------------------------- 1 | function [LL, UU, like, YY] = fa_em(XX, KK, varargin) 2 | % fa_em - ML factor analysis using EM: [L, P, like, Y] = fa_em(X, K) 3 | % 4 | % [LL, UU, like, Y] = FA_EM(X, K, ...) finds the varimax maximum 5 | % likelihood Factor Analysis fit to the data in X [nObservables x 6 | % nObservations] using the EM algorithm with K latents. It returns 7 | % the estimated loadings (LL), unique variances (UU), 8 | % log-likelihood after each iteration (like), and latent mean 9 | % estimates Y [K x nObservations]. 10 | % 11 | % 'nIter' - Maximum number of iterations of the EM algorithm. 12 | % 'tol' - Convergence tolerance: when relative change in 13 | % likelihood per step drops below 14 | % this threshold, iteration is stopped. 
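% Example (hypothetical X of size nObservables x nObservations):
%   [L, U, like, Y] = fa_em(X, 3, 'nIter', 500, 'tol', 1e-6);
%   plot(like)   % EM log-likelihood should increase monotonically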
15 16 | nIter = 1000; 17 | tol = 1e-7; 18 | init = []; 19 20 | optlistassign(who, varargin); 21 22 23 24 | % discover dimensions 25 | [DD,NN] = size(XX); 26 27 | % subtract mean 28 | XX=bsxfun(@minus, XX, mean(XX, 2)); 29 30 | % precompute variances 31 | XX2=XX*XX'; 32 | diagXX2=diag(XX2); 33 34 | if (isempty(init)) 35 | cX = cov(XX'); 36 | scale = det(cX)^(1/DD); 37 | if scale < eps, scale = 1; end 38 | LL = randn(DD,KK)*sqrt(scale/KK); 39 | UU = diag(cX); 40 | else 41 | LL = init.loadings; 42 | UU = init.uniquenesses; 43 | end 44 45 | % latent prior 46 | II = eye(KK); 47 48 | if nargout > 2 | tol > 0 49 | like = zeros(1, nIter); 50 | end 51 52 53 | % run EM 54 | for iIter = 1:nIter 55 56 | UUinv = diag(1./UU); 57 | UUinvLL = bsxfun(@rdivide,LL, UU); 58 59 | YYcov = inv(LL'*UUinvLL + II); 60 | YY = YYcov*UUinvLL'*XX; 61 | YY2 = NN*YYcov + YY*YY'; 62 63 | if nargout > 2 | tol > 0 64 | XXprec = UUinv - UUinvLL*YYcov*UUinvLL'; 65 | like(iIter) = 0.5*NN*(logdet(XXprec)) - 0.5*sum(sum(XXprec.*XX2)); % up to the constant -NN*DD/2*log(2*pi) 66 | end 67 68 | LL = XX*YY'/YY2; 69 | UU = (diagXX2 - sum((LL*YY).*XX, 2))/NN; 70 71 | if tol > 0 & iIter > 1 72 | if abs(diff(like(iIter-1:iIter))) < tol*diff(like([1,iIter])) 73 | like = like(1:iIter); 74 | break; 75 | end 76 | end 77 | end 78 79 80 | %% rotate latents to variance-ranked projection 81 | [LL,vars] = eigs(LL*LL', KK); 82 | LL = LL*sqrt(vars); 83 84 | %% redo inference to be consistent with rotated latents 85 | UUinv = diag(1./UU); 86 | UUinvLL = bsxfun(@rdivide,LL, UU); 87 | YYcov = inv(LL'*UUinvLL + II); 88 | YY = YYcov*UUinvLL'*XX; 89 90 91 92 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/fa_em.m: -------------------------------------------------------------------------------- 1 | function [LL, UU, like, YY] = fa_em(XX, KK, varargin) 2 | % fa_em - ML factor analysis using EM: [L, P, like, Y] = fa_em(X, K) 3 | % 4 | % [LL, UU, like, Y] = FA_EM(X, K, ...) finds the varimax maximum 5 | % likelihood Factor Analysis fit to the data in X [nObservables x 6 | % nObservations] using the EM algorithm with K latents. It returns 7 | % the estimated loadings (LL), unique variances (UU), 8 | % log-likelihood after each iteration (like), and latent mean 9 | % estimates Y [K x nObservations]. 10 | % 11 | % 'nIter' - Maximum number of iterations of the EM algorithm. 12 | % 'tol' - Convergence tolerance: when relative change in 13 | % likelihood per step drops below 14 | % this threshold, iteration is stopped.
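% Model: x = L*y + mean + noise, with y ~ N(0, I) and noise ~ N(0, diag(UU)).
% The E-step below computes the posterior q(y|x) = N(S*L'*inv(diag(UU))*(x - mean), S)
% with S = inv(I + L'*inv(diag(UU))*L), i.e. YYcov and YY in the code.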
15 16 | nIter = 1000; 17 | tol = 1e-7; 18 | init = []; 19 20 | optlistassign(who, varargin); 21 22 23 24 | % discover dimensions 25 | [DD,NN] = size(XX); 26 27 | % subtract mean 28 | XX=bsxfun(@minus, XX, mean(XX, 2)); 29 30 | % precompute variances 31 | XX2=XX*XX'; 32 | diagXX2=diag(XX2); 33 34 | if (isempty(init)) 35 | cX = cov(XX'); 36 | scale = det(cX)^(1/DD); 37 | if scale < eps, scale = 1; end 38 | LL = randn(DD,KK)*sqrt(scale/KK); 39 | UU = diag(cX); 40 | else 41 | LL = init.loadings; 42 | UU = init.uniquenesses; 43 | end 44 45 | % latent prior 46 | II = eye(KK); 47 48 | if nargout > 2 | tol > 0 49 | like = zeros(1, nIter); 50 | end 51 52 53 | % run EM 54 | for iIter = 1:nIter 55 56 | UUinv = diag(1./UU); 57 | UUinvLL = bsxfun(@rdivide,LL, UU); 58 59 | YYcov = inv(LL'*UUinvLL + II); 60 | YY = YYcov*UUinvLL'*XX; 61 | YY2 = NN*YYcov + YY*YY'; 62 63 | if nargout > 2 | tol > 0 64 | XXprec = UUinv - UUinvLL*YYcov*UUinvLL'; 65 | like(iIter) = 0.5*NN*(logdet(XXprec)) - 0.5*sum(sum(XXprec.*XX2)); % up to the constant -NN*DD/2*log(2*pi) 66 | end 67 68 | LL = XX*YY'/YY2; 69 | UU = (diagXX2 - sum((LL*YY).*XX, 2))/NN; 70 71 | if tol > 0 & iIter > 1 72 | if abs(diff(like(iIter-1:iIter))) < tol*diff(like([1,iIter])) 73 | like = like(1:iIter); 74 | break; 75 | end 76 | end 77 | end 78 79 80 | %% rotate latents to variance-ranked projection 81 | [LL,vars] = eigs(LL*LL', KK); 82 | LL = LL*sqrt(vars); 83 84 | %% redo inference to be consistent with rotated latents 85 | UUinv = diag(1./UU); 86 | UUinvLL = bsxfun(@rdivide,LL, UU); 87 | YYcov = inv(LL'*UUinvLL + II); 88 | YY = YYcov*UUinvLL'*XX; 89 90 91 92 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/fig.m: -------------------------------------------------------------------------------- 1 | function h = fig(id, varargin) 2 | % FIG - find or create a figure: [h=]fig('name') 3 | % 4 | % FIG and FIG(H) where H is a figure handle are almost identical to 5 | % the built-in FIGURE, except that if a new figure is created the 6 | % name may include a prefix. 7 | % 8 | % FIG('name') looks for a figure with the specified name. If it finds 9 | % one it makes it current; if it fails it creates one with the given 10 | % name and any prefix. 11 | % 12 | % FIG(ID, ...) passes any unrecognised options to the figure. 13 | % 14 | % OPTIONS: 15 | % 'new' [off] force new fig, appending sequence number to name 16 | % 'prefix' [hostname] prefix for figure name 17 | % See also: FIGURE. 18 19 | % maneesh.
20 | % pre-20010416: created 21 | % 20130703: general prefix option; doc cleanup 22 23 | % OPTIONS: 24 | new = []; % [off] force new figure, appending sequence number to name 25 | prefix = 1; % [hostname] prefix for figure name 26 | figprops=optlistassign(who, varargin); 27 28 | if nargin < 1 29 | h = figure; 30 | return; 31 | end 32 33 | if (~isempty(prefix) && prefix) 34 | if (~ischar(prefix)) 35 | hostname=getenv('HOSTNAME'); 36 | if(isempty(hostname)) 37 | hostname=getenv('HOST'); 38 | end 39 | if(isempty(hostname)) 40 | [stat,hostname] = unix('uname -n'); 41 | end 42 | hostname=strtok(hostname, '.'); 43 | prefix = hostname; 44 | end 45 | else 46 | prefix = 0; 47 | end 48 49 | if (~ischar(id)) % handle passed in 50 | if (ishandle(id)) % already a figure: leave it alone 51 | h = figure(id); 52 | else % set the new style name 53 | h = figure(id); 54 | if (prefix) 55 | set(h, 'name', sprintf('%s: %d', prefix, id)); 56 | end 57 | end 58 | else % name passed in 59 | if prefix 60 | name = sprintf('%s: %s', prefix, id); 61 | else 62 | name = id; 63 | end 64 65 | h = findobj ('type', 'figure', 'name', name); 66 | if (~ isempty(h)) % already exists 67 | if (isempty(new)) % don't require a new figure 68 | set(0, 'CurrentFigure', h); % doing it this way avoids focus issues 69 | else % do require a new figure 70 | ii = 1; 71 | while 1 % look for a new name 72 | ii = ii + 1; 73 | name = sprintf('%s: %s %d', hostname, id, ii); 74 | h = findobj('type', 'figure', 'name', name); 75 | if (isempty(h)) 76 | h = figure('name', sprintf('%s: %s %d', hostname, id, ii)); 77 | break; 78 | end 79 | end 80 | end 81 | else 82 | h = figure('name', name); 83 | end 84 | end 85 86 | set(h, 'numbertitle', 'off', figprops{:}); 87 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/fig.m: -------------------------------------------------------------------------------- 1 | function h = fig(id, varargin) 2 | % FIG - find or create a figure: [h=]fig('name') 3 | % 4 | % FIG and FIG(H) where H is a figure handle are almost identical to 5 | % the built-in FIGURE, except that if a new figure is created the 6 | % name may include a prefix. 7 | % 8 | % FIG('name') looks for a figure with the specified name. If it finds 9 | % one it makes it current; if it fails it creates one with the given 10 | % name and any prefix. 11 | % 12 | % FIG(ID, ...) passes any unrecognised options to the figure. 13 | % 14 | % OPTIONS: 15 | % 'new' [off] force new fig, appending sequence number to name 16 | % 'prefix' [hostname] prefix for figure name 17 | % See also: FIGURE. 18 19 | % maneesh.
20 | % pre-20010416: created 21 | % 20130703: general prefix option; doc cleanup 22 | 23 | % OPTIONS: 24 | new = []; % [off] force new figure, appending sequence number to name 25 | prefix = 1; % [hostname] prefix for figure name 26 | figprops=optlistassign(who, varargin); 27 | 28 | if nargin < 1 29 | h = figure; 30 | return; 31 | end 32 | 33 | if (~isempty(prefix) && prefix) 34 | if (~ischar(prefix)) 35 | hostname=getenv('HOSTNAME'); 36 | if(isempty(hostname)) 37 | hostname=getenv('HOST'); 38 | end 39 | if(isempty(hostname)) 40 | [stat,hostname] = unix('uname -n'); 41 | end 42 | hostname=strtok(hostname, '.'); 43 | prefix = hostname; 44 | end 45 | else 46 | prefix = 0; 47 | end 48 | 49 | if (~ischar(id)) % handle passed in 50 | if (ishandle(id)) % already a figure: leave it alone 51 | h = figure(id); 52 | else % set the new style name 53 | h = figure(id); 54 | if (prefix) 55 | set(h, 'name', sprintf('%s: %d', prefix, id)); 56 | end 57 | end 58 | else % name passed in 59 | if prefix 60 | name = sprintf('%s: %s', prefix, id); 61 | else 62 | name = id; 63 | end 64 | 65 | h = findobj ('type', 'figure', 'name', name); 66 | if (~ isempty(h)) % already exists 67 | if (isempty(new)) % don't require a new figure 68 | set(0, 'CurrentFigure', h); % doing it this way avoids focus issues 69 | else % do require a new figure 70 | ii = 1; 71 | while 1 % look for a new name 72 | ii = ii + 1; 73 | name = sprintf('%s: %s %d', hostname, id, ii); 74 | h = findobj('type', 'figure', 'name', name); 75 | if (isempty(h)) 76 | h = figure('name', sprintf('%s: %s %d', hostname, id, ii)); 77 | break; 78 | end 79 | end 80 | end 81 | else 82 | h = figure('name', name); 83 | end 84 | end 85 | 86 | set(h, 'numbertitle', 'off', figprops{:}); 87 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_gpfa/learnGPparams.m: -------------------------------------------------------------------------------- 1 | function res = learnGPparams(seq, params, varargin) 2 | % Updates parameters of GP state model given neural trajectories. 3 | % 4 | % INPUTS: 5 | % 6 | % seq - data structure containing neural trajectories 7 | % params - current GP state model parameters, which gives starting point 8 | % for gradient optimization 9 | % 10 | % OUTPUT: 11 | % 12 | % res - updated GP state model parameters 13 | % 14 | % OPTIONAL ARGUMENTS: 15 | % 16 | % MAXITERS - maximum number of line searches (if >0), maximum number 17 | % of function evaluations (if <0), for minimize.m (default:-8) 18 | % verbose - logical that specifies whether to display status messages 19 | % (default: false) 20 | % 21 | % @ 2009 Byron Yu byronyu@stanford.edu 22 | % John Cunningham jcunnin@stanford.edu 23 | 24 | MAXITERS = -8; % for minimize.m 25 | verbose = false; 26 | assignopts(who, varargin); 27 | 28 | switch params.covType 29 | case 'rbf' 30 | % If there's more than one type of parameter, put them in the 31 | % second row of oldParams. 
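% (For the 'rbf' covariance, gamma = 1/timescale^2 in units of timesteps --
% see grad_betgam.m -- so optimizing log(gamma) keeps the timescale positive.)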
32 | oldParams = params.gamma; 33 | fname = 'grad_betgam'; 34 | case 'tri' 35 | oldParams = params.a; 36 | fname = 'grad_trislope'; 37 | case 'logexp' 38 | oldParams = params.a; 39 | fname = 'grad_logexpslope'; 40 | end 41 | if params.notes.learnGPNoise 42 | oldParams = [oldParams; params.eps]; 43 | fname = [fname '_noise']; 44 | end 45 | 46 | xDim = size(oldParams, 2); 47 | precomp = makePrecomp(seq, xDim); 48 | 49 | % Loop once for each state dimension (each GP) 50 | for i = 1:xDim 51 | const = []; 52 | switch params.covType 53 | % No constants for 'rbf' or 'tri' 54 | case 'logexp' 55 | const.gamma = params.gamma; 56 | end 57 | if ~params.notes.learnGPNoise 58 | const.eps = params.eps(i); 59 | end 60 | 61 | initp = log(oldParams(:,i)); 62 | 63 | % This does the heavy lifting 64 | [res_p, res_f, res_iters] =... 65 | minimize(initp, fname, MAXITERS, precomp(i), const); 66 | 67 | switch params.covType 68 | case 'rbf' 69 | res.gamma(i) = exp(res_p(1)); 70 | case 'tri' 71 | res.a(i) = exp(res_p(1)); 72 | case 'logexp' 73 | res.a(i) = exp(res_p(1)); 74 | end 75 | if params.notes.learnGPNoise 76 | res.eps(i) = exp(res_p(2)); 77 | end 78 | 79 | if verbose 80 | fprintf('\nConverged p; xDim:%d, p:%s', i, mat2str(res_p, 3)); 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_gpfa/learnGPparams.m: -------------------------------------------------------------------------------- 1 | function res = learnGPparams(seq, params, varargin) 2 | % Updates parameters of GP state model given neural trajectories. 3 | % 4 | % INPUTS: 5 | % 6 | % seq - data structure containing neural trajectories 7 | % params - current GP state model parameters, which gives starting point 8 | % for gradient optimization 9 | % 10 | % OUTPUT: 11 | % 12 | % res - updated GP state model parameters 13 | % 14 | % OPTIONAL ARGUMENTS: 15 | % 16 | % MAXITERS - maximum number of line searches (if >0), maximum number 17 | % of function evaluations (if <0), for minimize.m (default:-8) 18 | % verbose - logical that specifies whether to display status messages 19 | % (default: false) 20 | % 21 | % @ 2009 Byron Yu byronyu@stanford.edu 22 | % John Cunningham jcunnin@stanford.edu 23 | 24 | MAXITERS = -8; % for minimize.m 25 | verbose = false; 26 | assignopts(who, varargin); 27 | 28 | switch params.covType 29 | case 'rbf' 30 | % If there's more than one type of parameter, put them in the 31 | % second row of oldParams. 32 | oldParams = params.gamma; 33 | fname = 'grad_betgam'; 34 | case 'tri' 35 | oldParams = params.a; 36 | fname = 'grad_trislope'; 37 | case 'logexp' 38 | oldParams = params.a; 39 | fname = 'grad_logexpslope'; 40 | end 41 | if params.notes.learnGPNoise 42 | oldParams = [oldParams; params.eps]; 43 | fname = [fname '_noise']; 44 | end 45 | 46 | xDim = size(oldParams, 2); 47 | precomp = makePrecomp(seq, xDim); 48 | 49 | % Loop once for each state dimension (each GP) 50 | for i = 1:xDim 51 | const = []; 52 | switch params.covType 53 | % No constants for 'rbf' or 'tri' 54 | case 'logexp' 55 | const.gamma = params.gamma; 56 | end 57 | if ~params.notes.learnGPNoise 58 | const.eps = params.eps(i); 59 | end 60 | 61 | initp = log(oldParams(:,i)); 62 | 63 | % This does the heavy lifting 64 | [res_p, res_f, res_iters] =... 
65 | minimize(initp, fname, MAXITERS, precomp(i), const); 66 | 67 | switch params.covType 68 | case 'rbf' 69 | res.gamma(i) = exp(res_p(1)); 70 | case 'tri' 71 | res.a(i) = exp(res_p(1)); 72 | case 'logexp' 73 | res.a(i) = exp(res_p(1)); 74 | end 75 | if params.notes.learnGPNoise 76 | res.eps(i) = exp(res_p(2)); 77 | end 78 | 79 | if verbose 80 | fprintf('\nConverged p; xDim:%d, p:%s', i, mat2str(res_p, 3)); 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/cutTrials.m: -------------------------------------------------------------------------------- 1 | function seqOut = cutTrials(seqIn, varargin) 2 | % 3 | % seqOut = cutTrials(seqIn, ...) 4 | % 5 | % Extracts trial segments that are all of the same length. Uses 6 | % overlapping segments if trial length is not integer multiple 7 | % of segment length. Ignores trials with length shorter than 8 | % one segment length. 9 | % 10 | % INPUTS: 11 | % 12 | % seqIn - data structure, whose nth entry (corresponding to 13 | % the nth experimental trial) has fields 14 | % trialId -- unique trial identifier 15 | % T (1 x 1) -- number of timesteps in trial 16 | % y (yDim x T) -- neural data 17 | % 18 | % OUTPUTS: 19 | % 20 | % seqOut - data structure, whose nth entry (corresponding to 21 | % the nth segment) has fields 22 | % trialId -- identifier of trial from which 23 | % segment was taken 24 | % segId -- segment identifier within trial 25 | % T (1 x 1) -- number of timesteps in segment 26 | % y (yDim x T) -- neural data 27 | % 28 | % OPTIONAL ARGUMENTS: 29 | % 30 | % segLength - length of segments to extract, in number of timesteps. 31 | % If infinite, entire trials are extracted, i.e., no 32 | % segmenting. (default: 20) 33 | % 34 | % @ 2009 Byron Yu -- byronyu@stanford.edu 35 | 36 | segLength = 20; % number of timesteps in each segment 37 | assignopts(who, varargin); 38 | 39 | if isinf(segLength) 40 | seqOut = seqIn; 41 | return 42 | end 43 | 44 | seqOut = []; 45 | for n = 1:length(seqIn) 46 | T = seqIn(n).T; 47 | 48 | % Skip trials that are shorter than segLength 49 | if T < segLength 50 | fprintf('Warning: trialId %4d shorter than one segLength...skipping\n',... 51 | seqIn(n).trialId); 52 | continue 53 | end 54 | 55 | numSeg = ceil(T/segLength); 56 | 57 | if numSeg == 1 58 | cumOL = 0; 59 | else 60 | totalOL = (segLength*numSeg) - T; 61 | probs = ones(1,numSeg-1)/(numSeg-1); 62 | % mnrnd is very sensitive to sum(probs) being even slightly 63 | % away from 1 due to floating point round-off. 64 | probs(end) = 1-sum(probs(1:end-1)); 65 | randOL = mnrnd(totalOL, probs); 66 | cumOL = [0 cumsum(randOL)]; 67 | end 68 | 69 | seg.trialId = seqIn(n).trialId; 70 | seg.T = segLength; 71 | 72 | for s = 1:numSeg 73 | tStart = -cumOL(s) + segLength * (s-1) + 1; 74 | 75 | seg.segId = s; 76 | seg.y = seqIn(n).y(:, tStart:(tStart+segLength-1)); 77 | 78 | seqOut = [seqOut seg]; 79 | end 80 | end 81 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/cutTrials.m: -------------------------------------------------------------------------------- 1 | function seqOut = cutTrials(seqIn, varargin) 2 | % 3 | % seqOut = cutTrials(seqIn, ...) 4 | % 5 | % Extracts trial segments that are all of the same length. Uses 6 | % overlapping segments if trial length is not integer multiple 7 | % of segment length. Ignores trials with length shorter than 8 | % one segment length. 
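% Example (hypothetical data, for illustration only): a single 45-timestep
% trial cut into 20-timestep segments gives ceil(45/20) = 3 overlapping
% segments, with the 15 timesteps of total overlap placed at random:
%
%   seq(1).trialId = 1; seq(1).T = 45; seq(1).y = randn(10, 45);
%   segs = cutTrials(seq, 'segLength', 20);   % 1 x 3 struct array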
9 | % 10 | % INPUTS: 11 | % 12 | % seqIn - data structure, whose nth entry (corresponding to 13 | % the nth experimental trial) has fields 14 | % trialId -- unique trial identifier 15 | % T (1 x 1) -- number of timesteps in trial 16 | % y (yDim x T) -- neural data 17 | % 18 | % OUTPUTS: 19 | % 20 | % seqOut - data structure, whose nth entry (corresponding to 21 | % the nth segment) has fields 22 | % trialId -- identifier of trial from which 23 | % segment was taken 24 | % segId -- segment identifier within trial 25 | % T (1 x 1) -- number of timesteps in segment 26 | % y (yDim x T) -- neural data 27 | % 28 | % OPTIONAL ARGUMENTS: 29 | % 30 | % segLength - length of segments to extract, in number of timesteps. 31 | % If infinite, entire trials are extracted, i.e., no 32 | % segmenting. (default: 20) 33 | % 34 | % @ 2009 Byron Yu -- byronyu@stanford.edu 35 | 36 | segLength = 20; % number of timesteps in each segment 37 | assignopts(who, varargin); 38 | 39 | if isinf(segLength) 40 | seqOut = seqIn; 41 | return 42 | end 43 | 44 | seqOut = []; 45 | for n = 1:length(seqIn) 46 | T = seqIn(n).T; 47 | 48 | % Skip trials that are shorter than segLength 49 | if T < segLength 50 | fprintf('Warning: trialId %4d shorter than one segLength...skipping\n',... 51 | seqIn(n).trialId); 52 | continue 53 | end 54 | 55 | numSeg = ceil(T/segLength); 56 | 57 | if numSeg == 1 58 | cumOL = 0; 59 | else 60 | totalOL = (segLength*numSeg) - T; 61 | probs = ones(1,numSeg-1)/(numSeg-1); 62 | % mnrnd is very sensitive to sum(probs) being even slightly 63 | % away from 1 due to floating point round-off. 64 | probs(end) = 1-sum(probs(1:end-1)); 65 | randOL = mnrnd(totalOL, probs); 66 | cumOL = [0 cumsum(randOL)]; 67 | end 68 | 69 | seg.trialId = seqIn(n).trialId; 70 | seg.T = segLength; 71 | 72 | for s = 1:numSeg 73 | tStart = -cumOL(s) + segLength * (s-1) + 1; 74 | 75 | seg.segId = s; 76 | seg.y = seqIn(n).y(:, tStart:(tStart+segLength-1)); 77 | 78 | seqOut = [seqOut seg]; 79 | end 80 | end 81 | -------------------------------------------------------------------------------- /python/setup.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "This notebook is used to set up important files for running the notebooks. It will create a \"data\" folder in the root of the repository, and download approximately 60MB of data." 
8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": { 14 | "collapsed": false 15 | }, 16 | "outputs": [], 17 | "source": [ 18 | "import sys\n", 19 | "sys.path.append('./src/')\n", 20 | "import opencourse as oc" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": { 27 | "collapsed": false 28 | }, 29 | "outputs": [ 30 | { 31 | "name": "stdout", 32 | "output_type": "stream", 33 | "text": [ 34 | "Saved to: /Users/choldgraf/Dropbox/github/publicRepos/neuro_datasci_open_course/data/matrices_connectivity.mat\n", 35 | "Saved to: /Users/choldgraf/Dropbox/github/publicRepos/neuro_datasci_open_course/data/StevensonV2.mat\n", 36 | "Saved to: /Users/choldgraf/Dropbox/github/publicRepos/neuro_datasci_open_course/data/StevensonV4.mat\n" 37 | ] 38 | } 39 | ], 40 | "source": [ 41 | "# Download all data\n", 42 | "oc.download_all_files()" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": { 48 | "collapsed": true 49 | }, 50 | "source": [ 51 | "# Ensure that you have the right dependencies\n", 52 | "All of the below packages should import:" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 5, 58 | "metadata": { 59 | "collapsed": false 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "import mne # <-- Package for electrophysiology analysis\n", 64 | "import pandas # <-- Package for representing data as a DataFrame\n", 65 | "import bct # <-- Brain Connectivity Toolbox" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "metadata": { 72 | "collapsed": true 73 | }, 74 | "outputs": [], 75 | "source": [] 76 | } 77 | ], 78 | "metadata": { 79 | "hide_input": false, 80 | "kernelspec": { 81 | "display_name": "Python 2", 82 | "language": "python", 83 | "name": "python2" 84 | }, 85 | "language_info": { 86 | "codemirror_mode": { 87 | "name": "ipython", 88 | "version": 2 89 | }, 90 | "file_extension": ".py", 91 | "mimetype": "text/x-python", 92 | "name": "python", 93 | "nbconvert_exporter": "python", 94 | "pygments_lexer": "ipython2", 95 | "version": "2.7.11" 96 | } 97 | }, 98 | "nbformat": 4, 99 | "nbformat_minor": 1 100 | } 101 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/Solutions/FullKonradTutorial.m: -------------------------------------------------------------------------------- 1 | %%load data 2 | load stevensonV2 3 | %% Remove all times where speeds are very slow 4 | isGood=find(handVel(1,:).^2+handVel(2,:).^2>.015) 5 | handVel=handVel(1:2,isGood); 6 | handPos=handPos(1:2,isGood); 7 | spikes=spikes(:,isGood); 8 | time=time(isGood); 9 | angle=atan2(handVel(1,:),handVel(2,:)); 10 | 11 | %% Plot Raw Data - PASCAL? 
%% 12 | nNeuron=193; % 193 13 | clf 14 | hold on 15 | plot(angle,spikes(nNeuron,:)+0.2*randn(size(spikes(nNeuron,:))),'r.') 16 | 17 | %% Make a simple tuning curve 18 | angles=-pi:pi/8:pi; 19 | for i=1:length(angles)-1 20 | angIndices=find(and(angle>angles(i),angle<=angles(i+1))); 21 | nSpikes(i)=mean(spikes(nNeuron,angIndices)); 22 | end 23 | plot(angles(1:end-1)+pi/16,nSpikes) 24 | 25 | %% PART I: KONRAD 26 | %% bootstrap error bars 27 | angles=-pi:pi/8:pi; 28 | for k=1:1000 29 | inds=1+floor(rand(size(angle))*length(angle)); 30 | for i=1:length(angles)-1 31 | angIndices=inds(and(angle(inds)>angles(i),angle(inds)<=angles(i+1))); 32 | nS(i,k)=mean(spikes(nNeuron,angIndices)); 33 | end 34 | end 35 | nSS=sort(nS') 36 | L=nSS(25,:); 37 | U=nSS(975,:); 38 | M=mean(nS') 39 | errorbar(angles(1:end-1)+pi/16,M,M-L,U-M) 40 | %advanced exercise: do this for all neurons. Do they actually have cosine 41 | %tuning as indicated by the research? 42 | 43 | %% PART II: KONRAD 44 | %% fit arbitrary functions 45 | %fit a model 46 | [bestParas,fvalCosExp]=fminsearch(@evaluateScoreCosExp, [.8,0.1,4],[],spikes(nNeuron,:),angle); 47 | plot(-pi:pi/80:pi, exp(bestParas(1)+bestParas(2)*cos((-pi:pi/80:pi)-bestParas(3)))) 48 | 49 | 50 | %% PART 3 Konrad 51 | %% now do some machine learning. MATLAB does not allow us to do this with Poisson distributions 52 | nNeuron=193; %183 141 193 53 | %first let's have some meaningful regressors 54 | Y=spikes(nNeuron,:)'; 55 | X=handVel(1:2,:); 56 | X(3:4,:)=handPos(1:2,:); 57 | X=X'; 58 | 59 | %do trivial model and linear regression first 60 | for fold=1:100 61 | indsTrain=1:length(Y); 62 | indsTrain(find(mod(indsTrain-fold,100)==0))=[]; 63 | indsTest=fold:100:length(Y); 64 | [b, bint, r, rint, stats]=regress(Y(indsTrain,1),[X(indsTrain,:), 1+0*X(indsTrain,1)]); 65 | pred = b'*[X(indsTest,:), 1+0*X(indsTest,1)]'; 66 | mse(fold)=mean((Y(indsTest,1)'-pred).^2); 67 | mseConst(fold)=mean((Y(indsTest,1)'-mean(Y(indsTrain,1))).^2); 68 | end 69 | mseTotalLinearRegression=mean(mse) 70 | mseTotalConst=mean(mseConst) 71 | 72 | close all 73 | rng(1945,'twister') 74 | leaf = [12 25 50 100]; 75 | col = 'rbcm'; 76 | figure 77 | for i=1:length(leaf) 78 | b = TreeBagger(200,X,Y,'Method','R','OOBPred','On',... 79 | 'MinLeafSize',leaf(i)); 80 | plot(oobError(b)-mseTotalConst,col(i)); 81 | hold on 82 | end 83 | xlabel 'Number of Grown Trees' 84 | ylabel 'Mean Squared Error' 85 | legend({'12' '25' '50' '100'},'Location','NorthEast') 86 | hold off 87 | 88 | 89 | 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/getTrajNewTrials.m: -------------------------------------------------------------------------------- 1 | function seqNew = getTrajNewTrials(ws, dat, varargin) 2 | % 3 | % seqNew = getTrajNewTrials(ws, dat,...) 4 | % 5 | % Extract neural trajectories from a set of new trials using previously-fitted 6 | % model parameters. 7 | % 8 | % INPUT: 9 | % 10 | % ws - saved workspace variables that include the previously-fitted 11 | % model parameters 'estParams' 12 | % dat - data for new trials with fields 13 | % trialId -- unique trial identifier 14 | % spikes -- 0/1 matrix of the raw spiking activity across 15 | % all neurons. Each row corresponds to a neuron. 16 | % Each column corresponds to a 1 msec timestep.
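% Example (hypothetical sizes): for 61 neurons recorded over a
% 2-second trial at 1 msec resolution, dat(1).spikes would be
% a 61 x 2000 matrix of 0s and 1s.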
17 | % 18 | % OUTPUTS: 19 | % 20 | % seqNew - data structure containing orthonormalized neural trajectories 21 | % ('xorth') for the new trials 22 | % 23 | % OPTIONAL ARGUMENT: 24 | % 25 | % kernSD - for two-stage methods, specify kernel smoothing width. 26 | % By default, the function uses ws.kern(1). 27 | % 28 | % @ 2009 Byron Yu -- byronyu@stanford.edu 29 | 30 | kernSD = []; 31 | assignopts(who, varargin); 32 | 33 | if isempty(ws) 34 | fprintf('ERROR: Input argument is empty.\n'); 35 | return 36 | end 37 | 38 | % Process data in the same way as in 'ws' 39 | % Obtain binned spike counts 40 | seqNew = getSeq(dat, ws.binWidth, ws.extraOpts{:}); 41 | 42 | % Remove inactive units 43 | for n = 1:length(seqNew) 44 | seqNew(n).y = seqNew(n).y(ws.hasSpikesBool,:); 45 | end 46 | 47 | if isfield(ws, 'kernSDList') 48 | % Two-stage methods 49 | if isempty(kernSD) 50 | k = 1; 51 | else 52 | k = find(ws.kernSDList == kernSD); 53 | if isempty(k) 54 | fprintf('ERROR: Selected kernSD not found.\n'); 55 | return 56 | end 57 | end 58 | 59 | % Kernel smoothing 60 | for n = 1:length(seqNew) 61 | seqNew(n).y = smoother(seqNew(n).y, ws.kernSDList(k), ws.binWidth); 62 | end 63 | end 64 | 65 | if ismember(ws.method, {'gpfa'}) 66 | seqNew = exactInferenceWithLL(seqNew, ws.estParams); 67 | C = ws.estParams.C; 68 | X = [seqNew.xsm]; 69 | [Xorth, Corth] = orthogonalize(X, C); 70 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 71 | 72 | elseif ismember(ws.method, {'fa', 'ppca'}) 73 | Y = [seqNew.y]; 74 | X = fastfa_estep(Y, ws.kern(k).estParams); 75 | L = ws.kern(k).estParams.L; 76 | [Xorth, Lorth] = orthogonalize(X.mean, L); 77 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 78 | 79 | elseif ismember(ws.method, {'pca'}) 80 | Y = [seqNew.y]; 81 | estParams = ws.kern(k).estParams; 82 | Xorth = estParams.L' * bsxfun(@minus, Y, estParams.d); 83 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 84 | end 85 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/getTrajNewTrials.m: -------------------------------------------------------------------------------- 1 | function seqNew = getTrajNewTrials(ws, dat, varargin) 2 | % 3 | % seqNew = getTrajNewTrials(ws, dat,...) 4 | % 5 | % Extract neural trajectories from a set of new trials using previously-fitted 6 | % model parameters. 7 | % 8 | % INPUT: 9 | % 10 | % ws - saved workspace variables that include the previously-fitted 11 | % model parameters 'estParams' 12 | % dat - data for new trials with fields 13 | % trialId -- unique trial identifier 14 | % spikes -- 0/1 matrix of the raw spiking activity across 15 | % all neurons. Each row corresponds to a neuron. 16 | % Each column corresponds to a 1 msec timestep. 17 | % 18 | % OUTPUTS: 19 | % 20 | % seqNew - data structure containing orthonormalized neural trajectories 21 | % ('xorth') for the new trials 22 | % 23 | % OPTIONAL ARGUMENT: 24 | % 25 | % kernSD - for two-stage methods, specify kernel smoothing width. 26 | % By default, the function uses ws.kern(1). 
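% Example (assuming ws was saved by a previous two-stage fit whose
% ws.kernSDList included 30):
%
%   seqNew = getTrajNewTrials(ws, dat, 'kernSD', 30);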
27 | % 28 | % @ 2009 Byron Yu -- byronyu@stanford.edu 29 | 30 | kernSD = []; 31 | assignopts(who, varargin); 32 | 33 | if isempty(ws) 34 | fprintf('ERROR: Input argument is empty.\n'); 35 | return 36 | end 37 | 38 | % Process data in the same way as in 'ws' 39 | % Obtain binned spike counts 40 | seqNew = getSeq(dat, ws.binWidth, ws.extraOpts{:}); 41 | 42 | % Remove inactive units 43 | for n = 1:length(seqNew) 44 | seqNew(n).y = seqNew(n).y(ws.hasSpikesBool,:); 45 | end 46 | 47 | if isfield(ws, 'kernSDList') 48 | % Two-stage methods 49 | if isempty(kernSD) 50 | k = 1; 51 | else 52 | k = find(ws.kernSDList == kernSD); 53 | if isempty(k) 54 | fprintf('ERROR: Selected kernSD not found.\n'); 55 | return 56 | end 57 | end 58 | 59 | % Kernel smoothing 60 | for n = 1:length(seqNew) 61 | seqNew(n).y = smoother(seqNew(n).y, ws.kernSDList(k), ws.binWidth); 62 | end 63 | end 64 | 65 | if ismember(ws.method, {'gpfa'}) 66 | seqNew = exactInferenceWithLL(seqNew, ws.estParams); 67 | C = ws.estParams.C; 68 | X = [seqNew.xsm]; 69 | [Xorth, Corth] = orthogonalize(X, C); 70 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 71 | 72 | elseif ismember(ws.method, {'fa', 'ppca'}) 73 | Y = [seqNew.y]; 74 | X = fastfa_estep(Y, ws.kern(k).estParams); 75 | L = ws.kern(k).estParams.L; 76 | [Xorth, Lorth] = orthogonalize(X.mean, L); 77 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 78 | 79 | elseif ismember(ws.method, {'pca'}) 80 | Y = [seqNew.y]; 81 | estParams = ws.kern(k).estParams; 82 | Xorth = estParams.L' * bsxfun(@minus, Y, estParams.d); 83 | seqNew = segmentByTrial(seqNew, Xorth, 'xorth'); 84 | end 85 | -------------------------------------------------------------------------------- /matlab/konrad_tuning_curves/MyMachineLearningPredictions.m: -------------------------------------------------------------------------------- 1 | %%load data 2 | load stevensonV2 3 | %% Remove all times where speeds are very slow 4 | isGood=find(handVel(1,:).^2+handVel(2,:).^2>.015) 5 | handVel=handVel(1:2,isGood); 6 | handPos=handPos(1:2,isGood); 7 | spikes=spikes(:,isGood); 8 | time=time(isGood); 9 | angle=atan2(handVel(1,:),handVel(2,:)); 10 | 11 | %% Plot Raw Data - PASCAL? %% 12 | nNeuron=193; % 193 13 | clf 14 | hold on 15 | plot(angle,spikes(nNeuron,:)+0.2*randn(size(spikes(nNeuron,:))),'r.') 16 | 17 | %% Make a simple tuning curve 18 | angles=-pi:pi/8:pi; 19 | for i=1:length(angles)-1 20 | angIndices=find(and(angle>angles(i),angle<=angles(i+1))); 21 | nSpikes(i)=mean(spikes(nNeuron,angIndices)); 22 | end 23 | plot(angles(1:end-1)+pi/16,nSpikes) 24 | 25 | %% PART I: KONRAD 26 | %% bootstrap error bars 27 | angles=-pi:pi/8:pi; 28 | for k=1:1000 29 | inds=1+floor(rand(size(angle))*length(angle)); 30 | for i=1:length(angles)-1 31 | angIndices=inds(and(angle(inds)>angles(i),angle(inds)<=angles(i+1))); 32 | nS(i,k)=mean(spikes(nNeuron,angIndices)); 33 | end 34 | end 35 | nSS=sort(nS') 36 | L=nSS(25,:); 37 | U=nSS(975,:); 38 | M=mean(nS') 39 | errorbar(angles(1:end-1)+pi/16,M,M-L,U-M) 40 | %advanced exercise: do this for all neurons. Do they actually have cosine 41 | %tuning as indicated by the research? 42 | 43 | %% PART II: KONRAD 44 | %% fit arbitrary functions 45 | %fit a model 46 | [bestParas,fvalCosExp]=fminsearch(@evaluateScoreCosExp, [.8,0.1,4],[],spikes(nNeuron,:),angle); 47 | plot(-pi:pi/80:pi, exp(bestParas(1)+bestParas(2)*cos((-pi:pi/80:pi)-bestParas(3)))) 48 | %advanced exercise: is exponential better than linear-threshold? 49 | 50 | %% PART 3 Konrad 51 | %% now do some machine learning. MATLAB does not allow us to do this with Poisson distributions 52 | nNeuron=193; %183 141 193 53 | %first let's have some meaningful regressors 54 | Y=spikes(nNeuron,:)'; 55 | X=handVel(1:2,:); 56 | X(3:4,:)=handPos(1:2,:); 57 | X=X'; 58 | 59 | %And now let's do the trivial linear things as a benchmark 60 | %do trivial model and linear regression first 61 | %100 fold crossvalidation 62 | for fold=1:100 63 | indsTrain=1:length(Y); 64 | indsTrain(find(mod(indsTrain-fold,100)==0))=[]; 65 | indsTest=fold:100:length(Y); 66 | [b, bint, r, rint, stats]=regress(Y(indsTrain,1),[X(indsTrain,:), 1+0*X(indsTrain,1)]); 67 | pred = b'*[X(indsTest,:), 1+0*X(indsTest,1)]'; 68 | mse(fold)=mean((Y(indsTest,1)'-pred).^2); 69 | mseConst(fold)=mean((Y(indsTest,1)'-mean(Y(indsTrain,1))).^2); 70 | end 71 | mseTotalLinearRegression=mean(mse) 72 | mseTotalConst=mean(mseConst) 73 | 74 | b = TreeBagger(numberTrees,X,Y,'Method','R','OOBPred','On',... 75 | 'MinLeafSize',minLeafSize); 76 | % to get the cross-validation error, use oobError(b) 77 | % keep in mind that only the relative error matters, so use oobError(b)-mseTotalConst 78 | % what are the right settings for numberTrees and minLeafSize? 79 | plot(oobError(b)-mseTotalConst); %let's only plot how much better we are doing 80 | 81 | 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/invToeplitz/invToeplitzFast.m: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % John P Cunningham 3 | % 2009 4 | % 5 | % invToeplitzFast() 6 | % 7 | % This function is simply a wrapper for the C-MEX 8 | % function invToeplitzFastZohar.mexa64 (or .mexglx, etc), 9 | % which is just a compiled version of invToeplitzFastZohar.c, 10 | % which should also be in this folder. Please see 11 | % that code for details. 12 | % 13 | % This algorithm inverts a positive definite (symmetric) 14 | % Toeplitz matrix in O(n^2) time, which is considerably better 15 | % than the O(n^3) that inv() offers. This follows the Trench 16 | % algorithm implementation of Zohar 1969. See that paper for 17 | % all explanation, as the C code is just an implementation of 18 | % the algorithm of p599 of that paper. 19 | % 20 | % This function also computes the log determinant, which is 21 | % calculated essentially for free as part of the calculation of 22 | % the inverse. This is often useful in applications when one really 23 | % needs to represent the inverse of a Toeplitz matrix. 24 | % 25 | % This function should be called from within invToeplitz.m, 26 | % which adds a try block so that it can default to a native 27 | % MATLAB inversion (either inv() or a vectorized version of 28 | % the Trench algorithm, depending on the size of the matrix) 29 | % should the MEX interface not work. This will happen, for example, 30 | % if you move to a new architecture and do not compile for .mexmaci 31 | % or similar (see mexext('all') and help mex for some info on this). 32 | % 33 | % Inputs: 34 | % T the positive definite (symmetric) Toeplitz matrix, which 35 | % does NOT need to be scaled to be 1 on the main diagonal. 36 | % 37 | % Outputs: 38 | % Ti the inverse of T 39 | % ld the log determinant of T, NOT Ti. 40 | % 41 | % 42 | % NOTE: This code is used to speed up the Toeplitz inversion as much 43 | % as possible. Accordingly, no error checking is done. The onus is 44 | % on the caller (which should be invToeplitz.m) to pass the correct args.
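% Example (illustrative only; any symmetric positive definite Toeplitz
% matrix will do):
%
%   T = toeplitz(0.5.^(0:99));      % 100 x 100 SPD Toeplitz matrix
%   [Ti, ld] = invToeplitzFast(T);  % Ti is inv(T); ld is log(det(T))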
45 | % 46 | % NOTE: cf. invToeplitzFastGolub.c, which uses the algorithm in Golub and 47 | % Van Loan. This newer version was written because the Zohar version 48 | % also computes the log determinant for free, which is essential in the 49 | % application for which this algorithm was coded. 50 | % 51 | % NOTE: Whenever possible, do not actually invert a matrix. This code is 52 | % written just in case you really need to do so. Otherwise, for example 53 | % if you just want to solve inv(T)*x for some vector x, you are better off 54 | % using a fast inversion method, like PCG with fast matrix multiplication, 55 | % which could be something like an FFT method for the Toeplitz matrix. To 56 | % learn about this, see Cunningham, Sahani, Shenoy (2008), ICML, "Fast Gaussian 57 | % process methods for point process intensity estimation." 58 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 59 | 60 | function [Ti,ld] = invToeplitzFast(T) 61 | 62 | [Ti,ld] = invToeplitzFastZohar(T); 63 | 64 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/invToeplitz/invToeplitzFast.m: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % John P Cunningham 3 | % 2009 4 | % 5 | % invToeplitzFast() 6 | % 7 | % This function is simply a wrapper for the C-MEX 8 | % function invToeplitzFastZohar.mexa64 (or .mexglx, etc), 9 | % which is just a compiled version of invToeplitzFastZohar.c, 10 | % which should also be in this folder. Please see 11 | % that code for details. 12 | % 13 | % This algorithm inverts a positive definite (symmetric) 14 | % Toeplitz matrix in O(n^2) time, which is considerably better 15 | % than the O(n^3) that inv() offers. This follows the Trench 16 | % algorithm implementation of Zohar 1969. See that paper for 17 | % all explanation, as the C code is just an implementation of 18 | % the algorithm of p599 of that paper. 19 | % 20 | % This function also computes the log determinant, which is 21 | % calculated essentially for free as part of the calculation of 22 | % the inverse. This is often useful in applications when one really 23 | % needs to represent the inverse of a Toeplitz matrix. 24 | % 25 | % This function should be called from within invToeplitz.m, 26 | % which adds a try block so that it can default to a native 27 | % MATLAB inversion (either inv() or a vectorized version of 28 | % the Trench algorithm, depending on the size of the matrix) 29 | % should the MEX interface not work. This will happen, for example, 30 | % if you move to a new architecture and do not compile for .mexmaci 31 | % or similar (see mexext('all') and help mex for some info on this). 32 | % 33 | % Inputs: 34 | % T the positive definite (symmetric) Toeplitz matrix, which 35 | % does NOT need to be scaled to be 1 on the main diagonal. 36 | % 37 | % Outputs: 38 | % Ti the inverse of T 39 | % ld the log determinant of T, NOT Ti. 40 | % 41 | % 42 | % NOTE: This code is used to speed up the Toeplitz inversion as much 43 | % as possible. Accordingly, no error checking is done. The onus is 44 | % on the caller (which should be invToeplitz.m) to pass the correct args. 45 | % 46 | % NOTE: cf. invToeplitzFastGolub.c, which uses the algorithm in Golub and 47 | % Van Loan. This newer version was written because the Zohar version 48 | % also computes the log determinant for free, which is essential in the 49 | % application for which this algorithm was coded.
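% Example (illustrative only; any symmetric positive definite Toeplitz
% matrix will do):
%
%   T = toeplitz(0.5.^(0:99));      % 100 x 100 SPD Toeplitz matrix
%   [Ti, ld] = invToeplitzFast(T);  % Ti is inv(T); ld is log(det(T))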
50 | % 51 | % NOTE: Whenever possible, do not actually invert a matrix. This code is 52 | % written just in case you really need to do so. Otherwise, for example 53 | % if you just want to solve inv(T)*x for some vector x, you are better off 54 | % using a fast inversion method, like PCG with fast matrix multiplication, 55 | % which could be something like an FFT method for the Toeplitz matrix. To 56 | % learn about this, see Cunningham, Sahani, Shenoy (2008), ICML, "Fast Gaussian 57 | % process methods for point process intensity estimation." 58 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 59 | 60 | function [Ti,ld] = invToeplitzFast(T) 61 | 62 | [Ti,ld] = invToeplitzFastZohar(T); 63 | 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The Open Course in Data Science for Neuroscience 2 | These materials are an introduction to the ways in which data are used to ask and answer questions about the brain. The course cuts across many fields of neuroscience, including cellular, systems, and cognitive neuroscience, and is aimed at anyone interested in using quantitative methods to study the brain. 3 | 4 | These materials are free for all users. 5 | 6 | # Course tools and materials 7 | The materials are split into two codebases - one in Matlab, one in Python. These aren't the only languages available for doing neuroscience work, but they are by far the two most commonly used languages. You can get a copy of Matlab by purchasing a license [here](matlab.com), and you can download a free scientific distribution of Python [here](anaconda.com). 8 | 9 | The open-source community has been instrumental in improving quantitative sophistication in the analysis of the brain. As such, these materials use open-source tools wherever possible. Instead of custom code, a strong preference is given to using pre-existing toolboxes. At the same time, it is also important to "get your hands dirty" when analyzing data, so we have avoided toolboxes that serve as "one-click" solutions or that require graphical UIs to perform analyses. 10 | 11 | ## Python Code 12 | All Python tutorials are written as Jupyter notebooks. In addition, we have included a Python module that contains helper functions to simplify the tutorial materials. We urge you to investigate these functions and understand how they work. 13 | 14 | ## Matlab code 15 | Matlab tutorials are ............ 16 | 17 | ## Datasets 18 | This course uses datasets which are completely open and free to use. As such, any user should be able to copy this repository, along with the relevant datasets, and run the tutorials on their own computers. 19 | 20 | # Course topics 21 | Below is a list of topics currently covered by this course. 22 | 23 | ## Basics 24 | These tutorials cover the basics of data storage, representation, and manipulation. 25 | 26 | * Exploring data 27 | * Goal: create all standard "initial plots" that one might make given this data 28 | - [x] Data munging - conditions, trials, time, observables 29 | - [x] Trial timeseries - raster plots 30 | - [x] Trial averaging - peristimulus time histogram (psth) 31 | - [x] Temporal smoothing 32 | - [ ] Inter-spike intervals 33 | * Calculating statistics and uncertainty 34 | 35 | ## Modeling 36 | These tutorials attempt to answer specific questions about the brain by building computational models.
37 | 38 | * Models for anatomical (single unit) response during task 39 | * Goal: create average response models (nonparametric and parametric) and quantify uncertainty in model 40 | - [x] Raw response versus task variable 41 | - [x] Average response versus task variable 42 | - [x] Parametric response fitting 43 | - [x] Bootstrapping errorbars for response 44 | - [ ] MSE on held-out data 45 | - [x] Machine learning response functions 46 | * Connectivity 47 | * Latent variables and dimensionality reduction 48 | * Linear models for timeseries 49 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/nestplotpos.m: -------------------------------------------------------------------------------- 1 | function [abspos, relpos] = nestplotpos(pp, varargin) 2 | % NESTPLOTPOS - position vector for nestplot 3 | % 4 | % NESTPLOTPOS(P) returns the position vector for the Pth nestplot 5 | % in the current grid. 6 | % 7 | % NESTPLOTPOS(M,N,P) or NESTPLOTPOS(P, 'grid', [M,N]) returns the 8 | % position vector for the Pth nestplot in an MxN grid within the 9 | % current container. It does not change the container grid. 10 | % 11 | % NESTPLOTPOS(..., 'container', AX) acts on the specified 12 | % container. 13 | 14 | % maneesh 15 | % 20120704: created. 16 | % 20131018: fixed spacing bug in multicell coordinates 17 | % 20140501: added order option 18 | 19 | container = []; % [gca] container axes 20 | spacing = []; 21 | xspacing = []; 22 | yspacing = []; 23 | nochecks = 0; 24 | order = ''; 25 | grid = []; 26 | 27 | if nargin >= 3 && isnumeric(varargin{1}) 28 | grid = [pp, varargin{1}]; 29 | pp = varargin{2}; 30 | varargin(1:2) = []; 31 | end 32 | 33 | optlistwarn(optlistassign(who, varargin)); 34 | 35 | if isempty(container) 36 | container = gca; 37 | end 38 | 39 | pdat = getappdata(container, 'NestData'); 40 | 41 | if (isfield(pdat, 'nestable')) 42 | while pdat.nestable == 0 43 | container = pdat.container; 44 | pdat = getappdata(container, 'NestData'); 45 | end 46 | end 47 | 48 | if isempty(spacing) 49 | spacing = pdat.spacing; 50 | end 51 | 52 | if isempty(xspacing) 53 | xspacing = spacing(1); 54 | end 55 | 56 | if isempty(yspacing) 57 | yspacing = spacing(end); 58 | end 59 | 60 | if isempty(grid) 61 | grid = pdat.grid; 62 | if isempty(grid) 63 | error('No grid specified and none established.'); 64 | end 65 | end 66 | 67 | if isempty(order) 68 | order = pdat.gridorder; 69 | end 70 | 71 | oldunits = get(container, 'Units'); 72 | set (container, 'Units', 'normalized'); 73 | ppos = get(container, 'Position'); 74 | set (container, 'Units', oldunits); 75 | 76 | gridbox = ppos(3:4) ./ fliplr(grid); 77 | 78 | if iscell(pp) 79 | ii = pp{1}-1; 80 | jj = pp{2}-1; 81 | else 82 | switch order 83 | case {'rowfirst', 'row'} 84 | ii = floor((pp-1)/grid(2)); 85 | jj = mod ((pp-1),grid(2)); 86 | case {'colfirst', 'columnfirst', 'col', 'column'} 87 | ii = mod ((pp-1),grid(1)); 88 | jj = floor((pp-1)/grid(1)); 89 | end 90 | end 91 | 92 | if ~nochecks && (any(ii >= grid(1)) || any(jj >= grid(2))) 93 | if iscell(pp) 94 | error(sprintf('Grid [%d,%d] too small for [%s, %s]', grid, num2str(ii+1),num2str(jj+1))); 95 | else 96 | error(sprintf('Grid [%d,%d] too small for %d', grid, pp)); 97 | end 98 | end 99 | 100 | abspos(1:2) = [min(jj), min(ii)] .* gridbox; 101 | abssiz(1:2) = [1+range(jj), 1+range(ii)].*gridbox; 102 | 103 | abspos(1) = ppos(1) + abspos(1) + xspacing/2 * gridbox(1); 104 | abspos(2) = ppos(2) + ppos(4) - abspos(2) - abssiz(2) + yspacing/2 * gridbox(2); 105 | 106 | abspos(3:4) = abssiz - [xspacing,yspacing].*gridbox; 107 | 108 | if nargout > 1 109 | relpos = [(abspos(1:2) - ppos(1:2))./ppos(3:4), abspos(3:4)./ ppos(3:4)]; 110 | end -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/nestplotpos.m: -------------------------------------------------------------------------------- 1 | function [abspos, relpos] = nestplotpos(pp, varargin) 2 | % NESTPLOTPOS - position vector for nestplot 3 | % 4 | % NESTPLOTPOS(P) returns the position vector for the Pth nestplot 5 | % in the current grid. 6 | % 7 | % NESTPLOTPOS(M,N,P) or NESTPLOTPOS(P, 'grid', [M,N]) returns the 8 | % position vector for the Pth nestplot in an MxN grid within the 9 | % current container. It does not change the container grid. 10 | % 11 | % NESTPLOTPOS(..., 'container', AX) acts on the specified 12 | % container. 13 | 14 | % maneesh 15 | % 20120704: created. 16 | % 20131018: fixed spacing bug in multicell coordinates 17 | % 20140501: added order option 18 | 19 | container = []; % [gca] container axes 20 | spacing = []; 21 | xspacing = []; 22 | yspacing = []; 23 | nochecks = 0; 24 | order = ''; 25 | grid = []; 26 | 27 | if nargin >= 3 && isnumeric(varargin{1}) 28 | grid = [pp, varargin{1}]; 29 | pp = varargin{2}; 30 | varargin(1:2) = []; 31 | end 32 | 33 | optlistwarn(optlistassign(who, varargin)); 34 | 35 | if isempty(container) 36 | container = gca; 37 | end 38 | 39 | pdat = getappdata(container, 'NestData'); 40 | 41 | if (isfield(pdat, 'nestable')) 42 | while pdat.nestable == 0 43 | container = pdat.container; 44 | pdat = getappdata(container, 'NestData'); 45 | end 46 | end 47 | 48 | if isempty(spacing) 49 | spacing = pdat.spacing; 50 | end 51 | 52 | if isempty(xspacing) 53 | xspacing = spacing(1); 54 | end 55 | 56 | if isempty(yspacing) 57 | yspacing = spacing(end); 58 | end 59 | 60 | if isempty(grid) 61 | grid = pdat.grid; 62 | if isempty(grid) 63 | error('No grid specified and none established.'); 64 | end 65 | end 66 | 67 | if isempty(order) 68 | order = pdat.gridorder; 69 | end 70 | 71 | oldunits = get(container, 'Units'); 72 | set (container, 'Units', 'normalized'); 73 | ppos = get(container, 'Position'); 74 | set (container, 'Units', oldunits); 75 | 76 | gridbox = ppos(3:4) ./ fliplr(grid); 77 | 78 | if iscell(pp) 79 | ii = pp{1}-1; 80 | jj = pp{2}-1; 81 | else 82 | switch order 83 | case {'rowfirst', 'row'} 84 | ii = floor((pp-1)/grid(2)); 85 | jj = mod ((pp-1),grid(2)); 86 | case {'colfirst', 'columnfirst', 'col', 'column'} 87 | ii = mod ((pp-1),grid(1)); 88 | jj = floor((pp-1)/grid(1)); 89 | end 90 | end 91 | 92 | if ~nochecks && (any(ii >= grid(1)) || any(jj >= grid(2))) 93 | if iscell(pp) 94 | error(sprintf('Grid [%d,%d] too small for [%s, %s]', grid, num2str(ii+1),num2str(jj+1))); 95 | else 96 | error(sprintf('Grid [%d,%d] too small for %d', grid, pp)); 97 | end 98 | end 99 | 100 | abspos(1:2) = [min(jj), min(ii)] .* gridbox; 101 | abssiz(1:2) = [1+range(jj), 1+range(ii)].*gridbox; 102 | 103 | abspos(1) = ppos(1) + abspos(1) + xspacing/2 * gridbox(1); 104 | abspos(2) = ppos(2) + ppos(4) - abspos(2) - abssiz(2) + yspacing/2 * gridbox(2); 105 | 106 | abspos(3:4) = abssiz - [xspacing,yspacing].*gridbox; 107 | 108 | if nargout > 1 109 | relpos = [(abspos(1:2) - ppos(1:2))./ppos(3:4), abspos(3:4)./ ppos(3:4)]; 110 | end -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/util/nestable.m: 
-------------------------------------------------------------------------------- 1 | function h = nestable(varargin) 2 | % nestable - control nestability of axes: H=nestable([AX],['on']|['off',P]) 3 | % 4 | % NESTABLE returns a handle to the current axes, unless they are not 5 | % nestable, in which case it returns the handle of the corresponding 6 | % container. 7 | % 8 | % NESTABLE('on') makes the current axes nestable, and returns a handle 9 | % to them. 10 | % 11 | % NESTABLE('off') makes the first *container* at or above the current axes 12 | % non-nestable, and returns a handle to the container above it in 13 | % the nestplot tree. The container must have a parent -- that is, 14 | % it must be a nested plot. 15 | % 16 | % NESTABLE('off', container) makes the first *container* at or above 17 | % the current axes non-nestable, sets its container to the 18 | % specified handle, and returns a handle to the new container. 19 | % 20 | % NESTABLE(H, ...) performs any of the above operations on the axes H. 21 | % 22 | % See also: NESTPLOT. 23 | 24 | % maneesh. 25 | % pre-20020324: created 26 | % 20130702: doc cleanup 27 | % 20130704: switched data store from userdata to appdata 28 | % 20140424: initialise empty children array in NestData 29 | % 20140501: bugfix -- nestable(h) now steps up as far as needed. 30 | % nestable(...,'on') adds a resize listener 31 | 32 | narg = nargin; 33 | 34 | if (narg > 0 & ~ischar(varargin{1})) 35 | chkaxes = varargin{1}; 36 | varargin = {varargin{2:end}}; 37 | narg = narg - 1; 38 | else 39 | chkaxes = gca; 40 | end 41 | 42 | switch (narg) 43 | case 0, 44 | 45 | h = chkaxes; 46 | ndat = getappdata(h, 'NestData'); 47 | 48 | while isfield(ndat, 'nestable') && ~ndat.nestable 49 | h = ndat.container; 50 | ndat = getappdata(h, 'NestData'); 51 | end 52 | 53 | 54 | otherwise, 55 | 56 | switch varargin{1} 57 | case 'on', 58 | ndat = getappdata(chkaxes, 'NestData'); 59 | if (isfield(ndat, 'nestable')) 60 | ndat.nestable = 1; 61 | else 62 | ndat = struct('nestable', 1, 'container', 0); 63 | end 64 | if ~isfield(ndat, 'children') 65 | ndat.children = []; 66 | end 67 | c_hand = handle(chkaxes); 68 | ndat.resizelistener = ... 69 | addlistener(c_hand, 'Position', 'PostSet',... 70 | @(x,y) nestresize(chkaxes)); 71 | 72 | setappdata (chkaxes, 'NestData', ndat); 73 | h = chkaxes; 74 | 75 | case 'off', 76 | chkaxes = nestable(chkaxes); 77 | ndat = getappdata(chkaxes, 'NestData'); 78 | if isfield(ndat, 'nestable') 79 | ndat.nestable = 0; 80 | if (narg < 2) 81 | if ~ishandle(ndat.container) 82 | error ('No container specified') 83 | end 84 | else 85 | ndat.container = varargin{2}; 86 | end 87 | else 88 | if (narg < 2) 89 | error ('No container specified') 90 | end 91 | ndat = struct('nestable', 0, 'container', varargin{2}); 92 | end 93 | setappdata(chkaxes, 'NestData', ndat); 94 | h = ndat.container; 95 | 96 | otherwise, 97 | error('usage: nestable|nestable on|nestable(''off'',P)|nestable(H,...)'); 98 | end 99 | 100 | end 101 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/util/nestable.m: -------------------------------------------------------------------------------- 1 | function h = nestable(varargin) 2 | % nestable - control nestability of axes: H=nestable([AX],['on']|['off',P]) 3 | % 4 | % NESTABLE returns a handle to the current axes, unless they are not 5 | % nestable, in which case it returns the handle of the corresponding 6 | % container. 
7 | % 8 | % NESTABLE('on') makes the current axes nestable, and returns a handle 9 | % to them. 10 | % 11 | % NESTABLE('off') makes the first *container* at or above the current axes 12 | % non-nestable, and returns a handle to the container above it in 13 | % the nestplot tree. The container must have a parent -- that is, 14 | % it must be a nested plot. 15 | % 16 | % NESTABLE('off', container) makes the first *container* at or above 17 | % the current axes non-nestable, sets its container to the 18 | % specified handle, and returns a handle to the new container. 19 | % 20 | % NESTABLE(H, ...) performs any of the above operations on the axes H. 21 | % 22 | % See also: NESTPLOT. 23 | 24 | % maneesh. 25 | % pre-20020324: created 26 | % 20130702: doc cleanup 27 | % 20130704: switched data store from userdata to appdata 28 | % 20140424: initialise empty children array in NestData 29 | % 20140501: bugfix -- nestable(h) now steps up as far as needed. 30 | % nestable(...,'on') adds a resize listener 31 | 32 | narg = nargin; 33 | 34 | if (narg > 0 & ~ischar(varargin{1})) 35 | chkaxes = varargin{1}; 36 | varargin = {varargin{2:end}}; 37 | narg = narg - 1; 38 | else 39 | chkaxes = gca; 40 | end 41 | 42 | switch (narg) 43 | case 0, 44 | 45 | h = chkaxes; 46 | ndat = getappdata(h, 'NestData'); 47 | 48 | while isfield(ndat, 'nestable') && ~ndat.nestable 49 | h = ndat.container; 50 | ndat = getappdata(h, 'NestData'); 51 | end 52 | 53 | 54 | otherwise, 55 | 56 | switch varargin{1} 57 | case 'on', 58 | ndat = getappdata(chkaxes, 'NestData'); 59 | if (isfield(ndat, 'nestable')) 60 | ndat.nestable = 1; 61 | else 62 | ndat = struct('nestable', 1, 'container', 0); 63 | end 64 | if ~isfield(ndat, 'children') 65 | ndat.children = []; 66 | end 67 | c_hand = handle(chkaxes); 68 | ndat.resizelistener = ... 69 | addlistener(c_hand, 'Position', 'PostSet',... 70 | @(x,y) nestresize(chkaxes)); 71 | 72 | setappdata (chkaxes, 'NestData', ndat); 73 | h = chkaxes; 74 | 75 | case 'off', 76 | chkaxes = nestable(chkaxes); 77 | ndat = getappdata(chkaxes, 'NestData'); 78 | if isfield(ndat, 'nestable') 79 | ndat.nestable = 0; 80 | if (narg < 2) 81 | if ~ishandle(ndat.container) 82 | error ('No container specified') 83 | end 84 | else 85 | ndat.container = varargin{2}; 86 | end 87 | else 88 | if (narg < 2) 89 | error ('No container specified') 90 | end 91 | ndat = struct('nestable', 0, 'container', varargin{2}); 92 | end 93 | setappdata(chkaxes, 'NestData', ndat); 94 | h = ndat.container; 95 | 96 | otherwise, 97 | error('usage: nestable|nestable on|nestable(''off'',P)|nestable(H,...)'); 98 | end 99 | 100 | end 101 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/metanetwork_reduce.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // metanetwork_reduce.cpp 3 | // metanetwork_reduce 4 | // 5 | // Created by Lucas Jeub on 22/11/2012. 
6 | // 7 | // usage: 8 | // 9 | // [output]=metanetwork_reduce('function_handle',input) 10 | // 11 | // implemented functions are 'assign', 'reduce', 'nodes' 12 | // 13 | // assign: takes a group vector as input and uses it to initialise the "group_index" 14 | // 15 | // 16 | // reduce: takes a column of the modularity matrix as 17 | // input 18 | // 19 | // returns the reduced column, where the ith entry is the sum of the original modularity 20 | // matrix over all nodes in group i 21 | // 22 | // 23 | // nodes: takes a group and returns the matlab index of all nodes in this group 24 | // 25 | // 26 | // Last modified by Lucas Jeub on 25/07/2014 27 | 28 | 29 | #include "mex.h" 30 | 31 | 32 | #include "matlab_matrix.h" 33 | #include "group_index.h" 34 | #include <cstring> 35 | #include <list> 36 | 37 | #ifndef OCTAVE 38 | #include "matrix.h" 39 | #endif 40 | 41 | using namespace std; 42 | static group_index group; 43 | 44 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ 45 | if (nrhs>0) { 46 | //get handle to function to perform 47 | mwSize strleng = mxGetM(prhs[0])*mxGetN(prhs[0])+1; 48 | char * handle; 49 | handle=(char *) mxCalloc(strleng, sizeof(char)); 50 | 51 | if (mxGetString(prhs[0],handle,strleng)) { 52 | mexErrMsgIdAndTxt("metanetwork_reduce:handle", "handle needs to be a string"); 53 | } 54 | 55 | //switch on handle 56 | if (!strcmp(handle, "assign")) { 57 | if (nrhs!=2) { 58 | mexErrMsgIdAndTxt("metanetwork_reduce:assign", "assign needs 1 input argument"); 59 | } 60 | group=prhs[1]; 61 | } 62 | else if (!strcmp(handle, "reduce")){ 63 | if (nrhs!=2||nlhs<1) { 64 | mexErrMsgIdAndTxt("metanetwork_reduce:reduce", "reduce needs 1 input and 1 output argument"); 65 | } 66 | if (mxIsDouble(prhs[1])) { 67 | full mod_out(group.n_groups,1); 68 | if (mxIsSparse(prhs[1])) { 69 | sparse mod_s(prhs[1]); 70 | for (mwIndex i=0; i<mod_s.nzero(); i++) { 71 | mod_out.get(group.nodes[mod_s.row[i]])+=mod_s.val[i]; 72 | } 73 | } 74 | else { 75 | full mod_d(prhs[1]); 76 | for (mwIndex i=0; i<group.n_groups; i++) { 77 | for (list<mwIndex>::iterator it=group.groups[i].begin(); it!=group.groups[i].end(); it++) { 78 | mod_out.get(i)+=mod_d.get(*it); 79 | } 80 | } 81 | } 82 | mod_out.export_matlab(plhs[0]); 83 | } 84 | } 85 | else if (!strcmp(handle, "nodes")) { 86 | if (nrhs!=2||nlhs<1) { 87 | mexErrMsgIdAndTxt("metanetwork_reduce:nodes", "nodes needs 1 input and 1 output argument"); 88 | } 89 | full nodes=group.index(*mxGetPr(prhs[1])-1); 90 | nodes.export_matlab(plhs[0]); 91 | } 92 | } 93 | 94 | } -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/ssm_em.m: -------------------------------------------------------------------------------- 1 | function [ssm, like] = ssm_em (data, varargin) 2 | % ssm_em - fit an LGSSM to data using the EM algorithm 3 | % 4 | % [Params, like] = SSM_EM (X, ...) fits the SSM to the data in X 5 | % [nObservables x nTimes x nSequences] using the EM algorithm. 6 | % It returns the new model as well as the log-likelihood after each 7 | % iteration, like. 8 | % 9 | % OPTIONS: 10 | % 11 | % 'nIter' - Maximum number of iterations of the EM algorithm. 12 | % 'tol' - Convergence tolerance: when change in 13 | % likelihood per point per step drops below 14 | % this threshold, iteration is stopped.
15 | % 'latentdim' - number of latent dimensions to use 16 | 17 | init = []; 18 | nIter = 100; 19 | tol = 1e-5; 20 | latentdim = 1; 21 | 22 | optlistassign(who, varargin{:}); 23 | 24 | % useful inline function 25 | cellsum = @(C)(sum(cat(3, C{:}), 3)); 26 | 27 | % discover dimensions 28 | [DD, nTime, nSeq] = size(data); % Output dim; # timepoints; # sequences 29 | KK = latentdim; % Latent dim 30 | 31 | if (isempty(init)) 32 | y0 = zeros(KK,1); 33 | Q0 = eye(KK); 34 | A = randn(KK,KK); 35 | Q = eye(KK); 36 | C = randn(DD,KK); 37 | R = eye(DD); 38 | else 39 | A = init.dynamics; 40 | C = init.output; 41 | Q = init.innovations; 42 | R = init.noise; 43 | y0 = init.initstate; 44 | Q0 = init.initvar; 45 | end 46 | 47 | 48 | like = zeros(1, nIter); % allocate likelihood list 49 | 50 | 51 | % precalculate some useful stuff 52 | Exx = reshape(data, DD, [])*reshape(data, DD, [])'; 53 | 54 | %% run EM 55 | iIter = 0; 56 | while (iIter < nIter) % avoid 'for' to allow nIter=Inf 57 | iIter = iIter + 1; 58 | 59 | Eyy = zeros(KK,KK); 60 | Ey2y2 = zeros(KK,KK); 61 | Ey1y1 = zeros(KK,KK); 62 | Ey2y1 = zeros(KK,KK); 63 | Exy = zeros(DD,KK); 64 | 65 | for iSeq = 1:nSeq 66 | [yhat, Vhat, Vjoint, ll] = ssm_kalman(data(:,:,iSeq), y0, Q0, A, Q, C, R, 'smooth'); 67 | 68 | if (any(cellfun(@det, Vhat) < 0)) 69 | warning('ssm_em: non PSD variance'); 70 | end 71 | 72 | Eyy_trial = yhat*yhat' + cellsum(Vhat); 73 | 74 | Eyy = Eyy + Eyy_trial; 75 | Ey2y2 = Ey2y2 + Eyy_trial - yhat(:,1)*yhat(:,1)' - Vhat{1}; 76 | Ey1y1 = Ey1y1 + Eyy_trial - yhat(:,end)*yhat(:,end)' - Vhat{end}; 77 | 78 | Ey2y1 = Ey2y1 + yhat(:,2:end)*yhat(:,1:end-1)' + cellsum(Vjoint); 79 | Exy = Exy + data(:,:,iSeq)*yhat'; 80 | 81 | like(iIter) = like(iIter) + sum(ll); 82 | end 83 | 84 | C = Exy / Eyy; 85 | R = (Exx - Exy*C')/(nSeq*nTime); 86 | R = diag(diag(R)); % keep only diagonal variance 87 | 88 | A = Ey2y1/Ey1y1; 89 | Q = (Ey2y2 - Ey2y1*A')/(nSeq*(nTime-1)); 90 | Q = (Q+Q')/2; % symmetrise to avoid accumulated numerical issues 91 | 92 | % don't update these 93 | % y0 = 94 | % Q0 = 95 | 96 | fprintf('\rSSM em iteration: %d \t\t likelihood = %g', iIter, like(iIter)); 97 | 98 | if tol > 0 & iIter > 1 99 | if abs(diff(like(iIter-1:iIter))) < tol*diff(like([1,iIter])) 100 | like = like(1:iIter); % truncate 101 | break; 102 | end 103 | end 104 | 105 | end 106 | fprintf('\n'); 107 | 108 | ssm.dynamics = A; 109 | ssm.output = C; 110 | ssm.innovations = Q; 111 | ssm.noise = R; 112 | ssm.initstate = y0; 113 | ssm.initvar = Q0; 114 | 115 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/ssm_em.m: -------------------------------------------------------------------------------- 1 | function [ssm, like] = ssm_em (data, varargin) 2 | % ssm_em - fit an LGSSM to data using the EM algorithm 3 | % 4 | % [Params, like] = SSM_EM (X, ...) fits the SSM to the data in X 5 | % [nObservables x nTimes x nSequences] using the EM algorithm. 6 | % It returns the new model as well as the log-likelihood after each 7 | % iteration, like. 8 | % 9 | % OPTIONS: 10 | % 11 | % 'nIter' - Maximum number of iterations of the EM algorithm. 12 | % 'tol' - Convergence tolerance: when change in 13 | % likelihood per point per step drops below 14 | % this threshold, iteration is stopped.
15 | % 'latentdim' - number of latent dimensions to use 16 | 17 | init = []; 18 | nIter = 100; 19 | tol = 1e-5; 20 | latentdim = 1; 21 | 22 | optlistassign(who, varargin{:}); 23 | 24 | % useful inline function 25 | cellsum = @(C)(sum(cat(3, C{:}), 3)); 26 | 27 | % discover dimensions 28 | [DD, nTime, nSeq] = size(data); % Output dim; # timepoints; # sequences 29 | KK = latentdim; % Latent dim 30 | 31 | if (isempty(init)) 32 | y0 = zeros(KK,1); 33 | Q0 = eye(KK); 34 | A = randn(KK,KK); 35 | Q = eye(KK); 36 | C = randn(DD,KK); 37 | R = eye(DD); 38 | else 39 | A = init.dynamics; 40 | C = init.output; 41 | Q = init.innovations; 42 | R = init.noise; 43 | y0 = init.initstate; 44 | Q0 = init.initvar; 45 | end 46 | 47 | 48 | like = zeros(1, nIter); % allocate likelihood list 49 | 50 | 51 | % precalculate some useful stuff 52 | Exx = reshape(data, DD, [])*reshape(data, DD, [])'; 53 | 54 | %% run EM 55 | iIter = 0; 56 | while (iIter < nIter) % avoid 'for' to allow nIter=Inf 57 | iIter = iIter + 1; 58 | 59 | Eyy = zeros(KK,KK); 60 | Ey2y2 = zeros(KK,KK); 61 | Ey1y1 = zeros(KK,KK); 62 | Ey2y1 = zeros(KK,KK); 63 | Exy = zeros(DD,KK); 64 | 65 | for iSeq = 1:nSeq 66 | [yhat, Vhat, Vjoint, ll] = ssm_kalman(data(:,:,iSeq), y0, Q0, A, Q, C, R, 'smooth'); 67 | 68 | if (any(cellfun(@det, Vhat) < 0)) 69 | warning('ssm_em: non PSD variance'); 70 | end 71 | 72 | Eyy_trial = yhat*yhat' + cellsum(Vhat); 73 | 74 | Eyy = Eyy + Eyy_trial; 75 | Ey2y2 = Ey2y2 + Eyy_trial - yhat(:,1)*yhat(:,1)' - Vhat{1}; 76 | Ey1y1 = Ey1y1 + Eyy_trial - yhat(:,end)*yhat(:,end)' - Vhat{end}; 77 | 78 | Ey2y1 = Ey2y1 + yhat(:,2:end)*yhat(:,1:end-1)' + cellsum(Vjoint); 79 | Exy = Exy + data(:,:,iSeq)*yhat'; 80 | 81 | like(iIter) = like(iIter) + sum(ll); 82 | end 83 | 84 | C = Exy / Eyy; 85 | R = (Exx - Exy*C')/(nSeq*nTime); 86 | R = diag(diag(R)); % keep only diagonal variance 87 | 88 | A = Ey2y1/Ey1y1; 89 | Q = (Ey2y2 - Ey2y1*A')/(nSeq*(nTime-1)); 90 | Q = (Q+Q')/2; % symmetrise to avoid accumulated numerical issues 91 | 92 | % don't update these 93 | % y0 = 94 | % Q0 = 95 | 96 | fprintf('\rSSM em iteration: %d \t\t likelihood = %g', iIter, like(iIter)); 97 | 98 | if tol > 0 & iIter > 1 99 | if abs(diff(like(iIter-1:iIter))) < tol*diff(like([1,iIter])) 100 | like = like(1:iIter); % truncate 101 | break; 102 | end 103 | end 104 | 105 | end 106 | fprintf('\n'); 107 | 108 | ssm.dynamics = A; 109 | ssm.output = C; 110 | ssm.innovations = Q; 111 | ssm.noise = R; 112 | ssm.initstate = y0; 113 | ssm.initvar = Q0; 114 | 115 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_gpfa/gpfaEngine.m: -------------------------------------------------------------------------------- 1 | function gpfaEngine(seqTrain, seqTest, fname, varargin) 2 | % 3 | % gpfaEngine(seqTrain, seqTest, fname, ...) 4 | % 5 | % Extract neural trajectories using GPFA.
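% Example call (the results filename here is hypothetical):
%
%   gpfaEngine(seqTrain, seqTest, 'mat_results/run001', 'xDim', 8);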
6 | % 7 | % INPUTS: 8 | % 9 | % seqTrain - training data structure, whose nth entry (corresponding to 10 | % the nth experimental trial) has fields 11 | % trialId (1 x 1) -- unique trial identifier 12 | % y (# neurons x T) -- neural data 13 | % T (1 x 1) -- number of timesteps 14 | % seqTest - test data structure (same format as seqTrain) 15 | % fname - filename of where results are saved 16 | % 17 | % OPTIONAL ARGUMENTS: 18 | % 19 | % xDim - state dimensionality (default: 3) 20 | % binWidth - spike bin width in msec (default: 20) 21 | % startTau - GP timescale initialization in msec (default: 100) 22 | % startEps - GP noise variance initialization (default: 1e-3) 23 | % 24 | % @ 2009 Byron Yu byronyu@stanford.edu 25 | % John Cunningham jcunnin@stanford.edu 26 | 27 | xDim = 3; 28 | binWidth = 20; % in msec 29 | startTau = 100; % in msec 30 | startEps = 1e-3; 31 | extraOpts = assignopts(who, varargin); 32 | 33 | % For compute efficiency, train on equal-length segments of trials 34 | seqTrainCut = cutTrials(seqTrain, extraOpts{:}); 35 | if isempty(seqTrainCut) 36 | fprintf('WARNING: no segments extracted for training. Defaulting to segLength=Inf.\n'); 37 | seqTrainCut = cutTrials(seqTrain, 'segLength', Inf); 38 | end 39 | 40 | % ================================== 41 | % Initialize state model parameters 42 | % ================================== 43 | startParams.covType = 'rbf'; 44 | % GP timescale 45 | % Assume binWidth is the time step size. 46 | startParams.gamma = (binWidth / startTau)^2 * ones(1, xDim); 47 | % GP noise variance 48 | startParams.eps = startEps * ones(1, xDim); 49 | 50 | % ======================================== 51 | % Initialize observation model parameters 52 | % ======================================== 53 | fprintf('Initializing parameters using factor analysis...\n'); 54 | 55 | yAll = [seqTrainCut.y]; 56 | [faParams, faLL] = fastfa(yAll, xDim, extraOpts{:}); 57 | 58 | startParams.d = mean(yAll, 2); 59 | startParams.C = faParams.L; 60 | startParams.R = diag(faParams.Ph); 61 | 62 | % Define parameter constraints 63 | startParams.notes.learnKernelParams = true; 64 | startParams.notes.learnGPNoise = false; 65 | startParams.notes.RforceDiagonal = true; 66 | 67 | currentParams = startParams; 68 | 69 | % ===================== 70 | % Fit model parameters 71 | % ===================== 72 | fprintf('\nFitting GPFA model...\n'); 73 | 74 | [estParams, seqTrainCut, LL, iterTime] =... 
75 | em(currentParams, seqTrainCut, extraOpts{:}); 76 | 77 | % Extract neural trajectories for original, unsegmented trials 78 | % using learned parameters 79 | [seqTrain, LLorig] = exactInferenceWithLL(seqTrain, estParams); 80 | 81 | % ======================================== 82 | % Leave-neuron-out prediction on test data 83 | % ======================================== 84 | if ~isempty(seqTest) % check if there are any test trials 85 | if estParams.notes.RforceDiagonal 86 | seqTest = cosmoother_gpfa_viaOrth_fast(seqTest, estParams, 1:xDim); 87 | else 88 | seqTest = cosmoother_gpfa_viaOrth(seqTest, estParams, 1:xDim); 89 | end 90 | end 91 | 92 | % ============= 93 | % Save results 94 | % ============= 95 | vars = who; 96 | fprintf('Saving %s...\n', fname); 97 | save(fname, vars{~ismember(vars, {'yAll'})}); 98 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_gpfa/gpfaEngine.m: -------------------------------------------------------------------------------- 1 | function gpfaEngine(seqTrain, seqTest, fname, varargin) 2 | % 3 | % gpfaEngine(seqTrain, seqTest, fname, ...) 4 | % 5 | % Extract neural trajectories using GPFA. 6 | % 7 | % INPUTS: 8 | % 9 | % seqTrain - training data structure, whose nth entry (corresponding to 10 | % the nth experimental trial) has fields 11 | % trialId (1 x 1) -- unique trial identifier 12 | % y (# neurons x T) -- neural data 13 | % T (1 x 1) -- number of timesteps 14 | % seqTest - test data structure (same format as seqTrain) 15 | % fname - filename of where results are saved 16 | % 17 | % OPTIONAL ARGUMENTS: 18 | % 19 | % xDim - state dimensionality (default: 3) 20 | % binWidth - spike bin width in msec (default: 20) 21 | % startTau - GP timescale initialization in msec (default: 100) 22 | % startEps - GP noise variance initialization (default: 1e-3) 23 | % 24 | % @ 2009 Byron Yu byronyu@stanford.edu 25 | % John Cunningham jcunnin@stanford.edu 26 | 27 | xDim = 3; 28 | binWidth = 20; % in msec 29 | startTau = 100; % in msec 30 | startEps = 1e-3; 31 | extraOpts = assignopts(who, varargin); 32 | 33 | % For compute efficiency, train on equal-length segments of trials 34 | seqTrainCut = cutTrials(seqTrain, extraOpts{:}); 35 | if isempty(seqTrainCut) 36 | fprintf('WARNING: no segments extracted for training. Defaulting to segLength=Inf.\n'); 37 | seqTrainCut = cutTrials(seqTrain, 'segLength', Inf); 38 | end 39 | 40 | % ================================== 41 | % Initialize state model parameters 42 | % ================================== 43 | startParams.covType = 'rbf'; 44 | % GP timescale 45 | % Assume binWidth is the time step size. 
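% (With the defaults binWidth = 20 and startTau = 100, this gives
% gamma = (20/100)^2 = 0.04 for every state dimension.)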
46 | startParams.gamma = (binWidth / startTau)^2 * ones(1, xDim); 47 | % GP noise variance 48 | startParams.eps = startEps * ones(1, xDim); 49 | 50 | % ======================================== 51 | % Initialize observation model parameters 52 | % ======================================== 53 | fprintf('Initializing parameters using factor analysis...\n'); 54 | 55 | yAll = [seqTrainCut.y]; 56 | [faParams, faLL] = fastfa(yAll, xDim, extraOpts{:}); 57 | 58 | startParams.d = mean(yAll, 2); 59 | startParams.C = faParams.L; 60 | startParams.R = diag(faParams.Ph); 61 | 62 | % Define parameter constraints 63 | startParams.notes.learnKernelParams = true; 64 | startParams.notes.learnGPNoise = false; 65 | startParams.notes.RforceDiagonal = true; 66 | 67 | currentParams = startParams; 68 | 69 | % ===================== 70 | % Fit model parameters 71 | % ===================== 72 | fprintf('\nFitting GPFA model...\n'); 73 | 74 | [estParams, seqTrainCut, LL, iterTime] =... 75 | em(currentParams, seqTrainCut, extraOpts{:}); 76 | 77 | % Extract neural trajectories for original, unsegmented trials 78 | % using learned parameters 79 | [seqTrain, LLorig] = exactInferenceWithLL(seqTrain, estParams); 80 | 81 | % ======================================== 82 | % Leave-neuron-out prediction on test data 83 | % ======================================== 84 | if ~isempty(seqTest) % check if there are any test trials 85 | if estParams.notes.RforceDiagonal 86 | seqTest = cosmoother_gpfa_viaOrth_fast(seqTest, estParams, 1:xDim); 87 | else 88 | seqTest = cosmoother_gpfa_viaOrth(seqTest, estParams, 1:xDim); 89 | end 90 | end 91 | 92 | % ============= 93 | % Save results 94 | % ============= 95 | vars = who; 96 | fprintf('Saving %s...\n', fname); 97 | save(fname, vars{~ismember(vars, {'yAll'})}); 98 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/assignopts.m: -------------------------------------------------------------------------------- 1 | function remain = assignopts (opts, varargin) 2 | % assignopts - assign optional arguments (matlab 5 or higher) 3 | % 4 | % REM = ASSIGNOPTS(OPTLIST, 'VAR1', VAL1, 'VAR2', VAL2, ...) 5 | % assigns, in the caller's workspace, the values VAL1,VAL2,... to 6 | % the variables that appear in the cell array OPTLIST and that match 7 | % the strings 'VAR1','VAR2',... . Any VAR-VAL pairs that do not 8 | % match a variable in OPTLIST are returned in the cell array REM. 9 | % The VAR-VAL pairs can also be passed to ASSIGNOPTS in a cell 10 | % array: REM = ASSIGNOPTS(OPTLIST, {'VAR1', VAL1, ...}); 11 | % 12 | % By default ASSIGNOPTS matches option names using the strmatch 13 | % defaults: matches are case sensitive, but a (unique) prefix is 14 | % sufficient. If a 'VAR' string is a prefix for more than one 15 | % option in OPTLIST, and does not match any of them exactly, no 16 | % assignment occurs and the VAR-VAL pair is returned in REM. 17 | % 18 | % This behaviour can be modified by preceding OPTLIST with one or 19 | % both of the following flags: 20 | % 'ignorecase' implies case-insensitive matches. 21 | % 'exact' implies exact string matches. 22 | % Both together imply case-insensitive, but otherwise exact, matches. 23 | % 24 | % ASSIGNOPTS is useful for processing optional arguments to a function.
25 | % Thus in a function which starts: 26 | % function foo(x,y,varargin) 27 | % z = 0; 28 | % assignopts({'z'}, varargin{:}); 29 | % the variable z can be given a non-default value by calling the 30 | % function thus: foo(x,y,'z',4); When used in this way, a list 31 | % of currently defined variables can easily be obtained using 32 | % WHO. Thus if we define: 33 | % function foo(x,y,varargin) 34 | % opt1 = 1; 35 | % opt2 = 2; 36 | % rem = assignopts('ignorecase', who, varargin); 37 | % and call foo(x, y, 'OPT1', 10, 'opt', 20); the variable opt1 38 | % will have the value 10, the variable opt2 will have the 39 | % (default) value 2 and the list rem will have the value {'opt', 40 | % 20}. 41 | % 42 | % See also WARNOPTS, WHO. 43 | % 44 | % Copyright (C) by Maneesh Sahani 45 | 46 | ignorecase = 0; 47 | exact = 0; 48 | 49 | % check for flags at the beginning 50 | while (~iscell(opts)) 51 | switch(lower(opts)) 52 | case 'ignorecase', 53 | ignorecase = 1; 54 | case 'exact', 55 | exact = 1; 56 | otherwise, 57 | error(['unrecognized flag: ', opts]); 58 | end 59 | 60 | opts = varargin{1}; 61 | varargin = varargin(2:end); 62 | end 63 | 64 | % if passed cell array instead of list, deal 65 | if length(varargin) == 1 & iscell(varargin{1}) 66 | varargin = varargin{1}; 67 | end 68 | 69 | if rem(length(varargin),2)~=0, 70 | error('Optional arguments and values must come in pairs') 71 | end 72 | 73 | done = zeros(1, length(varargin)); 74 | 75 | origopts = opts; 76 | if ignorecase 77 | opts = lower(opts); 78 | end 79 | 80 | for i = 1:2:length(varargin) 81 | 82 | opt = varargin{i}; 83 | if ignorecase 84 | opt = lower(opt); 85 | end 86 | 87 | % look for matches 88 | 89 | if exact 90 | match = strmatch(opt, opts, 'exact'); 91 | else 92 | match = strmatch(opt, opts); 93 | end 94 | 95 | % if more than one matched, try for an exact match ... if this 96 | % fails we'll ignore this option. 97 | 98 | if (length(match) > 1) 99 | match = strmatch(opt, opts, 'exact'); 100 | end 101 | 102 | % if we found a unique match, assign in the corresponding value, 103 | % using the *original* option name 104 | 105 | if length(match) == 1 106 | assignin('caller', origopts{match}, varargin{i+1}); 107 | done(i:i+1) = 1; 108 | end 109 | end 110 | 111 | varargin(find(done)) = []; 112 | remain = varargin; 113 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/assignopts.m: -------------------------------------------------------------------------------- 1 | function remain = assignopts (opts, varargin) 2 | % assignopts - assign optional arguments (matlab 5 or higher) 3 | % 4 | % REM = ASSIGNOPTS(OPTLIST, 'VAR1', VAL1, 'VAR2', VAL2, ...) 5 | % assigns, in the caller's workspace, the values VAL1,VAL2,... to 6 | % the variables that appear in the cell array OPTLIST and that match 7 | % the strings 'VAR1','VAR2',... . Any VAR-VAL pairs that do not 8 | % match a variable in OPTLIST are returned in the cell array REM. 9 | % The VAR-VAL pairs can also be passed to ASSIGNOPTS in a cell 10 | % array: REM = ASSIGNOPTS(OPTLIST, {'VAR1', VAL1, ...}); 11 | % 12 | % By default ASSIGNOPTS matches option names using the strmatch 13 | % defaults: matches are case sensitive, but a (unique) prefix is 14 | % sufficient. If a 'VAR' string is a prefix for more than one 15 | % option in OPTLIST, and does not match any of them exactly, no 16 | % assignment occurs and the VAR-VAL pair is returned in REM.
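% For example, with OPTLIST = {'color','count'}, the pair 'co', 1 is a prefix of both options and an exact match of neither, so nothing is assigned and {'co', 1} is returned in REM.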
17 | % 18 | % This behaviour can be modified by preceding OPTLIST with one or 19 | % both of the following flags: 20 | % 'ignorecase' implies case-insensitive matches. 21 | % 'exact' implies exact string matches. 22 | % Both together imply case-insensitive, but otherwise exact, matches. 23 | % 24 | % ASSIGNOPTS is useful for processing optional arguments to a function. 25 | % Thus in a function which starts: 26 | % function foo(x,y,varargin) 27 | % z = 0; 28 | % assignopts({'z'}, varargin{:}); 29 | % the variable z can be given a non-default value by calling the 30 | % function thus: foo(x,y,'z',4); When used in this way, a list 31 | % of currently defined variables can easily be obtained using 32 | % WHO. Thus if we define: 33 | % function foo(x,y,varargin) 34 | % opt1 = 1; 35 | % opt2 = 2; 36 | % rem = assignopts('ignorecase', who, varargin); 37 | % and call foo(x, y, 'OPT1', 10, 'opt', 20); the variable opt1 38 | % will have the value 10, the variable opt2 will have the 39 | % (default) value 2 and the list rem will have the value {'opt', 40 | % 20}. 41 | % 42 | % See also WARNOPTS, WHO. 43 | % 44 | % Copyright (C) by Maneesh Sahani 45 | 46 | ignorecase = 0; 47 | exact = 0; 48 | 49 | % check for flags at the beginning 50 | while (~iscell(opts)) 51 | switch(lower(opts)) 52 | case 'ignorecase', 53 | ignorecase = 1; 54 | case 'exact', 55 | exact = 1; 56 | otherwise, 57 | error(['unrecognized flag: ', opts]); 58 | end 59 | 60 | opts = varargin{1}; 61 | varargin = varargin(2:end); 62 | end 63 | 64 | % if passed cell array instead of list, deal 65 | if length(varargin) == 1 & iscell(varargin{1}) 66 | varargin = varargin{1}; 67 | end 68 | 69 | if rem(length(varargin),2)~=0, 70 | error('Optional arguments and values must come in pairs') 71 | end 72 | 73 | done = zeros(1, length(varargin)); 74 | 75 | origopts = opts; 76 | if ignorecase 77 | opts = lower(opts); 78 | end 79 | 80 | for i = 1:2:length(varargin) 81 | 82 | opt = varargin{i}; 83 | if ignorecase 84 | opt = lower(opt); 85 | end 86 | 87 | % look for matches 88 | 89 | if exact 90 | match = strmatch(opt, opts, 'exact'); 91 | else 92 | match = strmatch(opt, opts); 93 | end 94 | 95 | % if more than one matched, try for an exact match ... if this 96 | % fails we'll ignore this option. 97 | 98 | if (length(match) > 1) 99 | match = strmatch(opt, opts, 'exact'); 100 | end 101 | 102 | % if we found a unique match, assign in the corresponding value, 103 | % using the *original* option name 104 | 105 | if length(match) == 1 106 | assignin('caller', origopts{match}, varargin{i+1}); 107 | done(i:i+1) = 1; 108 | end 109 | end 110 | 111 | varargin(find(done)) = []; 112 | remain = varargin; 113 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/core_twostage/fastfa.m: -------------------------------------------------------------------------------- 1 | function [estParams, LL] = fastfa(X, zDim, varargin) 2 | % 3 | % [estParams, LL] = fastfa(X, zDim, ...) 4 | % 5 | % Factor analysis and probabilistic PCA.
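% (Note: the generative model fit here is x = L*z + d + noise, with latents z ~ N(0, I) and independent noise ~ N(0, diag(Ph)); under 'ppca' all entries of Ph are tied to a single shared value.)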
6 | % 7 | % xDim: data dimensionality 8 | % zDim: latent dimensionality 9 | % N: number of data points 10 | % 11 | % INPUTS: 12 | % 13 | % X - data matrix (xDim x N) 14 | % zDim - number of factors 15 | % 16 | % OUTPUTS: 17 | % 18 | % estParams.L - factor loadings (xDim x zDim) 19 | % estParams.Ph - diagonal of uniqueness matrix (xDim x 1) 20 | % estParams.d - data mean (xDim x 1) 21 | % LL - log likelihood at each EM iteration 22 | % 23 | % OPTIONAL ARGUMENTS: 24 | % 25 | % typ - 'fa' (default) or 'ppca' 26 | % tol - stopping criterion for EM (default: 1e-8) 27 | % cyc - maximum number of EM iterations (default: 1e8) 28 | % minVarFrac - fraction of overall data variance for each observed dimension 29 | % to set as the private variance floor. This is used to combat 30 | % Heywood cases, where ML parameter learning returns one or more 31 | % zero private variances. (default: 0.01) 32 | % (See Martin & McDonald, Psychometrika, Dec 1975.) 33 | % verbose - logical that specifies whether to display status messages 34 | % (default: false) 35 | % 36 | % Code adapted from ffa.m by Zoubin Ghahramani. 37 | % 38 | % @ 2009 Byron Yu -- byronyu@stanford.edu 39 | 40 | typ = 'fa'; 41 | tol = 1e-8; 42 | cyc = 1e8; 43 | minVarFrac = 0.01; 44 | verbose = false; 45 | assignopts(who, varargin); 46 | 47 | randn('state', 0); 48 | [xDim, N] = size(X); 49 | 50 | % Initialization of parameters 51 | cX = cov(X', 1); 52 | if rank(cX) == xDim 53 | scale = exp(2*sum(log(diag(chol(cX))))/xDim); 54 | else 55 | % cX may not be full rank because N < xDim 56 | fprintf('WARNING in fastfa.m: Data matrix is not full rank.\n'); 57 | r = rank(cX); 58 | e = sort(eig(cX), 'descend'); 59 | scale = geomean(e(1:r)); 60 | end 61 | L = randn(xDim,zDim)*sqrt(scale/zDim); 62 | Ph = diag(cX); 63 | d = mean(X, 2); 64 | 65 | varFloor = minVarFrac * diag(cX); 66 | 67 | I = eye(zDim); 68 | const = -xDim/2*log(2*pi); 69 | LLi = 0; 70 | LL = []; 71 | 72 | for i = 1:cyc 73 | % ======= 74 | % E-step 75 | % ======= 76 | iPh = diag(1./Ph); 77 | iPhL = iPh * L; 78 | MM = iPh - iPhL / (I + L' * iPhL) * iPhL'; 79 | beta = L' * MM; % zDim x xDim 80 | 81 | cX_beta = cX * beta'; % xDim x zDim 82 | EZZ = I - beta * L + beta * cX_beta; 83 | 84 | % Compute log likelihood 85 | LLold = LLi; 86 | ldM = sum(log(diag(chol(MM)))); 87 | LLi = N*const + N*ldM - 0.5*N*sum(sum(MM .* cX)); 88 | if verbose 89 | fprintf('EM iteration %5i lik %8.1f \r', i, LLi); 90 | end 91 | LL = [LL LLi]; 92 | 93 | % ======= 94 | % M-step 95 | % ======= 96 | L = cX_beta / EZZ; 97 | Ph = diag(cX) - sum(cX_beta .* L, 2); 98 | 99 | if isequal(typ, 'ppca') 100 | Ph = mean(Ph) * ones(xDim, 1); 101 | end 102 | if isequal(typ, 'fa') 103 | % Set minimum private variance 104 | Ph = max(varFloor, Ph); 105 | end 106 | 107 | if i<=2 108 | LLbase = LLi; 109 | elseif (LLi < LLold) 110 | disp('VIOLATION'); 111 | elseif ((LLi-LLbase) < (1+tol)*(LLold-LLbase)) 112 | break; 113 | end 114 | end 115 | 116 | if verbose 117 | fprintf('\n'); 118 | end 119 | 120 | if any(Ph == varFloor) 121 | fprintf('Warning: Private variance floor used for one or more observed dimensions in FA.\n'); 122 | end 123 | 124 | estParams.L = L; 125 | estParams.Ph = Ph; 126 | estParams.d = d; 127 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/core_twostage/fastfa.m: -------------------------------------------------------------------------------- 1 | function [estParams, LL] = fastfa(X, zDim, varargin) 2 | % 3 | % [estParams, LL] = fastfa(X, zDim, ...) 
4 | % 5 | % Factor analysis and probabilistic PCA. 6 | % 7 | % xDim: data dimensionality 8 | % zDim: latent dimensionality 9 | % N: number of data points 10 | % 11 | % INPUTS: 12 | % 13 | % X - data matrix (xDim x N) 14 | % zDim - number of factors 15 | % 16 | % OUTPUTS: 17 | % 18 | % estParams.L - factor loadings (xDim x zDim) 19 | % estParams.Ph - diagonal of uniqueness matrix (xDim x 1) 20 | % estParams.d - data mean (xDim x 1) 21 | % LL - log likelihood at each EM iteration 22 | % 23 | % OPTIONAL ARGUMENTS: 24 | % 25 | % typ - 'fa' (default) or 'ppca' 26 | % tol - stopping criterion for EM (default: 1e-8) 27 | % cyc - maximum number of EM iterations (default: 1e8) 28 | % minVarFrac - fraction of overall data variance for each observed dimension 29 | % to set as the private variance floor. This is used to combat 30 | % Heywood cases, where ML parameter learning returns one or more 31 | % zero private variances. (default: 0.01) 32 | % (See Martin & McDonald, Psychometrika, Dec 1975.) 33 | % verbose - logical that specifies whether to display status messages 34 | % (default: false) 35 | % 36 | % Code adapted from ffa.m by Zoubin Ghahramani. 37 | % 38 | % @ 2009 Byron Yu -- byronyu@stanford.edu 39 | 40 | typ = 'fa'; 41 | tol = 1e-8; 42 | cyc = 1e8; 43 | minVarFrac = 0.01; 44 | verbose = false; 45 | assignopts(who, varargin); 46 | 47 | randn('state', 0); 48 | [xDim, N] = size(X); 49 | 50 | % Initialization of parameters 51 | cX = cov(X', 1); 52 | if rank(cX) == xDim 53 | scale = exp(2*sum(log(diag(chol(cX))))/xDim); 54 | else 55 | % cX may not be full rank because N < xDim 56 | fprintf('WARNING in fastfa.m: Data matrix is not full rank.\n'); 57 | r = rank(cX); 58 | e = sort(eig(cX), 'descend'); 59 | scale = geomean(e(1:r)); 60 | end 61 | L = randn(xDim,zDim)*sqrt(scale/zDim); 62 | Ph = diag(cX); 63 | d = mean(X, 2); 64 | 65 | varFloor = minVarFrac * diag(cX); 66 | 67 | I = eye(zDim); 68 | const = -xDim/2*log(2*pi); 69 | LLi = 0; 70 | LL = []; 71 | 72 | for i = 1:cyc 73 | % ======= 74 | % E-step 75 | % ======= 76 | iPh = diag(1./Ph); 77 | iPhL = iPh * L; 78 | MM = iPh - iPhL / (I + L' * iPhL) * iPhL'; 79 | beta = L' * MM; % zDim x xDim 80 | 81 | cX_beta = cX * beta'; % xDim x zDim 82 | EZZ = I - beta * L + beta * cX_beta; 83 | 84 | % Compute log likelihood 85 | LLold = LLi; 86 | ldM = sum(log(diag(chol(MM)))); 87 | LLi = N*const + N*ldM - 0.5*N*sum(sum(MM .* cX)); 88 | if verbose 89 | fprintf('EM iteration %5i lik %8.1f \r', i, LLi); 90 | end 91 | LL = [LL LLi]; 92 | 93 | % ======= 94 | % M-step 95 | % ======= 96 | L = cX_beta / EZZ; 97 | Ph = diag(cX) - sum(cX_beta .* L, 2); 98 | 99 | if isequal(typ, 'ppca') 100 | Ph = mean(Ph) * ones(xDim, 1); 101 | end 102 | if isequal(typ, 'fa') 103 | % Set minimum private variance 104 | Ph = max(varFloor, Ph); 105 | end 106 | 107 | if i<=2 108 | LLbase = LLi; 109 | elseif (LLi < LLold) 110 | disp('VIOLATION'); 111 | elseif ((LLi-LLbase) < (1+tol)*(LLold-LLbase)) 112 | break; 113 | end 114 | end 115 | 116 | if verbose 117 | fprintf('\n'); 118 | end 119 | 120 | if any(Ph == varFloor) 121 | fprintf('Warning: Private variance floor used for one or more observed dimensions in FA.\n'); 122 | end 123 | 124 | estParams.L = L; 125 | estParams.Ph = Ph; 126 | estParams.d = d; 127 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/ssm_kalman.m: -------------------------------------------------------------------------------- 1 | function [yhat, Vhat, Vjoint, like] = ssm_kalman(xx, y0, Q0, A, Q, C, R, 
pass) 2 | % SSM_KALMAN - Kalman-smoother estimates of SSM state posterior 3 | % 4 | % [Y,V,Vj,L] = SSM_KALMAN(X,Y0,Q0,A,Q,C,R) performs the Kalman 5 | % smoothing recursions on the (DxT) data matrix X, for the 6 | % LGSSM defined by the following parameters 7 | % y0 Kx1 - initial latent state 8 | % Q0 KxK - initial variance 9 | % A KxK - latent dynamics matrix 10 | % Q KxK - innovations covariance matrix 11 | % C DxK - output loading matrix 12 | % R DxD - output noise matrix 13 | % The function returns: 14 | % Y KxT - posterior mean estimates 15 | % V 1xT cell array of KxK matrices - posterior variances on y_t 16 | % Vj 1xT-1 cell array of KxK matrices - posterior covariances between y_{t+1}, y_t 17 | % L 1xT - conditional log-likelihoods log(p(x_t|x_{1:t-1})) 18 | % 19 | % [Y,V,[],L] = SSM_KALMAN(..., 'filt') performs Kalman filtering. 20 | % The joint covariances (Vj) are not computed and an empty cell 21 | % array is returned. 22 | 23 | % check for ssm structure instead of individual arguments 24 | if (nargin < 7) && isstruct(y0) 25 | if (nargin < 3) 26 | pass = 'smooth'; 27 | else 28 | pass = Q0; 29 | end 30 | ssm = y0; 31 | y0 = ssm.initstate; 32 | Q0 = ssm.initvar; 33 | A = ssm.dynamics; 34 | Q = ssm.innovations; 35 | C = ssm.output; 36 | R = ssm.noise; 37 | elseif nargin < 8 38 | pass = 'smooth'; 39 | end 40 | 41 | % check dimensions 42 | 43 | [dd,kk] = size(C); 44 | [tt] = size(xx, 2); 45 | 46 | if any([size(y0) ~= [kk,1], ... 47 | size(Q0) ~= [kk,kk], ... 48 | size(A) ~= [kk,kk], ... 49 | size(Q) ~= [kk,kk], ... 50 | size(R) ~= [dd,dd]]) 51 | error ('inconsistent parameter dimensions'); 52 | end 53 | 54 | 55 | 56 | %%%% allocate arrays 57 | 58 | yfilt = zeros(kk,tt); % filtering estimate: \hat(y)_t^t 59 | Vfilt = cell(1,tt); % filtering variance: \hat(V)_t^t 60 | yhat = zeros(kk,tt); % smoothing estimate: \hat(y)_t^T 61 | Vhat = cell(1,tt); % smoothing variance: \hat(V)_t^T 62 | K = cell(1, tt); % Kalman gain 63 | J = cell(1, tt); % smoothing gain 64 | like = zeros(1, tt); % conditional log-likelihood: p(x_t|x_{1:t-1}) 65 | 66 | Ik = eye(kk); 67 | 68 | invR = diag(1./diag(R)); 69 | invRC = invR*C; 70 | CinvRC = C'*invR*C; 71 | 72 | %%%% forward pass 73 | 74 | Vpred = Q0; 75 | ypred = y0; 76 | 77 | for t = 1:tt 78 | xprederr = xx(:,t) - C*ypred; 79 | 80 | 81 | Vfilt{t} = inv(inv(Vpred) + CinvRC); 82 | %% symmetrise to avoid numerical drift 83 | Vfilt{t} = (Vfilt{t} + Vfilt{t}')/2; 84 | 85 | %% Vxpred = C*Vpred*C'+R; 86 | invVxpred = invR - invRC*Vfilt{t}*invRC'; 87 | 88 | %% like(t) = -0.5*logdet(2*pi*(Vxpred)) - 0.5*xprederr'/Vxpred*xprederr; 89 | like(t) = 0.5*logdet(invVxpred/(2*pi)) - 0.5*xprederr'*invVxpred*xprederr; 90 | 91 | %% K{t} = Vpred*C/Vxpred; 92 | K{t} = Vfilt{t}*invRC'; 93 | 94 | yfilt(:,t) = ypred + K{t}*xprederr; 95 | % Vfilt{t} = Vpred - K{t}*C*Vpred; 96 | 97 | ypred = A*yfilt(:,t); 98 | Vpred = A*Vfilt{t}*A' + Q; 99 | end 100 | 101 | 102 | %%%% backward pass 103 | 104 | if (strncmp(lower(pass), 'filt', 4) || strncmp(lower(pass), 'forw', 4)) 105 | % skip if filtering/forward pass only 106 | yhat = yfilt; 107 | Vhat = Vfilt; 108 | Vjoint = {}; 109 | else 110 | yhat(:,tt) = yfilt(:,tt); 111 | Vhat{tt} = Vfilt{tt}; 112 | 113 | for t = tt-1:-1:1 114 | J{t} = (Vfilt{t}*A')/(A*Vfilt{t}*A' + Q); 115 | yhat(:,t) = yfilt(:,t) + J{t}*(yhat(:,t+1) - A*yfilt(:,t)); 116 | Vhat{t} = Vfilt{t} + J{t}*(Vhat{t+1} - A*Vfilt{t}*A' - Q)* J{t}'; 117 | end 118 | 119 | Vjoint{tt-1} = (Ik - K{tt}*C)*A*Vfilt{tt-1}; 120 | for t = tt-2:-1:1 121 | Vjoint{t} = Vfilt{t+1}*J{t}' +
J{t+1}*(Vjoint{t+1} - A*Vfilt{t+1})*J{t}'; 122 | end 123 | end 124 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/ssm_kalman.m: -------------------------------------------------------------------------------- 1 | function [yhat, Vhat, Vjoint, like] = ssm_kalman(xx, y0, Q0, A, Q, C, R, pass) 2 | % SSM_KALMAN - Kalman-smoother estimates of SSM state posterior 3 | % 4 | % [Y,V,Vj,L] = SSM_KALMAN(X,Y0,Q0,A,Q,C,R) performs the Kalman 5 | % smoothing recursions on the (DxT) data matrix X, for the 6 | % LGSSM defined by the following parameters 7 | % y0 Kx1 - initial latent state 8 | % Q0 KxK - initial variance 9 | % A KxK - latent dynamics matrix 10 | % Q KxK - innovations covariance matrix 11 | % C DxK - output loading matrix 12 | % R DxD - output noise matrix 13 | % The function returns: 14 | % Y KxT - posterior mean estimates 15 | % V 1xT cell array of KxK matrices - posterior variances on y_t 16 | % Vj 1xT-1 cell array of KxK matrices - posterior covariances between y_{t+1}, y_t 17 | % L 1xT - conditional log-likelihoods log(p(x_t|x_{1:t-1})) 18 | % 19 | % [Y,V,[],L] = SSM_KALMAN(..., 'filt') performs Kalman filtering. 20 | % The joint covariances (Vj) are not computed and an empty cell 21 | % array is returned. 22 | 23 | % check for ssm structure instead of individual arguments 24 | if (nargin < 7) && isstruct(y0) 25 | if (nargin < 3) 26 | pass = 'smooth'; 27 | else 28 | pass = Q0; 29 | end 30 | ssm = y0; 31 | y0 = ssm.initstate; 32 | Q0 = ssm.initvar; 33 | A = ssm.dynamics; 34 | Q = ssm.innovations; 35 | C = ssm.output; 36 | R = ssm.noise; 37 | elseif nargin < 8 38 | pass = 'smooth'; 39 | end 40 | 41 | % check dimensions 42 | 43 | [dd,kk] = size(C); 44 | [tt] = size(xx, 2); 45 | 46 | if any([size(y0) ~= [kk,1], ... 47 | size(Q0) ~= [kk,kk], ... 48 | size(A) ~= [kk,kk], ... 49 | size(Q) ~= [kk,kk], ...
50 | size(R) ~= [dd,dd]]) 51 | error ('inconsistent parameter dimensions'); 52 | end 53 | 54 | 55 | 56 | %%%% allocate arrays 57 | 58 | yfilt = zeros(kk,tt); % filtering estimate: \hat(y)_t^t 59 | Vfilt = cell(1,tt); % filtering variance: \hat(V)_t^t 60 | yhat = zeros(kk,tt); % smoothing estimate: \hat(y)_t^T 61 | Vhat = cell(1,tt); % smoothing variance: \hat(V)_t^T 62 | K = cell(1, tt); % Kalman gain 63 | J = cell(1, tt); % smoothing gain 64 | like = zeros(1, tt); % conditional log-likelihood: p(x_t|x_{1:t-1}) 65 | 66 | Ik = eye(kk); 67 | 68 | invR = diag(1./diag(R)); 69 | invRC = invR*C; 70 | CinvRC = C'*invR*C; 71 | 72 | %%%% forward pass 73 | 74 | Vpred = Q0; 75 | ypred = y0; 76 | 77 | for t = 1:tt 78 | xprederr = xx(:,t) - C*ypred; 79 | 80 | 81 | Vfilt{t} = inv(inv(Vpred) + CinvRC); 82 | %% symmetrise to avoid numerical drift 83 | Vfilt{t} = (Vfilt{t} + Vfilt{t}')/2; 84 | 85 | %% Vxpred = C*Vpred*C'+R; 86 | invVxpred = invR - invRC*Vfilt{t}*invRC'; 87 | 88 | %% like(t) = -0.5*logdet(2*pi*(Vxpred)) - 0.5*xprederr'/Vxpred*xprederr; 89 | like(t) = 0.5*logdet(invVxpred/(2*pi)) - 0.5*xprederr'*invVxpred*xprederr; 90 | 91 | %% K{t} = Vpred*C/Vxpred; 92 | K{t} = Vfilt{t}*invRC'; 93 | 94 | yfilt(:,t) = ypred + K{t}*xprederr; 95 | % Vfilt{t} = Vpred - K{t}*C*Vpred; 96 | 97 | ypred = A*yfilt(:,t); 98 | Vpred = A*Vfilt{t}*A' + Q; 99 | end 100 | 101 | 102 | %%%% backward pass 103 | 104 | if (strncmp(lower(pass), 'filt', 4) || strncmp(lower(pass), 'forw', 4)) 105 | % skip if filtering/forward pass only 106 | yhat = yfilt; 107 | Vhat = Vfilt; 108 | Vjoint = {}; 109 | else 110 | yhat(:,tt) = yfilt(:,tt); 111 | Vhat{tt} = Vfilt{tt}; 112 | 113 | for t = tt-1:-1:1 114 | J{t} = (Vfilt{t}*A')/(A*Vfilt{t}*A' + Q); 115 | yhat(:,t) = yfilt(:,t) + J{t}*(yhat(:,t+1) - A*yfilt(:,t)); 116 | Vhat{t} = Vfilt{t} + J{t}*(Vhat{t+1} - A*Vfilt{t}*A' - Q)* J{t}'; 117 | end 118 | 119 | Vjoint{tt-1} = (Ik - K{tt}*C)*A*Vfilt{tt-1}; 120 | for t = tt-2:-1:1 121 | Vjoint{t} = Vfilt{t+1}*J{t}' + J{t+1}*(Vjoint{t+1} - A*Vfilt{t+1})*J{t}'; 122 | end 123 | end 124 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/postprocess.m: -------------------------------------------------------------------------------- 1 | function [estParams, seqTrain, seqTest] = postprocess(ws, varargin) 2 | % 3 | % [estParams, seqTrain, seqTest] = postprocess(ws, ...) 4 | % 5 | % Orthonormalization and other cleanup. 6 | % 7 | % INPUT: 8 | % 9 | % ws - workspace variables returned by neuralTraj.m 10 | % 11 | % OUTPUTS: 12 | % 13 | % estParams - estimated model parameters, including 'Corth' obtained 14 | % by orthonormalizing the columns of C 15 | % seqTrain - training data structure containing new field 'xorth', 16 | % the orthonormalized neural trajectories 17 | % seqTest - test data structure containing orthonormalized neural 18 | % trajectories in 'xorth', obtained using 'estParams' 19 | % 20 | % OPTIONAL ARGUMENT: 21 | % 22 | % kernSD - for two-stage methods, this function returns seqTrain 23 | % and estParams corresponding to kernSD. By default, 24 | % the function uses ws.kern(1).
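% A hypothetical call, with an illustrative kernel width: [estParams, seqTrain, seqTest] = postprocess(ws, 'kernSD', 30);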
25 | % 26 | % @ 2009 Byron Yu -- byronyu@stanford.edu 27 | 28 | kernSD = []; 29 | assignopts(who, varargin); 30 | 31 | estParams = []; 32 | seqTrain = []; 33 | seqTest = []; 34 | 35 | if isempty(ws) 36 | fprintf('ERROR: Input argument is empty.\n'); 37 | return 38 | end 39 | 40 | if isfield(ws, 'kern') 41 | if isempty(ws.kernSDList) 42 | k = 1; 43 | else 44 | k = find(ws.kernSDList == kernSD); 45 | if isempty(k) 46 | fprintf('ERROR: Selected kernSD not found.\n'); 47 | return 48 | end 49 | end 50 | end 51 | 52 | if ismember(ws.method, {'gpfa'}) 53 | C = ws.estParams.C; 54 | X = [ws.seqTrain.xsm]; 55 | [Xorth, Corth] = orthogonalize(X, C); 56 | seqTrain = segmentByTrial(ws.seqTrain, Xorth, 'xorth'); 57 | 58 | estParams = ws.estParams; 59 | estParams.Corth = Corth; 60 | 61 | if ~isempty(ws.seqTest) 62 | fprintf('Extracting neural trajectories for test data...\n'); 63 | 64 | ws.seqTest = exactInferenceWithLL(ws.seqTest, estParams); 65 | X = [ws.seqTest.xsm]; 66 | [Xorth, Corth] = orthogonalize(X, C); 67 | seqTest = segmentByTrial(ws.seqTest, Xorth, 'xorth'); 68 | end 69 | 70 | elseif ismember(ws.method, {'fa', 'ppca'}) 71 | L = ws.kern(k).estParams.L; 72 | X = [ws.kern(k).seqTrain.xpost]; 73 | [Xorth, Lorth] = orthogonalize(X, L); 74 | seqTrain = segmentByTrial(ws.kern(k).seqTrain, Xorth, 'xorth'); 75 | 76 | % Convert to GPFA naming/formatting conventions 77 | estParams.C = ws.kern(k).estParams.L; 78 | estParams.d = ws.kern(k).estParams.d; 79 | estParams.Corth = Lorth; 80 | estParams.R = diag(ws.kern(k).estParams.Ph); 81 | 82 | if ~isempty(ws.kern(k).seqTest) 83 | fprintf('Extracting neural trajectories for test data...\n'); 84 | 85 | Y = [ws.kern(k).seqTest.y]; 86 | X = fastfa_estep(Y, ws.kern(k).estParams); 87 | [Xorth, Lorth] = orthogonalize(X.mean, L); 88 | seqTest = segmentByTrial(ws.kern(k).seqTest, Xorth, 'xorth'); 89 | end 90 | 91 | elseif ismember(ws.method, {'pca'}) 92 | % PCA is already orthonormalized 93 | X = [ws.kern(k).seqTrain.xpost]; 94 | seqTrain = segmentByTrial(ws.kern(k).seqTrain, X, 'xorth'); 95 | 96 | estParams.Corth = ws.kern(k).estParams.L; 97 | estParams.d = ws.kern(k).estParams.d; 98 | 99 | if ~isempty(ws.kern(k).seqTest) 100 | fprintf('Extracting neural trajectories for test data...\n'); 101 | 102 | Y = [ws.kern(k).seqTest.y]; 103 | Xorth = estParams.Corth' * bsxfun(@minus, Y, estParams.d); 104 | seqTest = segmentByTrial(ws.kern(k).seqTest, Xorth, 'xorth'); 105 | end 106 | 107 | else 108 | fprintf('ERROR: method not recognized.\n'); 109 | end 110 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/postprocess.m: -------------------------------------------------------------------------------- 1 | function [estParams, seqTrain, seqTest] = postprocess(ws, varargin) 2 | % 3 | % [estParams, seqTrain, seqTest] = postprocess(ws, ...) 4 | % 5 | % Orthonormalization and other cleanup. 
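% (Note: orthogonalize takes the SVD C = U*S*V'; the columns of U give Corth, and the trajectories are re-expressed in those coordinates, ordered by the singular values of C.)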
6 | % 7 | % INPUT: 8 | % 9 | % ws - workspace variables returned by neuralTraj.m 10 | % 11 | % OUTPUTS: 12 | % 13 | % estParams - estimated model parameters, including 'Corth' obtained 14 | % by orthonormalizing the columns of C 15 | % seqTrain - training data structure containing new field 'xorth', 16 | % the orthonormalized neural trajectories 17 | % seqTest - test data structure containing orthonormalized neural 18 | % trajectories in 'xorth', obtained using 'estParams' 19 | % 20 | % OPTIONAL ARGUMENT: 21 | % 22 | % kernSD - for two-stage methods, this function returns seqTrain 23 | % and estParams corresponding to kernSD. By default, 24 | % the function uses ws.kern(1). 25 | % 26 | % @ 2009 Byron Yu -- byronyu@stanford.edu 27 | 28 | kernSD = []; 29 | assignopts(who, varargin); 30 | 31 | estParams = []; 32 | seqTrain = []; 33 | seqTest = []; 34 | 35 | if isempty(ws) 36 | fprintf('ERROR: Input argument is empty.\n'); 37 | return 38 | end 39 | 40 | if isfield(ws, 'kern') 41 | if isempty(ws.kernSDList) 42 | k = 1; 43 | else 44 | k = find(ws.kernSDList == kernSD); 45 | if isempty(k) 46 | fprintf('ERROR: Selected kernSD not found.\n'); 47 | return 48 | end 49 | end 50 | end 51 | 52 | if ismember(ws.method, {'gpfa'}) 53 | C = ws.estParams.C; 54 | X = [ws.seqTrain.xsm]; 55 | [Xorth, Corth] = orthogonalize(X, C); 56 | seqTrain = segmentByTrial(ws.seqTrain, Xorth, 'xorth'); 57 | 58 | estParams = ws.estParams; 59 | estParams.Corth = Corth; 60 | 61 | if ~isempty(ws.seqTest) 62 | fprintf('Extracting neural trajectories for test data...\n'); 63 | 64 | ws.seqTest = exactInferenceWithLL(ws.seqTest, estParams); 65 | X = [ws.seqTest.xsm]; 66 | [Xorth, Corth] = orthogonalize(X, C); 67 | seqTest = segmentByTrial(ws.seqTest, Xorth, 'xorth'); 68 | end 69 | 70 | elseif ismember(ws.method, {'fa', 'ppca'}) 71 | L = ws.kern(k).estParams.L; 72 | X = [ws.kern(k).seqTrain.xpost]; 73 | [Xorth, Lorth] = orthogonalize(X, L); 74 | seqTrain = segmentByTrial(ws.kern(k).seqTrain, Xorth, 'xorth'); 75 | 76 | % Convert to GPFA naming/formatting conventions 77 | estParams.C = ws.kern(k).estParams.L; 78 | estParams.d = ws.kern(k).estParams.d; 79 | estParams.Corth = Lorth; 80 | estParams.R = diag(ws.kern(k).estParams.Ph); 81 | 82 | if ~isempty(ws.kern(k).seqTest) 83 | fprintf('Extracting neural trajectories for test data...\n'); 84 | 85 | Y = [ws.kern(k).seqTest.y]; 86 | X = fastfa_estep(Y, ws.kern(k).estParams); 87 | [Xorth, Lorth] = orthogonalize(X.mean, L); 88 | seqTest = segmentByTrial(ws.kern(k).seqTest, Xorth, 'xorth'); 89 | end 90 | 91 | elseif ismember(ws.method, {'pca'}) 92 | % PCA is already orthonormalized 93 | X = [ws.kern(k).seqTrain.xpost]; 94 | seqTrain = segmentByTrial(ws.kern(k).seqTrain, X, 'xorth'); 95 | 96 | estParams.Corth = ws.kern(k).estParams.L; 97 | estParams.d = ws.kern(k).estParams.d; 98 | 99 | if ~isempty(ws.kern(k).seqTest) 100 | fprintf('Extracting neural trajectories for test data...\n'); 101 | 102 | Y = [ws.kern(k).seqTest.y]; 103 | Xorth = estParams.Corth' * bsxfun(@minus, Y, estParams.d); 104 | seqTest = segmentByTrial(ws.kern(k).seqTest, Xorth, 'xorth'); 105 | end 106 | 107 | else 108 | fprintf('ERROR: method not recognized.\n'); 109 | end 110 | -------------------------------------------------------------------------------- /matlab/basset_connectivity/GenLouvain2/MEX_SRC/group_index.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // group_index.cpp 3 | // group_index 4 | // 5 | // Created by Lucas Jeub on 24/10/2012. 
6 | // 7 | // Implements the group_index data structure: 8 | // 9 | // nodes: vector storing the group membership for each node 10 | // 11 | // groups: vector of lists, each list stores the nodes assigned to the group 12 | // 13 | // nodes_iterator: vector storing the position of each node in the list corresponding 14 | // to the group it is assigned to (allows constant time moving of nodes) 15 | // 16 | // index(group): return matlab indices of nodes in group 17 | // 18 | // move(node,group): move node to group 19 | // 20 | // export_matlab(matlab_array): output group vector to matlab_array 21 | // 22 | // 23 | // 24 | // Last modified by Lucas Jeub on 20/02/2013 25 | 26 | #include "group_index.h" 27 | 28 | using namespace std; 29 | 30 | group_index::group_index():n_nodes(0), n_groups(0){} 31 | 32 | group_index::group_index(const mxArray *matrix){ 33 | mwSize m=mxGetM(matrix); 34 | mwSize n=mxGetN(matrix); 35 | n_nodes=m*n; 36 | double * temp_nodes = mxGetPr(matrix); 37 | 38 | nodes.resize(n_nodes); 39 | nodes_iterator.resize(n_nodes); 40 | for(mwIndex i=0; i::iterator it=groups[group].begin(); it != groups[group].end(); it++){ 93 | ind.get(i)=*it+1; 94 | i++; 95 | } 96 | return ind; 97 | } 98 | else { 99 | mexErrMsgIdAndTxt("group_index:index", "group number out of bounds"); 100 | } 101 | } 102 | 103 | //moves node to specified group 104 | void group_index::move(mwIndex node, mwIndex group){ 105 | //move node by splicing into list for new group 106 | groups[group].splice(groups[group].end(), groups[nodes[node]],nodes_iterator[node]); 107 | //update its position 108 | nodes_iterator[node]= --groups[group].end(); 109 | //update its group assignment 110 | nodes[node]=group; 111 | } 112 | 113 | void group_index::export_matlab(mxArray * & out){ 114 | //implements tidyconfig 115 | out=mxCreateDoubleMatrix(n_nodes,1,mxREAL); 116 | double * val=mxGetPr(out); 117 | //keep track of nodes that have already been assigned 118 | vector<bool> track_move(n_nodes,true); 119 | mwIndex g_n=1; 120 | list<mwIndex>::iterator it; 121 | for(mwIndex i=0; i -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/Code/gpfa/util/precomp/makePautoSumFast.c: -------------------------------------------------------------------------------- 1 | /*================================================================= 2 | * John P Cunningham 3 | * 2009 4 | * 5 | * The calling syntax is: 6 | * 7 | * [ ] = makePautoSumFast( precomp , seq ) 8 | * 9 | * This function adds PautoSUM to the precomp structure. PautoSUM is the 10 | * posterior covariance of the latent variables, given data and a model in 11 | * the GPFA algorithm (see GPFA references noted elsewhere). Importantly, 12 | * the precomp structure is modified in place, that is, this function 13 | * is pass-by-reference. Though nothing unusual in C, the MATLAB user 14 | * must be careful, as call by reference is not typical in MATLAB. So, 15 | * calling makePautoSumFast(precomp, seq) will change the precomp struct 16 | * without any return argument. 17 | * 18 | * The following group post may be helpful: 19 | * http://www.mathworks.com/matlabcentral/newsreader/view_thread/164276 20 | * We explicitly want to avoid doing a mxDuplicateArray, as that would be 21 | * hugely wasteful, since we are trying to optimize this code. 22 | * 23 | * This part of the code is offloaded to MEX because a very costly for 24 | * loop is required for the posterior matrix computation. To see a perhaps 25 | * more readable version of what this computation is doing, see the caller 26 | * of this function: makePrecomp.m. That function calls this MEX in a try 27 | * catch block, and it will default to a native MATLAB version if the MEX 28 | * is unsuccessful. That native MATLAB version is doing precisely the same 29 | * calculation, albeit much slower (we find roughly a 10x speedup using this 30 | * C/MEX code). This speedup is gained because the main computation has 31 | * MATLAB-inefficient for loops which can be significantly parallelized/pipelined 32 | * and done with less overhead in C. 33 | * 34 | *=================================================================*/ 35 | 36 | #include <math.h> 37 | #include "mex.h" 38 | 39 | /* Input Arguments */ 40 | 41 | #define precomp_IN prhs[0] 42 | #define seq_IN prhs[1] 43 | 44 | /* Output Arguments */ 45 | 46 | /* None, precomp is altered in place */ 47 | 48 | 49 | void mexFunction( int nlhs, mxArray *plhs[], 50 | int nrhs, const mxArray*prhs[] ) 51 | { 52 | double *tmp, *xsm, *VsmGP, *nList; 53 | mxArray *Tu; 54 | int xDim, T, numTrials, trialLens; 55 | int i,j,n,k,l; 56 | 57 | /* Check for proper number of arguments*/ 58 | if (nrhs != 2) 59 | { 60 | mexErrMsgTxt("2 input args required."); 61 | } 62 | else if (nlhs > 0) 63 | { 64 | mexErrMsgTxt("0 output args only...precomp is modified in place"); 65 | } 66 | 67 | /* get parameters */ 68 | xDim = mxGetNumberOfElements(precomp_IN); 69 | 70 | 71 | /* loop once for each state dimension */ 72 | for (i = 0; i < xDim; i++) 73 | { 74 | /* get the appropriate precomp substruct */ 75 | Tu = mxGetField(precomp_IN,i,"Tu"); 76 | /* pull trialLens from here */ 77 | trialLens = mxGetNumberOfElements(Tu); 78 | 79 | /* loop once for each unique trial length */ 80 | for (j = 0; j < trialLens ; j++) 81 | { 82 | /* get the appropriate Tu struct from precomp */ 83 | /* the length of this trial */ 84 | T = (int) *mxGetPr(mxGetField(Tu,j,"T")); 85 | numTrials = (int) *mxGetPr(mxGetField(Tu,j,"numTrials")); 86 | /* get the appropriate list of trials */ 87 | nList = mxGetPr(mxGetField(Tu, j , "nList")); 88 | 89 | /* We should be able to get that field from the struct, just like xsm and VsmGP */
90 | /* and then mess with it in place. */ 91 | tmp = mxGetPr(mxGetField(Tu,j,"PautoSUM")); 92 | 93 | /* loop once for each trial */ 94 | for (n = 0; n < numTrials ; n++) 95 | { 96 | /* get the appropriate sequence */ 97 | xsm = mxGetPr(mxGetField(seq_IN , (int) nList[n]-1 , "xsm")); 98 | VsmGP = mxGetPr(mxGetField(seq_IN , (int) nList[n]-1 , "VsmGP")); 99 | /* now do the matrix multiplication and add to tmp */ 100 | for (k = 0; k < T; k++) 101 | { 102 | for (l = 0; l < T; l++) 103 | { 104 | /* this is the main multiplication */ 105 | tmp[l*T + k] += xsm[k*xDim + i]*xsm[l*xDim + i]; 106 | tmp[l*T + k] += VsmGP[i*T*T + l*T + k]; 107 | } 108 | } 109 | } 110 | } 111 | } 112 | return; 113 | } 114 | -------------------------------------------------------------------------------- /matlab/sahani_latent_factor/SOLN/gpfa/util/precomp/makePautoSumFast.c: -------------------------------------------------------------------------------- 1 | /*================================================================= 2 | * John P Cunningham 3 | * 2009 4 | * 5 | * The calling syntax is: 6 | * 7 | * [ ] = makePautoSumFast( precomp , seq ) 8 | * 9 | * This function adds PautoSUM to the precomp structure. PautoSUM is the 10 | * posterior covariance of the latent variables, given data and a model in 11 | * the GPFA algorithm (see GPFA references noted elsewhere). Importantly, 12 | * the precomp structure is modified in place, that is, this function 13 | * is pass-by-reference. Though nothing unusual in C, the MATLAB user 14 | * must be careful, as call by reference is not typical in MATLAB. So, 15 | * calling makePautoSumFast(precomp, seq) will change the precomp struct 16 | * without any return argument. 17 | * 18 | * The following group post may be helpful: 19 | * http://www.mathworks.com/matlabcentral/newsreader/view_thread/164276 20 | * We explicitly want to avoid doing a mxDuplicateArray, as that would be 21 | * hugely wasteful, since we are trying to optimize this code. 22 | * 23 | * This part of the code is offloaded to MEX because a very costly for 24 | * loop is required for the posterior matrix computation. To see a perhaps 25 | * more readable version of what this computation is doing, see the caller 26 | * of this function: makePrecomp.m. That function calls this MEX in a try 27 | * catch block, and it will default to a native MATLAB version if the MEX 28 | * is unsuccessful. That native MATLAB version is doing precisely the same 29 | * calculation, albeit much slower (we find roughly a 10x speedup using this 30 | * C/MEX code). This speedup is gained because the main computation has 31 | * MATLAB-inefficient for loops which can be significantly parallelized/pipelined 32 | * and done with less overhead in C. 
33 | * 34 | *=================================================================*/ 35 | 36 | #include <math.h> 37 | #include "mex.h" 38 | 39 | /* Input Arguments */ 40 | 41 | #define precomp_IN prhs[0] 42 | #define seq_IN prhs[1] 43 | 44 | /* Output Arguments */ 45 | 46 | /* None, precomp is altered in place */ 47 | 48 | 49 | void mexFunction( int nlhs, mxArray *plhs[], 50 | int nrhs, const mxArray*prhs[] ) 51 | { 52 | double *tmp, *xsm, *VsmGP, *nList; 53 | mxArray *Tu; 54 | int xDim, T, numTrials, trialLens; 55 | int i,j,n,k,l; 56 | 57 | /* Check for proper number of arguments*/ 58 | if (nrhs != 2) 59 | { 60 | mexErrMsgTxt("2 input args required."); 61 | } 62 | else if (nlhs > 0) 63 | { 64 | mexErrMsgTxt("0 output args only...precomp is modified in place"); 65 | } 66 | 67 | /* get parameters */ 68 | xDim = mxGetNumberOfElements(precomp_IN); 69 | 70 | 71 | /* loop once for each state dimension */ 72 | for (i = 0; i < xDim; i++) 73 | { 74 | /* get the appropriate precomp substruct */ 75 | Tu = mxGetField(precomp_IN,i,"Tu"); 76 | /* pull trialLens from here */ 77 | trialLens = mxGetNumberOfElements(Tu); 78 | 79 | /* loop once for each unique trial length */ 80 | for (j = 0; j < trialLens ; j++) 81 | { 82 | /* get the appropriate Tu struct from precomp */ 83 | /* the length of this trial */ 84 | T = (int) *mxGetPr(mxGetField(Tu,j,"T")); 85 | numTrials = (int) *mxGetPr(mxGetField(Tu,j,"numTrials")); 86 | /* get the appropriate list of trials */ 87 | nList = mxGetPr(mxGetField(Tu, j , "nList")); 88 | 89 | /* We should be able to get that field from the struct, just like xsm and VsmGP */ 90 | /* and then mess with it in place. */ 91 | tmp = mxGetPr(mxGetField(Tu,j,"PautoSUM")); 92 | 93 | /* loop once for each trial */ 94 | for (n = 0; n < numTrials ; n++) 95 | { 96 | /* get the appropriate sequence */ 97 | xsm = mxGetPr(mxGetField(seq_IN , (int) nList[n]-1 , "xsm")); 98 | VsmGP = mxGetPr(mxGetField(seq_IN , (int) nList[n]-1 , "VsmGP")); 99 | /* now do the matrix multiplication and add to tmp */ 100 | for (k = 0; k < T; k++) 101 | { 102 | for (l = 0; l < T; l++) 103 | { 104 | /* this is the main multiplication */ 105 | tmp[l*T + k] += xsm[k*xDim + i]*xsm[l*xDim + i]; 106 | tmp[l*T + k] += VsmGP[i*T*T + l*T + k]; 107 | } 108 | } 109 | } 110 | } 111 | } 112 | return; 113 | } 114 | --------------------------------------------------------------------------------
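A minimal usage sketch for ssm_kalman.m (an editorial addition, not a file from the repository): it simulates a toy one-dimensional LGSSM with illustrative parameter values and smooths the observations, relying only on the call signature documented in ssm_kalman.m above.

% Editorial sketch: simulate a 1-D latent LGSSM and smooth it with ssm_kalman.
% All parameter values are hypothetical.
T  = 100;                          % number of timesteps
A  = 0.99; Q  = 0.1;               % latent dynamics and innovations variance
C  = [1; 0.5]; R = 0.2*eye(2);     % loading matrix and observation noise
y0 = 0; Q0 = 1;                    % initial state mean and variance
y = zeros(1, T); y(1) = y0 + sqrt(Q0)*randn;
for t = 2:T, y(t) = A*y(t-1) + sqrt(Q)*randn; end
x = C*y + sqrtm(R)*randn(2, T);    % D x T observation matrix
[yhat, Vhat, Vjoint, like] = ssm_kalman(x, y0, Q0, A, Q, C, R, 'smooth');
plot(1:T, y, 1:T, yhat);           % compare true and smoothed latent state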