├── 1.Intro to Bayesian Networks ├── 228_login_data.mat ├── AssignmentToIndex.m ├── ComputeJointDistribution.m ├── ComputeMarginal.m ├── ConvertNetwork.m ├── Credit_net.net ├── FactorMarginalization.m ├── FactorProduct.m ├── FactorTutorial.m ├── GetValueOfAssignment.m ├── IndexToAssignment.m ├── MonitorAssignment.m ├── ObserveEvidence.m ├── PGM_Programming_Assignment_1.pdf ├── SetValueOfAssignment.m ├── submit.m ├── submitWeb.m └── submit_input.mat ├── 2.Bayesian Network for Genetic Inheritance ├── 228_login_data.mat ├── AssignmentToIndex.m ├── FactorProduct.m ├── GetValueOfAssignment.m ├── IndexToAssignment.m ├── PA2Appendix.pdf ├── PA2Description.pdf ├── SetValueOfAssignment.m ├── childCopyGivenFreqsFactor.m ├── childCopyGivenParentalsFactor.m ├── computeSigmoid.m ├── constructDecoupledGeneticNetwork.m ├── constructGeneticNetwork.m ├── constructSigmoidPhenotypeFactor.m ├── cysticFibrosisBayesNet.net ├── cysticFibrosisBayesNetGeneCopy.net ├── generateAlleleGenotypeMappers.m ├── genotypeGivenAlleleFreqsFactor.m ├── genotypeGivenParentsGenotypesFactor.m ├── octave-core ├── phenotypeGivenCopiesFactor.m ├── phenotypeGivenGenotypeFactor.m ├── phenotypeGivenGenotypeMendelianFactor.m ├── pinterest.js ├── sampleFactorList.mat ├── sampleFactorListDecoupled.mat ├── sampleGeneticNetworks.m ├── sendToSamiam.m ├── sendToSamiamGeneCopy.m ├── sendToSamiamInfo.m ├── sendToSamiamInfoDecoupled.m ├── spinalMuscularAtrophyBayesNet.net ├── submit.m └── submitWeb.m ├── 3.Markov Networks for OCR ├── 228_login_data.mat ├── AssignmentToIndex.m ├── BuildOCRNetwork.m ├── ChooseTopSimilarityFactors.m ├── ComputeAllSimilarityFactors.m ├── ComputeEqualPairwiseFactors.m ├── ComputeImageFactor.m ├── ComputePairwiseFactors.m ├── ComputeSimilarityFactor.m ├── ComputeSingletonFactors.m ├── ComputeTripletFactors.m ├── ComputeWordPredictions.m ├── GetValueOfAssignment.m ├── ImageSimilarity.m ├── IndexToAssignment.m ├── PA3Data.mat ├── PA3Description.pdf ├── PA3Models.mat ├── PA3SampleCases.mat ├── PA3TestCases.mat ├── RunInference.m ├── ScoreModel.m ├── ScorePredictions.m ├── SerializeFactorsFg.m ├── SetValueOfAssignment.m ├── Test.txt ├── VisualizeWord.m ├── factors.fg ├── inf.log ├── inference │ ├── doinference-linux │ ├── doinference-mac │ ├── doinference.exe │ ├── inference-src.zip │ └── inference-src │ │ └── libdai │ │ ├── AUTHORS │ │ ├── ChangeLog │ │ ├── LICENSE │ │ ├── Makefile │ │ ├── Makefile.ALL │ │ ├── Makefile.CYGWIN │ │ ├── Makefile.LINUX │ │ ├── Makefile.MACOSX │ │ ├── Makefile.MACOSX64 │ │ ├── Makefile.WINDOWS │ │ ├── README │ │ ├── customdoxygen.css │ │ ├── doxygen.conf │ │ ├── examples │ │ ├── Makefile │ │ ├── doinference │ │ ├── doinference.cpp │ │ ├── example.cpp │ │ ├── example_bipgraph.cpp │ │ ├── example_bipgraph.out │ │ ├── example_imagesegmentation.cpp │ │ ├── example_img_in1.jpg │ │ ├── example_img_in2.jpg │ │ ├── example_permute.cpp │ │ ├── example_permute.out │ │ ├── example_sprinkler.cpp │ │ ├── example_sprinkler.dot │ │ ├── example_sprinkler.png │ │ ├── example_sprinkler_em.cpp │ │ ├── example_sprinkler_gibbs.cpp │ │ ├── example_varset.cpp │ │ ├── example_varset.out │ │ ├── sprinkler.em │ │ └── uai2010-aie-solver.cpp │ │ ├── include │ │ └── dai │ │ │ ├── alldai.h │ │ │ ├── bbp.h │ │ │ ├── bipgraph.h │ │ │ ├── bp.h │ │ │ ├── bp_dual.h │ │ │ ├── cbp.h │ │ │ ├── clustergraph.h │ │ │ ├── dag.h │ │ │ ├── daialg.h │ │ │ ├── decmap.h │ │ │ ├── doc.h │ │ │ ├── emalg.h │ │ │ ├── enum.h │ │ │ ├── evidence.h │ │ │ ├── exactinf.h │ │ │ ├── exceptions.h │ │ │ ├── factor.h │ │ │ ├── factorgraph.h │ │ │ ├── 
fbp.h │ │ │ ├── gibbs.h │ │ │ ├── graph.h │ │ │ ├── hak.h │ │ │ ├── index.h │ │ │ ├── io.h │ │ │ ├── jtree.h │ │ │ ├── lc.h │ │ │ ├── matlab │ │ │ └── matlab.h │ │ │ ├── mf.h │ │ │ ├── mr.h │ │ │ ├── prob.h │ │ │ ├── properties.h │ │ │ ├── regiongraph.h │ │ │ ├── smallset.h │ │ │ ├── treeep.h │ │ │ ├── trwbp.h │ │ │ ├── util.h │ │ │ ├── var.h │ │ │ ├── varset.h │ │ │ └── weightedgraph.h │ │ ├── matlab │ │ ├── dai.m │ │ ├── dai_potstrength.m │ │ ├── dai_readfg.m │ │ └── dai_writefg.m │ │ ├── scripts │ │ ├── convert-fastInf-DAI.pl │ │ ├── makeREADME │ │ └── regenerate-properties │ │ ├── src │ │ ├── alldai.cpp │ │ ├── bbp.cpp │ │ ├── bipgraph.cpp │ │ ├── bp.cpp │ │ ├── bp_dual.cpp │ │ ├── cbp.cpp │ │ ├── clustergraph.cpp │ │ ├── dag.cpp │ │ ├── daialg.cpp │ │ ├── decmap.cpp │ │ ├── emalg.cpp │ │ ├── evidence.cpp │ │ ├── exactinf.cpp │ │ ├── exceptions.cpp │ │ ├── factor.cpp │ │ ├── factorgraph.cpp │ │ ├── fbp.cpp │ │ ├── gibbs.cpp │ │ ├── graph.cpp │ │ ├── hak.cpp │ │ ├── io.cpp │ │ ├── jtree.cpp │ │ ├── lc.cpp │ │ ├── matlab │ │ │ ├── dai.cpp │ │ │ ├── dai_potstrength.cpp │ │ │ ├── dai_readfg.cpp │ │ │ ├── dai_writefg.cpp │ │ │ └── matlab.cpp │ │ ├── mf.cpp │ │ ├── mr.cpp │ │ ├── properties.cpp │ │ ├── regiongraph.cpp │ │ ├── treeep.cpp │ │ ├── trwbp.cpp │ │ ├── util.cpp │ │ ├── varset.cpp │ │ └── weightedgraph.cpp │ │ ├── swig │ │ ├── Makefile │ │ ├── README │ │ ├── dai.i │ │ ├── example_sprinkler.m │ │ └── example_sprinkler.py │ │ ├── tests │ │ ├── alarm.fg │ │ ├── aliases.conf │ │ ├── hoi1.fg │ │ ├── hoi2.fg │ │ ├── hoi3.fg │ │ ├── hoi4.fg │ │ ├── jtreemapbug.fg │ │ ├── maxprodbug.fg │ │ ├── maxprodbug2.fg │ │ ├── maxprodbug3.fg │ │ ├── testall │ │ ├── testall.bat │ │ ├── testbbp.cpp │ │ ├── testdai.cpp │ │ ├── testem │ │ │ ├── 2var.em │ │ │ ├── 2var.fg │ │ │ ├── 2var_data.tab │ │ │ ├── 3var.em │ │ │ ├── 3var.fg │ │ │ ├── hoi1_data.tab │ │ │ ├── hoi1_infer_f2.em │ │ │ ├── hoi1_share_f0_f1_f2.em │ │ │ ├── hoi1_share_f0_f2.em │ │ │ ├── runtests │ │ │ ├── runtests.bat │ │ │ ├── testem.cpp │ │ │ └── testem.out │ │ ├── testfast.fg │ │ ├── testfast.out │ │ ├── testregression │ │ ├── testregression.bat │ │ ├── twofactors.fg │ │ ├── unit │ │ │ ├── alldai_test.cpp │ │ │ ├── bipgraph_test.cpp │ │ │ ├── clustergraph_test.cpp │ │ │ ├── dag_test.cpp │ │ │ ├── daialg_test.cpp │ │ │ ├── enum_test.cpp │ │ │ ├── exceptions_test.cpp │ │ │ ├── factor_test.cpp │ │ │ ├── factorgraph_test.cpp │ │ │ ├── graph_test.cpp │ │ │ ├── index_test.cpp │ │ │ ├── prob_test.cpp │ │ │ ├── properties_test.cpp │ │ │ ├── regiongraph_test.cpp │ │ │ ├── smallset_test.cpp │ │ │ ├── util_test.cpp │ │ │ ├── var_test.cpp │ │ │ ├── varset_test.cpp │ │ │ └── weightedgraph_test.cpp │ │ └── zeroes1.fg │ │ └── utils │ │ ├── createfg.cpp │ │ ├── fg2dot.cpp │ │ ├── fginfo.cpp │ │ ├── uai2fg.cpp │ │ └── viewfg ├── pgm_login_data.mat ├── submit.m └── submitWeb.m ├── 4.Exact Inference ├── 228_login_data.mat ├── AssignmentToIndex.m ├── CliqueTreeCalibrate.m ├── ComputeExactMarginalsBP.m ├── ComputeInitialPotentials.m ├── ComputeJointDistribution.m ├── ComputeMarginal.m ├── CreateCliqueTree.m ├── DecodedMarginalsToChars.m ├── EliminateVar.m ├── FactorMarginalization.m ├── FactorMaxMarginalization.m ├── FactorProduct.m ├── FactorSum.m ├── GetNextCliques.m ├── GetValueOfAssignment.m ├── IndexToAssignment.m ├── MaxDecoding.m ├── ObserveEvidence.m ├── PA4Sample.mat ├── PA4Test.mat ├── ProgrammingAssignment4.pdf ├── PruneTree.m ├── SetValueOfAssignment.m ├── pgm_login_data.mat ├── submit.m ├── submitWeb.m └── test.m ├── 5.Approximate Inference ├── 
.email.swp ├── AssignmentToIndex.m ├── BlockLogDistribution.m ├── CheckConvergence.m ├── CliqueTreeCalibrate.m ├── ClusterGraphCalibrate.m ├── ComputeApproxMarginalsBP.m ├── ComputeExactMarginalsBP.m ├── ComputeInitialPotentials.m ├── ConstructRandNetwork.m ├── ConstructToyNetwork.m ├── CreateCliqueTree.m ├── CreateClusterGraph.m ├── EdgeToFactorCorrespondence.m ├── EliminateVar.m ├── ExtractMarginalsFromSamples.m ├── FactorMarginalization.m ├── FactorProduct.m ├── GetNextCliques.m ├── GetNextClusters.m ├── GetValueOfAssignment.m ├── GibbsTrans.m ├── IndexToAssignment.m ├── LogProbOfJointAssignment.m ├── MCMCInference.m ├── MHGibbsTrans.m ├── MHSWTrans.m ├── MHUniformTrans.m ├── NaiveGetNextClusters.m ├── ObserveEvidence.m ├── ProblemSet.m ├── ProblemSet4.mat ├── ProgrammingAssignment5.pdf ├── PruneTree.m ├── SetValueOfAssignment.m ├── SmartGetNextClusters.m ├── TestToy.m ├── VariableToFactorCorrespondence.m ├── VisualizeMCMCMarginals.m ├── VisualizeToyImageMarginals.m ├── email ├── exampleIOPA5.mat ├── gaimc │ ├── scomponents.m │ └── sparse_to_csr.m ├── octave-core ├── pgm_login_data.mat ├── rand.m ├── randi.m ├── randsample.m ├── scomponents.m ├── smooth.m ├── sparse_to_csr.m ├── submit.m ├── submit_input.mat └── testscript.m ├── 6.Decision Making ├── AssignmentToIndex.m ├── CPDFromFactor.m ├── CalculateExpectedUtilityFactor.m ├── EliminateVar.m ├── FactorMarginalization.m ├── FactorProduct.m ├── FactorSum.m ├── FullI.mat ├── GetValueOfAssignment.m ├── IndexToAssignment.m ├── MultipleUtilityI.mat ├── NormalizeCPDFactors.m ├── NormalizeFactorValues.m ├── ObserveEvidence.m ├── OptimizeLinearExpectations.m ├── OptimizeMEU.m ├── OptimizeWithJointUtility.m ├── PrintFactor.m ├── ProgrammingAssignment6.pdf ├── ReOrderVariables.m ├── SetValueOfAssignment.m ├── SimpleCalcExpectedUtility.m ├── SimpleOptimizeMEU.m ├── TestCases.m ├── TestI0.mat ├── VariableElimination.m ├── pgm_login_data.mat ├── submit.m ├── submitWeb.m └── variables.mat ├── 7.CRF Learning for OCR ├── .ComputeInitialPotentials.m.swp ├── .CreateCliqueTree.m.swp ├── .InstanceNegLogLikelihood.m.swp ├── AssignmentToIndex.m ├── CliqueTreeCalibrate.m ├── ComputeConditionedSingletonFeatures.m ├── ComputeExactMarginalsBP.m ├── ComputeInitialPotentials.m ├── ComputeJointDistribution.m ├── ComputeMarginal.m ├── ComputeUnconditionedPairFeatures.m ├── ComputeUnconditionedSingletonFeatures.m ├── CreateCliqueTree.m ├── EliminateVar.m ├── EmptyFactorStruct.m ├── EmptyFeatureStruct.m ├── FactorMarginalization.m ├── FactorMaxMarginalization.m ├── FactorProduct.m ├── FactorSum.m ├── GenerateAllFeatures.m ├── GetNextCliques.m ├── GetValueOfAssignment.m ├── IndexToAssignment.m ├── InstanceNegLogLikelihood.m ├── LRAccuracy.m ├── LRCostSGD.m ├── LRPredict.m ├── LRSearchLambdaSGD.m ├── LRTrainSGD.m ├── MaxDecoding.m ├── NumParamsForConditionedFeatures.m ├── NumParamsForUnconditionedFeatures.m ├── ObserveEvidence.m ├── PA7Description.pdf ├── Part1Lambdas.mat ├── Part2FullDataset.mat ├── Part2LogZTest.mat ├── Part2Sample.mat ├── Part2Test.mat ├── PruneTree.m ├── SetValueOfAssignment.m ├── StochasticGradientDescent.m ├── Test1X.mat ├── Test1Y.mat ├── Train1X.mat ├── Train1Y.mat ├── Train2X.mat ├── Train2Y.mat ├── Validation1X.mat ├── Validation1Y.mat ├── Validation2X.mat ├── Validation2Y.mat ├── ValidationAccuracy.mat ├── VisualizeCharacters.m ├── pgm_login_data.mat ├── pppp.m ├── sigmoid.m ├── submit.m └── submitWeb.m ├── 8.Learning Tree Structured Networks ├── ClassifyDataset.m ├── ComputeLogLikelihood.m ├── ConvertAtoG.m ├── FitGaussianParameters.m ├── 
FitLinearGaussianParameters.m ├── GaussianMutualInformation.m ├── LearnCPDsGivenGraph.m ├── LearnGraphAndCPDs.m ├── LearnGraphStructure.m ├── MaxSpanningTree.m ├── PA8Data.mat ├── PA8Description.pdf ├── PA8SampleCases.mat ├── SampleGaussian.m ├── SampleMultinomial.m ├── SamplePose.m ├── ShowPose.m ├── VisualizeDataset.m ├── VisualizeModels.m ├── func_DrawLine.m ├── lognormpdf.m ├── octave-core ├── pgm_login_data.mat ├── submit.m ├── submitWeb.m └── submit_input.mat └── 9.Learnign with Incomplete Data ├── AssignmentToIndex.m ├── CliqueTreeCalibrate.m ├── ComputeExactMarginalsHMM.m ├── CreateCliqueTreeHMM.m ├── EM_HMM.m ├── EM_cluster.m ├── FactorMarginalization.m ├── FitG.m ├── FitLG.m ├── IndexToAssignment.m ├── PA9Data.mat ├── PA9Description.pdf ├── PA9SampleCases.mat ├── Predictions.mat ├── RecognizeActions.m ├── RecognizeUnknownActions.m ├── SavePredictions.m ├── ShowPose.m ├── VisualizeDataset.m ├── YourMethod.txt ├── emission.m ├── func_DrawLine.m ├── lognormpdf.m ├── logsumexp.m ├── pgm_login_data.mat ├── submit.m ├── submitWeb.m └── submit_input.mat /1.Intro to Bayesian Networks/228_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue Apr 03 17:38:51 2012 India Standard Time 2 | # name: login 3 | # type: string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m and FactorTutorial.m 8 | 9 | function I = AssignmentToIndex(A, D) 10 | 11 | D = D(:)'; % ensure that D is a row vector 12 | if (any(size(A) == 1)), 13 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 14 | else 15 | I = sum(repmat(cumprod([1, D(1:end - 1)]), size(A, 1), 1) .* (A - 1), 2) + 1; 16 | end; 17 | 18 | end 19 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/ComputeJointDistribution.m: -------------------------------------------------------------------------------- 1 | %ComputeJointDistribution Computes the joint distribution defined by a set 2 | % of given factors 3 | % 4 | % Joint = ComputeJointDistribution(F) computes the joint distribution 5 | % defined by a set of given factors 6 | % 7 | % Joint is a factor that encapsulates the joint distribution given by F 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % 11 | 12 | function Joint = ComputeJointDistribution(F) 13 | 14 | % Check for empty factor list 15 | if (numel(F) == 0) 16 | warning('Error: empty factor list'); 17 | Joint = struct('var', [], 'card', [], 'val', []); 18 | return; 19 | end 20 | 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | % YOUR CODE HERE: 23 | % Compute the joint distribution defined by F 24 | % You may assume that you are given legal CPDs so no input checking is required. 
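% One way to approach this (a sketch only, using two hypothetical CPDs
% F(1) = P(X1) and F(2) = P(X2 | X1)): fold the factors together with
% FactorProduct, starting from the empty factor, which acts as the identity:
%   Joint = struct('var', [], 'card', [], 'val', []);
%   Joint = FactorProduct(Joint, F(1));   % P(X1)
%   Joint = FactorProduct(Joint, F(2));   % P(X1) * P(X2 | X1) = P(X1, X2)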
25 | % 26 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 27 | 28 | Joint = struct('var', [], 'card', [], 'val', []); % Start from the empty factor, which FactorProduct treats as the identity. 29 | 30 | for i = 1:length(F) 31 | Joint = FactorProduct(Joint,F(i)); 32 | end 33 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 34 | end 35 | 36 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/ComputeMarginal.m: -------------------------------------------------------------------------------- 1 | %ComputeMarginal Computes the marginal over a set of given variables 2 | % M = ComputeMarginal(V, F, E) computes the marginal over variables V 3 | % in the distribution induced by the set of factors F, given evidence E 4 | % 5 | % M is a factor containing the marginal over variables V 6 | % V is a vector containing the variables in the marginal e.g. [1 2 3] for 7 | % X_1, X_2 and X_3. 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % E is an N-by-2 matrix, each row being a variable/value pair. 11 | % Variables are in the first column and values are in the second column. 12 | % If there is no evidence, pass in the empty matrix [] for E. 13 | 14 | 15 | function M = ComputeMarginal(V, F, E) 16 | % Check for empty factor list 17 | if (numel(F) == 0) 18 | warning('Warning: empty factor list'); 19 | M = struct('var', [], 'card', [], 'val', []); 20 | return; 21 | end 22 | 23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 24 | % YOUR CODE HERE: 25 | % M should be a factor 26 | % Remember to renormalize the entries of M! 27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 28 | p = ComputeJointDistribution(F); 29 | x = ObserveEvidence(p,E); 30 | y = FactorMarginalization(x,setdiff(x.var,V)); 31 | M = y; 32 | M.val = y.val/(sum(y.val)); 33 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 34 | end 35 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/FactorMarginalization.m: -------------------------------------------------------------------------------- 1 | % FactorMarginalization Sums given variables out of a factor. 2 | % B = FactorMarginalization(A,V) computes the factor with the variables 3 | % in V summed out. The factor data structure has the following fields: 4 | % .var Vector of variables in the factor, e.g. [1 2 3] 5 | % .card Vector of cardinalities corresponding to .var, e.g. [2 2 2] 6 | % .val Value table of size prod(.card) 7 | % 8 | % The resultant factor should have at least one variable remaining or this 9 | % function will throw an error. 
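% Example (a sketch with made-up numbers; the first variable cycles fastest
% in the .val ordering):
%   A = struct('var', [1 2], 'card', [2 2], 'val', [0.1 0.3 0.2 0.4]);
%   B = FactorMarginalization(A, [2]);
%   % B.var == [1], B.card == [2], B.val == [0.3; 0.7]  (0.1+0.2 and 0.3+0.4)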
10 | % 11 | % See also FactorProduct.m, IndexToAssignment.m, and AssignmentToIndex.m 12 | 13 | function B = FactorMarginalization(A, V) 14 | 15 | % Check for empty factor or variable list 16 | if (isempty(A.var) || isempty(V)), B = A; return; end; 17 | 18 | % Construct the output factor over A.var \ V (the variables in A.var that are not in V) 19 | % and mapping between variables in A and B 20 | [B.var, mapB] = setdiff(A.var, V); 21 | 22 | % Check for empty resultant factor 23 | if isempty(B.var) 24 | error('Error: Resultant factor has empty scope'); 25 | end; 26 | 27 | % Initialize B.card and B.val 28 | B.card = A.card(mapB); 29 | B.val = zeros(prod(B.card), 1); 30 | 31 | % Compute some helper indices 32 | % These will be very useful for calculating B.val 33 | % so make sure you understand what these lines are doing 34 | assignments = IndexToAssignment(1:length(A.val), A.card); 35 | indxB = AssignmentToIndex(assignments(:, mapB), B.card); 36 | 37 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 38 | % YOUR CODE HERE 39 | % Correctly populate the factor values of B 40 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 41 | for i = 1:length(indxB) 42 | B.val(indxB(i)) += A.val(i); 43 | end 44 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 45 | end 46 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % GetValueOfAssignment Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GetValueOfAssignment(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GetValueOfAssignment(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SetValueOfAssignment.m and FactorTutorial.m 11 | 12 | function v = GetValueOfAssignment(F, A, VO) 13 | 14 | if (nargin == 2), 15 | indx = AssignmentToIndex(A, F.card); 16 | else 17 | map = zeros(length(F.var), 1); 18 | for i = 1:length(F.var), 19 | map(i) = find(VO == F.var(i)); 20 | end; 21 | indx = AssignmentToIndex(A(map), F.card); 22 | end; 23 | 24 | v = F.val(indx); 25 | 26 | end 27 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m and FactorTutorial.m 9 | 10 | function A = IndexToAssignment(I, D) 11 | 12 | D = D(:)'; % ensure that D is a row vector 13 | A = mod(floor(repmat(I(:) - 1, 1, length(D)) ./ repmat(cumprod([1, D(1:end - 1)]), length(I), 1)), ... 
14 | repmat(D, length(I), 1)) + 1; 15 | 16 | end 17 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/MonitorAssignment.m: -------------------------------------------------------------------------------- 1 | function MonitorAssignment( F, names, valNames, E ) 2 | 3 | % MonitorAssignment( F, names, valNames, E ) - Pretty prints all the 4 | % marginals for an assignment 5 | % 6 | % F contains the struct array of factors 7 | % names contains the variable names 8 | % valNames contains the assignment names for each variable, 9 | % as seen in SAMIAM 10 | % E is an N-by-2 cell array, each row being a variable/value pair. 11 | % Variables are in the first column and values are in the second column. 12 | % If there is no evidence, pass in the empty matrix [] for E. 13 | % 14 | % Example of use: 15 | % [F, names, valNames] = ConvertNetwork('Credit_net.net'); 16 | % MonitorAssignment( F, names, valNames, {'Assets', 'Low'; 'Age', 'Over65'} ) 17 | 18 | Enum = zeros(size(E)); 19 | for i=1:size(E,1) 20 | Enum(i,1) = find(strcmp(E{i,1}, names),1,'first'); 21 | Enum(i,2) = find(strcmp(E{i,2}, valNames{Enum(i,1)}),1,'first'); 22 | end 23 | maxNameLength = 0; 24 | for i=1:numel(valNames) 25 | for j=1:numel(valNames{i}) 26 | maxNameLength = max(maxNameLength, length(valNames{i}{j})); 27 | end 28 | end 29 | for i=1:numel(F) 30 | M = ComputeMarginal([i], F, Enum); 31 | fprintf('%s\n', names{i}) 32 | for j=1:numel(M.val) 33 | xtra_space = char(' '*ones(maxNameLength-length(valNames{i}{j})+3,1)); 34 | fprintf('\t%s:%s%.02f%%\n', valNames{i}{j}, xtra_space, M.val(j)*100); 35 | end 36 | end 37 | 38 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/PGM_Programming_Assignment_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/1.Intro to Bayesian Networks/PGM_Programming_Assignment_1.pdf -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % SetValueOfAssignment Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SetValueOfAssignment(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SetValueOfAssignment(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 10 | % 11 | % Note that SetValueOfAssignment *does not modify* the factor F that is 12 | % passed into the function, but instead returns a modified factor with the 13 | % new value(s) for the specified assignment(s). This is why we have to 14 | % reassign F to the result of SetValueOfAssignment in the code snippets 15 | % shown above. 
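% Example (sketch): for a hypothetical factor F over variables [1 2] with
% cardinalities [2 2], the following sets the value for the assignment
% X1 = 1, X2 = 2 and keeps the returned factor:
%   F = SetValueOfAssignment(F, [1 2], 0.25);
%   GetValueOfAssignment(F, [1 2])   % now returns 0.25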
16 | % 17 | % See also GetValueOfAssignment.m and FactorTutorial.m 18 | 19 | function F = SetValueOfAssignment(F, A, v, VO) 20 | 21 | if (nargin == 3), 22 | indx = AssignmentToIndex(A, F.card); 23 | else 24 | map = zeros(length(F.var), 1); 25 | for i = 1:length(F.var), 26 | map(i) = find(VO == F.var(i)); 27 | end; 28 | indx = AssignmentToIndex(A(map), F.card); 29 | end; 30 | 31 | F.val(indx) = v; 32 | 33 | end 34 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | 9 | function submitWeb(partId) 10 | if ~exist('partId', 'var') || isempty(partId) 11 | partId = []; 12 | end 13 | 14 | submit(partId, 1); 15 | end 16 | 17 | -------------------------------------------------------------------------------- /1.Intro to Bayesian Networks/submit_input.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/1.Intro to Bayesian Networks/submit_input.mat -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/228_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Wed Apr 11 07:47:33 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m and SampleFactors.m 8 | 9 | function I = AssignmentToIndex(A, D) 10 | 11 | D = D(:)'; % ensure that D is a row vector 12 | if (any(size(A) == 1)), 13 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 14 | else 15 | I = sum(repmat(cumprod([1, D(1:end - 1)]), size(A, 1), 1) .* (A - 1), 2) + 1; 16 | end; 17 | 18 | end 19 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % GetValueOfAssignment Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GetValueOfAssignment(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 
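% Example (sketch): if F.var = [3 1] with F.card = [2 2], then
% GetValueOfAssignment(F, [2 1]) returns the value stored for the
% assignment X3 = 2, X1 = 1, i.e. F.val(2).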
6 | % 7 | % v = GetValueOfAssignment(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SetValueOfAssignment.m and SampleFactors.m 11 | 12 | function v = GetValueOfAssignment(F, A, VO) 13 | 14 | if (nargin == 2), 15 | indx = AssignmentToIndex(A, F.card); 16 | else 17 | map = zeros(length(F.var), 1); 18 | for i = 1:length(F.var), 19 | map(i) = find(VO == F.var(i)); 20 | end; 21 | indx = AssignmentToIndex(A(map), F.card); 22 | end; 23 | 24 | v = F.val(indx); 25 | 26 | end 27 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | 10 | function A = IndexToAssignment(I, D) 11 | 12 | D = D(:)'; % ensure that D is a row vector 13 | A = mod(floor(repmat(I(:) - 1, 1, length(D)) ./ repmat(cumprod([1, D(1:end - 1)]), length(I), 1)), ... 14 | repmat(D, length(I), 1)) + 1; 15 | 16 | end 17 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/PA2Appendix.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/2.Bayesian Network for Genetic Inheritance/PA2Appendix.pdf -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/PA2Description.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/2.Bayesian Network for Genetic Inheritance/PA2Description.pdf -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % SetValueOfAssignment Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SetValueOfAssignment(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SetValueOfAssignment(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 10 | % 11 | % Note that SetValueOfAssignment *does not modify* the factor F that is 12 | % passed into the function, but instead returns a modified factor with the 13 | % new value(s) for the specified assignment(s). This is why we have to 14 | % reassign F to the result of SetValueOfAssignment in the code snippets 15 | % shown above. 
16 | % 17 | % See also GetValueOfAssignment.m and SampleFactors.m 18 | 19 | function F = SetValueOfAssignment(F, A, v, VO) 20 | 21 | if (nargin == 3), 22 | indx = AssignmentToIndex(A, F.card); 23 | else 24 | map = zeros(length(F.var), 1); 25 | for i = 1:length(F.var), 26 | map(i) = find(VO == F.var(i)); 27 | end; 28 | indx = AssignmentToIndex(A(map), F.card); 29 | end; 30 | 31 | F.val(indx) = v; 32 | 33 | end 34 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/childCopyGivenFreqsFactor.m: -------------------------------------------------------------------------------- 1 | function geneCopyFactor = childCopyGivenFreqsFactor(alleleFreqs, geneCopyVar) 2 | % This function creates a factor whose values are the frequencies of each 3 | % allele in the population. 4 | % 5 | % Input: 6 | % alleleFreqs: A list of the frequencies of the alleles in the population 7 | % genotypeVar: The variable number for the genotype 8 | % 9 | % Output: 10 | % geneCopyFactor: A factor for the prior probability of genotypeVar (note 11 | % that this is the FULL CPD with no evidence observed) 12 | 13 | numAlleles = length(alleleFreqs); 14 | geneCopyFactor = struct('var', [], 'card', [], 'val', []); 15 | geneCopyFactor.var(1) = geneCopyVar; 16 | geneCopyFactor.card(1) = numAlleles; 17 | geneCopyFactor.val = alleleFreqs'; 18 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/computeSigmoid.m: -------------------------------------------------------------------------------- 1 | function sigVal = computeSigmoid(z) 2 | % This function computes the value of the sigmoid of all of the numbers in 3 | % an n x 1 vector, where n is the length of z. 
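% For example (sketch): computeSigmoid([0; 2]) returns approximately
% [0.5; 0.8808], since sigmoid(z) = exp(z) ./ (1 + exp(z)).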
4 | % 5 | % Input: 6 | % z: The n x 1 vector, where n is the length of z, of values whose 7 | % sigmoids need to be found 8 | % 9 | % Output: 10 | % sigVal: The value of the sigmoid 11 | 12 | expz = exp(z); 13 | sigVal = expz ./ (1 + expz); -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/octave-core: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/2.Bayesian Network for Genetic Inheritance/octave-core -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/pinterest.js: -------------------------------------------------------------------------------- 1 | void( 2 | ( 3 | function() 4 | { 5 | var e=document.createElement('script'); 6 | e.setAttribute('type','text/javascript'); 7 | e.setAttribute('charset','UTF-8'); 8 | e.setAttribute('src','http://assets.pinterest.com/js/pinmarklet.js?r='+Math.random()*99999999); 9 | document.body.appendChild(e) 10 | } 11 | )() 12 | ); 13 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/sampleFactorList.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/2.Bayesian Network for Genetic Inheritance/sampleFactorList.mat -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/sampleFactorListDecoupled.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/2.Bayesian Network for Genetic Inheritance/sampleFactorListDecoupled.mat -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/sendToSamiamInfo.m: -------------------------------------------------------------------------------- 1 | % You can create a network and convert it to a format that can be viewed in 2 | % Samiam by running this script. 3 | 4 | pedigree = struct('parents', [0,0;1,3;0,0;1,3;2,6;0,0;2,6;4,9;0,0]); 5 | pedigree.names = {'Ira','James','Robin','Eva','Jason','Rene','Benjamin','Sandra','Aaron'}; 6 | alleleFreqs = [0.1; 0.9]; 7 | alleleList = {'F', 'f'}; 8 | alphaList = [0.8; 0.6; 0.1]; 9 | phenotypeList = {'CysticFibrosis', 'NoCysticFibrosis'}; 10 | positions = [520, 600, 520, 500; 650, 400, 650, 300; 390, 600, 390, 500; 260, 400, 260, 300; 780, 200, 780, 100; 1040, 400, 1040, 300; 910, 200, 910, 100; 130, 200, 130, 100; 0, 400, 0, 300]; 11 | 12 | % This will construct a Bayesian network and convert it into a file that 13 | % can be viewed in SamIam. 14 | 15 | factorList = constructGeneticNetwork(pedigree, alleleFreqs, alphaList); 16 | sendToSamiam(pedigree, factorList, alleleList, phenotypeList, positions, 'cysticFibrosisBayesNet'); 17 | -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/sendToSamiamInfoDecoupled.m: -------------------------------------------------------------------------------- 1 | % You can create a decoupled network and convert it to a format that can be 2 | % viewed in Samiam by running this script. 
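% (In the pedigree struct below, parents(i,:) gives the indices, into
% pedigree.names, of person i's parents; a row of [0,0] marks a founder
% with no parents listed in the pedigree.)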
3 | 4 | pedigree = struct('parents', [0,0;1,3;0,0;1,3;2,6;0,0;2,6;4,9;0,0]); 5 | pedigree.names = {'Ira','James','Robin','Eva','Jason','Rene','Benjamin','Sandra','Aaron'}; 6 | phenotypeList = {'CysticFibrosis', 'NoCysticFibrosis'}; 7 | alleleFreqsThree = [0.1; 0.7; 0.2]; 8 | alleleListThree = {'F', 'f', 'n'}; 9 | alphaListThree = [0.8; 0.6; 0.1; 0.5; 0.05; 0.01]; 10 | positionsGeneCopy = [1040, 600, 1170, 600, 1105, 500; 1300, 400, 1430, 400, 1365, 300; 780, 600, 910, 600, 845, 500; 520, 400, 650, 400, 585, 300; 1560, 200, 1690, 200, 1625, 100; 2080, 400, 2210, 400, 2145, 300; 1820, 200, 1950, 200, 1885, 100; 260, 200, 390, 200, 325, 100; 0, 400, 130, 400, 65, 300]; 11 | 12 | % This will construct a decoupled Bayesian network and convert it into a 13 | % file that can be viewed in SamIam. 14 | 15 | factorListDecoupled = constructDecoupledGeneticNetwork(pedigree, alleleFreqsThree, alphaListThree); 16 | sendToSamiamGeneCopy(pedigree, factorListDecoupled, alleleListThree, phenotypeList, positionsGeneCopy, 'cysticFibrosisBayesNetGeneCopy'); -------------------------------------------------------------------------------- /2.Bayesian Network for Genetic Inheritance/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | 9 | function submitWeb(partId) 10 | if ~exist('partId', 'var') || isempty(partId) 11 | partId = []; 12 | end 13 | 14 | submit(partId, 1); 15 | end 16 | 17 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/228_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue Apr 03 17:38:51 2012 India Standard Time 2 | # name: login 3 | # type: string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 
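% Example (sketch): with cardinalities D = [2 2], the first variable cycles
% fastest, so
%   AssignmentToIndex([1 1], [2 2])   % == 1
%   AssignmentToIndex([2 1], [2 2])   % == 2
%   AssignmentToIndex([1 2], [2 2])   % == 3
%   IndexToAssignment(4, [2 2])       % == [2 2]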
6 | % 7 | % See also IndexToAssignment.m and SampleFactors.m 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | 12 | function I = AssignmentToIndex(A, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | if (any(size(A) == 1)), 16 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 17 | else 18 | I = sum(bsxfun(@times, A - 1, cumprod([1, D(1:end - 1)])), 2) + 1; 19 | end; 20 | 21 | end 22 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ChooseTopSimilarityFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ChooseTopSimilarityFactors (allFactors, F) 2 | % This function chooses the similarity factors with the highest similarity 3 | % out of all the possibilities. 4 | % 5 | % Input: 6 | % allFactors: An array of all the similarity factors. 7 | % F: The number of factors to select. 8 | % 9 | % Output: 10 | % factors: The F factors out of allFactors for which the similarity score 11 | % is highest. 12 | % 13 | % Hint: Recall that the similarity score for two images will be in every 14 | % factor table entry (for those two images' factor) where they are 15 | % assigned the same character value. 16 | % 17 | % Copyright (C) Daphne Koller, Stanford University, 2012 18 | 19 | % If there are fewer than F factors total, just return all of them. 20 | if (length(allFactors) <= F) 21 | factors = allFactors; 22 | return; 23 | end 24 | 25 | % Your code here: 26 | %factors = allFactors; %%% REMOVE THIS LINE 27 | l = length(allFactors); 28 | factors = repmat(struct('var', [], 'card', [], 'val', []), F, 1); 29 | values = ones(l,1); 30 | for i = 1:l 31 | values(i) = allFactors(i).val(1); 32 | end 33 | [s,ind] = sort(values,'descend'); 34 | for i=1:F 35 | factors(i) = allFactors(ind(i)); 36 | end 37 | end 38 | 39 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeAllSimilarityFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ComputeAllSimilarityFactors (images, K) 2 | % This function computes all of the similarity factors for the images in 3 | % one word. 4 | % 5 | % Input: 6 | % images: An array of structs containing the 'img' value for each 7 | % character in the word. 8 | % K: The alphabet size (accessible in imageModel.K for the provided 9 | % imageModel). 10 | % 11 | % Output: 12 | % factors: Every similarity factor in the word. You should use 13 | % ComputeSimilarityFactor to compute these. 14 | % 15 | % Copyright (C) Daphne Koller, Stanford University, 2012 16 | 17 | n = length(images); 18 | nFactors = nchoosek (n, 2); 19 | 20 | factors = repmat(struct('var', [], 'card', [], 'val', []), nFactors, 1); 21 | k = 1; 22 | % Your code here: 23 | for i = 1:n-1 24 | for j = i+1:n 25 | factors(k) = ComputeSimilarityFactor(images,K,i,j); 26 | k+=1; 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeEqualPairwiseFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ComputeEqualPairwiseFactors (images, K) 2 | % This function computes the pairwise factors for one word in which every 3 | % factor value is set to be 1. 4 | % 5 | % Input: 6 | % images: An array of structs containing the 'img' value for each 7 | % character in the word. 
8 | % K: The alphabet size (accessible in imageModel.K for the provided 9 | % imageModel). 10 | % 11 | % Output: 12 | % factors: The pairwise factors for this word. Every entry in the factor 13 | % vals should be 1. 14 | % 15 | % Copyright (C) Daphne Koller, Stanford University, 2012 16 | 17 | n = length(images); 18 | 19 | factors = repmat(struct('var', [], 'card', [], 'val', []), n - 1, 1); 20 | 21 | % Your code here: 22 | 23 | end 24 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeImageFactor.m: -------------------------------------------------------------------------------- 1 | function P = ComputeImageFactor (img, imgModel) 2 | % This function computes the singleton OCR factor values for a single 3 | % image. 4 | % 5 | % Input: 6 | % img: The 16x8 matrix of the image 7 | % imgModel: The provided, trained image model 8 | % 9 | % Output: 10 | % P: A K-by-1 array of the factor values for each of the K possible 11 | % character assignments to the given image 12 | % 13 | % Copyright (C) Daphne Koller, Stanford University, 2012 14 | 15 | X = img(:); 16 | N = length(X); 17 | K = imgModel.K; 18 | 19 | theta = reshape(imgModel.params(1:N*(K-1)), K-1, N); 20 | bias = reshape(imgModel.params((1+N*(K-1)):end), K-1, 1); 21 | 22 | W = [ bsxfun(@plus, theta * X, bias) ; 0 ]; 23 | W = bsxfun(@minus, W, max(W)); 24 | W = exp(W); 25 | 26 | P=bsxfun(@rdivide, W, sum(W)); 27 | 28 | 29 | end 30 | 31 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputePairwiseFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ComputePairwiseFactors (images, pairwiseModel, K) 2 | % This function computes the pairwise factors for one word and uses the 3 | % given pairwise model to set the factor values. 4 | % 5 | % Input: 6 | % images: An array of structs containing the 'img' value for each 7 | % character in the word. 8 | % pairwiseModel: The provided pairwise model. It is a K-by-K matrix. For 9 | % character i followed by character j, the factor value should be 10 | % pairwiseModel(i, j). 11 | % K: The alphabet size (accessible in imageModel.K for the provided 12 | % imageModel). 13 | % 14 | % Output: 15 | % factors: The pairwise factors for this word. 16 | % 17 | % Copyright (C) Daphne Koller, Stanford University, 2012 18 | 19 | n = length(images); 20 | 21 | % If there are fewer than 2 characters, return an empty factor list. 22 | if (n < 2) 23 | factors = []; 24 | return; 25 | end 26 | 27 | factors = repmat(struct('var', [], 'card', [], 'val', []), n - 1, 1); 28 | 29 | % Your code here: 30 | val = pairwiseModel(:); %to be computed 31 | 32 | for i = 1:n-1 33 | factors(i).var = [i,i+1]; 34 | factors(i).card = [K,K]; 35 | factors(i).val = val; 36 | end 37 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeSimilarityFactor.m: -------------------------------------------------------------------------------- 1 | function factor = ComputeSimilarityFactor (images, K, i, j) 2 | % This function computes the similarity factor between two character images 3 | % in one word --- which characters is given by indices i and j (a 4 | % description of how the factor should be computed is given below). 5 | % 6 | % Input: 7 | % images: A struct array of character images from one word. 8 | % K: The alphabet size. 9 | % i,j: The scope of that factor. 
That is, you should construct a factor 10 | % between characters i and j in the images array. 11 | % 12 | % Output: 13 | % factor: The similarity factor between these two characters. For any 14 | % assignment C_i != C_j, the factor value should be one. For any 15 | % assignment C_i == C_j, the factor value should be 16 | % ImageSimilarity(I_i, I_j) --- ie, the computed value given by 17 | % ImageSimilarity.m on the two images. 18 | % 19 | % Copyright (C) Daphne Koller, Stanford University, 2012 20 | 21 | factor = struct('var', [], 'card', [], 'val', []); 22 | 23 | % Your code here: 24 | factor.var = [i,j]; 25 | factor.card = [K,K]; 26 | factor.val = ones(K*K,1); 27 | value = ImageSimilarity(images(i).img,images(j).img); 28 | for i=1:K 29 | factor.val(AssignmentToIndex([i,i],[K,K])) = value; 30 | end 31 | end 32 | 33 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeSingletonFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ComputeSingletonFactors (images, imageModel) 2 | % This function computes the single OCR factors for all of the images in a 3 | % word. 4 | % 5 | % Input: 6 | % images: An array of structs containing the 'img' value for each 7 | % character in the word. You could, for example, pass in allWords{1} to 8 | % use the first word of the provided dataset. 9 | % imageModel: The provided OCR image model. 10 | % 11 | % Output: 12 | % factors: An array of the OCR factors, one for every character in the 13 | % image. 14 | % 15 | % Hint: You will want to use ComputeImageFactor.m when computing the 'val' 16 | % entry for each factor. 17 | % 18 | % Copyright (C) Daphne Koller, Stanford University, 2012 19 | 20 | % The number of characters in the word 21 | n = length(images); 22 | 23 | % Preallocate the array of factors 24 | factors = repmat(struct('var', [], 'card', [], 'val', []), n, 1); 25 | 26 | % Your code here: 27 | for i=1:n 28 | factors(i).var = [i]; 29 | factors(i).card = imageModel.K; 30 | factors(i).val = ComputeImageFactor(images(i).img,imageModel); 31 | end 32 | 33 | end 34 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeTripletFactors.m: -------------------------------------------------------------------------------- 1 | function factors = ComputeTripletFactors (images, tripletList, K) 2 | % This function computes the triplet factor values for one word. 3 | % 4 | % Input: 5 | % images: An array of structs containing the 'img' value for each 6 | % character in the word. 7 | % tripletList: An array of the character triplets we will consider (other 8 | % factor values should be 1). tripletList(i).chars gives character 9 | % assignment, and triplistList(i).factorVal gives the value for that 10 | % entry in the factor table. 11 | % K: The alphabet size (accessible in imageModel.K for the provided 12 | % imageModel). 13 | % 14 | % Hint: Every character triple in the word will use the same 'val' table. 15 | % Consider computing that array once and then resusing for each factor. 16 | % 17 | % Copyright (C) Daphne Koller, Stanford University, 2012 18 | 19 | 20 | n = length(images); 21 | 22 | % If the word has fewer than three characters, then return an empty list. 
23 | if (n < 3) 24 | factors = []; 25 | return 26 | end 27 | 28 | factors = repmat(struct('var', [], 'card', [], 'val', []), n - 2, 1); 29 | 30 | % Your code here: 31 | val = ones(K*K*K,1); % default value of 1 for triplets not in tripletList 32 | m = length(tripletList); 33 | for i = 1:m 34 | val(AssignmentToIndex(tripletList(i).chars,[K,K,K])) = tripletList(i).factorVal; 35 | end 36 | for i = 1:n-2 37 | factors(i).var = [i,i+1,i+2]; 38 | factors(i).card = [K,K,K]; 39 | factors(i).val = val; 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ComputeWordPredictions.m: -------------------------------------------------------------------------------- 1 | function wordPredictions = ComputeWordPredictions (allWords, imageModel, pairwiseModel, tripletList) 2 | % This function computes the predicted character assignments for a list of 3 | % words. 4 | % 5 | % Input: 6 | % allWords: A cell array where allWords{i} is the struct array for the ith 7 | % word (this is the structure of the provided 'allWords' data). 8 | % imageModel: The provided image model struct. 9 | % pairwiseModel: A K-by-K matrix (K is the alphabet size) where 10 | % pairwiseModel(i, j) is the factor value for the pairwise factor of 11 | % character i followed by character j. 12 | % tripletList: The array of character triplets we will consider (along 13 | % with their corresponding factor values). 14 | % 15 | % Output: 16 | % wordPredictions: A cell array in which the ith entry is the array of 17 | % predicted characters for the ith word. For example, if you predict 18 | % that the 3rd word is cat, then wordPredictions{3} = [3 1 20]. 19 | % 20 | % Copyright (C) Daphne Koller, Stanford University, 2012 21 | 22 | numWords = length(allWords); 23 | wordPredictions = cell(numWords, 1); 24 | 25 | for i = 1:numWords 26 | wordPredictions{i} = RunInference(BuildOCRNetwork(allWords{i}, imageModel, pairwiseModel, tripletList)); 27 | end 28 | 29 | end 30 | 31 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %GETVALUEOFASSIGNMENT Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GETVALUEOFASSIGNMENT(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GETVALUEOFASSIGNMENT(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SETVALUEOFASSIGNMENT 11 | 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | function v = GetValueOfAssignment(F, A, VO) 15 | 16 | if (nargin == 2), 17 | indx = AssignmentToIndex(A, F.card); 18 | else 19 | map = zeros(length(F.var), 1); 20 | for i = 1:length(F.var), 21 | map(i) = find(VO == F.var(i)); 22 | end; 23 | indx = AssignmentToIndex(A(map), F.card); 24 | end; 25 | 26 | v = F.val(indx); 27 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ImageSimilarity.m: -------------------------------------------------------------------------------- 1 | function sim = ImageSimilarity (im1, im2) 2 | % This function computes the "similarity score" between two images. You 3 | % should use this value for the similarity factor when the two images 4 | % are assigned the same character. 5 | % 6 | % Input: 7 | % im1, im2: Two images from the provided dataset (they should be 16x8 8 | % matrices of 0s and 1s). 9 | % 10 | % Output: 11 | % sim: The similarity score of those images. 
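% (For identical non-blank images the cosine similarity is 1, so
% sim = 1 + 5*(1 - 0.283)^2, roughly 3.6; images whose cosine similarity
% falls below the mean get a score below 1. See the code below.)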
12 | % 13 | % Copyright (C) Daphne Koller, Stanford University, 2012 14 | 15 | a = im1(:); 16 | b = im2(:); 17 | 18 | meanSim = 0.283; % Avg sim score computed over held-out data. 19 | 20 | cosDist = (a' * b) / (norm(a) * norm(b)); 21 | 22 | diff = (cosDist - meanSim) ^ 2; 23 | 24 | if (cosDist > meanSim) 25 | sim = 1 + 5*diff; 26 | else 27 | sim = 1 / (1 + 5*diff); 28 | end 29 | 30 | end 31 | 32 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = bsxfun(@mod, floor(bsxfun(@rdivide, I(:) - 1, cumprod([1, D(1:end - 1)]))), D) + 1; 16 | 17 | end 18 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/PA3Data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/PA3Data.mat -------------------------------------------------------------------------------- /3.Markov Networks for OCR/PA3Description.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/PA3Description.pdf -------------------------------------------------------------------------------- /3.Markov Networks for OCR/PA3Models.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/PA3Models.mat -------------------------------------------------------------------------------- /3.Markov Networks for OCR/PA3SampleCases.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/PA3SampleCases.mat -------------------------------------------------------------------------------- /3.Markov Networks for OCR/PA3TestCases.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/PA3TestCases.mat -------------------------------------------------------------------------------- /3.Markov Networks for OCR/ScoreModel.m: -------------------------------------------------------------------------------- 1 | function [charAcc, wordAcc] = ScoreModel (words, imageModel, pairwiseModel, tripletList) 2 | % This function runs the Markov network model end-to-end and computes the 3 | % per-character and per-word accuracy on provided data. 
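% Example usage (a sketch; assumes the variables provided by PA3Data.mat and
% PA3Models.mat are named as in the assignment description):
%   load PA3Data.mat;     % hypothetically provides allWords
%   load PA3Models.mat;   % hypothetically provides imageModel, pairwiseModel, tripletList
%   [charAcc, wordAcc] = ScoreModel(allWords, imageModel, pairwiseModel, tripletList);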
4 | % 5 | % Input: 6 | % words: A cell array where words{i} is the struct array for the ith 7 | % word (this is the structure of the provided 'allWords' data). 8 | % imageModel: The provided image model struct. 9 | % pairwiseModel: A K-by-K matrix (K is the alphabet size) where pairwiseModel(i,j) 10 | % is the factor value for the pairwise factor of character i followed by 11 | % character j. 12 | % tripletModel: The array of character triplets we will consider (along 13 | % with their corresponding factor values). 14 | % 15 | % Output: 16 | % charAcc: The percentage of all characters (across all words) correctly 17 | % identified. (Between 0 and 1) 18 | % wordAcc: The percentage of the words in which every character is 19 | % correctly identified. (Between 0 and 1) 20 | % 21 | % Copyright (C) Daphne Koller, Stanford University, 2012 22 | 23 | predictions = ComputeWordPredictions(words, imageModel, pairwiseModel, tripletList); 24 | [charAcc, wordAcc] = ScorePredictions(words, predictions, true); 25 | 26 | end 27 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/SerializeFactorsFg.m: -------------------------------------------------------------------------------- 1 | function out = SerializeFactorsFg(F) 2 | % Serializes a factor struct array into the .fg format for libDAI 3 | % http://cs.ru.nl/~jorism/libDAI/doc/fileformats.html 4 | % 5 | % To avoid incompatibilities with EOL markers, make sure you write the 6 | % string to a file using the appropriate file type ('wt' for windows, 'w' 7 | % for unix) 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | 12 | lines = cell(5*numel(F) + 1, 1); 13 | 14 | lines{1} = sprintf('%d\n', numel(F)); 15 | lineIdx = 2; 16 | for i = 1:numel(F) 17 | lines{lineIdx} = sprintf('\n%d\n', numel(F(i).var)); 18 | lineIdx = lineIdx + 1; 19 | 20 | lines{lineIdx} = sprintf('%s\n', num2str(F(i).var(:)')); % ensure that we put in a row vector 21 | lineIdx = lineIdx + 1; 22 | 23 | lines{lineIdx} = sprintf('%s\n', num2str(F(i).card(:)')); % ensure that we put in a row vector 24 | lineIdx = lineIdx + 1; 25 | 26 | lines{lineIdx} = sprintf('%d\n', numel(F(i).val)); 27 | lineIdx = lineIdx + 1; 28 | 29 | % Internal storage of factor vals is already in the same indexing order 30 | % as what libDAI expects, so we don't need to convert the indices. 31 | vals = [0:(numel(F(i).val) - 1); F(i).val(:)']; 32 | lines{lineIdx} = sprintf('%d %0.8g\n', vals); 33 | lineIdx = lineIdx + 1; 34 | end 35 | 36 | out = sprintf('%s', lines{:}); 37 | end 38 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %SETVALUEOFASSIGNMENT Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SETVALUEOFASSIGNMENT(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SETVALUEOFASSIGNMENT(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 
10 | % 11 | % See also GETVALUEOFASSIGNMENT 12 | 13 | % Copyright (C) Daphne Koller, Stanford University, 2012 14 | 15 | function F = SetValueOfAssignment(F, A, v, VO) 16 | 17 | if (nargin == 3), 18 | indx = AssignmentToIndex(A, F.card); 19 | else 20 | map = zeros(length(F.var), 1); 21 | for i = 1:length(F.var), 22 | map(i) = find(VO == F.var(i)); 23 | end; 24 | indx = AssignmentToIndex(A(map), F.card); 25 | end; 26 | 27 | F.val(indx) = v; 28 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/Test.txt: -------------------------------------------------------------------------------- 1 | OBJS = \ 2 | backup.o 3 | getopt.o \ 4 | getopt1.o \ 5 | inp.o \ 6 | patch.o \ 7 | pch.o \ 8 | util.o \ 9 | version.o \ 10 | vndbj 11 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/VisualizeWord.m: -------------------------------------------------------------------------------- 1 | function VisualizeWord (word) 2 | % This function allows you to visualize the characters for a single word. 3 | % 4 | % Input: 5 | % word: A struct array, each with an 'img' attribute that gives the 16x8 6 | % pixel matrix for that image. 7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | padding = zeros(size(word(1).img, 1), 1); 11 | 12 | totalWidth = 10 * length(word); 13 | im = zeros(16, totalWidth); 14 | for i = 1:length(word) 15 | charIm = [padding word(i).img padding]; 16 | im(:, (1 + 10 * (i-1)) + (1:10)) = charIm; 17 | end 18 | 19 | width = size(im, 2); 20 | padding = zeros(1, width); 21 | im = [padding; im; padding]; 22 | 23 | figure; 24 | colormap(gray); 25 | imagesc(1 - im); 26 | axis equal; 27 | [height, width] = size(im); 28 | axis([0 width 0 height]); 29 | 30 | end 31 | 32 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inf.log: -------------------------------------------------------------------------------- 1 | Estimate of needed memory: 1647kB 2 | Maximum memory: unlimited 3 | Exact MAP state (log score = -7.59317): 4 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/doinference-linux: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/doinference-linux -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/doinference-mac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/doinference-mac -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/doinference.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/doinference.exe -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/inference-src.zip -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/AUTHORS: -------------------------------------------------------------------------------- 1 | libDAI was written by Joris M. Mooij with the help of the following people: 2 | 3 | Martijn Leisink (laid down the foundations for the library), 4 | Frederik Eaton (contributed the Gibbs sampler, conditioned BP, fractional BP and various other improvements), 5 | Charles Vaske (contributed the parameter learning algorithms), 6 | Giuseppe Passino (contributed the findMaximum function, and various other improvements), 7 | Bastian Wemmenhove (contributed the MR algorithm), 8 | Patrick Pletscher (contributed the SWIG interface). 9 | 10 | Smaller contributions (bug fixes and miscellaneous smaller patches) have been made by: 11 | Claudio Lima, Christian Wojek, Sebastian Nowozin, Stefano Pellegrini, Ofer Meshi, 12 | Dan Preston, Peter Gober, Jiuxiang Hu, Peter Rockett, Dhruv Batra, Alexander Schwing, 13 | Alejandro Lage, Matt Dunham, Laurens van der Maaten, Jerome Maye, Priya, Hynek Urban, 14 | Avneesh Saluja, Thomas Mensink. 15 | 16 | Part of this work was part of the Interactive Collaborative Information Systems (ICIS) 17 | project, supported by the Dutch Ministry of Economic Affairs, grant BSIK03024. The 18 | Radboud University Nijmegen, The Netherlands, is one of the copyright holders. 19 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006-2011, the libDAI authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | * Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright notice, 9 | this list of conditions and the following disclaimer in the documentation 10 | and/or other materials provided with the distribution. 11 | 12 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 13 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 14 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 15 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 16 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 19 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 20 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 21 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
22 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/Makefile.ALL: -------------------------------------------------------------------------------- 1 | # This file is part of libDAI - http://www.libdai.org/ 2 | # 3 | # Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | # 5 | # Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 6 | 7 | 8 | # This file can be used to configure compile time options of libDAI. 9 | # Here the user can enable or disable various approximate inference 10 | # methods and additional build targets (documentation, MatLab interface) 11 | # and specify whether to build with debug information included. 12 | # 13 | # It is platform independent and is included by Makefile. 14 | # 15 | # A boolean variable VAR can be set to true ("VAR=true") or to false ("VAR=") 16 | 17 | 18 | # COMPILATION AND BUILD FLAGS 19 | 20 | # Enable/disable various approximate inference methods 21 | WITH_BP=true 22 | WITH_FBP=true 23 | WITH_TRWBP=true 24 | WITH_MF=true 25 | WITH_HAK=true 26 | WITH_LC=true 27 | WITH_TREEEP=true 28 | WITH_JTREE=true 29 | WITH_MR=true 30 | WITH_GIBBS=true 31 | WITH_CBP=true 32 | WITH_DECMAP=true 33 | 34 | # Build with debug info? (slower but safer) 35 | DEBUG=true 36 | 37 | # Build doxygen documentation? (doxygen and TeX need to be installed) 38 | WITH_DOC= 39 | 40 | # Build MatLab interface? (MatLab needs to be installed) 41 | WITH_MATLAB= 42 | 43 | # Build image segmentation example? (CImg needs to be installed) 44 | WITH_CIMG= 45 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/Makefile: -------------------------------------------------------------------------------- 1 | # Include flags 2 | INC=-I../include 3 | # Library path flags 4 | LIBS=-lgmpxx -lgmp 5 | # Location of libDAI library 6 | LIB=../lib 7 | # Compiler 8 | CC=g++ 9 | # Compiler flags 10 | CCFLAGS=-Wno-deprecated -Wall -W -Wextra -fpic -O3 -static $(INC) 11 | 12 | all : uai2010-aie-solver 13 | 14 | uai2010-aie-solver : uai2010-aie-solver.cpp $(LIB)/libdai.a 15 | $(CC) $(CCFLAGS) -o$@ $< $(LIB)/libdai.a $(LIBS) 16 | 17 | # CLEAN 18 | ######## 19 | 20 | .PHONY : clean 21 | clean : 22 | -rm uai2010-aie-solver 23 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/doinference: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/inference-src/libdai/examples/doinference -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_bipgraph.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 
6 | */ 7 | 8 | 9 | #include 10 | 11 | using namespace std; 12 | using namespace dai; 13 | 14 | int main() { 15 | // Create a list of edges 16 | vector edges; 17 | edges.reserve( 5 ); 18 | edges.push_back( Edge(0, 0) ); 19 | edges.push_back( Edge(1, 0) ); 20 | edges.push_back( Edge(2, 0) ); 21 | edges.push_back( Edge(1, 1) ); 22 | edges.push_back( Edge(2, 1) ); 23 | 24 | // Create a bipartite graph with 3 nodes of type 1, 25 | // 2 nodes of type 2 and edge list edges. 26 | BipartiteGraph G( 3, 2, edges.begin(), edges.end() ); 27 | 28 | // Display some information about G 29 | cout << "G has " << G.nrNodes1() << " nodes of type 1, " << G.nrNodes2() << " nodes of type 2 and " << G.nrEdges() << " edges." << endl << endl; 30 | 31 | // Iterate over all nodes n1 of type 1 32 | for( size_t n1 = 0; n1 < G.nrNodes1(); n1++ ) { 33 | cout << "Node " << n1 << " of type 1 has " << G.nb1(n1).size() << " neighbors:" << endl; 34 | // Iterate over all neighbors n2 of n1 35 | bforeach( const Neighbor &n2, G.nb1(n1) ) { 36 | // The n2.iter'th neighbor of n1 is n2: 37 | DAI_ASSERT( G.nb1(n1)[n2.iter] == n2 ); 38 | 39 | // The n2.dual'th neighbor of n2 is n1: 40 | DAI_ASSERT( G.nb2(n2)[n2.dual] == n1 ); 41 | 42 | // n2 can be used as an abbreviation of n2.node: 43 | DAI_ASSERT( static_cast(n2) == n2.node ); 44 | 45 | cout << " the " << n2.iter << "'th neighbor is node " << n2 << " of type 2" << endl; 46 | } 47 | cout << endl; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_bipgraph.out: -------------------------------------------------------------------------------- 1 | G has 3 nodes of type 1, 2 nodes of type 2 and 5 edges. 2 | 3 | Node 0 of type 1 has 1 neighbors: 4 | the 0'th neighbor is node 0 of type 2 5 | 6 | Node 1 of type 1 has 2 neighbors: 7 | the 0'th neighbor is node 0 of type 2 8 | the 1'th neighbor is node 1 of type 2 9 | 10 | Node 2 of type 1 has 2 neighbors: 11 | the 0'th neighbor is node 0 of type 2 12 | the 1'th neighbor is node 1 of type 2 13 | 14 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_img_in1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_img_in1.jpg -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_img_in2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_img_in2.jpg -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_permute.out: -------------------------------------------------------------------------------- 1 | V = (x1, x2, x0) 2 | X = {x0, x1, x2} 3 | Note that the ordering of the variables in X is the canonical ordering 4 | (ascendingly according to their labels) but the ordering in V is different. 
5 | 6 | The permutation between both variable orderings is sigma = (2, 0, 1), or more verbosely: 7 | sigma[0] = 2 8 | sigma[1] = 0 9 | sigma[2] = 1 10 | This means that variable V[sigma[n]] should correspond with the n'th variable in X (for n=0,...,2)...OK. 11 | 12 | The states of the variables x0,x1,x2 are, according to the ordering in V: 13 | SV: x0: x1: x2: 14 | 0 0 0 0 15 | 1 0 1 0 16 | 2 0 2 0 17 | 3 0 0 1 18 | 4 0 1 1 19 | 5 0 2 1 20 | 6 1 0 0 21 | 7 1 1 0 22 | 8 1 2 0 23 | 9 1 0 1 24 | 10 1 1 1 25 | 11 1 2 1 26 | 27 | The states of the variables x0,x1,x2 are, according to the canonical ordering in X: 28 | SX: x0: x1: x2: 29 | 0 0 0 0 30 | 1 1 0 0 31 | 2 0 1 0 32 | 3 1 1 0 33 | 4 0 2 0 34 | 5 1 2 0 35 | 6 0 0 1 36 | 7 1 0 1 37 | 8 0 1 1 38 | 9 1 1 1 39 | 10 0 2 1 40 | 11 1 2 1 41 | 42 | The permutation sigma induces the following permutation of linear indices of joint states: 43 | SV: SX: 44 | 0 0 45 | 1 2 46 | 2 4 47 | 3 6 48 | 4 8 49 | 5 10 50 | 6 1 51 | 7 3 52 | 8 5 53 | 9 7 54 | 10 9 55 | 11 11 56 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_sprinkler.dot: -------------------------------------------------------------------------------- 1 | graph Sprinkler { 2 | node[shape=circle]; 3 | C; 4 | S; 5 | R; 6 | W; 7 | node[shape=box]; 8 | f0 [label="P (C)"]; 9 | f1 [label="P (S|C)"]; 10 | f2 [label="P (R|C)"]; 11 | f3 [label="P (W|S,R)"]; 12 | 13 | f0 -- C; 14 | f1 -- S; 15 | f1 -- C; 16 | f2 -- R; 17 | f2 -- C; 18 | f3 -- W; 19 | f3 -- S; 20 | f3 -- R; 21 | }; 22 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_sprinkler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_sprinkler.png -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/example_varset.out: -------------------------------------------------------------------------------- 1 | X = {x0,x1} 2 | 3 | Var x0 has 2 states (possible values). 4 | Var x1 has 3 states. 5 | 6 | VarSet {x0,x1} has 6 states (joint assignments of its variables). 
7 | 8 | States of VarSets correspond to states of their constituent Vars: 9 | state of x0: state of x1: state of X: 10 | 0 0 0 11 | 1 0 1 12 | 0 1 2 13 | 1 1 3 14 | 0 2 4 15 | 1 2 5 16 | 17 | And vice versa: 18 | state of x0: state of x1: state of X: 19 | 0 0 0 20 | 1 0 1 21 | 0 1 2 22 | 1 1 3 23 | 0 2 4 24 | 1 2 5 25 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/examples/sprinkler.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 4 4 | CondProbEstimation [target_dim=2,total_dim=2,pseudo_count=1] 5 | 1 6 | 0 0 7 | CondProbEstimation [target_dim=2,total_dim=4,pseudo_count=1] 8 | 1 9 | 1 2 0 10 | CondProbEstimation [target_dim=2,total_dim=4,pseudo_count=1] 11 | 1 12 | 2 1 0 13 | CondProbEstimation [target_dim=2,total_dim=8,pseudo_count=1] 14 | 1 15 | 3 3 1 2 16 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/include/dai/matlab/matlab.h: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 6 | */ 7 | 8 | 9 | /// \file 10 | /// \brief Defines some utility functions for interfacing with MatLab 11 | 12 | 13 | #ifndef __defined_libdai_matlab_h 14 | #define __defined_libdai_matlab_h 15 | 16 | 17 | #include "mex.h" 18 | #include 19 | 20 | 21 | namespace dai { 22 | 23 | 24 | #ifdef SMALLMEM 25 | typedef int mwSize; 26 | typedef int mwIndex; 27 | #endif 28 | 29 | 30 | /// Convert vector structure to a cell vector of CPTAB-like structs 31 | mxArray *Factors2mx(const std::vector &Ps); 32 | 33 | /// Convert cell vector of CPTAB-like structs to vector 34 | std::vector mx2Factors(const mxArray *psi, long verbose); 35 | 36 | /// Convert CPTAB-like struct to Factor 37 | Factor mx2Factor(const mxArray *psi); 38 | 39 | 40 | } // end of namespace dai 41 | 42 | 43 | #endif 44 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/matlab/dai.m: -------------------------------------------------------------------------------- 1 | % [logZ,q,md,qv,qf,qmap] = dai(psi,method,opts) 2 | % 3 | % INPUT: psi = linear cell array containing the factors 4 | % (psi{i} should be a structure with a Member field 5 | % and a P field). 6 | % method = name of the method. 7 | % opts = string of options. 8 | % 9 | % OUTPUT: logZ = approximation of the logarithm of the partition sum. 10 | % q = linear cell array containing all final beliefs. 11 | % md = maxdiff (final linf-dist between new and old single node beliefs). 12 | % qv = linear cell array containing all variable beliefs. 13 | % qf = linear cell array containing all factor beliefs. 14 | % qmap = linear array containing the MAP state (only for BP,JTree). 15 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/matlab/dai_potstrength.m: -------------------------------------------------------------------------------- 1 | % N = dai_potstrength (psi, i, j) 2 | % 3 | % INPUT: psi = structure with a Member field and a P field, like a CPTAB. 4 | % i = label of a variable in psi. 
5 | % j = label of another variable in psi. 6 | % 7 | % OUTPUT: N = strength of psi in direction i->j. 8 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/matlab/dai_readfg.m: -------------------------------------------------------------------------------- 1 | % [psi] = dai_readfg (filename) 2 | % 3 | % INPUT: filename = filename of a .fg file 4 | % 5 | % OUTPUT: psi = linear cell array containing the factors 6 | % (psi{i} is a structure with a Member field 7 | % and a P field, like a CPTAB). 8 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/matlab/dai_writefg.m: -------------------------------------------------------------------------------- 1 | % dai_writefg(psi,filename) 2 | % 3 | % INPUT: psi = linear cell array containing the factors 4 | % (psi{i} should be a structure with a Member field 5 | % and a P field, like a CPTAB). 6 | % filename = filename of a .fg file 7 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/scripts/makeREADME: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "libDAI - A free/open source C++ library for Discrete Approximate Inference" > README 3 | echo >> README 4 | echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" >> README 5 | echo >> README 6 | echo "Version: $DAI_VERSION" >> README 7 | echo "Date: $DAI_DATE" >> README 8 | echo "See also: http://www.libdai.org" >> README 9 | echo >> README 10 | w3m -dump doc/html/license.html | awk 'BEGIN {start=0}; $1 ~ /━/ {start=start+1; if (start<2) print $0}; $1 !~ /━/ {if (start>0 && start<2) print $0}' >> README 11 | echo >> README 12 | w3m -dump doc/html/citations.html | awk 'BEGIN {start=0}; $1 ~ /━/ {start=start+1; if (start<2) print $0}; $1 !~ /━/ {if (start>0 && start<2) print $0}' >> README 13 | w3m -dump doc/html/index.html | awk 'BEGIN {start=0}; $1 ~ /━/ {start=start+1; if (start<2) print $0}; $1 !~ /━/ {if (start>0 && start<2) print $0}' >> README 14 | w3m -dump doc/html/build.html | awk 'BEGIN {start=0}; $1 ~ /━/ {start=start+1; if (start>0 && start<5) print $0}; $1 !~ /━/ {if (start>0 && start<5) print $0}' >> README 15 | sed -i 's/━/-/g' README 16 | sed -i 's/•/*/g' README 17 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/src/exceptions.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 
6 | */ 7 | 8 | 9 | #include 10 | 11 | 12 | namespace dai { 13 | 14 | 15 | std::string Exception::ErrorStrings[NUM_ERRORS] = { 16 | "Feature not implemented", 17 | "Assertion failed", 18 | "Impossible typecast", 19 | "Requested object not found", 20 | "Requested belief not available", 21 | "Unknown ENUM value", 22 | "Unknown DAI algorithm", 23 | "Unrecognized parameter estimation method", 24 | "Unknown Property type", 25 | "Unknown Property", 26 | "Malformed Property", 27 | "Not all mandatory Properties specified", 28 | "Invalid alias", 29 | "Cannot read file", 30 | "Cannot write file", 31 | "Invalid FactorGraph file", 32 | "Invalid Evidence file", 33 | "Invalid Expectation-Maximization file", 34 | "Quantity not normalizable", 35 | "Multiple undo levels unsupported", 36 | "FactorGraph is not connected", 37 | "Internal error", 38 | "Runtime error", 39 | "Out of memory" 40 | }; 41 | 42 | 43 | } 44 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/src/matlab/dai_writefg.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 6 | */ 7 | 8 | 9 | #include 10 | #include "mex.h" 11 | #include 12 | #include 13 | 14 | 15 | using namespace std; 16 | using namespace dai; 17 | 18 | 19 | /* Input Arguments */ 20 | 21 | #define PSI_IN prhs[0] 22 | #define FILENAME_IN prhs[1] 23 | #define NR_IN 2 24 | 25 | 26 | /* Output Arguments */ 27 | 28 | #define NR_OUT 0 29 | 30 | 31 | void mexFunction( int nlhs, mxArray * /*plhs*/[], int nrhs, const mxArray*prhs[] ) { 32 | char *filename; 33 | 34 | // Check for proper number of arguments 35 | if ((nrhs != NR_IN) || (nlhs != NR_OUT)) { 36 | mexErrMsgTxt("Usage: dai_writefg(psi,filename);\n\n" 37 | "\n" 38 | "INPUT: psi = linear cell array containing the factors\n" 39 | " (psi{i} should be a structure with a Member field\n" 40 | " and a P field, like a CPTAB).\n" 41 | " filename = filename of a .fg file\n"); 42 | } 43 | 44 | // Get input parameters 45 | vector factors = mx2Factors(PSI_IN,0); 46 | 47 | size_t buflen; 48 | buflen = mxGetN( FILENAME_IN ) + 1; 49 | filename = (char *)mxCalloc( buflen, sizeof(char) ); 50 | mxGetString( FILENAME_IN, filename, buflen ); 51 | 52 | // Construct factorgraph 53 | FactorGraph fg(factors); 54 | 55 | try { 56 | fg.WriteToFile( filename ); 57 | } catch( std::exception &e ) { 58 | mexErrMsgTxt( e.what() ); 59 | } 60 | 61 | return; 62 | } 63 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/src/varset.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 
6 | */ 7 | 8 | 9 | #include 10 | 11 | 12 | namespace dai { 13 | 14 | 15 | using namespace std; 16 | 17 | 18 | size_t calcLinearState( const VarSet &vs, const std::map &state ) { 19 | size_t prod = 1; 20 | size_t st = 0; 21 | for( VarSet::const_iterator v = vs.begin(); v != vs.end(); v++ ) { 22 | std::map::const_iterator m = state.find( *v ); 23 | if( m != state.end() ) 24 | st += prod * m->second; 25 | prod *= v->states(); 26 | } 27 | return st; 28 | } 29 | 30 | 31 | std::map calcState( const VarSet &vs, size_t linearState ) { 32 | std::map state; 33 | for( VarSet::const_iterator v = vs.begin(); v != vs.end(); v++ ) { 34 | state[*v] = linearState % v->states(); 35 | linearState /= v->states(); 36 | } 37 | DAI_ASSERT( linearState == 0 ); 38 | return state; 39 | } 40 | 41 | 42 | } // end of namespace dai 43 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/swig/Makefile: -------------------------------------------------------------------------------- 1 | # This file is part of libDAI - http://www.libdai.org/ 2 | # 3 | # Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | # 5 | # Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 6 | 7 | 8 | include ../Makefile.conf 9 | 10 | .PHONY: all 11 | 12 | all: _dai.so dai.oct 13 | 14 | _dai.so: ../lib/libdai.a dai.i 15 | $(SWIG) -python -classic -c++ dai.i 16 | g++ -Wall -c -O3 -g -fPIC dai_wrap.cxx -I$(INCLUDE_PYTHON) -I$(INCLUDE_BOOST) -I../include 17 | g++ -shared dai_wrap.o -o _dai.so ../lib/libdai.a 18 | 19 | dai.oct: ../lib/libdai.a dai.i 20 | $(SWIG) -octave -c++ -o dai_wrap.cpp dai.i 21 | mkoctfile -I$(INCLUDE_BOOST) -I../include -o dai.oct dai_wrap.cpp ../lib/libdai.a 22 | 23 | .PHONY: clean 24 | 25 | clean: 26 | rm -f dai_wrap.cxx dai_wrap.cpp dai_wrap.o _dai.so dai.oct dai.py dai.pyc sprinkler.fg 27 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/swig/README: -------------------------------------------------------------------------------- 1 | This directory contains preliminary experimental SWIG wrappers for libDAI 2 | written by Patrick Pletscher. They enable usage of libDAI functionality 3 | directly from python and octave. 
4 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/hoi1.fg: -------------------------------------------------------------------------------- 1 | # Factor graph made by utils/createfg 2 | # type = hoi 3 | # N = 10 4 | # M = 3 5 | # k = 3 6 | # beta = 1 7 | # seed = 12345 8 | 3 9 | 10 | 3 11 | 2 6 7 12 | 2 2 2 13 | 8 14 | 0 2.6190635748817 15 | 1 0.68885412652977 16 | 2 0.83464115122831 17 | 3 0.38475684223169 18 | 4 0.3255025197617 19 | 5 1.9692111887049 20 | 6 0.55208846715017 21 | 7 0.38595110280868 22 | 23 | 3 24 | 0 1 6 25 | 2 2 2 26 | 8 27 | 0 1.0352133626924 28 | 1 1.547478952122 29 | 2 2.3176521897449 30 | 3 1.2804190071868 31 | 4 4.9220798130027 32 | 5 2.5272557501946 33 | 6 0.83127929631575 34 | 7 0.26280563080263 35 | 36 | 3 37 | 1 2 4 38 | 2 2 2 39 | 8 40 | 0 0.2371680948972 41 | 1 3.7876163914702 42 | 2 5.3196087077347 43 | 3 0.96049191560722 44 | 4 4.8483648454236 45 | 5 0.45974506954001 46 | 6 2.1151447659773 47 | 7 0.57337737335857 48 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/hoi2.fg: -------------------------------------------------------------------------------- 1 | # Factor graph made by ../utils/createfg 2 | # type = hoi 3 | # N = 10 4 | # M = 2 5 | # k = 4 6 | # beta = 1 7 | # seed = 1234567 8 | 2 9 | 10 | 4 11 | 0 2 7 8 12 | 2 2 2 2 13 | 16 14 | 0 2.2435781084647 15 | 1 4.0907955014965 16 | 2 1.1211049952628 17 | 3 1.1924200451373 18 | 4 0.6827733720955 19 | 5 10.160805816224 20 | 6 0.67358126735639 21 | 7 0.11391058009404 22 | 8 0.48125062895484 23 | 9 0.72144061373552 24 | 10 0.42394324261549 25 | 11 1.8698790799175 26 | 12 0.751673014734 27 | 13 3.3944709353 28 | 14 3.0697466018763 29 | 15 1.3518859952286 30 | 31 | 4 32 | 1 2 6 7 33 | 2 2 2 2 34 | 16 35 | 0 0.73767149678205 36 | 1 1.0407773095719 37 | 2 3.128053583949 38 | 3 4.9605310970883 39 | 4 0.13720523936748 40 | 5 7.321038643904 41 | 6 0.32066394017723 42 | 7 1.4685064382197 43 | 8 0.17120387482343 44 | 9 0.98014607500624 45 | 10 0.10935224527934 46 | 11 0.67077127555589 47 | 12 1.1255976822923 48 | 13 2.0774604864366 49 | 14 0.36868871983683 50 | 15 0.8542593124821 51 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/hoi3.fg: -------------------------------------------------------------------------------- 1 | # Factor graph made by ../utils/createfg 2 | # type = hoi 3 | # N = 10 4 | # M = 3 5 | # k = 3 6 | # beta = 1 7 | # seed = 1234567 8 | 3 9 | 10 | 3 11 | 0 2 7 12 | 2 2 2 13 | 8 14 | 0 1.4681858088541 15 | 1 0.62677975912889 16 | 2 0.93772291212951 17 | 3 0.55789818196925 18 | 4 2.2169178835152 19 | 5 1.1158597402573 20 | 6 4.3668946864226 21 | 7 0.5386131975631 22 | 23 | 3 24 | 2 5 9 25 | 2 2 2 26 | 8 27 | 0 0.42394324261549 28 | 1 1.8698790799175 29 | 2 0.751673014734 30 | 3 3.3944709353 31 | 4 3.0697466018763 32 | 5 1.3518859952286 33 | 6 0.85108939407003 34 | 7 0.44249854609337 35 | 36 | 3 37 | 1 4 6 38 | 2 2 2 39 | 8 40 | 0 1.7323418161587 41 | 1 1.1774643424739 42 | 2 1.8134284856927 43 | 3 0.46535724537261 44 | 4 2.9520075062676 45 | 5 0.87717718658893 46 | 6 0.30844599722711 47 | 7 0.9045019040901 48 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/hoi4.fg: 
-------------------------------------------------------------------------------- 1 | # Factor graph made by ../utils/createfg 2 | # type = hoi 3 | # N = 10 4 | # M = 3 5 | # k = 3 6 | # beta = 1 7 | # seed = 123456 8 | 3 9 | 10 | 3 11 | 5 7 9 12 | 2 2 2 13 | 8 14 | 0 0.34695110012176 15 | 1 5.2445568426086 16 | 2 0.40353951923864 17 | 3 0.87337008674168 18 | 4 1.8484229288021 19 | 5 1.3558019751326 20 | 6 0.18957537662536 21 | 7 1.0106086348982 22 | 23 | 3 24 | 7 8 9 25 | 2 2 2 26 | 8 27 | 0 1.4346630217766 28 | 1 1.7381665310949 29 | 2 1.4305082973985 30 | 3 0.93926886261587 31 | 4 0.37893660692703 32 | 5 0.82593511761888 33 | 6 0.24322233722175 34 | 7 0.30679804692782 35 | 36 | 3 37 | 0 6 7 38 | 2 2 2 39 | 8 40 | 0 0.5061849993673 41 | 1 1.6117048972906 42 | 2 2.0693735419645 43 | 3 0.5455720871037 44 | 4 2.0160537987371 45 | 5 0.30172846959351 46 | 6 1.8962849316602 47 | 7 3.5838027075412 48 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/jtreemapbug.fg: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 2 4 | 0 1 5 | 2 2 6 | 4 7 | 0 1 8 | 1 2 9 | 2 2 10 | 3 1 11 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/maxprodbug.fg: -------------------------------------------------------------------------------- 1 | 3 2 | 3 | 2 4 | 1 2 5 | 2 2 6 | 4 7 | 0 1 8 | 1 0 9 | 2 0 10 | 3 1 11 | 12 | 2 13 | 1 3 14 | 2 2 15 | 4 16 | 0 1 17 | 1 1 18 | 2 1 19 | 3 1 20 | 21 | 2 22 | 2 3 23 | 2 2 24 | 4 25 | 0 0 26 | 1 1 27 | 2 1 28 | 3 0 29 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/2var.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 1 4 | CondProbEstimation [target_dim=2,total_dim=4,pseudo_count=1] 5 | 1 6 | 0 1 0 7 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/2var.fg: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 2 4 | 0 1 5 | 2 2 6 | 4 7 | 0 0.5 8 | 1 0.5 9 | 2 0.5 10 | 3 0.5 -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/2var_data.tab: -------------------------------------------------------------------------------- 1 | 0 1 2 | 3 | 0 1 4 | 0 1 5 | 0 1 6 | 0 1 7 | 0 1 8 | 0 1 9 | 0 1 10 | 0 1 11 | 0 1 12 | 0 0 13 | 1 1 14 | 1 1 15 | 1 1 16 | 1 0 17 | 1 0 18 | 1 0 19 | 1 0 20 | 1 0 21 | 1 0 22 | 1 0 23 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/3var.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 1 4 | CondProbEstimation [target_dim=2,total_dim=8,pseudo_count=1] 5 | 1 6 | 0 1 0 2 7 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/3var.fg: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 3 4 | 0 1 2 5 | 2 2 2 6 | 8 7 | 0 0.5 8 | 1 0.5 9 | 2 0.5 10 | 3 0.5 11 | 4 0.5 12 | 5 0.5 13 | 6 0.5 14 | 7 0.5 
-------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/hoi1_data.tab: -------------------------------------------------------------------------------- 1 | 0 1 2 4 6 7 2 | 3 | 0 0 1 1 0 4 | 1 1 1 0 5 | 0 1 0 0 0 1 6 | 0 0 0 1 0 0 7 | 1 0 0 1 0 8 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/hoi1_infer_f2.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 1 4 | CondProbEstimation [target_dim=2,total_dim=8,pseudo_count=1] 5 | 1 6 | 2 1 2 4 7 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/hoi1_share_f0_f1_f2.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 1 4 | CondProbEstimation [target_dim=2,total_dim=8,pseudo_count=1] 5 | 3 6 | 2 1 2 4 7 | 1 0 1 6 8 | 0 6 2 7 9 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/hoi1_share_f0_f2.em: -------------------------------------------------------------------------------- 1 | 1 2 | 3 | 1 4 | CondProbEstimation [target_dim=2,total_dim=8,pseudo_count=1] 5 | 2 6 | 2 1 2 4 7 | 0 6 2 7 8 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/runtests: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TMPFILE1=`mktemp /var/tmp/testem.XXXXXX` 3 | trap 'rm -f $TMPFILE1' 0 1 15 4 | 5 | ./testem 2var.fg 2var_data.tab 2var.em > $TMPFILE1 6 | ./testem 3var.fg 2var_data.tab 3var.em >> $TMPFILE1 7 | ./testem ../hoi1.fg hoi1_data.tab hoi1_share_f0_f2.em >> $TMPFILE1 8 | ./testem ../hoi1.fg hoi1_data.tab hoi1_share_f0_f1_f2.em >> $TMPFILE1 9 | diff -s $TMPFILE1 testem.out || exit 1 10 | 11 | rm -f $TMPFILE1 12 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testem/runtests.bat: -------------------------------------------------------------------------------- 1 | testem 2var.fg 2var_data.tab 2var.em > testem.out.tmp 2 | testem 3var.fg 2var_data.tab 3var.em >> testem.out.tmp 3 | testem ..\hoi1.fg hoi1_data.tab hoi1_share_f0_f2.em >> testem.out.tmp 4 | testem ..\hoi1.fg hoi1_data.tab hoi1_share_f0_f1_f2.em >> testem.out.tmp 5 | diff -s testem.out.tmp testem.out 6 | 7 | del testem.out.tmp 8 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testregression: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TMPFILE1=`mktemp /var/tmp/testfast.XXXXXX` 3 | trap 'rm -f $TMP_FILE' 0 1 15 4 | 5 | ./testall testfast.fg > $TMPFILE1 6 | diff -s $TMPFILE1 testfast.out || exit 1 7 | 8 | rm -f $TMPFILE1 9 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/testregression.bat: -------------------------------------------------------------------------------- 1 | testall testfast.fg | sed "s/\(e[+-]\)0/\1/g" > testfast.out.tmp 2 | diff -s testfast.out.tmp testfast.out 3 | del testfast.out.tmp 4 | 
-------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/twofactors.fg: -------------------------------------------------------------------------------- 1 | 2 2 | 3 | 2 4 | 0 1 5 | 2 2 6 | 4 7 | 0 0.570273 8 | 1 0.702232 9 | 2 0.774333 10 | 3 3.41627 11 | 12 | 2 13 | 2 3 14 | 2 2 15 | 4 16 | 0 1.51657 17 | 1 0.920425 18 | 2 0.254792 19 | 3 1.50469 20 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/unit/var_test.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 6 | */ 7 | 8 | 9 | #include 10 | #include 11 | 12 | 13 | using namespace dai; 14 | 15 | 16 | #define BOOST_TEST_MODULE VarTest 17 | 18 | 19 | #include 20 | 21 | 22 | BOOST_AUTO_TEST_CASE( ConstructorsTest ) { 23 | // check constructors 24 | Var x; 25 | BOOST_CHECK_EQUAL( x.label(), 0 ); 26 | BOOST_CHECK_EQUAL( x.states(), 0 ); 27 | 28 | x = Var( 0, 2 ); 29 | BOOST_CHECK_EQUAL( x.label(), 0 ); 30 | BOOST_CHECK_EQUAL( x.states(), 2 ); 31 | } 32 | 33 | BOOST_AUTO_TEST_CASE( AccMutTest ) { 34 | // check states and labels mutators 35 | Var x; 36 | x.states() = 3; 37 | BOOST_CHECK_EQUAL( x.states(), 3 ); 38 | 39 | x.label() = 5; 40 | BOOST_CHECK_EQUAL( x.label(), 5 ); 41 | } 42 | 43 | BOOST_AUTO_TEST_CASE( ComparisonTest ) { 44 | // check comparison operators 45 | Var x( 5, 3 ); 46 | Var y( 6, 3 ); 47 | Var z( 5, 3 ); 48 | BOOST_CHECK( x < y ); 49 | BOOST_CHECK( !(x < z) ); 50 | BOOST_CHECK( y > x ); 51 | BOOST_CHECK( !(z > x) ); 52 | BOOST_CHECK( x <= y ); 53 | BOOST_CHECK( x <= z ); 54 | BOOST_CHECK( !(x >= y) ); 55 | BOOST_CHECK( x >= z ); 56 | BOOST_CHECK( !(x == y) ); 57 | BOOST_CHECK( x == z ); 58 | BOOST_CHECK( x != y ); 59 | BOOST_CHECK( !(x != z) ); 60 | } 61 | 62 | BOOST_AUTO_TEST_CASE( StreamTest ) { 63 | // check stream output 64 | Var x( 5, 3 ); 65 | std::stringstream ss; 66 | ss << x; 67 | std::string s; 68 | ss >> s; 69 | BOOST_CHECK_EQUAL( s, "x5" ); 70 | } 71 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/tests/zeroes1.fg: -------------------------------------------------------------------------------- 1 | # Factor graph that contains many zeroes (useful for testing purposes) 2 | 3 3 | 4 | 1 5 | 0 6 | 2 7 | 2 8 | 0 0 9 | 1 1 10 | 11 | 2 12 | 0 1 13 | 2 2 14 | 4 15 | 0 1 16 | 1 2 17 | 2 2 18 | 3 1 19 | 20 | 2 21 | 1 2 22 | 2 2 23 | 4 24 | 0 1 25 | 1 2 26 | 2 2 27 | 3 1 28 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/utils/fg2dot.cpp: -------------------------------------------------------------------------------- 1 | /* This file is part of libDAI - http://www.libdai.org/ 2 | * 3 | * Copyright (c) 2006-2011, The libDAI authors. All rights reserved. 4 | * 5 | * Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. 
6 | */ 7 | 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | 16 | using namespace dai; 17 | using namespace std; 18 | 19 | 20 | int main( int argc, char *argv[] ) { 21 | if( argc != 3 ) { 22 | // Display help message if number of command line arguments is incorrect 23 | cout << "This program is part of libDAI - http://www.libdai.org/" << endl << endl; 24 | cout << "Usage: ./fg2dot " << endl << endl; 25 | cout << "Converts a libDAI factor graph file to a GraphViz .dot file for visualization." << endl; 26 | cout << "The .dot file can be converted to .ps (PostScript) by" << endl; 27 | cout << "'neato -T ps out.dot > out.ps' or by 'dot -T ps out.dot > out.ps'" << endl << endl; 28 | return 1; 29 | } else { 30 | // Read factorgraph 31 | FactorGraph fg; 32 | char *infile = argv[1]; 33 | fg.ReadFromFile( infile ); 34 | 35 | // Open output file for writing (except if filename equals "-") 36 | ostream *os = &cout; 37 | ofstream outfile; 38 | if( string( argv[2] ) != "-" ) { 39 | outfile.open( argv[2] ); 40 | if( !outfile.is_open() ) { 41 | cerr << "Cannot open " << argv[2] << " for writing" << endl; 42 | return 1; 43 | } 44 | os = &outfile; 45 | } // else, write to cout 46 | 47 | // Write the .dot file 48 | fg.printDot( *os ); 49 | 50 | return 0; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/inference/inference-src/libdai/utils/viewfg: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ./fg2dot $1 - | neato -T ps | gv - 3 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Thu Apr 12 10:17:00 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /3.Markov Networks for OCR/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 
7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 | -------------------------------------------------------------------------------- /4.Exact Inference/228_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue Apr 03 17:38:51 2012 India Standard Time 2 | # name: login 3 | # type: string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /4.Exact Inference/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | 12 | function I = AssignmentToIndex(A, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | if (any(size(A) == 1)), 16 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 17 | else 18 | I = sum(repmat(cumprod([1, D(1:end - 1)]), size(A, 1), 1) .* (A - 1), 2) + 1; 19 | end; 20 | 21 | end 22 | -------------------------------------------------------------------------------- /4.Exact Inference/ComputeExactMarginalsBP.m: -------------------------------------------------------------------------------- 1 | %COMPUTEEXACTMARGINALSBP Runs exact inference and returns the marginals 2 | %over all the variables (if isMax == 0) or the max-marginals (if isMax == 1). 3 | % 4 | % M = COMPUTEEXACTMARGINALSBP(F, E, isMax) takes a list of factors F, 5 | % evidence E, and a flag isMax, runs exact inference and returns the 6 | % final marginals for the variables in the network. If isMax is 1, then 7 | % it runs exact MAP inference, otherwise exact inference (sum-prod). 8 | % It returns an array of size equal to the number of variables in the 9 | % network where M(i) represents the ith variable and M(i).val represents 10 | % the marginals of the ith variable. 11 | % 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | 15 | function M = ComputeExactMarginalsBP(F, E, isMax) 16 | 17 | % initialization 18 | % you should set it to the correct value in your code 19 | M = []; 20 | vars = unique([F.var]); 21 | 22 | N = length(vars); 23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 24 | % YOUR CODE HERE 25 | % 26 | % Implement Exact and MAP Inference. 
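% The implementation below builds a clique tree from the factors and evidence,
% calibrates it with sum-product (isMax == 0) or max-product (isMax == 1), and
% then, for each variable, extracts its (max-)marginal from the first
% calibrated clique containing that variable. Sum-product marginals are
% renormalized to sum to 1 at the end.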
27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 28 | CliqTree = CreateCliqueTree(F,E); 29 | P = CliqueTreeCalibrate(CliqTree,isMax); 30 | M = repmat(struct('var',[],'card',[],'val',[]),1,N); 31 | for i = 1:N 32 | for j = 1:length(CliqTree.cliqueList) 33 | inter = intersect(vars(i),CliqTree.cliqueList(j).var); 34 | if length(inter)==1 35 | V = setdiff(CliqTree.cliqueList(j).var,vars(i)); 36 | if isMax == 0 37 | M(i) = FactorMarginalization(P.cliqueList(j),V); 38 | else 39 | M(i) = FactorMaxMarginalization(P.cliqueList(j),V); 40 | end 41 | break; 42 | end 43 | end 44 | end 45 | if isMax==0 46 | for i=1:N 47 | M(i).val = M(i).val/sum(M(i).val); 48 | end 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /4.Exact Inference/ComputeJointDistribution.m: -------------------------------------------------------------------------------- 1 | %ComputeJointDistribution Computes the joint distribution defined by a set 2 | % of given factors 3 | % 4 | % Joint = ComputeJointDistribution(F) computes the joint distribution 5 | % defined by a set of given factors 6 | % 7 | % Joint is a factor that encapsulates the joint distribution given by F 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % 11 | % Copyright (C) Daphne Koller, Stanford University, 2012 12 | 13 | 14 | function Joint = ComputeJointDistribution(F) 15 | 16 | % Check for empty factor list 17 | assert(numel(F) ~= 0, 'Error: empty factor list'); 18 | 19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 20 | % YOUR CODE HERE: 21 | % Compute the joint distribution defined by F 22 | % You may assume that you are given legal CPDs so no input checking is required. 23 | % Find the factor product of all of the factors 24 | if (length(F) == 0) 25 | % There are no factors, so create an empty factor list 26 | Joint = struct('var', [], 'card', [], 'val', []); 27 | else 28 | Joint = F(1); 29 | for i = 2:length(F) 30 | % Iterate through factors and incorporate them into the joint distribution 31 | Joint = FactorProduct(Joint, F(i)); 32 | end 33 | end 34 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 35 | end 36 | 37 | -------------------------------------------------------------------------------- /4.Exact Inference/ComputeMarginal.m: -------------------------------------------------------------------------------- 1 | %ComputeMarginal Computes the marginal over a set of given variables 2 | % M = ComputeMarginal(V, F, E) computes the marginal over variables V 3 | % in the distribution induced by the set of factors F, given evidence E 4 | % 5 | % M is a factor containing the marginal over variables V 6 | % V is a vector containing the variables in the marginal e.g. [1 2 3] for 7 | % X_1, X_2 and X_3. 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % E is an N-by-2 matrix, each row being a variable/value pair. 11 | % Variables are in the first column and values are in the second column. 12 | % 13 | % Copyright (C) Daphne Koller, Stanford University, 2012 14 | 15 | 16 | function M = ComputeMarginal(V, F, E) 17 | 18 | % Check for empty factor list 19 | assert(numel(F) ~= 0, 'Error: empty factor list'); 20 | 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | % YOUR CODE HERE: 23 | % M should be a factor 24 | % Remember to renormalize the entries of M! 
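% The implementation below reduces the factors by the evidence E with
% ObserveEvidence, multiplies them into the full joint with
% ComputeJointDistribution, normalizes the joint, and then sums out all
% variables not in V with FactorMarginalization.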
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | F = ObserveEvidence(F, E); 27 | Joint = ComputeJointDistribution(F); 28 | Joint.val = Joint.val ./ sum(Joint.val); 29 | M = FactorMarginalization(Joint, setdiff(Joint.var, V)); 30 | %M.val = M.val ./ sum(M.val); 31 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 32 | end 33 | -------------------------------------------------------------------------------- /4.Exact Inference/DecodedMarginalsToChars.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function DecodedMarginalsToChars(decodedMarginals) 4 | chars = 'abcdefghijklmnopqrstuvwxyz'; 5 | fprintf('%c', chars(decodedMarginals)); 6 | fprintf('\n'); 7 | end 8 | -------------------------------------------------------------------------------- /4.Exact Inference/EliminateVar.m: -------------------------------------------------------------------------------- 1 | % Function used in production of clique trees 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function [newF C E] = EliminateVar(F, C, E, Z) 6 | 7 | useFactors = []; 8 | scope = []; 9 | 10 | for i=1:length(F) 11 | if any(F(i).var == Z) 12 | useFactors = [useFactors i]; 13 | scope = union(scope, F(i).var); 14 | end 15 | end 16 | 17 | % update edge map 18 | % These represent the induced edges for the VE graph. 19 | for i=1:length(scope) 20 | for j=1:length(scope) 21 | 22 | if i~=j 23 | E(scope(i),scope(j)) = 1; 24 | E(scope(j),scope(i)) = 1; 25 | end 26 | end 27 | end 28 | 29 | E(Z,:) = 0; 30 | E(:,Z) = 0; 31 | 32 | 33 | nonUseFactors = setdiff(1:length(F),[useFactors]); 34 | 35 | for i=1:length(nonUseFactors) 36 | newF(i) = F(nonUseFactors(i)); 37 | newmap(nonUseFactors(i)) = i; 38 | end 39 | 40 | newFactor = struct('var', [], 'card', [], 'val', []); 41 | 42 | for i=1:length(useFactors) 43 | newFactor = FactorProduct(newFactor,F(useFactors(i))); 44 | end 45 | 46 | newFactor = FactorMarginalization(newFactor,Z); 47 | newF(length(nonUseFactors)+1) = newFactor; 48 | 49 | newC = length(C.nodes)+1; 50 | C.nodes{newC} = scope; 51 | C.factorInds(newC) = length(nonUseFactors)+1; 52 | for i=1:newC-1 53 | if ismember(C.factorInds(i), useFactors) 54 | C.edges(i,newC) = 1; 55 | C.edges(newC,i) = 1; 56 | C.factorInds(i) = 0; 57 | else 58 | if C.factorInds(i) ~= 0 59 | C.factorInds(i) = newmap(C.factorInds(i)); 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /4.Exact Inference/FactorMarginalization.m: -------------------------------------------------------------------------------- 1 | % FactorMarginalization Sums given variables out of a factor. 2 | % B = FactorMarginalization(A,V) computes the factor with the variables 3 | % in V summed out. The factor data structure has the following fields: 4 | % .var Vector of variables in the factor, e.g. [1 2 3] 5 | % .card Vector of cardinalities corresponding to .var, e.g. [2 2 2] 6 | % .val Value table of size prod(.card) 7 | % 8 | % The resultant factor should have at least one variable remaining or this 9 | % function will throw an error. 
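% (In this version the error is not actually raised: if every variable is
% summed out, the code below returns a factor with empty .var, .card and
% .val fields instead.)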
10 | % 11 | % See also FactorProduct.m, IndexToAssignment.m, and AssignmentToIndex.m 12 | 13 | function B = FactorMarginalization(A, V) 14 | 15 | % Check for empty factor or variable list 16 | if (isempty(A.var) || isempty(V)), B = A; return; end; 17 | 18 | % Construct the output factor over A.var \ V (the variables in A.var that are not in V) 19 | % and mapping between variables in A and B 20 | [B.var, mapB] = setdiff(A.var, V); 21 | 22 | % Check for empty resultant factor 23 | if isempty(B.var) 24 | %error('Error: Resultant factor has empty scope'); 25 | B.var = []; 26 | B.card = []; 27 | B.val = []; 28 | return; 29 | end; 30 | 31 | % Initialize B.card and B.val 32 | B.card = A.card(mapB); 33 | B.val = zeros(1,prod(B.card)); 34 | 35 | % Compute some helper indices 36 | % These will be very useful for calculating C.val 37 | % so make sure you understand what these lines are doing 38 | assignments = IndexToAssignment(1:length(A.val), A.card); 39 | indxB = AssignmentToIndex(assignments(:, mapB), B.card); 40 | 41 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 42 | % YOUR CODE HERE 43 | % Correctly populate the factor values of B 44 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 45 | for i = 1:length(A.val), 46 | B.val(indxB(i)) = B.val(indxB(i)) + A.val(i); 47 | end; 48 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 49 | end 50 | -------------------------------------------------------------------------------- /4.Exact Inference/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % GetValueOfAssignment Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GetValueOfAssignment(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GetValueOfAssignment(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SetValueOfAssignment.m and SampleFactors.m 11 | % 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | function v = GetValueOfAssignment(F, A, VO) 15 | 16 | if (nargin == 2), 17 | indx = AssignmentToIndex(A, F.card); 18 | else 19 | map = zeros(length(F.var), 1); 20 | for i = 1:length(F.var), 21 | map(i) = find(VO == F.var(i)); 22 | end; 23 | indx = AssignmentToIndex(A(map), F.card); 24 | end; 25 | 26 | v = F.val(indx); 27 | 28 | end 29 | -------------------------------------------------------------------------------- /4.Exact Inference/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = mod(floor(repmat(I(:) - 1, 1, length(D)) ./ repmat(cumprod([1, D(1:end - 1)]), length(I), 1)), ... 
16 | repmat(D, length(I), 1)) + 1; 17 | 18 | end 19 | -------------------------------------------------------------------------------- /4.Exact Inference/MaxDecoding.m: -------------------------------------------------------------------------------- 1 | %MAXDECODING Finds the best assignment for each variable from the marginals M 2 | %passed in. Returns A such that A(i) returns the index of the best 3 | %instantiation for variable i. 4 | % 5 | % For instance: Let's say we have two variables 1 and 2. 6 | % Marginals for 1 = [0.1, 0.3, 0.6] 7 | % Marginals for 2 = [0.92, 0.08] 8 | % A(1) = 3, A(2) = 1. 9 | % 10 | % M is a list of factors, where each factor is only over one variable. 11 | % 12 | % See also COMPUTEEXACTMARGINALSBP 13 | % 14 | % Copyright (C) Daphne Koller, Stanford University, 2012 15 | 16 | 17 | function A = MaxDecoding( M ) 18 | 19 | 20 | % initialization 21 | % you should set it to the correct value in your code 22 | A = []; 23 | A = zeros(1,length(M)); 24 | sp = 0; 25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | % YOUR CODE HERE 27 | % Compute the best assignment for variables in the network. 28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 29 | for i = 1:length(M) 30 | [sp,A(i)] = max(M(i).val); 31 | end 32 | end 33 | 34 | -------------------------------------------------------------------------------- /4.Exact Inference/PA4Sample.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/4.Exact Inference/PA4Sample.mat -------------------------------------------------------------------------------- /4.Exact Inference/PA4Test.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/4.Exact Inference/PA4Test.mat -------------------------------------------------------------------------------- /4.Exact Inference/ProgrammingAssignment4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/4.Exact Inference/ProgrammingAssignment4.pdf -------------------------------------------------------------------------------- /4.Exact Inference/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % SetValueOfAssignment Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SetValueOfAssignment(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SetValueOfAssignment(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 10 | % 11 | % Note that SetValueOfAssignment *does not modify* the factor F that is 12 | % passed into the function, but instead returns a modified factor with the 13 | % new value(s) for the specified assignment(s). This is why we have to 14 | % reassign F to the result of SetValueOfAssignment in the code snippets 15 | % shown above. 
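%
% For example (hypothetical factor F over variables [1 2]), the call
%   F = SetValueOfAssignment(F, [2 1], 0.25);
% stores 0.25 at the entry for the assignment X1 = 2, X2 = 1.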
16 | % 17 | % See also GetValueOfAssignment.m and SampleFactors.m 18 | % 19 | % Copyright (C) Daphne Koller, Stanford University, 2012 20 | 21 | 22 | function F = SetValueOfAssignment(F, A, v, VO) 23 | 24 | if (nargin == 3), 25 | indx = AssignmentToIndex(A, F.card); 26 | else 27 | map = zeros(length(F.var), 1); 28 | for i = 1:length(F.var), 29 | map(i) = find(VO == F.var(i)); 30 | end; 31 | indx = AssignmentToIndex(A(map), F.card); 32 | end; 33 | 34 | F.val(indx) = v; 35 | 36 | end 37 | -------------------------------------------------------------------------------- /4.Exact Inference/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Wed Apr 25 05:09:59 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /4.Exact Inference/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 | -------------------------------------------------------------------------------- /4.Exact Inference/test.m: -------------------------------------------------------------------------------- 1 | A = struct('var', [1 2 3], 'card' ,[2 3 4], 'val' ,[ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24]); 2 | 3 | B = FactorMaxMarginalization(A, [3]) 4 | % B.val: [19 20 21 22 23 24] 5 | 6 | C = struct('var', [1 2 3], 'card' ,[2 3 4], 'val' ,-1*[ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24]); 7 | 8 | D = FactorMaxMarginalization(C, [3]) 9 | % D.val: [-1 -2 -3 -4 -5 -6] 10 | 11 | E = FactorMaxMarginalization(A, [1 2]) 12 | % E.val: [6 12 18 24] 13 | 14 | F = FactorMaxMarginalization(A, []) 15 | % F.val: [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24] 16 | 17 | G = FactorMaxMarginalization(A, [2]) 18 | % G.val: [5 6 11 12 17 18 23 24] 19 | 20 | H = struct('var', [1 2 3], 'card' ,[2 3 4], 'val' ,[ 1 2 3 4 5 -6 7 8 9 -10 11 12 13 14 -15 16 17 18 -19 20 21 22 23 24]); 21 | 22 | I = FactorMaxMarginalization(H, [2]) 23 | % I.val: [5 4 11 12 17 18 23 24] 24 | 25 | -------------------------------------------------------------------------------- /5.Approximate Inference/.email.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/5.Approximate Inference/.email.swp -------------------------------------------------------------------------------- /5.Approximate Inference/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 
2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function I = AssignmentToIndex(A, D) 11 | 12 | D = D(:)'; % ensure that D is a row vector 13 | if (any(size(A) == 1)), 14 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 15 | else 16 | I = sum(repmat(cumprod([1, D(1:end - 1)]), size(A, 1), 1) .* (A - 1), 2) + 1; 17 | end; 18 | 19 | end 20 | -------------------------------------------------------------------------------- /5.Approximate Inference/CheckConvergence.m: -------------------------------------------------------------------------------- 1 | % CHECKCONVERGENCE Ascertain whether the messages indicate that we have converged 2 | % converged = CHECKCONVERGENCE(MNEW,MOLD) compares lists of messages MNEW 3 | % and MOLD. If the values listed in any message differs by more than the 4 | % value 'thresh' then we determine that convergence has not occured and 5 | % return converged=0, otherwise we have converged and converged=1 6 | % 7 | % The 'message' data structure is an array of structs with the following 3 fields: 8 | % -.var: the variables covered in this message 9 | % -.card: the cardinalities of those variables 10 | % -.val: the value of the message w.r.t. the message's variables 11 | % 12 | % MNEW and MOLD are the message where M(i,j).val gives the values associated 13 | % with the message from cluster i to cluster j. 14 | % 15 | % Copyright (C) Daphne Koller, Stanford University, 2012 16 | 17 | function converged = CheckConvergence(mNew, mOld); 18 | converged = true; 19 | thresh = 1.0e-6; 20 | %converged should be 1 if converged, 0 otherwise. 21 | 22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 23 | % YOUR CODE HERE 24 | % 25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | 27 | new = [mNew.val]; 28 | old = [mOld.val]; 29 | 30 | if sum(abs(new-old)>thresh)>0 31 | converged = 0; 32 | else 33 | converged = 1; 34 | end 35 | 36 | 37 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 38 | 39 | return; 40 | -------------------------------------------------------------------------------- /5.Approximate Inference/ComputeExactMarginalsBP.m: -------------------------------------------------------------------------------- 1 | %COMPUTEEXACTMARGINALSBP Runs exact inference and returns the marginals 2 | %over all the variables (if isMax == 0) or the max-marginals (if isMax == 1). 3 | % 4 | % M = COMPUTEEXACTMARGINALSBP(F, E, isMax) takes a list of factors F, 5 | % evidence E, and a flag isMax, runs exact inference and returns the 6 | % final marginals for the variables in the network. If isMax is 1, then 7 | % it runs exact MAP inference, otherwise exact inference (sum-prod). 8 | % It returns an array of size equal to the number of variables in the 9 | % network where M(i) represents the ith variable and M(i).val represents 10 | % the marginals of the ith variable. 
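%
% A typical call (the factor list F here is hypothetical) looks like
%   M = ComputeExactMarginalsBP(F, [], 0);
% after which M(i).val holds the normalized marginal distribution of variable i.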
11 | % 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | 15 | function M = ComputeExactMarginalsBP(F, E, isMax) 16 | 17 | % initialization 18 | % you should set it to the correct value in your code 19 | M = []; 20 | vars = unique([F.var]); 21 | 22 | N = length(vars); 23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 24 | % YOUR CODE HERE 25 | % 26 | % Implement Exact and MAP Inference. 27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 28 | CliqTree = CreateCliqueTree(F,E); 29 | P = CliqueTreeCalibrate(CliqTree,isMax); 30 | M = repmat(struct('var',[],'card',[],'val',[]),1,N); 31 | for i = 1:N 32 | for j = 1:length(CliqTree.cliqueList) 33 | inter = intersect(vars(i),CliqTree.cliqueList(j).var); 34 | if length(inter)==1 35 | V = setdiff(CliqTree.cliqueList(j).var,vars(i)); 36 | if isMax == 0 37 | M(i) = FactorMarginalization(P.cliqueList(j),V); 38 | else 39 | M(i) = FactorMaxMarginalization(P.cliqueList(j),V); 40 | end 41 | break; 42 | end 43 | end 44 | end 45 | if isMax==0 46 | for i=1:N 47 | M(i).val = M(i).val/sum(M(i).val); 48 | end 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /5.Approximate Inference/EdgeToFactorCorrespondence.m: -------------------------------------------------------------------------------- 1 | % Returns a matrix that maps edges to a list of factors in which both ends partake 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function E2F = EdgeToFactorCorrespondence(V, F) 6 | 7 | E2F = cell(length(V), length(V)); 8 | 9 | for f = 1:length(F) 10 | for i = 1:length(F(f).var) 11 | for j = i+1:length(F(f).var) 12 | u = F(f).var(i); 13 | v = F(f).var(j); 14 | E2F{u,v} = union(E2F{u,v}, f); 15 | E2F{v,u} = union(E2F{v,u}, f); 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /5.Approximate Inference/EliminateVar.m: -------------------------------------------------------------------------------- 1 | % Function used in production of clique trees 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function [newF C E] = EliminateVar(F, C, E, Z) 6 | 7 | useFactors = []; 8 | scope = []; 9 | 10 | for i=1:length(F) 11 | if any(F(i).var == Z) 12 | useFactors = [useFactors i]; 13 | scope = union(scope, F(i).var); 14 | end 15 | end 16 | 17 | % update edge map 18 | % These represent the induced edges for the VE graph. 
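% (For instance, if Z appears only in factors over {Z,A} and {Z,B}, the scope
% is {Z,A,B}, so the loop below adds the fill edge between A and B.)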
19 | for i=1:length(scope) 20 | for j=1:length(scope) 21 | 22 | if i~=j 23 | E(scope(i),scope(j)) = 1; 24 | E(scope(j),scope(i)) = 1; 25 | end 26 | end 27 | end 28 | 29 | E(Z,:) = 0; 30 | E(:,Z) = 0; 31 | 32 | 33 | nonUseFactors = setdiff(1:length(F),[useFactors]); 34 | 35 | for i=1:length(nonUseFactors) 36 | newF(i) = F(nonUseFactors(i)); 37 | newmap(nonUseFactors(i)) = i; 38 | end 39 | 40 | newFactor = struct('var', [], 'card', [], 'val', []); 41 | 42 | for i=1:length(useFactors) 43 | newFactor = FactorProduct(newFactor,F(useFactors(i))); 44 | end 45 | 46 | newFactor = FactorMarginalization(newFactor,Z); 47 | newF(length(nonUseFactors)+1) = newFactor; 48 | 49 | newC = length(C.nodes)+1; 50 | C.nodes{newC} = scope; 51 | C.factorInds(newC) = length(nonUseFactors)+1; 52 | for i=1:newC-1 53 | if ismember(C.factorInds(i), useFactors) 54 | C.edges(i,newC) = 1; 55 | C.edges(newC,i) = 1; 56 | C.factorInds(i) = 0; 57 | else 58 | if C.factorInds(i) ~= 0 59 | C.factorInds(i) = newmap(C.factorInds(i)); 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /5.Approximate Inference/ExtractMarginalsFromSamples.m: -------------------------------------------------------------------------------- 1 | %EXTRACTMARGINALSFROMSAMPLES 2 | % 3 | % ExtractMarginalsFromSamples takes in a probabilistic network G, a list of samples, and a set 4 | % of indices into samples that specify which samples to use in the computation of the 5 | % marginals. The marginals are then computed using this subset of samples and returned. 6 | % 7 | % Samples is a matrix where each row is the assignment to all variables in 8 | % the network (samples(i,j)=k means in sample i the jth variable takes label k) 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function M = ExtractMarginalsFromSamples(G, samples, collection_indx) 13 | 14 | collected_samples = samples(collection_indx, :); 15 | 16 | M = repmat(struct('var', 0, 'card', 0, 'val', []), length(G.names), 1); 17 | for i = 1:length(G.names) 18 | M(i).var = i; 19 | M(i).card = G.card(i); 20 | M(i).val = zeros(1, G.card(i)); 21 | end 22 | 23 | for s=1:size(collected_samples, 1) 24 | sample = collected_samples(s,:); 25 | for j=1:length(sample) 26 | M(j).val(sample(j)) = M(j).val(sample(j)) + 1/size(collected_samples,1); 27 | end 28 | end 29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 30 | -------------------------------------------------------------------------------- /5.Approximate Inference/GetNextClusters.m: -------------------------------------------------------------------------------- 1 | %GETNEXTCLUSTERS Takes in a cluster graph and returns the indices 2 | % of the nodes between which the next message should be passed. 3 | % 4 | % [i j] = GetNextClusters(P,Messages,oldMessages,m,useSmart) 5 | % 6 | % INPUT 7 | % P - our cluster graph 8 | % Messages - the current values of all messages in P 9 | % oldMessages - the previous values of all messages in P. Thus, 10 | % oldMessages(i,j) contains the value that Messages(i,j) contained 11 | % immediately before it was updated to its current value 12 | % m - the index of the message we are passing (ie, m=0 indicates we have 13 | % passed 0 messages prior to this one. 
m=5 means we've passed 5 messages 14 | % useSmart - indicates whether we should use the Naive or Smart message 15 | % passing order 16 | % 17 | % 18 | % Output [i j] 19 | % i = the origin of the m+1th message 20 | % j = the destination of the m+1th message 21 | % 22 | % Copyright (C) Daphne Koller, Stanford University, 2012 23 | 24 | function [i j] = GetNextClusters(P,Messages,oldMessages,m,useSmart) 25 | 26 | if(~exist('useSmart','var')||~useSmart) 27 | [i j] = NaiveGetNextClusters(P,m); 28 | else 29 | [i j] = SmartGetNextClusters(P,Messages,oldMessages,m); 30 | end 31 | -------------------------------------------------------------------------------- /5.Approximate Inference/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %GETVALUEOFASSIGNMENT Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GETVALUEOFASSIGNMENT(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GETVALUEOFASSIGNMENT(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SETVALUEOFASSIGNMENT 11 | % 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | function v = GetValueOfAssignment(F, A, VO); 15 | 16 | if (nargin == 2), 17 | indx = AssignmentToIndex(A, F.card); 18 | else 19 | map = zeros(length(F.var), 1); 20 | for i = 1:length(F.var), 21 | map(i) = find(VO == F.var(i)); 22 | end; 23 | indx = AssignmentToIndex(A(map), F.card); 24 | end; 25 | 26 | v = F.val(indx); 27 | -------------------------------------------------------------------------------- /5.Approximate Inference/GibbsTrans.m: -------------------------------------------------------------------------------- 1 | % GIBBSTRANS 2 | % 3 | % MCMC transition function that performs Gibbs sampling. 4 | % A - The current joint assignment. This should be 5 | % updated to be the next assignment 6 | % G - The network 7 | % F - List of all factors 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | function A = GibbsTrans(A, G, F) 12 | 13 | for i = 1:length(G.names) 14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 15 | % YOUR CODE HERE 16 | % For each variable in the network sample a new value for it given everything 17 | % else consistent with A. Then update A with this new value for the 18 | % variable. NOTE: Your code should call BlockLogDistribution(). 19 | % IMPORTANT: you should call the function randsample() exactly once 20 | % here, and it should be the only random function you call. 21 | % 22 | % Also, note that randsample() requires arguments in raw probability space 23 | % be sure that the arguments you pass to it meet that criteria 24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 25 | lll = exp(BlockLogDistribution(i,G,F,A)); 26 | A(i)=randsample(G.card(i),1,true,lll); 27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 28 | end 29 | -------------------------------------------------------------------------------- /5.Approximate Inference/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. 
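% (e.g., with D = [2 3], the index I = 3 maps to the assignment [1 2]).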
If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = mod(floor(repmat(I(:) - 1, 1, length(D)) ./ repmat(cumprod([1, D(1:end - 1)]), length(I), 1)), ... 16 | repmat(D, length(I), 1)) + 1; 17 | 18 | end 19 | -------------------------------------------------------------------------------- /5.Approximate Inference/LogProbOfJointAssignment.m: -------------------------------------------------------------------------------- 1 | % Returns the log probability of an assignment A in a distribution defined by factors F 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function logp = LogProbOfJointAssignment(F, A) 6 | 7 | % work in log-space to prevent underflow 8 | logp = 0.0; 9 | for i = 1:length(F) 10 | logp = logp + log(GetValueOfAssignment(F(i), A, 1:length(A))); 11 | end 12 | 13 | -------------------------------------------------------------------------------- /5.Approximate Inference/MHGibbsTrans.m: -------------------------------------------------------------------------------- 1 | % MHGIBBSTRANS 2 | % 3 | % MCMC Metropolis-Hastings transition function that 4 | % utilizes the Gibbs sampling distribution for proposals. 5 | % A - The current joint assignment. This should be 6 | % updated to be the next assignment 7 | % G - The network 8 | % F - List of all factors 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = MHGibbsTrans(A, G, F) 13 | 14 | % Draw proposed new state from Gibbs Transition distribution 15 | A_prop = GibbsTrans(A, G, F); 16 | 17 | % Compute acceptance probability 18 | p_acceptance = 1.0; 19 | lnum = LogProbOfJointAssignment(F,A_prop); 20 | lden = LogProbOfJointAssignment(F,A); 21 | 22 | p_acceptance = min(1,exp(lnum-lden)); 23 | % Accept or reject proposal 24 | if rand() < p_acceptance 25 | A = A_prop; 26 | end 27 | -------------------------------------------------------------------------------- /5.Approximate Inference/MHUniformTrans.m: -------------------------------------------------------------------------------- 1 | % MHUNIFORMTRANS 2 | % 3 | % MCMC Metropolis-Hastings transition function that 4 | % utilizes the uniform proposal distribution. 5 | % A - The current joint assignment. 
This should be 6 | % updated to be the next assignment 7 | % G - The network 8 | % F - List of all factors 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = MHUniformTrans(A, G, F) 13 | 14 | % Draw proposed new state from uniform distribution 15 | A_prop = ceil(rand(1, length(A)) .* G.card); 16 | 17 | p_acceptance = 0.0; 18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 19 | % YOUR CODE HERE 20 | % Compute acceptance probability 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | lnum = LogProbOfJointAssignment(F,A_prop); 23 | lden = LogProbOfJointAssignment(F,A); 24 | 25 | p_acceptance = min(1,exp(lnum-lden)); 26 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 27 | 28 | % Accept or reject proposal 29 | if rand() < p_acceptance 30 | % disp('Accepted'); 31 | A = A_prop; 32 | end 33 | -------------------------------------------------------------------------------- /5.Approximate Inference/NaiveGetNextClusters.m: -------------------------------------------------------------------------------- 1 | %NAIVEGETNEXTCLUSTERS Takes in a node adjacency matrix and returns the indices 2 | % of the nodes between which the m+1th message should be passed. 3 | % 4 | % Output [i j] 5 | % i = the origin of the m+1th message 6 | % j = the destination of the m+1th message 7 | % 8 | % This method should iterate over the messages in increasing order where 9 | % messages are sorted in ascending ordered by their destination index and 10 | % ties are broken based on the origin index. (note: this differs from PA4's 11 | % ordering) 12 | % 13 | % Thus, if m is 0, [i j] will be the pair of clusters with the lowest j value 14 | % and (of those pairs over this j) lowest i value as this is the 'first' 15 | % element in our ordering. 
(this difference is because matlab is 1-indexed) 16 | % 17 | % Copyright (C) Daphne Koller, Stanford University, 2012 18 | 19 | function [i, j] = NaiveGetNextClusters(P, m) 20 | 21 | i = size(P.clusterList,1); 22 | j = size(P.clusterList,1); 23 | N = length(P.clusterList); 24 | 25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | % YOUR CODE HERE 27 | % Find the indices between which to pass a cluster 28 | % The 'find' function may be useful 29 | % 30 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 31 | dd = zeros(N,N); 32 | k = 0; 33 | for i = 1:N 34 | for j = 1:N 35 | if P.edges(i,j)==1 36 | k+=1; 37 | dd(i,j)=k; 38 | end 39 | end 40 | end 41 | 42 | 43 | ind = rem(m+1,k); 44 | 45 | if ind != 0 46 | [j,i] = find(dd==ind); 47 | else 48 | [j,i] = find(dd==k); 49 | end 50 | 51 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 52 | 53 | end 54 | 55 | -------------------------------------------------------------------------------- /5.Approximate Inference/ProblemSet.m: -------------------------------------------------------------------------------- 1 | load('exampleIOPA5.mat'); 2 | [G,F] = ConstructToyNetwork(1.0,0.1); 3 | %G = exampleINPUT.t8a1{1}; 4 | %F = exampleINPUT.t8a2{1}; 5 | E = exampleINPUT.t8a3{1}; 6 | TransName = exampleINPUT.t8a4{1}; 7 | mix_time = exampleINPUT.t8a5{1}; 8 | num_samples = exampleINPUT.t8a6{1}; 9 | sampling_interval= exampleINPUT.t8a7{1}; 10 | A0 = exampleINPUT.t8a8{1}; 11 | ExactMarginals = ComputeExactMarginalsBP(F,[],0); 12 | A1 = ones(1,16); 13 | A2 = A1*2; 14 | [M,all_samples1] = MCMCInference(G,F,E,TransName,mix_time,num_samples,sampling_interval,A1); 15 | [M,all_samples2] = MCMCInference(G,F,E,TransName,mix_time,num_samples,sampling_interval,A2); 16 | samples_list{1} = all_samples1; 17 | samples_list{2} = all_samples2; 18 | 19 | %VisualizeMCMCMarginals(samples_list,1,G.card,F,100,ExactMarginals,"woohoo"); 20 | 21 | 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /5.Approximate Inference/ProgrammingAssignment5.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/5.Approximate Inference/ProgrammingAssignment5.pdf -------------------------------------------------------------------------------- /5.Approximate Inference/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %SETVALUEOFASSIGNMENT Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SETVALUEOFASSIGNMENT(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SETVALUEOFASSIGNMENT(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 
10 | % 11 | % See also GETVALUEOFASSIGNMENT 12 | % 13 | % Copyright (C) Daphne Koller, Stanford University, 2012 14 | 15 | function F = SetValueOfAssignment(F, A, v, VO); 16 | 17 | if (nargin == 3), 18 | indx = AssignmentToIndex(A, F.card); 19 | else 20 | map = zeros(length(F.var), 1); 21 | for i = 1:length(F.var), 22 | map(i) = find(VO == F.var(i)); 23 | end; 24 | indx = AssignmentToIndex(A(map), F.card); 25 | end; 26 | 27 | F.val(indx) = v; 28 | -------------------------------------------------------------------------------- /5.Approximate Inference/SmartGetNextClusters.m: -------------------------------------------------------------------------------- 1 | %SMARTGETNEXTCLUSTERS Takes in a cluster graph and returns the indices 2 | % of the nodes between which the next message should be passed. 3 | % 4 | % [i j] = SmartGetNextClusters(P,Messages,oldMessages,m,useSmart) 5 | % 6 | % INPUT 7 | % P - our cluster graph 8 | % Messages - the current values of all messages in P 9 | % oldMessages - the previous values of all messages in P. Thus, 10 | % oldMessages(i,j) contains the value that Messages(i,j) contained 11 | % immediately before it was updated to its current value 12 | % m - the index of the message we are passing (ie, m=0 indicates we have 13 | % passed 0 messages prior to this one. m=5 means we've passed 5 messages 14 | % 15 | % Implement any message passing routine that will converge in cases that the 16 | % naive routine would also converge. You may also change the inputs to 17 | % this function, but note you may also have to change GetNextClusters.m as 18 | % well. 19 | % 20 | % Copyright (C) Daphne Koller, Stanford University, 2012 21 | 22 | % Here we provide an implementation that might improve the convergence. 23 | % You may experiment with this and see how this changes the number of 24 | % iterations before convergence. 
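%
% (GetNextClusters.m dispatches here when its useSmart flag is set; otherwise
% it falls back to NaiveGetNextClusters.m.)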
25 | 26 | function [i j] = SmartGetNextClusters(P,Messages,oldMessages,m) 27 | [messageFromIndx messageToIndx] = find(P.edges); 28 | m = mod(m+1,length(messageFromIndx))+1; 29 | i = messageFromIndx(m); 30 | j = messageToIndx(m); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /5.Approximate Inference/TestToy.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | rand('seed', 1); 4 | 5 | % Construct the toy network 6 | [toy_network, toy_factors] = ConstructToyNetwork(1,0.2); 7 | toy_evidence = zeros(1, length(toy_network.names)); 8 | %toy_clique_tree = CreateCliqueTree(toy_factors, []); 9 | %toy_cluster_graph = CreateClusterGraph(toy_factors,[]); 10 | 11 | % Exact Inference 12 | ExactM = ComputeExactMarginalsBP(toy_factors, toy_evidence, 0); 13 | figure, VisualizeToyImageMarginals(toy_network, ExactM,1,'exact'); 14 | 15 | % Comment this in to run Approximate Inference on the toy network 16 | % Approximate Inference 17 | % % ApproxM = ApproxInference(toy_cluster_graph, toy_factors, toy_evidence); 18 | % figure, VisualizeToyImageMarginals(toy_network, ApproxM); 19 | 20 | 21 | 22 | % MCMC Inference 23 | transition_names = {'Gibbs', 'MHUniform', 'MHGibbs', 'MHSwendsenWang1', 'MHSwendsenWang2'}; 24 | 25 | for j = 1:length(transition_names) 26 | samples_list = {}; 27 | 28 | num_chains_to_run = 1; 29 | for i = 1:num_chains_to_run 30 | % Random Initialization 31 | A0 = ceil(rand(1, length(toy_network.names)) .* toy_network.card); 32 | 33 | % Initialization to all ones 34 | % A0 = i * ones(1, length(toy_network.names)); 35 | 36 | [M, all_samples] = ... 37 | MCMCInference(toy_network, toy_factors, toy_evidence, transition_names{j}, 0, 500, 1, A0); 38 | samples_list{i} = all_samples; 39 | figure, VisualizeToyImageMarginals(toy_network, M, i, transition_names{j}); 40 | end 41 | 42 | vis_vars = [3]; 43 | VisualizeMCMCMarginals(samples_list, vis_vars, toy_network.card(vis_vars), toy_factors, ... 
44 | 500, ExactM(vis_vars),transition_names{j}); 45 | disp(['Displaying results for MCMC with transition ', transition_names{j}]); 46 | disp(['Hit enter to continue']); 47 | pause; 48 | end 49 | -------------------------------------------------------------------------------- /5.Approximate Inference/VariableToFactorCorrespondence.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function V2F = VariableToFactorCorrespondence(V, F) 4 | 5 | V2F = cell(length(V), 1); 6 | 7 | for f = 1:length(F) 8 | for i = 1:length(F(f).var) 9 | v = F(f).var(i); 10 | V2F{v} = union(V2F{v}, f); 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /5.Approximate Inference/VisualizeToyImageMarginals.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function VisualizeToyImageMarginals(G, M, chain_num, tname) 4 | 5 | n = sqrt(length(G.names)); 6 | marginal_vector = []; 7 | for i = 1:length(M) 8 | marginal_vector(end+1) = M(i).val(1); 9 | end 10 | clims = [0, 1]; 11 | imagesc(reshape(marginal_vector, n, n), clims); 12 | colormap(gray); 13 | title(['Marginals for chain ' num2str(chain_num) ' ' tname]) 14 | -------------------------------------------------------------------------------- /5.Approximate Inference/email: -------------------------------------------------------------------------------- 1 | garaoanv@gmail.com 2 | -------------------------------------------------------------------------------- /5.Approximate Inference/exampleIOPA5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/5.Approximate Inference/exampleIOPA5.mat -------------------------------------------------------------------------------- /5.Approximate Inference/octave-core: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/5.Approximate Inference/octave-core -------------------------------------------------------------------------------- /5.Approximate Inference/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Mon Apr 30 21:45:10 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /5.Approximate Inference/rand.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [val] = rand(arg1,arg2); 4 | val = -1; 5 | gran = 1e6; 6 | 7 | if(nargin>0&&ischar(arg1)) 8 | if(nargin==1) 9 | arg2=1; 10 | end 11 | randi(arg1,arg2); 12 | val=0; 13 | else 14 | if(nargin==0) 15 | val = randi(1e6)/(1e6); 16 | else 17 | if(nargin==1) 18 | if(length(arg1)>1) 19 | arg2=arg1(2); 20 | arg1=arg1(1); 21 | else 22 | arg2=arg1; 23 | end 24 | end 25 | val = randi(1e6,arg1,arg2)/1e6; 26 | end 27 | end 28 | -------------------------------------------------------------------------------- 
/5.Approximate Inference/randi.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [num mv] = randi(arg1,arg2,arg3) 4 | 5 | num = -1; 6 | persistent x_i; 7 | persistent p1; 8 | persistent p2; 9 | if(isempty(x_i)) 10 | x_i = 1; 11 | p1 = 160481183; 12 | p2 = 179424673; 13 | end 14 | mv=p2; 15 | if(ischar(arg1)==1) 16 | if(strcmp(arg1,'seed')) 17 | if(nargin>1) 18 | x_i = arg2; 19 | num = 0; 20 | else 21 | x_i=1; 22 | num=0; 23 | end 24 | else 25 | 'Unrecognized option. The only accepted option to this random library is -seed-.' 26 | end 27 | else 28 | if(arg1>p2) 29 | 'Max too high, range cutoff at 1 million' 30 | end 31 | if(nargin>1) 32 | if(nargin==2) 33 | arg3=arg2; 34 | end 35 | num = zeros(arg2,arg3); 36 | for i=1:arg2 37 | for j = 1:arg3 38 | x_i = mod(x_i*(p1+1)+p1,p2); 39 | num(i,j) = mod(x_i,arg1)+1; 40 | end 41 | end 42 | else 43 | x_i = mod(x_i*(p1+1)+p1,p2); 44 | num=mod(x_i,arg1)+1; 45 | end 46 | end 47 | 48 | -------------------------------------------------------------------------------- /5.Approximate Inference/randsample.m: -------------------------------------------------------------------------------- 1 | %randsample(V,n,true,distribution) returns a set of n values sampled 2 | % at random from the integers 1 through V with replacement using distribution 3 | % 'distribution' 4 | % 5 | % replacing true with false causes sampling w/out replacement 6 | % omitting the distribution causes a default to the uniform distribution 7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function [v] = randsample(vals,numSamp,replace,weightIncrements) 11 | 12 | vals = vals(:); 13 | if(length(vals)==1) 14 | maxval = vals; 15 | vals = 1:maxval; 16 | else 17 | maxval = length(vals); 18 | end 19 | 20 | if(exist('replace','var')~=1) 21 | replace = true; 22 | end 23 | if(exist('weightIncrements','var')~=1) 24 | weightIncrements = (1/maxval)*ones(maxval,1); 25 | weights = (1/maxval):(1/maxval):1; 26 | else 27 | weightIncrements = weightIncrements(:)/sum(weightIncrements(:)); 28 | weights = zeros(size(weightIncrements)); 29 | weights(1) = weightIncrements(1); 30 | for i = 2:length(weightIncrements) 31 | weights(i) = weightIncrements(i)+weights(i-1); 32 | end 33 | end 34 | 35 | weights = [0; weights(:)]; 36 | 37 | %now do the sampling 38 | v = []; 39 | probs = rand(numSamp,1); 40 | for i=1:numSamp 41 | curInd = find((weights(1:end-1)<=probs(i))&(weights(2:end)>=probs(i))); 42 | v(end+1)=vals(curInd); 43 | if(replace~=true) 44 | vals(curInd)=[]; 45 | weightIncrements(curInd)=[]; 46 | weightIncrements = weightIncrements(:)/sum(weightIncrements(:)); 47 | weights = zeros(size(weightIncrements)); 48 | for i = 2:length(weightIncrements) 49 | weights(i) = weightIncrements(i)+weights(i-1); 50 | end 51 | end 52 | end 53 | 54 | 55 | end 56 | -------------------------------------------------------------------------------- /5.Approximate Inference/smooth.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [YY] = smooth(Y,window) 4 | if(~exist('window','var')) 5 | window =5; 6 | end 7 | if(mod(window,2)==0) 8 | window = window+1; 9 | end 10 | mid = (window+1)/2; 11 | 12 | len = length(Y); 13 | Smoother =zeros(len); 14 | for i=1:len 15 | dev = min([mid-1 min([i-1 len-i])]); 16 | % dev 17 | Smoother(i,(i-dev):(i+dev))=1; 18 | end 19 | if(size(Y,2)>size(Y,1)) 20 | Y = Y'; 
21 | end 22 | 23 | col = sum(Smoother,2); 24 | YY = Smoother*Y; 25 | YY = YY./col; 26 | -------------------------------------------------------------------------------- /5.Approximate Inference/submit_input.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/5.Approximate Inference/submit_input.mat -------------------------------------------------------------------------------- /6.Decision Making/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | function I = AssignmentToIndex(A, D) 12 | 13 | D = D(:)'; % ensure that D is a row vector 14 | if (any(size(A) == 1)), 15 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 16 | else 17 | I = sum(repmat(cumprod([1, D(1:end - 1)]), size(A, 1), 1) .* (A - 1), 2) + 1; 18 | end; 19 | 20 | end 21 | -------------------------------------------------------------------------------- /6.Decision Making/CPDFromFactor.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [CPD] = CPDFromFactor(F, Y) 4 | nvars = length(F.var); 5 | 6 | % Reorder the var, card and val fields of Fnew so that the last var is the 7 | % child variable. 8 | Fnew = F; 9 | YIndexInF = find(F.var == Y); 10 | this.card = F.card( YIndexInF ); 11 | 12 | % Parents is a dummy factor 13 | Parents.var = F.var(find(F.var ~= Y)); 14 | Parents.card = F.card(find(F.var ~= Y)); 15 | Parents.val = ones(prod(Parents.card),1); 16 | 17 | Fnew.var = [Parents.var Y]; 18 | Fnew.card = [Parents.card this.card]; 19 | for i=1:length(F.val) 20 | A = IndexToAssignment(i, F.card); 21 | y = A(YIndexInF); 22 | A( YIndexInF ) = []; 23 | A = [A y]; 24 | j = AssignmentToIndex(A, Fnew.card); 25 | Fnew.val(j) = F.val(i); 26 | end 27 | 28 | % For each assignment of Parents... 29 | for i=1:length(Parents.val) 30 | 31 | A = IndexToAssignment(i, Parents.card); 32 | SumValuesForA = 0; 33 | for j=1:this.card 34 | A_augmented = [A j]; 35 | idx = AssignmentToIndex(A_augmented, Fnew.card); 36 | SumValuesForA = SumValuesForA + Fnew.val( idx ); 37 | end 38 | 39 | for j=1:this.card 40 | A_augmented = [A j]; 41 | idx = AssignmentToIndex(A_augmented, Fnew.card); 42 | Fnew.val( idx ) = Fnew.val( idx ) / SumValuesForA; 43 | end 44 | 45 | end 46 | 47 | CPD = Fnew; -------------------------------------------------------------------------------- /6.Decision Making/CalculateExpectedUtilityFactor.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function EUF = CalculateExpectedUtilityFactor( I ) 4 | 5 | % Inputs: An influence diagram I with a single decision node and a single utility node. 6 | % I.RandomFactors = list of factors for each random variable. These are CPDs, with 7 | % the child variable = D.var(1) 8 | % I.DecisionFactors = factor for the decision node. 9 | % I.UtilityFactors = list of factors representing conditional utilities. 
10 | % Return value: A factor over the scope of the decision rule D from I that 11 | % gives the conditional utility given each assignment for D.var 12 | % 13 | % Note - We assume I has a single decision node and utility node. 14 | EUF = []; 15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 | % 17 | % YOUR CODE HERE... 18 | % 19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 20 | 21 | facts = [I.RandomFactors,I.UtilityFactors]; 22 | elimvars = setdiff(unique([facts.var]),I.DecisionFactors(1).var); 23 | facts = VariableElimination(facts,elimvars); 24 | EUF = facts(1); 25 | for i = 2:length(facts) 26 | EUF = FactorProduct(EUF,facts(i)); 27 | end 28 | EUF = ReOrderVariables(EUF,I.DecisionFactors(1).var); 29 | 30 | end 31 | -------------------------------------------------------------------------------- /6.Decision Making/EliminateVar.m: -------------------------------------------------------------------------------- 1 | % Function used in production of clique trees 2 | % F = list of factors 3 | % E = adjacency matrix for variables 4 | % Z = variable to eliminate 5 | % 6 | % Copyright (C) Daphne Koller, Stanford University, 2012 7 | 8 | function [newF E] = EliminateVar(F, E, Z) 9 | 10 | % Index of factors to multiply (b/c they contain Z) 11 | useFactors = []; 12 | 13 | % Union of scopes of factors to multiply 14 | scope = []; 15 | 16 | for i=1:length(F) 17 | if any(F(i).var == Z) 18 | useFactors = [useFactors i]; 19 | scope = union(scope, F(i).var); 20 | end 21 | end 22 | 23 | % update edge map 24 | % These represent the induced edges for the VE graph. 25 | for i=1:length(scope) 26 | for j=1:length(scope) 27 | 28 | if i~=j 29 | E(scope(i),scope(j)) = 1; 30 | E(scope(j),scope(i)) = 1; 31 | end 32 | end 33 | end 34 | 35 | % Remove all adjacencies for the variable to be eliminated 36 | E(Z,:) = 0; 37 | E(:,Z) = 0; 38 | 39 | 40 | % nonUseFactors = list of factors (not indices!) which are passed through 41 | % in this round 42 | nonUseFactors = setdiff(1:length(F),[useFactors]); 43 | 44 | for i=1:length(nonUseFactors) 45 | 46 | % newF = list of factors we will return 47 | newF(i) = F(nonUseFactors(i)); 48 | 49 | % newmap = ? 50 | newmap(nonUseFactors(i)) = i; 51 | 52 | end 53 | 54 | % Multiply factors which involve Z -> newFactor 55 | newFactor = struct('var', [], 'card', [], 'val', []); 56 | for i=1:length(useFactors) 57 | newFactor = FactorProduct(newFactor,F(useFactors(i))); 58 | end 59 | 60 | newFactor = FactorMarginalization(newFactor,Z); 61 | newF(length(nonUseFactors)+1) = newFactor; 62 | 63 | -------------------------------------------------------------------------------- /6.Decision Making/FullI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/6.Decision Making/FullI.mat -------------------------------------------------------------------------------- /6.Decision Making/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % GetValueOfAssignment Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GetValueOfAssignment(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GetValueOfAssignment(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. 
The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SetValueOfAssignment.m and SampleFactors.m 11 | % 12 | % Copyright (C) Daphne Koller, Stanford University, 2012 13 | 14 | function v = GetValueOfAssignment(F, A, VO) 15 | 16 | if (nargin == 2), 17 | indx = AssignmentToIndex(A, F.card); 18 | else 19 | map = zeros(length(F.var), 1); 20 | for i = 1:length(F.var), 21 | map(i) = find(VO == F.var(i)); 22 | end; 23 | indx = AssignmentToIndex(A(map), F.card); 24 | end; 25 | 26 | v = F.val(indx); 27 | 28 | end 29 | -------------------------------------------------------------------------------- /6.Decision Making/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = mod(floor(repmat(I(:) - 1, 1, length(D)) ./ repmat(cumprod([1, D(1:end - 1)]), length(I), 1)), ... 16 | repmat(D, length(I), 1)) + 1; 17 | 18 | end 19 | -------------------------------------------------------------------------------- /6.Decision Making/MultipleUtilityI.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/6.Decision Making/MultipleUtilityI.mat -------------------------------------------------------------------------------- /6.Decision Making/NormalizeCPDFactors.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [F] = NormalizeCPDFactors(F) 4 | 5 | NumFactors = length(F); 6 | for i=1:NumFactors 7 | 8 | f = F(i); 9 | dummy.var = f.var(2:end); 10 | dummy.card = f.card(2:end); 11 | dummy.val = zeros(1,prod(dummy.card)); 12 | 13 | % Now for each joint assignment to parents, renormalize the 14 | % values for that joint assignment to sum to 1. 
15 | 16 | for a=1:length(dummy.val) 17 | A = IndexToAssignment(a, dummy.card); 18 | Indices = []; 19 | for d=1:f.card(end) 20 | Indices = [Indices AssignmentToIndex([d A], f.card);]; 21 | end 22 | if sum(f.val(Indices)) == 0 23 | % Set f.val(Indices) to 0 24 | f.val(Indices) = 0; 25 | else 26 | f.val(Indices) = f.val(Indices) / sum(f.val(Indices)); 27 | end 28 | end 29 | 30 | f.val(find(isnan(f.val))) = 0; 31 | 32 | F(i) = f; 33 | 34 | end 35 | 36 | -------------------------------------------------------------------------------- /6.Decision Making/NormalizeFactorValues.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function F = NormalizeFactorValues( F ) 4 | 5 | for i=1:length(F) 6 | ThisFactor = F(i); 7 | ThisFactor.val = ThisFactor.val / sum(ThisFactor.val); 8 | F(i) = ThisFactor; 9 | end 10 | -------------------------------------------------------------------------------- /6.Decision Making/OptimizeLinearExpectations.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [MEU OptimalDecisionRule] = OptimizeLinearExpectations( I ) 4 | % Inputs: An influence diagram I with a single decision node and one or more utility nodes. 5 | % I.RandomFactors = list of factors for each random variable. These are CPDs, with 6 | % the child variable = D.var(1) 7 | % I.DecisionFactors = factor for the decision node. 8 | % I.UtilityFactors = list of factors representing conditional utilities. 9 | % Return value: the maximum expected utility of I and an optimal decision rule 10 | % (represented again as a factor) that yields that expected utility. 11 | % You may assume that there is a unique optimal decision. 12 | % 13 | % This is similar to OptimizeMEU except that we will have to account for 14 | % multiple utility factors. We will do this by calculating the expected 15 | % utility factors and combining them, then optimizing with respect to that 16 | % combined expected utility factor. 17 | MEU = []; 18 | OptimalDecisionRule = []; 19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 20 | % 21 | % YOUR CODE HERE 22 | % 23 | % A decision rule for D assigns, for each joint assignment to D's parents, 24 | % probability 1 to the best option from the EUF for that joint assignment 25 | % to D's parents, and 0 otherwise. Note that when D has no parents, it is 26 | % a degenerate case we can handle separately for convenience. 27 | % 28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 29 | 30 | Uti = I.UtilityFactors(1); 31 | for i = 2:length(I.UtilityFactors) 32 | Uti = FactorSum(Uti,I.UtilityFactors(i)); 33 | end 34 | I.UtilityFactors = Uti; 35 | [MEU OptimalDecisionRule] = OptimizeMEU(I); 36 | 37 | 38 | end 39 | -------------------------------------------------------------------------------- /6.Decision Making/OptimizeMEU.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [MEU OptimalDecisionRule] = OptimizeMEU( I ) 4 | 5 | % Inputs: An influence diagram I with a single decision node and a single utility node. 6 | % I.RandomFactors = list of factors for each random variable. These are CPDs, with 7 | % the child variable = D.var(1) 8 | % I.DecisionFactors = factor for the decision node. 
9 | % I.UtilityFactors = list of factors representing conditional utilities. 10 | % Return value: the maximum expected utility of I and an optimal decision rule 11 | % (represented again as a factor) that yields that expected utility. 12 | 13 | % We assume I has a single decision node. 14 | % You may assume that there is a unique optimal decision. 15 | D = I.DecisionFactors(1); 16 | DF = CalculateExpectedUtilityFactor(I); 17 | MEU = 0; 18 | D.val = D.val*0; 19 | ca=D.card(1); 20 | l = length(D.val)/ca; 21 | for i = 1:l 22 | start = i*ca-ca+1; 23 | endd = i*ca; 24 | [x,y] = max(DF.val(start:endd)); 25 | MEU+=x; 26 | D.val(start+y-1) = 1; 27 | end 28 | OptimalDecisionRule = D; 29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 30 | % 31 | % YOUR CODE HERE... 32 | % 33 | % Some other information that might be useful for some implementations 34 | % (note that there are multiple ways to implement this): 35 | % 1. It is probably easiest to think of two cases - D has parents and D 36 | % has no parents. 37 | % 2. You may find the Matlab/Octave function setdiff useful. 38 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 39 | 40 | 41 | end 42 | -------------------------------------------------------------------------------- /6.Decision Making/OptimizeWithJointUtility.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [MEU OptimalDecisionRule] = OptimizeWithJointUtility( I ) 4 | % Inputs: An influence diagram I with a single decision node and one or more utility nodes. 5 | % I.RandomFactors = list of factors for each random variable. These are CPDs, with 6 | % the child variable = D.var(1) 7 | % I.DecisionFactors = factor for the decision node. 8 | % I.UtilityFactors = list of factors representing conditional utilities. 9 | % Return value: the maximum expected utility of I and an optimal decision rule 10 | % (represented again as a factor) that yields that expected utility. 11 | % You may assume that there is a unique optimal decision. 12 | 13 | % This is similar to OptimizeMEU except that we must find a way to 14 | % combine the multiple utility factors. Note: This can be done with very 15 | % little code. 16 | 17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 18 | % 19 | % YOUR CODE HERE 20 | % 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | Uti = I.UtilityFactors(1); 23 | for i = 2:length(I.UtilityFactors) 24 | Uti = FactorSum(Uti,I.UtilityFactors(i)); 25 | end 26 | I.UtilityFactors = Uti; 27 | [MEU OptimalDecisionRule] = OptimizeMEU(I); 28 | 29 | end 30 | -------------------------------------------------------------------------------- /6.Decision Making/PrintFactor.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [] = PrintFactor(F) 4 | % Pretty print the factor F. 5 | % The first row lists the variables and subsequent rows are 6 | % the joint assignment and their associated factor value in 7 | % the last column. 
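%
% For a factor over variables [1 2] with cardinalities [2 2] and values
% [0.1 0.2 0.3 0.4] (hypothetical), the printed table looks like:
%   1   2
%   1   1   0.100000
%   2   1   0.200000
%   1   2   0.300000
%   2   2   0.400000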
8 | 9 | for i=1:length(F.var) 10 | fprintf(1, '%d\t', F.var(i)); 11 | end 12 | fprintf(1, '\n'); 13 | 14 | for i=1:length(F.val) 15 | A = IndexToAssignment(i, F.card); 16 | for j=1:length(A) 17 | fprintf(1, '%d\t', A(j)); 18 | end 19 | fprintf(1, '%f\n', F.val(i)); 20 | end 21 | 22 | 23 | end -------------------------------------------------------------------------------- /6.Decision Making/ProgrammingAssignment6.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/6.Decision Making/ProgrammingAssignment6.pdf -------------------------------------------------------------------------------- /6.Decision Making/ReOrderVariables.m: -------------------------------------------------------------------------------- 1 | function FOut = ReOrderVariables( FIn, NewVarOrder ) 2 | 3 | %Check that new variable order is OK: 4 | [isMem MAP] = ismember( NewVarOrder, FIn.var ); 5 | assert( length(FIn.var) == length(NewVarOrder), 'Different # variables!' ); 6 | assert( all(isMem), 'Some of new variables not in factor!' ); 7 | assert( length(NewVarOrder) == length(unique(NewVarOrder)), 'Some variables repeated!'); 8 | 9 | %Set up variables and cardinality; preallocate 10 | FOut.var = NewVarOrder; 11 | FOut.card = FIn.card( MAP ); 12 | FOut.val = ones(1,prod(FIn.card)); 13 | 14 | for i=1:prod(FIn.card) 15 | %Loop through every value in FIn, and copy to FOut as approprate 16 | OldAssignment = IndexToAssignment(i, FIn.card); 17 | NewAssignment = OldAssignment( MAP ); 18 | 19 | newIdx = AssignmentToIndex( NewAssignment, FOut.card ); 20 | FOut.val( newIdx ) = FIn.val( i ); 21 | end 22 | end 23 | 24 | -------------------------------------------------------------------------------- /6.Decision Making/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | % SetValueOfAssignment Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SetValueOfAssignment(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SetValueOfAssignment(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 10 | % 11 | % Note that SetValueOfAssignment *does not modify* the factor F that is 12 | % passed into the function, but instead returns a modified factor with the 13 | % new value(s) for the specified assignment(s). This is why we have to 14 | % reassign F to the result of SetValueOfAssignment in the code snippets 15 | % shown above. 
16 | % 17 | % See also GetValueOfAssignment.m and SampleFactors.m 18 | % 19 | % Copyright (C) Daphne Koller, Stanford University, 2012 20 | 21 | function F = SetValueOfAssignment(F, A, v, VO) 22 | 23 | if (nargin == 3), 24 | indx = AssignmentToIndex(A, F.card); 25 | else 26 | map = zeros(length(F.var), 1); 27 | for i = 1:length(F.var), 28 | map(i) = find(VO == F.var(i)); 29 | end; 30 | indx = AssignmentToIndex(A(map), F.card); 31 | end; 32 | 33 | F.val(indx) = v; 34 | 35 | end 36 | -------------------------------------------------------------------------------- /6.Decision Making/SimpleCalcExpectedUtility.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function EU = SimpleCalcExpectedUtility(I) 4 | 5 | % Inputs: An influence diagram, I (as described in the writeup). 6 | % I.RandomFactors = list of factors for each random variable. These are CPDs, with 7 | % the child variable = D.var(1) 8 | % I.DecisionFactors = factor for the decision node. 9 | % I.UtilityFactors = list of factors representing conditional utilities. 10 | % Return Value: the expected utility of I 11 | % Given a fully instantiated influence diagram with a single utility node and decision node, 12 | % calculate and return the expected utility. Note - assumes that the decision rule for the 13 | % decision node is fully assigned. 14 | 15 | % In this function, we assume there is only one utility node. 16 | F = [I.RandomFactors I.DecisionFactors]; 17 | U = I.UtilityFactors(1); 18 | EU = []; 19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 20 | % 21 | % YOUR CODE HERE 22 | % 23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 24 | Fnew = VariableElimination(F,setdiff(unique([F.var]),U.var)); 25 | %Fnew = F; 26 | out = Fnew(1); 27 | for i = 2:length(Fnew) 28 | out = FactorProduct(out,Fnew(i)); 29 | end 30 | 31 | hha = U; 32 | hha.val = ones(1,length(U.val)); 33 | out = FactorProduct(hha,out); 34 | U = FactorProduct(hha,U); 35 | %out = FactorMarginalization(out,setdiff(unique([F.var]),U.var)); 36 | 37 | EU = [sum(out.val.*U.val)]; 38 | 39 | 40 | 41 | end 42 | -------------------------------------------------------------------------------- /6.Decision Making/SimpleOptimizeMEU.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford University, 2012 2 | 3 | function [MEU OptimalDecisionRule] = SimpleOptimizeMEU(I) 4 | 5 | % We assume there is only one decision rule in this function. 6 | D = I.DecisionFactors(1); 7 | 8 | PossibleDecisionRules = EnumerateDecisionRules(D); 9 | 10 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 11 | % 12 | % YOUR CODE HERE 13 | % 1. You must find which of the decision rules you have enumerated has the 14 | % highest expected utility. You should use your implementation of 15 | % SimpleCalcExpectedUtility from P1. Set the values of MEU and OptimalDecisionRule 16 | % to the best achieved expected utility and the corresponding decision 17 | % rule respectively. 
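%
% One possible sketch, left as a comment since the assignment asks for your
% own implementation (it assumes EnumerateDecisionRules returns an array of
% fully assigned decision-rule factors):
%   MEU = -inf;
%   for d = 1:length(PossibleDecisionRules)
%       I.DecisionFactors(1) = PossibleDecisionRules(d);
%       EU = SimpleCalcExpectedUtility(I);
%       if EU > MEU
%           MEU = EU;
%           OptimalDecisionRule = PossibleDecisionRules(d);
%       end
%   end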
18 | % 19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 20 | 21 | 22 | end 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /6.Decision Making/TestI0.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/6.Decision Making/TestI0.mat -------------------------------------------------------------------------------- /6.Decision Making/VariableElimination.m: -------------------------------------------------------------------------------- 1 | % VariableElimination takes in a list of factors F and a list of variables to eliminate 2 | % and returns the resulting factor after running sum-product to eliminate 3 | % the given variables. 4 | % 5 | % Fnew = VariableElimination(F, Z) 6 | % F = list of factors 7 | % Z = list of variables to eliminate 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | function Fnew = VariableElimination(F, Z) 12 | 13 | % List of all variables 14 | V = unique([F(:).var]); 15 | 16 | % Setting up the adjacency matrix. 17 | edges = zeros(length(V)); 18 | 19 | for i = 1:length(F) 20 | for j = 1:length(F(i).var) 21 | for k = 1:length(F(i).var) 22 | edges(F(i).var(j), F(i).var(k)) = 1; 23 | end 24 | end 25 | end 26 | 27 | variablesConsidered = 0; 28 | 29 | while variablesConsidered < length(Z) 30 | 31 | % Using Min-Neighbors where you prefer to eliminate the variable that has 32 | % the smallest number of edges connected to it. 33 | % Everytime you enter the loop, you look at the state of the graph and 34 | % pick the variable to be eliminated. 35 | bestVariable = 0; 36 | bestScore = inf; 37 | for i=1:length(Z) 38 | idx = Z(i); 39 | score = sum(edges(idx,:)); 40 | if score > 0 && score < bestScore 41 | bestScore = score; 42 | bestVariable = idx; 43 | end 44 | end 45 | 46 | variablesConsidered = variablesConsidered + 1; 47 | [F, edges] = EliminateVar(F, edges, bestVariable); 48 | 49 | end 50 | 51 | Fnew = F; 52 | 53 | -------------------------------------------------------------------------------- /6.Decision Making/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Thu May 03 08:40:29 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /6.Decision Making/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 
7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/.ComputeInitialPotentials.m.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/.ComputeInitialPotentials.m.swp -------------------------------------------------------------------------------- /7.CRF Learning for OCR/.CreateCliqueTree.m.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/.CreateCliqueTree.m.swp -------------------------------------------------------------------------------- /7.CRF Learning for OCR/.InstanceNegLogLikelihood.m.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/.InstanceNegLogLikelihood.m.swp -------------------------------------------------------------------------------- /7.CRF Learning for OCR/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m 8 | % 9 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 10 | 11 | function I = AssignmentToIndex(A, D) 12 | 13 | D = D(:)'; % ensure that D is a row vector 14 | if (any(size(A) == 1)), 15 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 16 | else 17 | I = sum(bsxfun(@times, A - 1, cumprod([1, D(1:end - 1)])), 2) + 1; 18 | end; 19 | 20 | end 21 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ComputeConditionedSingletonFeatures.m: -------------------------------------------------------------------------------- 1 | function features = ComputeConditionedSingletonFeatures (X, modelParams) 2 | % Creatures feature structs for indicator features on single values on y. 3 | % They are "conditioned," since different elements of the weight vector are 4 | % used depending on the actual observation. 5 | % 6 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 7 | 8 | [len, featureSize] = size(X); 9 | 10 | K = modelParams.numHiddenStates; 11 | L = modelParams.numObservedStates; 12 | 13 | numFeatures = len * K * featureSize; 14 | features(numFeatures) = EmptyFeatureStruct(); 15 | 16 | featureIdx = 0; 17 | 18 | for hiddenSt = 1:K 19 | for featureNum = 1:featureSize 20 | for v = 1:len 21 | featureIdx = featureIdx + 1; 22 | obs = X(v, featureNum); 23 | features(featureIdx).var = v; 24 | features(featureIdx).assignment = hiddenSt; 25 | features(featureIdx).paramIdx = sub2ind([L featureSize K], ... 
26 | obs, featureNum, hiddenSt); 27 | end 28 | end 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ComputeJointDistribution.m: -------------------------------------------------------------------------------- 1 | %ComputeJointDistribution Computes the joint distribution defined by a set 2 | % of given factors 3 | % 4 | % Joint = ComputeJointDistribution(F) computes the joint distribution 5 | % defined by a set of given factors 6 | % 7 | % Joint is a factor that encapsulates the joint distribution given by F 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % 11 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 12 | 13 | function Joint = ComputeJointDistribution(F) 14 | 15 | % Check for empty factor list 16 | assert(numel(F) ~= 0, 'Error: empty factor list'); 17 | 18 | if (length(F) == 0) 19 | % There are no factors, so create an empty factor list 20 | Joint = struct('var', [], 'card', [], 'val', []); 21 | else 22 | Joint = F(1); 23 | for i = 2:length(F) 24 | % Iterate through factors and incorporate them into the joint distribution 25 | Joint = FactorProduct(Joint, F(i)); 26 | end 27 | end 28 | end 29 | 30 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ComputeMarginal.m: -------------------------------------------------------------------------------- 1 | %ComputeMarginal Computes the marginal over a set of given variables 2 | % M = ComputeMarginal(V, F, E) computes the marginal over variables V 3 | % in the distribution induced by the set of factors F, given evidence E 4 | % 5 | % M is a factor containing the marginal over variables V 6 | % V is a vector containing the variables in the marginal e.g. [1 2 3] for 7 | % X_1, X_2 and X_3. 8 | % F is a vector of factors (struct array) containing the factors 9 | % defining the distribution 10 | % E is an N-by-2 matrix, each row being a variable/value pair. 11 | % Variables are in the first column and values are in the second column. 12 | % 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | function M = ComputeMarginal(V, F, E) 16 | 17 | % Check for empty factor list 18 | assert(numel(F) ~= 0, 'Error: empty factor list'); 19 | 20 | F = ObserveEvidence(F, E); 21 | Joint = ComputeJointDistribution(F); 22 | Joint.val = Joint.val ./ sum(Joint.val); 23 | M = FactorMarginalization(Joint, setdiff(Joint.var, V)); 24 | end 25 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ComputeUnconditionedPairFeatures.m: -------------------------------------------------------------------------------- 1 | function features = ComputeUnconditionedPairFeatures (len, modelParams) 2 | % Creates indicator features on assignments to adjacent variables in the 3 | % sequence. 
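% For example (illustrative), with modelParams.numHiddenStates = 2 and len = 3,
% the loops below produce (len - 1) * K * K = 8 features, the first of which is
%   features(1).var = [1 2], features(1).assignment = [1 1], features(1).paramIdx = 1.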
4 | % 5 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 6 | 7 | if (len < 2) 8 | features = []; 9 | return; 10 | end 11 | 12 | K = modelParams.numHiddenStates; 13 | nPairFeatures = (len - 1) * K * K; 14 | 15 | features(nPairFeatures) = EmptyFeatureStruct(); 16 | 17 | featureIdx = 0; 18 | for s1 = 1:K 19 | for s2 = 1:K 20 | paramVal = sub2ind([K K], s2, s1); 21 | for v = 1:(len - 1) 22 | featureIdx = featureIdx + 1; 23 | features(featureIdx).var = [v v+1]; 24 | features(featureIdx).assignment = [s1 s2]; 25 | features(featureIdx).paramIdx = paramVal; 26 | end 27 | end 28 | end 29 | 30 | 31 | end 32 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ComputeUnconditionedSingletonFeatures.m: -------------------------------------------------------------------------------- 1 | function features = ComputeUnconditionedSingletonFeatures (len, modelParams) 2 | % Creates indicator features on assignments to single variables in the 3 | % sequence. 4 | % 5 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 6 | 7 | nSingleFeatures = len * modelParams.numHiddenStates; 8 | features(nSingleFeatures) = EmptyFeatureStruct(); 9 | 10 | K = modelParams.numHiddenStates; 11 | featureIdx = 0; 12 | 13 | for st = 1:K 14 | paramVal = st; 15 | for v = 1:len 16 | featureIdx = featureIdx + 1; 17 | features(featureIdx).var = v; 18 | features(featureIdx).assignment = st; 19 | features(featureIdx).paramIdx = paramVal; 20 | 21 | end 22 | end 23 | 24 | end 25 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/EliminateVar.m: -------------------------------------------------------------------------------- 1 | % Function used in production of clique trees 2 | % 3 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 4 | 5 | function [newF C E] = EliminateVar(F, C, E, Z) 6 | 7 | useFactors = []; 8 | scope = []; 9 | 10 | for i=1:length(F) 11 | if any(F(i).var == Z) 12 | useFactors = [useFactors i]; 13 | scope = union(scope, F(i).var); 14 | end 15 | end 16 | 17 | % update edge map 18 | % These represent the induced edges for the VE graph. 19 | for i=1:length(scope) 20 | for j=1:length(scope) 21 | 22 | if i~=j 23 | E(scope(i),scope(j)) = 1; 24 | E(scope(j),scope(i)) = 1; 25 | end 26 | end 27 | end 28 | 29 | E(Z,:) = 0; 30 | E(:,Z) = 0; 31 | 32 | 33 | nonUseFactors = setdiff(1:length(F),[useFactors]); 34 | 35 | for i=1:length(nonUseFactors) 36 | newF(i) = F(nonUseFactors(i)); 37 | newmap(nonUseFactors(i)) = i; 38 | end 39 | 40 | newFactor = struct('var', [], 'card', [], 'val', []); 41 | for i=1:length(useFactors) 42 | newFactor = FactorProduct(newFactor,F(useFactors(i))); 43 | end 44 | 45 | newFactor = FactorMarginalization(newFactor,Z); 46 | newF(length(nonUseFactors)+1) = newFactor; 47 | 48 | newC = length(C.nodes)+1; 49 | C.nodes{newC} = scope; 50 | C.factorInds(newC) = length(nonUseFactors)+1; 51 | for i=1:newC-1 52 | if ismember(C.factorInds(i), useFactors) 53 | C.edges(i,newC) = 1; 54 | C.edges(newC,i) = 1; 55 | C.factorInds(i) = 0; 56 | else 57 | if C.factorInds(i) ~= 0 58 | C.factorInds(i) = newmap(C.factorInds(i)); 59 | end 60 | end 61 | end 62 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/EmptyFactorStruct.m: -------------------------------------------------------------------------------- 1 | function f = EmptyFactorStruct 2 | % Dummy function for the factor struct. 
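% Illustrative usage (hypothetical factor): f = EmptyFactorStruct();
% f.var = [1 2]; f.card = [2 2]; f.val = ones(1, 4) / 4;   % a uniform factor over two binary variables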
3 | % 4 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 5 | 6 | f = struct ('var', [], 'card', [], 'val', []); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/EmptyFeatureStruct.m: -------------------------------------------------------------------------------- 1 | function s = EmptyFeatureStruct 2 | % Dummy function for the feature struct. 3 | % 4 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 5 | 6 | s = struct('var', [], 'assignment', [], 'paramIdx', []); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/FactorMarginalization.m: -------------------------------------------------------------------------------- 1 | % FactorMarginalization Sums given variables out of a factor. 2 | % B = FactorMarginalization(A,V) computes the factor with the variables 3 | % in V summed out. The factor data structure has the following fields: 4 | % .var Vector of variables in the factor, e.g. [1 2 3] 5 | % .card Vector of cardinalities corresponding to .var, e.g. [2 2 2] 6 | % .val Value table of size prod(.card) 7 | % 8 | % The resultant factor should have at least one variable remaining or this 9 | % function will throw an error. 10 | % 11 | % See also FactorProduct.m, IndexToAssignment.m, and AssignmentToIndex.m 12 | % 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | function B = FactorMarginalization(A, V) 16 | 17 | % Check for empty factor or variable list 18 | if (isempty(A.var) || isempty(V)), B = A; return; end; 19 | 20 | % Construct the output factor over A.var \ V (the variables in A.var that are not in V) 21 | % and mapping between variables in A and B 22 | [B.var, mapB] = setdiff(A.var, V); 23 | 24 | % Check for empty resultant factor 25 | if isempty(B.var) 26 | %error('Error: Resultant factor has empty scope'); 27 | B.var = []; 28 | B.card = []; 29 | B.val = []; 30 | return; 31 | end; 32 | 33 | % Initialize B.card and B.val 34 | B.card = A.card(mapB); 35 | B.val = zeros(1,prod(B.card)); 36 | 37 | % Compute some helper indices 38 | % These will be very useful for calculating C.val 39 | % so make sure you understand what these lines are doing 40 | assignments = IndexToAssignment(1:length(A.val), A.card); 41 | indxB = AssignmentToIndex(assignments(:, mapB), B.card); 42 | 43 | for i = 1:length(A.val), 44 | B.val(indxB(i)) = B.val(indxB(i)) + A.val(i); 45 | end; 46 | 47 | end 48 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/FactorMaxMarginalization.m: -------------------------------------------------------------------------------- 1 | % FactorMaxMarginalization Takes the max of given variables when marginalizing out of a factor. 2 | % B = FactorMarginalization(A,V) computes the factor with the variables 3 | % in V summed out. The factor data structure has the following fields: 4 | % .var Vector of variables in the factor, e.g. [1 2 3] 5 | % .card Vector of cardinalities corresponding to .var, e.g. [2 2 2] 6 | % .val Value table of size prod(.card) 7 | % 8 | % The resultant factor should have at least one variable remaining or this 9 | % function will throw an error. 
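% Worked example (illustrative): max-marginalizing V = 2 out of a factor A with
% A.var = [1 2], A.card = [2 2], A.val = [0.1 0.4 0.3 0.2] yields
% B.var = 1, B.card = 2, B.val = [0.3 0.4], i.e. the max (not the sum) over the
% eliminated variable.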
10 | % 11 | % See also FactorProduct.m, IndexToAssignment.m, and AssignmentToIndex.m 12 | % 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | function B = FactorMaxMarginalization(A, V) 16 | 17 | % Check for empty factor or variable list 18 | if (isempty(A.var) || isempty(V)), B = A; return; end; 19 | 20 | % Construct the output factor over A.var \ V (the variables in A.var that are not in V) 21 | % and mapping between variables in A and B 22 | [B.var, mapB] = setdiff(A.var, V); 23 | 24 | % Check for empty resultant factor 25 | if isempty(B.var) 26 | error('Error: Resultant factor has empty scope'); 27 | end; 28 | 29 | % Initialize B.card and B.val 30 | B.card = A.card(mapB); 31 | B.val = zeros(1,prod(B.card)); 32 | 33 | % Compute some helper indices 34 | % These will be very useful for calculating C.val 35 | % so make sure you understand what these lines are doing 36 | assignments = IndexToAssignment(1:length(A.val), A.card); 37 | indxB = AssignmentToIndex(assignments(:, mapB), B.card); 38 | 39 | for i = 1:length(A.val) 40 | % Iterate through the values of A 41 | if B.val(indxB(i)) == 0 42 | % B has not been initialized yet 43 | B.val(indxB(i)) = A.val(i); 44 | else 45 | B.val(indxB(i)) = max([B.val(indxB(i)), A.val(i)]); 46 | end 47 | end; 48 | 49 | end 50 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/GetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %GETVALUEOFASSIGNMENT Gets the value of a variable assignment in a factor. 2 | % 3 | % v = GETVALUEOFASSIGNMENT(F, A) returns the value of a variable assignment, 4 | % A, in factor F. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % v = GETVALUEOFASSIGNMENT(F, A, VO) gets the value of a variable assignment, 8 | % A, in factor F. The order of the variables in A are given by the vector VO. 9 | % 10 | % See also SETVALUEOFASSIGNMENT 11 | 12 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 13 | 14 | function v = GetValueOfAssignment(F, A, VO) 15 | 16 | if (nargin == 2), 17 | indx = AssignmentToIndex(A, F.card); 18 | else 19 | map = zeros(length(F.var), 1); 20 | for i = 1:length(F.var), 21 | map(i) = find(VO == F.var(i)); 22 | end; 23 | indx = AssignmentToIndex(A(map), F.card); 24 | end; 25 | 26 | v = F.val(indx); 27 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 
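% For example, IndexToAssignment(3, [2 2]) returns [1 2], and
% IndexToAssignment(1:4, [2 2]) returns the matrix [1 1; 2 1; 1 2; 2 2].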
7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = bsxfun(@mod, floor(bsxfun(@rdivide, I(:) - 1, cumprod([1, D(1:end - 1)]))), D) + 1; 16 | 17 | end 18 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/LRAccuracy.m: -------------------------------------------------------------------------------- 1 | % function acc = LRAccuracy(GroundTruth, Predictions) compares the 2 | % vector of predictions with the vector of ground truth values, 3 | % and returns the accuracy (fraction of predictions that are correct). 4 | % 5 | % Input: 6 | % GroundTruth (numInstances x 1 vector) 7 | % Predictions (numInstances x 1 vector) 8 | % 9 | % Output: 10 | % err (scalar between 0 and 1 inclusive) 11 | % 12 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 13 | 14 | function acc = LRAccuracy(GroundTruth, Predictions) 15 | 16 | GroundTruth = GroundTruth(:); 17 | Predictions = Predictions(:); 18 | assert(all(size(GroundTruth) == size(Predictions))); 19 | 20 | acc = mean(GroundTruth == Predictions); 21 | 22 | end 23 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/LRCostSGD.m: -------------------------------------------------------------------------------- 1 | % [cost, grad] = LRCostSGD(X, y, theta, lambda, i) calculates the LR cost / objective 2 | % function with respect to data instance (i mod n), given the LR classifier parameterized 3 | % by theta and where n = the number of data instances. Also returns the gradient of 4 | % the cost function with respect to data instance (i mod n). 5 | % The aim of LR training is to find the theta that minimizes this function. 6 | % 7 | % Inputs: 8 | % X data. (numInstances x numFeatures matrix) 9 | % X(:,1) is all ones, i.e., it encodes the intercept/bias term. 10 | % y data labels. (numInstances x 1 vector) 11 | % theta LR parameters. (numFeatures x 1 vector) 12 | % lambda (L2) regularization parameter. (scalar) 13 | % i index of a data sample. (integer from 1:size(X,1)) 14 | % 15 | % Outputs: 16 | % cost cost function of the LR classifier evaluated on (X,y). (scalar) 17 | % grad gradient of the cost function. (numFeatures x 1 vector) 18 | % 19 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 20 | 21 | 22 | function [cost, grad] = LRCostSGD(X, y, theta, lambda, i) 23 | i = mod(i, size(X,1)) + 1; 24 | 25 | h = sigmoid (X(i,:) * theta); 26 | 27 | % Calculate cost function 28 | cost = sum((-y(i) .* log(h)) - ((1 - y(i)) .* log(1 - h))) + 0.5 * lambda * sum(theta(2:end) .^ 2); 29 | 30 | % Calculate gradient 31 | grad = X(i,:)' * (h - y(i)); 32 | 33 | % Apply regularization to the weights (but not the bias term) 34 | grad(2:end) = grad(2:end) + lambda * theta(2:end); 35 | 36 | end 37 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/LRPredict.m: -------------------------------------------------------------------------------- 1 | % pred = LRPredict(X, theta) uses the LR classifier encoded by theta 2 | % to predict the labels on data X. 3 | % 4 | % Inputs: 5 | % X data. (numInstances x numFeatures matrix) 6 | % theta LR parameters. (numFeatures x 1 vector) 7 | % 8 | % Outputs: 9 | % pred predicted labels for X. (numInstances x 1 binary vector). 
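% For example (illustrative data), with theta = [0; 2] and X = [1 -1; 1 1],
% sigmoid(X * theta) is roughly [0.12; 0.88], so pred = [0; 1].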
10 | % 11 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 12 | 13 | function pred = LRPredict (X, theta) 14 | 15 | thresh = 0.5; 16 | h = sigmoid (X * theta); 17 | pred = h > thresh; 18 | 19 | end 20 | 21 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/LRSearchLambdaSGD.m: -------------------------------------------------------------------------------- 1 | % function allAcc = LRSearchLambdaSGD(Xtrain, Ytrain, Xvalidation, Yvalidation, lambdas) 2 | % For each value of lambda provided, fit parameters to the training data and return 3 | % the accuracy in the validation data in the corresponding entry of allAcc. 4 | % For instance, allAcc(i) = accuracy in the validation set using lambdas(i). 5 | % 6 | % Inputs: 7 | % Xtrain training data features (numTrainInstances x numFeatures) 8 | % Ytrain training set labels (numTrainInstances x 1) 9 | % Xvalidation validation data features (numValidInstances x num features) 10 | % Yvalidation validation set labels (numValidInstances x 1) 11 | % lambdas values of lambda to try (numLambdas x 1) 12 | % 13 | % Output: 14 | % allAcc vector of accuracies in validation set (numLambdas x 1) 15 | % 16 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 17 | 18 | function allAcc = LRSearchLambdaSGD(Xtrain, Ytrain, Xvalidation, Yvalidation, lambdas) 19 | 20 | % You may use the functions we have provided such as LRTrainSGD, LRPredict, and LRAccuracy. 21 | 22 | allAcc = zeros(size(lambdas)); 23 | 24 | %%%%%%%%%%%%%% 25 | %%% Student code 26 | for i = 1: length(lambdas) 27 | theta = LRTrainSGD(Xtrain,Ytrain,lambdas(i)); 28 | pred = LRPredict(Xvalidation,theta); 29 | allAcc(i) = LRAccuracy(pred,Yvalidation); 30 | end 31 | 32 | %%%%%%%%%%% 33 | 34 | end 35 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/LRTrainSGD.m: -------------------------------------------------------------------------------- 1 | % thetaOpt = LRTrainSGD(X, y, lambda) trains a logistic regression 2 | % classifier using stochastic gradient descent. It returns the optimal theta values. 3 | % 4 | % Inputs: 5 | % X data. (numInstances x numFeatures matrix) 6 | % X(:,1) is all ones, i.e., it encodes the intercept/bias term. 7 | % y data labels. (numInstances x 1 vector) 8 | % lambda (L2) regularization parameter. (scalar) 9 | % 10 | % Outputs: 11 | % thetaOpt optimal LR parameters. (numFeatures x 1 vector) 12 | % 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | 16 | function thetaOpt = LRTrainSGD(X, y, lambda) 17 | 18 | numFeatures = size(X, 2); 19 | 20 | % This sets up an anonymous function gradFn 21 | % such that gradFn(theta, i) = LRCostSGD(X, y, theta, lambda, i). 22 | % We need to do this because GradientDescent takes in a function 23 | % handle gradFunc(theta, i), where gradFunc only takes two input params. 
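% For instance, once gradFn is defined below, gradFn(theta0, 1) is equivalent to
% LRCostSGD(X, y, theta0, lambda, 1), with X, y, and lambda captured from this
% function's workspace.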
24 | % 25 | % For more info, you may check out the official documentation: 26 | % Matlab - http://www.mathworks.com/help/techdoc/matlab_prog/f4-70115.html 27 | % Octave - http://www.gnu.org/software/octave/doc/interpreter/Anonymous-Functions.html 28 | gradFn = @(theta, i)LRCostSGD(X, y, theta, lambda, i); 29 | 30 | % Calculate optimal theta values 31 | thetaOpt = StochasticGradientDescent(gradFn, zeros(numFeatures, 1), 5000); 32 | 33 | end 34 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/MaxDecoding.m: -------------------------------------------------------------------------------- 1 | %MAXDECODING Finds the best assignment for each variable from the marginals 2 | %passed in. Returns A such that A(i) returns the index of the best 3 | %instantiation for variable i. 4 | % 5 | % For instance: Let's say we have two variables 1 and 2. 6 | % Marginals for 1 = [0.1, 0.3, 0.6] 7 | % Marginals for 2 = [0.92, 0.08] 8 | % A(1) = 3, A(2) = 1. 9 | % 10 | % See also COMPUTEEXACTMARGINALSBP 11 | 12 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 13 | 14 | function A = MaxDecoding( M ) 15 | 16 | % Compute the best assignment for variables in the network. 17 | A = zeros(1, length(M)); 18 | for i = 1:length(M) 19 | % Iterate through variables 20 | [maxVal, idx] = max(M(i).val); 21 | A(i) = idx; 22 | end 23 | 24 | end 25 | 26 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/NumParamsForConditionedFeatures.m: -------------------------------------------------------------------------------- 1 | function n = NumParamsForConditionedFeatures (features, numObservedStates) 2 | % Number of parameters "consumed" by a set of conditioned features. 3 | % 4 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 5 | 6 | maxParam = max([features.paramIdx]); 7 | n = maxParam + numObservedStates - 1 - mod(maxParam - 1, numObservedStates); 8 | 9 | end 10 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/NumParamsForUnconditionedFeatures.m: -------------------------------------------------------------------------------- 1 | function n = NumParamsForUnconditionedFeatures (features) 2 | % Number of parameters "consumed" by a set of unconditioned features. 
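% For example (illustrative): if the largest paramIdx appearing in features is 26,
% these features consume parameters 1 through 26, so n = 26.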
3 | % 4 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 5 | 6 | n = max([features.paramIdx]); 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/PA7Description.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/PA7Description.pdf -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Part1Lambdas.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Part1Lambdas.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Part2FullDataset.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Part2FullDataset.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Part2LogZTest.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Part2LogZTest.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Part2Sample.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Part2Sample.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Part2Test.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Part2Test.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/SetValueOfAssignment.m: -------------------------------------------------------------------------------- 1 | %SETVALUEOFASSIGNMENT Sets the value of a variable assignment in a factor. 2 | % 3 | % F = SETVALUEOFASSIGNMENT(F, A, v) sets the value of a variable assignment, 4 | % A, in factor F to v. The order of the variables in A are assumed to be the 5 | % same as the order in F.var. 6 | % 7 | % F = SETVALUEOFASSIGNMENT(F, A, v, VO) sets the value of a variable 8 | % assignment, A, in factor F to v. The order of the variables in A are given 9 | % by the vector VO. 
10 | % 11 | % See also GETVALUEOFASSIGNMENT 12 | 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | function F = SetValueOfAssignment(F, A, v, VO); 16 | 17 | if (nargin == 3), 18 | indx = AssignmentToIndex(A, F.card); 19 | else 20 | map = zeros(length(F.var), 1); 21 | for i = 1:length(F.var), 22 | map(i) = find(VO == F.var(i)); 23 | end; 24 | indx = AssignmentToIndex(A(map), F.card); 25 | end; 26 | 27 | F.val(indx) = v; 28 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/StochasticGradientDescent.m: -------------------------------------------------------------------------------- 1 | % function thetaOpt = StochasticGradientDescent (gradFunc, theta0, maxiter) 2 | % runs gradient descent until convergence, returning the optimal parameters thetaOpt. 3 | % 4 | % Inputs: 5 | % gradFunc function handle to a function [cost, grad] = gradFunc(theta, i) 6 | % that computes the LR cost / objective function and the gradient 7 | % of the negative log likelihood of the ith data instance, given 8 | % parameters theta. 9 | % theta0 initial value of theta to start gradient descent from. 10 | % maxIter number of iterations to run SGD for. 11 | % 12 | % Output: 13 | % thetaOpt optimal value of theta. 14 | % 15 | % 16 | % Note - function handles may be new to some of you. Briefly, function handles 17 | % are a way of passing a function to another function as an argument. The 18 | % syntax for calling a function handle is exactly the same as for calling any 19 | % other function. 20 | % 21 | % For more information, refer to the official documentation: 22 | % Matlab - http://www.mathworks.com/help/techdoc/matlab_prog/f2-38133.html 23 | % Octave - http://www.gnu.org/software/octave/doc/interpreter/Function-Handles.html 24 | % 25 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 26 | 27 | 28 | function thetaOpt = StochasticGradientDescent (gradFunc, theta0, maxIter) 29 | 30 | % The grader will accept all answers that are near enough 31 | % to the optimal value, so don't worry about being off by one 32 | % iteration etc. 
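% Note: the implementation below uses a decaying step size,
%   theta <- theta - (0.1 / (1 + sqrt(i))) * grad,
% evaluating the gradient on one data instance per iteration via gradFunc(theta, i).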
33 | 34 | thetaOpt = zeros(size(theta0)); 35 | 36 | %%%%%%%%%%%%%% 37 | %%% Student code 38 | for i = 1:maxIter 39 | [cost, grad] = gradFunc(theta0,i); 40 | theta0 = theta0 - (0.1)*(grad)/(1+sqrt(i)); 41 | end 42 | thetaOpt = theta0; 43 | %%%%%%%%%%% 44 | 45 | end 46 | 47 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Test1X.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Test1X.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Test1Y.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Test1Y.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Train1X.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Train1X.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Train1Y.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Train1Y.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Train2X.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Train2X.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Train2Y.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Train2Y.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Validation1X.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Validation1X.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Validation1Y.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Validation1Y.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Validation2X.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Validation2X.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/Validation2Y.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/Validation2Y.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/ValidationAccuracy.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/7.CRF Learning for OCR/ValidationAccuracy.mat -------------------------------------------------------------------------------- /7.CRF Learning for OCR/VisualizeCharacters.m: -------------------------------------------------------------------------------- 1 | function VisualizeCharacters (X) 2 | % VisualizeCharacters(X) displays the characters of observation X. The 3 | % input X should be a numCharacters x 32 matrix, since each character is 4 | % 8x4 and is stored in one row. The entries are 1's and 2's corresponding 5 | % to white and black pixels, respectively. This is the format of the 6 | % provided data for PA7, so this function should "just work" on the 7 | % provided data. 8 | % 9 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 10 | 11 | if (~isequal(size(X, 2), 32)) 12 | error('Input to VisualizeCharacters.m of incorrect size.'); 13 | end 14 | 15 | if (any(X(:) == 2)) 16 | X = X - 1; 17 | end 18 | 19 | len = size(X, 1); 20 | totalWidth = 5 * len + 1; 21 | 22 | im = zeros(8, totalWidth); 23 | for i = 1:len 24 | charIm = reshape(X(i,:), 8, 4); 25 | im(:, (2:5) + (5 * (i-1))) = charIm; 26 | end 27 | 28 | figure; 29 | colormap(gray); 30 | imagesc(1 - im); 31 | axis equal; 32 | [height, width] = size(im); 33 | axis([0 width 0 height]); 34 | 35 | end 36 | 37 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Sat May 19 16:18:25 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/pppp.m: -------------------------------------------------------------------------------- 1 | function ft = pppp(feat,p1,p2,p3,p12,p23) 2 | 3 | for i = 1:length(feat) 4 | if feat(i).var == 1 5 | feat(i).p = p1.val([feat(i).assignment]); 6 | end 7 | 8 | if feat(i).var == 2 9 | feat(i).p = p2.val([feat(i).assignment]); 10 | end 11 | 12 | if feat(i).var == 3 13 | feat(i).p = p3.val([feat(i).assignment]); 14 | end 15 | if feat(i).var == [1,2] 16 | feat(i).p = p12.val(AssignmentToIndex([feat(i).assignment],[26,26])); 17 | end 18 | if feat(i).var == [2,3] 19 | feat(i).p = p23.val(AssignmentToIndex([feat(i).assignment],[26,26])); 20 | end 21 | end 22 | ft = feat; 23 | return; 24 | end 25 | 26 | -------------------------------------------------------------------------------- /7.CRF Learning for OCR/sigmoid.m: -------------------------------------------------------------------------------- 1 | function s = sigmoid (z) 2 | % Sigmoid function (scalar or element-wise) 3 | % 4 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 5 | 6 | s = 1 ./ (1 + exp (-z)); 7 | 8 | end 9 | 
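% Example values (illustrative): sigmoid(0) = 0.5, and sigmoid([-2 0 2])
% is approximately [0.1192 0.5000 0.8808].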
-------------------------------------------------------------------------------- /7.CRF Learning for OCR/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/ConvertAtoG.m: -------------------------------------------------------------------------------- 1 | % File: ConvertAtoG.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 4 | % 5 | % Author: Huayan Wang 6 | 7 | function G = ConvertAtoG(A) 8 | 9 | G = zeros(10,2); 10 | A = A + A'; 11 | 12 | G(1,:) = [0 0]; 13 | visited = zeros(10,1); 14 | visited(1) = 1; 15 | 16 | 17 | cnt = 0; 18 | while sum(visited) < 10 19 | cnt = cnt+1; 20 | for i=2:10 21 | for j=1:10 22 | if A(i,j) == 1 && visited(j) 23 | visited(i) = 1; 24 | G(i,1) = 1; 25 | G(i,2) = j; 26 | break; 27 | end 28 | end 29 | end 30 | end -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/FitGaussianParameters.m: -------------------------------------------------------------------------------- 1 | function [mu sigma] = FitGaussianParameters(X) 2 | % X: (N x 1): N examples (1 dimensional) 3 | % Fit N(mu, sigma^2) to the empirical distribution 4 | % 5 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 6 | 7 | mu = 0; 8 | sigma = 1; 9 | 10 | %%%%%%%%%%%%%%%%%%%%%%%%%% 11 | % YOUR CODE HERE 12 | mu = mean(X); 13 | sigma = std(X,1); 14 | %%%%%%%%%%%%%%%%%%%%%%%%%% 15 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/GaussianMutualInformation.m: -------------------------------------------------------------------------------- 1 | function I = GaussianMutualInformation(X, Y) 2 | % 3 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 4 | 5 | if isequal(X,Y) 6 | I = 0; 7 | return; 8 | end 9 | % X: (N x D1), D1 dimensions, N samples 10 | % Y: (N x D2), D2 dimensions, N samples 11 | 12 | % I(X, Y) = 1/2 * log( | Sigma_XX | * | Sigma_YY | / | Sigma |) 13 | % Sigma = [ Sigma_XX, Sigma_XY ; 14 | % Sigma_XY, Sigma_YY ] 15 | 16 | Sxx = cov(X); 17 | Syy = cov(Y); 18 | S = cov([X,Y]); 19 | I = .5*log(det(Sxx)*det(Syy)/det(S)); -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/LearnGraphAndCPDs.m: -------------------------------------------------------------------------------- 1 | function [P G loglikelihood] = LearnGraphAndCPDs(dataset, labels) 2 | 3 | % dataset: N x 10 x 3, N poses represented by 10 parts in (y, x, alpha) 4 | % labels: N x 2 true class labels for the examples. 
labels(i,j)=1 if the 5 | % the ith example belongs to class j 6 | % 7 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 8 | 9 | N = size(dataset, 1); 10 | K = size(labels,2); 11 | 12 | G = zeros(10,2,K); % graph structures to learn 13 | % initialization 14 | for k=1:K 15 | G(2:end,:,k) = ones(9,2); 16 | end 17 | 18 | % estimate graph structure for each class 19 | for k=1:K 20 | % fill in G(:,:,k) 21 | % use ConvertAtoG to convert a maximum spanning tree to a graph G 22 | %%%%%%%%%%%%%%%%%%%%%%%%% 23 | % YOUR CODE HERE 24 | A = LearnGraphStructure(dataset([labels(:,k)==1],:,:)); 25 | G(:,:,k) = ConvertAtoG(A); 26 | %%%%%%%%%%%%%%%%%%%%%%%%% 27 | end 28 | 29 | % estimate parameters 30 | 31 | P.c = zeros(1,K); 32 | % compute P.c 33 | 34 | % the following code can be copied from LearnCPDsGivenGraph.m 35 | % with little or no modification 36 | %%%%%%%%%%%%%%%%%%%%%%%%% 37 | % YOUR CODE HERE 38 | [P,loglikelihood] = LearnCPDsGivenGraph(dataset,G,labels); 39 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 40 | 41 | fprintf('log likelihood: %f\n', loglikelihood); 42 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/LearnGraphStructure.m: -------------------------------------------------------------------------------- 1 | function [A W] = LearnGraphStructure(dataset) 2 | 3 | % Input: 4 | % dataset: N x 10 x 3, N poses represented by 10 parts in (y, x, alpha) 5 | % 6 | % Output: 7 | % A: maximum spanning tree computed from the weight matrix W 8 | % W: 10 x 10 weight matrix, where W(i,j) is the mutual information between 9 | % node i and j. 10 | % 11 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 12 | 13 | N = size(dataset,1); 14 | K = size(dataset,3); 15 | 16 | W = zeros(10,10); 17 | % Compute weight matrix W 18 | % set the weights following Eq. 
(14) in PA description 19 | % you don't have to include M since all entries are scaled by the same M 20 | %%%%%%%%%%%%%%%%%%%%%%%%%%% 21 | % YOUR CODE HERE 22 | o = cell(); 23 | for i = 1:10 24 | o{i} = reshape(dataset(:,i,:),size(dataset,1),3); 25 | end 26 | for i = 1:10 27 | for j = i+1:10 28 | W(i,j) = W(j,i) = GaussianMutualInformation(o{i},o{j}); 29 | end 30 | end 31 | %%%%%%%%%%%%%%%%%%%%%%%%%%% 32 | 33 | % Compute maximum spanning tree 34 | A = MaxSpanningTree(W); 35 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/PA8Data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/8.Learning Tree Structured Networks/PA8Data.mat -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/PA8Description.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/8.Learning Tree Structured Networks/PA8Description.pdf -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/PA8SampleCases.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/8.Learning Tree Structured Networks/PA8SampleCases.mat -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/SampleGaussian.m: -------------------------------------------------------------------------------- 1 | function sample = SampleGaussian(mu, sigma) 2 | 3 | % sample from the Gaussian distribution specifed by mean value mu and standard deviation sigma 4 | % 5 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 6 | 7 | sample = mu + sigma*randn(1,1); 8 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/SampleMultinomial.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 2 | 3 | function sample = SampleMultinomial(probabilities) 4 | 5 | dice = rand(1,1); 6 | accumulate = 0; 7 | for i=1:length(probabilities) 8 | accumulate = accumulate + probabilities(i); 9 | if accumulate/sum(probabilities) > dice 10 | break 11 | end 12 | end 13 | sample = i; 14 | 15 | 16 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/ShowPose.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 2 | % 3 | % Author: Huayan Wang, Andrew Duchi 4 | 5 | % visualize a configuration the body parts 6 | 7 | function img = ShowPose( pose ) 8 | 9 | % pose 10 x 3. 
10 : body parts, 10 | % 3 : y, x, alpha 11 | 12 | pose(:,1) = pose(:,1) + 100; 13 | pose(:,2) = pose(:,2) + 150; 14 | 15 | 16 | 17 | pose = reshape(pose, [10 3]); 18 | part_length = [60, 20, 32, 33, 32, 33, 46, 49, 46, 49]; 19 | part_width = [18, 10, 7, 5, 7, 5, 10, 7, 10, 7]; 20 | img = zeros(300, 300); 21 | for part = 1:10 22 | 23 | startpt = round(pose(part, 1:2)); 24 | axis = [sin(pose(part,3) - pi/2) cos(pose(part,3) - pi/2)]; 25 | xaxis = [cos(pose(part,3) - pi/2) -sin(pose(part,3) - pi/2)]; 26 | endpt = round(startpt + part_length(part) * axis); 27 | 28 | corner1 = round(startpt + xaxis * part_width(part)); 29 | corner2 = round(startpt - xaxis * part_width(part)); 30 | corner3 = round(endpt + xaxis * part_width(part)); 31 | corner4 = round(endpt - xaxis * part_width(part)); 32 | 33 | img = func_DrawLine(img, corner1(1), corner1(2), corner2(1), corner2(2),1); 34 | img = func_DrawLine(img, corner1(1), corner1(2), corner3(1), corner3(2),1); 35 | img = func_DrawLine(img, corner4(1), corner4(2), corner2(1), corner2(2),1); 36 | img = func_DrawLine(img, corner4(1), corner4(2), corner3(1), corner3(2),1); 37 | 38 | if startpt(1) > 3 && startpt(1) < 298 && startpt(2) > 3 && startpt(2) < 298 39 | img(startpt(1)-3 : startpt(1) + 3, startpt(2) - 3 : startpt(2)+3) = ones(7,7); 40 | end 41 | end 42 | 43 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/VisualizeDataset.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 2 | % 3 | % Author: Huayan Wang 4 | 5 | function VisualizeDataset(Dataset) 6 | 7 | f = figure; 8 | for i=1:size(Dataset,1) 9 | img = ShowPose(reshape(Dataset(i,:,:), [10 3])); 10 | imshow(img); 11 | pause(0.3) 12 | if (~ishandle(f)) break; end; % quit loop when user closes the figure 13 | end 14 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/VisualizeModels.m: -------------------------------------------------------------------------------- 1 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 2 | % 3 | % Author: Huayan Wang 4 | 5 | function VisualizeModels(P, G) 6 | K = length(P.c); 7 | 8 | f = figure; 9 | while(1) 10 | for k=1:K 11 | subplot(1,K,k); 12 | if size(G,3) == 1 % same graph structure for all classes 13 | 14 | pose = SamplePose(P,G,k); 15 | 16 | else % different graph structure for each class 17 | 18 | pose = SamplePose(P,G(:,:,k),k); 19 | 20 | end 21 | 22 | img = ShowPose(pose); 23 | imshow(img); 24 | pause(0.3) 25 | if (~ishandle(f)) return; end; % quit loop when user closes the figure 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/lognormpdf.m: -------------------------------------------------------------------------------- 1 | % 2 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 3 | function val = lognormpdf(x, mu, sigma) 4 | val = - (x - mu).^2 / (2*sigma^2) - log (sqrt(2*pi) * sigma); -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/octave-core: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/8.Learning Tree Structured Networks/octave-core 
-------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue May 22 18:56:50 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism. 4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 | -------------------------------------------------------------------------------- /8.Learning Tree Structured Networks/submit_input.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/8.Learning Tree Structured Networks/submit_input.mat -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/AssignmentToIndex.m: -------------------------------------------------------------------------------- 1 | % AssignmentToIndex Convert assignment to index. 2 | % 3 | % I = AssignmentToIndex(A, D) converts an assignment, A, over variables 4 | % with cardinality D to an index into the .val vector for a factor. 5 | % If A is a matrix then the function converts each row of A to an index. 6 | % 7 | % See also IndexToAssignment.m 8 | % 9 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 10 | 11 | function I = AssignmentToIndex(A, D) 12 | 13 | D = D(:)'; % ensure that D is a row vector 14 | if (any(size(A) == 1)), 15 | I = cumprod([1, D(1:end - 1)]) * (A(:) - 1) + 1; 16 | else 17 | I = sum(bsxfun(@times, A - 1, cumprod([1, D(1:end - 1)])), 2) + 1; 18 | end; 19 | 20 | end 21 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/ComputeExactMarginalsHMM.m: -------------------------------------------------------------------------------- 1 | %COMPUTEEXACTMARGINALSHMM Runs exact inference and returns the marginals 2 | %over all the variables and the calibrated clique tree. 3 | 4 | % M = COMPUTEEXACTMARGINALSHMM(F) Takes a list of factors F, 5 | % and runs exact inference and returns the calibrated clique tree (unnormalized) and 6 | % final marginals (normalized) for the variables in the network. 7 | % It returns an array of size equal to the number of variables in the 8 | % network where M(i) represents the ith variable and M(i).val represents 9 | % the marginals of the ith variable. 
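% Note that the marginals are kept in log space: after the logsumexp
% normalization below, exp(M(i).val) sums to 1 for each variable i.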
10 | 11 | 12 | % 13 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 14 | 15 | function [M, PCalibrated] = ComputeExactMarginalsHMM(F) 16 | 17 | % M = repmat(struct('var', 0, 'card', 0, 'val', []), length(N), 1); 18 | % 19 | % where N is the number of variables in the network, which can be determined 20 | % from the factors F. 21 | 22 | % Create a clique tree, compute the initial potentails, calibrate the 23 | % clique tree, and find the belief for each varaible at a clique that has 24 | % that variable in its scope 25 | 26 | compressedCliqueTree = CreateCliqueTreeHMM(F); 27 | PCalibrated = CliqueTreeCalibrate(compressedCliqueTree); 28 | varsList = sort(unique([F(:).var])); 29 | M = repmat(struct('var', 0, 'card', 0, 'val', []), length(varsList), 1); 30 | for i = 1:length(varsList) 31 | assert (varsList(i) == i); 32 | if (i == 1) 33 | clique = PCalibrated.cliqueList(i); 34 | M(i) = FactorMarginalization(clique, 2); 35 | else 36 | clique = PCalibrated.cliqueList(i-1); 37 | M(i) = FactorMarginalization(clique, i-1); 38 | end 39 | 40 | if any(M(i).val ~= 0) 41 | % Normalize 42 | M(i).val = M(i).val - logsumexp(M(i).val); 43 | end 44 | 45 | end 46 | 47 | end 48 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/FactorMarginalization.m: -------------------------------------------------------------------------------- 1 | % FactorMarginalization Sums given variables out of a factor in log space. 2 | % B = FactorMarginalization(A,V) computes the factor with the variables 3 | % in V summed out. The factor data structure has the following fields: 4 | % .var Vector of variables in the factor, e.g. [1 2 3] 5 | % .card Vector of cardinalities corresponding to .var, e.g. [2 2 2] 6 | % .val Value table of size prod(.card) 7 | % 8 | % The resultant factor should have at least one variable remaining or this 9 | % function will throw an error. 10 | % 11 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 12 | 13 | function B = FactorMarginalization(A,V) 14 | B.var = A.var(2); 15 | B.card = A.card(1); 16 | Val = reshape(A.val,B.card,B.card); 17 | 18 | if(V==A.var(2)) 19 | Val = Val'; 20 | B.var = A.var(1); 21 | end 22 | 23 | B.val = log(sum(exp(bsxfun(@minus, Val, max(Val)))))+max(Val); 24 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/FitG.m: -------------------------------------------------------------------------------- 1 | % File: FitG.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 4 | 5 | function [mu sigma] = FitG(X, W) 6 | 7 | % X: (N x 1): N examples (1 dimensional) 8 | % W: (N x 1): Weights over examples (W(i) is the weight for X(i)) 9 | 10 | % Fit N(mu, sigma^2) to the empirical distribution 11 | 12 | mu = 0; 13 | sigma = 1; 14 | 15 | mu = W'*X/sum(W); 16 | v = W'*(X.*X)/sum(W) - mu^2; 17 | sigma = sqrt(v); 18 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/FitLG.m: -------------------------------------------------------------------------------- 1 | % File: FitLG.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford Univerity, 2012 4 | 5 | function [Beta sigma] = FitLG(X, U, W) 6 | 7 | % Estimate parameters of the linear Gaussian model: 8 | % X|U ~ N(Beta(1)*U(1) + ... + Beta(K)*U(K) + Beta(K+1), sigma^2); 9 | 10 | % Note that Matlab index from 1, we can't write Beta(0). So Beta(K+1) is 11 | % essentially Beta(0) in PA3 description (and the text book). 
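% Illustrative usage (hypothetical data): for a child X with two parents,
%   [Beta, sigma] = FitLG(X, [U1 U2], W);
% returns Beta(1) and Beta(2) as the linear coefficients, Beta(3) as the
% intercept, and sigma as the conditional standard deviation.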
12 | 13 | % X: (N x 1), the child variable, N examples 14 | % U: (N x K), K parent variables, N examples 15 | % W: (N x 1), weights over the examples. 16 | 17 | N = size(U,1); 18 | K = size(U,2); 19 | 20 | Beta = zeros(K+1,1); 21 | sigma = 1; 22 | 23 | % collect expectations and solve the linear system 24 | % A = [ E[U(1)], E[U(2)], ... , E[U(K)], 1 ; 25 | % E[U(1)*U(1)], E[U(2)*U(1)], ... , E[U(K)*U(1)], E[U(1)]; 26 | % ... , ... , ... , ... , ... ; 27 | % E[U(1)*U(K)], E[U(2)*U(K)], ... , E[U(K)*U(K)], E[U(K)] ] 28 | 29 | A = zeros(K,K); 30 | for j = 1:K 31 | row = 1:K; 32 | for x = 1:K 33 | mu = W'*(U(:,x).*U(:,j))/sum(W); 34 | row(x) = mu; 35 | end 36 | A(j,:) = row; 37 | end 38 | row1 = 1:K; 39 | for x = 1:K 40 | mu = W'*U(:,x)/sum(W); 41 | row1(x) = mu; 42 | end 43 | col2 = [1;row1']; 44 | A = [row1;A]; 45 | A = [A, col2]; 46 | 47 | % B = [ E[X]; E[X*U(1)]; ... ; E[X*U(K)] ] 48 | 49 | B = 1:K; 50 | for x = 1:K 51 | mu = W'*(X.*U(:,x))/sum(W); 52 | B(x) = mu; 53 | end 54 | 55 | mu = W'*X/sum(W); 56 | B = [mu;B']; 57 | 58 | % solve A*Beta = B 59 | Beta = A\B; 60 | 61 | % then compute sigma according to eq. (17) in PA description 62 | CovU = A(2:end, 1:K) - A(2:end, K+1) * A(1, 1:K); 63 | [MuX, SigmaX] = FitG(X, W); 64 | sigma = sqrt( SigmaX^2 - Beta(1:K)' * CovU * Beta(1:K) ); 65 | 66 | % catch in case sigma is badly conditioned 67 | if sigma == 0 || ~isreal(sigma) 68 | sigma = .01; 69 | else 70 | sigma = sigma + .01; 71 | end 72 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/IndexToAssignment.m: -------------------------------------------------------------------------------- 1 | % IndexToAssignment Convert index to variable assignment. 2 | % 3 | % A = IndexToAssignment(I, D) converts an index, I, into the .val vector 4 | % into an assignment over variables with cardinality D. If I is a vector, 5 | % then the function produces a matrix of assignments, one assignment 6 | % per row. 
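% Worked example (with assumed cardinalities D = [2 2]):
%     IndexToAssignment(3, [2 2])         returns [1 2]
%     IndexToAssignment([1;2;3;4], [2 2]) returns [1 1; 2 1; 1 2; 2 2]
% i.e. the first variable cycles fastest, mirroring AssignmentToIndex.m.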
7 | % 8 | % See also AssignmentToIndex.m and SampleFactors.m 9 | % 10 | % Copyright (C) Daphne Koller, Stanford University, 2012 11 | 12 | function A = IndexToAssignment(I, D) 13 | 14 | D = D(:)'; % ensure that D is a row vector 15 | A = bsxfun(@mod, floor(bsxfun(@rdivide, I(:) - 1, cumprod([1, D(1:end - 1)]))), D) + 1; 16 | 17 | end 18 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/PA9Data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/9.Learnign with Incomplete Data/PA9Data.mat
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/PA9Description.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/9.Learnign with Incomplete Data/PA9Description.pdf
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/PA9SampleCases.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/9.Learnign with Incomplete Data/PA9SampleCases.mat
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/Predictions.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue Jun 05 00:41:36 2012 India Standard Time 2 | # name: yourPredictions 3 | # type: matrix 4 | # rows: 90 5 | # columns: 1 6 | 2 7 | 3 8 | 2 9 | 2 10 | 2 11 | 2 12 | 1 13 | 2 14 | 2 15 | 2 16 | 1 17 | 1 18 | 3 19 | 1 20 | 1 21 | 1 22 | 3 23 | 1 24 | 2 25 | 2 26 | 2 27 | 3 28 | 2 29 | 3 30 | 1 31 | 2 32 | 2 33 | 1 34 | 3 35 | 1 36 | 1 37 | 1 38 | 3 39 | 1 40 | 2 41 | 2 42 | 3 43 | 1 44 | 1 45 | 1 46 | 3 47 | 2 48 | 2 49 | 2 50 | 3 51 | 1 52 | 2 53 | 1 54 | 3 55 | 3 56 | 1 57 | 3 58 | 2 59 | 2 60 | 2 61 | 1 62 | 2 63 | 2 64 | 3 65 | 2 66 | 3 67 | 3 68 | 1 69 | 1 70 | 1 71 | 2 72 | 2 73 | 3 74 | 3 75 | 2 76 | 3 77 | 1 78 | 3 79 | 1 80 | 2 81 | 3 82 | 1 83 | 3 84 | 3 85 | 3 86 | 1 87 | 1 88 | 1 89 | 2 90 | 2 91 | 3 92 | 2 93 | 2 94 | 1 95 | 2 96 | 97 | 98 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/RecognizeUnknownActions.m: -------------------------------------------------------------------------------- 1 | % You should put all your code for recognizing unknown actions in this file. 2 | % Describe the method you used in YourMethod.txt. 3 | % Don't forget to call SavePredictions() at the end with your predicted labels to save them for submission, then submit using submit.m 4 | predicted_labels = RecognizeUnknownActions(datasetTrain, dataset) 5 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/SavePredictions.m: -------------------------------------------------------------------------------- 1 | function SavePredictions (yourPredictions) 2 | % This function will save your test set predictions into a mat file. The 3 | % submit script will then read in that 4 | % file to send your predictions to the grading server.
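% Typical usage (assuming predicted_labels is the 90 x 1 vector of class
% labels, 1-3, produced by your code in RecognizeUnknownActions.m):
%     SavePredictions(predicted_labels);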
5 | % 6 | % The input `yourPredictions' should be a 90x1 vector where 7 | % yourPredictions(i) is the predicted class (1-3) for the i'th action. 8 | % 9 | % Copyright (C) Daphne Koller, Stanford University, 2012 10 | 11 | if (~isequal([90 1], size(yourPredictions))) 12 | error ('The input to SavePredictions is not the right size.'); 13 | end 14 | 15 | save('Predictions.mat', 'yourPredictions'); 16 | 17 | 18 | end 19 | 20 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/ShowPose.m: -------------------------------------------------------------------------------- 1 | % File: ShowPose.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | % visualize a configuration of the body parts 6 | 7 | function img = ShowPose( pose ) 8 | 9 | % pose 10 x 3. 10 : body parts, 10 | % 3 : y, x, alpha 11 | 12 | pose(:,1) = pose(:,1) + 100; 13 | pose(:,2) = pose(:,2) + 150; 14 | 15 | 16 | 17 | pose = reshape(pose, [10 3]); 18 | part_length = [60, 20, 32, 33, 32, 33, 46, 49, 46, 49]; 19 | part_width = [18, 10, 7, 5, 7, 5, 10, 7, 10, 7]; 20 | img = zeros(300, 300); 21 | for part = 1:10 22 | 23 | startpt = round(pose(part, 1:2)); 24 | axis = [sin(pose(part,3) - pi/2) cos(pose(part,3) - pi/2)]; 25 | xaxis = [cos(pose(part,3) - pi/2) -sin(pose(part,3) - pi/2)]; 26 | endpt = round(startpt + part_length(part) * axis); 27 | 28 | corner1 = round(startpt + xaxis * part_width(part)); 29 | corner2 = round(startpt - xaxis * part_width(part)); 30 | corner3 = round(endpt + xaxis * part_width(part)); 31 | corner4 = round(endpt - xaxis * part_width(part)); 32 | 33 | img = func_DrawLine(img, corner1(1), corner1(2), corner2(1), corner2(2),1); 34 | img = func_DrawLine(img, corner1(1), corner1(2), corner3(1), corner3(2),1); 35 | img = func_DrawLine(img, corner4(1), corner4(2), corner2(1), corner2(2),1); 36 | img = func_DrawLine(img, corner4(1), corner4(2), corner3(1), corner3(2),1); 37 | 38 | if startpt(1) > 3 && startpt(1) < 298 && startpt(2) > 3 && startpt(2) < 298 39 | img(startpt(1)-3 : startpt(1) + 3, startpt(2) - 3 : startpt(2)+3) = ones(7,7); 40 | end 41 | end 42 | 43 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/VisualizeDataset.m: -------------------------------------------------------------------------------- 1 | % File: VisualizeDataset.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function VisualizeDataset(Dataset) 6 | 7 | figure 8 | for i=1:size(Dataset,1) 9 | img = ShowPose(reshape(Dataset(i,:,:), [10 3])); 10 | imshow(img); 11 | pause(0.3); 12 | end 13 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/YourMethod.txt: -------------------------------------------------------------------------------- 1 | Describe your method used to recognize the unknown actions here.
2 | -------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/emission.m: -------------------------------------------------------------------------------- 1 | function logEmissionProb = emission(poseData,G,P,N,K) % logEmissionProb(i,k) is the log probability of pose i under class/state k, summed over the 10 body parts of the CLG model (note: '+=' below is Octave-specific) 2 | logEmissionProb = zeros(N,K); 3 | for i = 1:N 4 | data = reshape(poseData(i,:,:),10,3); 5 | for j = 1:10 6 | if G(j,1) == 1 7 | parent = data(G(j,2),:); 8 | for k = 1:K 9 | theta = P.clg(j).theta(k,:); 10 | mu_y = sum(theta(1:4).*[1,parent]); 11 | mu_x = sum(theta(5:8).*[1,parent]); 12 | mu_a = sum(theta(9:12).*[1,parent]); 13 | logEmissionProb(i,k) += lognormpdf(data(j,1),mu_y,P.clg(j).sigma_y(k)); 14 | logEmissionProb(i,k) += lognormpdf(data(j,2),mu_x,P.clg(j).sigma_x(k)); 15 | logEmissionProb(i,k) += lognormpdf(data(j,3),mu_a,P.clg(j).sigma_angle(k)); 16 | end 17 | else 18 | for k = 1:K 19 | logEmissionProb(i,k) += lognormpdf(data(j,1),P.clg(j).mu_y(k),P.clg(j).sigma_y(k)); 20 | logEmissionProb(i,k) += lognormpdf(data(j,2),P.clg(j).mu_x(k),P.clg(j).sigma_x(k)); 21 | logEmissionProb(i,k) += lognormpdf(data(j,3),P.clg(j).mu_angle(k),P.clg(j).sigma_angle(k)); 22 | end 23 | end 24 | end 25 | end 26 | end 27 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/lognormpdf.m: -------------------------------------------------------------------------------- 1 | % File: lognormpdf.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function [log_prob] = lognormpdf(x,mu,sigma) 6 | 7 | % LOGNORMPDF Natural logarithm of the normal probability density function (pdf) 8 | % Y = lognormpdf(X,MU,SIGMA) returns the log of the pdf of the normal 9 | % distribution parameterized by mean MU and standard deviation SIGMA evaluated 10 | % at each value in the vector X. Thus, the size of the return 11 | % vector Y is the size of X. 12 | % 13 | % MU and X should have the same dimensions. 14 | 15 | log_prob = -log(sigma*sqrt(2*pi))-(x-mu).^2 ./ (2*sigma.^2); 16 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/logsumexp.m: -------------------------------------------------------------------------------- 1 | % File: logsumexp.m 2 | % 3 | % Copyright (C) Daphne Koller, Stanford University, 2012 4 | 5 | function out = logsumexp(A) 6 | 7 | % LOGSUMEXP 8 | % Computes log( sum( exp( ) ) ) of each row in A in a way that avoids underflow. 9 | % If A is an N x M matrix, then out is an N x 1 vector. 10 | 11 | pi_max = max(A, [], 2); 12 | out = pi_max + log(sum(exp(bsxfun(@minus, A, pi_max)), 2)); 13 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/pgm_login_data.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 3.4.3, Tue Jun 05 00:42:17 2012 India Standard Time 2 | # name: login 3 | # type: sq_string 4 | # elements: 1 5 | # length: 18 6 | anilkaraka@live.in 7 | 8 | 9 | # name: password 10 | # type: sq_string 11 | # elements: 1 12 | # length: 10 13 | 3Kjg8WRg6p 14 | 15 | 16 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/submitWeb.m: -------------------------------------------------------------------------------- 1 | % submitWeb Creates files from your code and output for web submission. 2 | % 3 | % If the submit function does not work for you, use the web-submission mechanism.
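% (For example, and assuming the part numbering given in the assignment
% PDF, submitWeb(1) produces the web-submission file for part 1 only;
% calling submitWeb() with no argument simply passes an empty partId
% through to submit(partId, 1), as the code below shows.)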
4 | % Call this function to produce a file for the part you wish to submit. Then, 5 | % submit the file to the class servers using the "Web Submission" button on the 6 | % Programming Assignments page on the course website. 7 | % 8 | % Copyright (C) Daphne Koller, Stanford University, 2012 9 | 10 | function submitWeb(partId) 11 | if ~exist('partId', 'var') || isempty(partId) 12 | partId = []; 13 | end 14 | 15 | submit(partId, 1); 16 | end 17 | 18 |
-------------------------------------------------------------------------------- /9.Learnign with Incomplete Data/submit_input.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anhncs/Probabilistic-Graphical-Models/7fd4ef255db59ecbfe1a134cadbc4be5ca839894/9.Learnign with Incomplete Data/submit_input.mat --------------------------------------------------------------------------------