├── setup.jl ├── REQUIRE ├── README.txt ├── README.txt~ ├── src ├── HMM │ ├── HMM.jl │ ├── HMMbackward.jl │ ├── HMMgamma.jl │ ├── HMMforward.jl │ ├── HMMviterbi.jl │ ├── HMMbackward_log.jl │ ├── HMMsmooth.jl │ ├── HMMforward_log.jl │ └── HMMem.jl ├── GraphPlot │ ├── make_layout.jl │ ├── poset.jl │ ├── toposort.jl │ └── GraphPlot.jl ├── PotentialInference │ ├── Potentials │ │ ├── PotConst.jl │ │ ├── ArrayPotential.jl │ │ ├── PotLogArray.jl │ │ ├── Potential.jl │ │ └── PotArray.jl │ ├── PotentialInference.jl │ └── Algorithms │ │ └── FactorGraph.jl ├── LogArrayFunctions.jl └── BrmlFunctions.jl ├── .gitattributes ├── brml.jl ├── Reexport.jl ├── Demos ├── demoPotLogArray.jl ├── #demoMaxProd.jl# ├── demoMaxProd.jl ├── demoSumProd.jl ├── demoPlotDAG.jl ├── demoChainIndepRational.jl ├── #demoChainIndepRational.jl# ├── demoSumProdStandardiseVariables.jl ├── demoBurglarSimple.jl ├── demoBurglarDictPot.jl ├── demoHMMinferenceSimple.jl ├── demoBurglar.jl ├── demoHMMinferenceFG.jl ├── demoHMMlearn.jl └── demoHMMburglar.jl ├── LICENSE.txt ├── .gitignore ├── docs └── BrmlFunctions.md └── notebooks ├── LearningNotesBrml.ipynb └── demoHMMinferenceSimple.ipynb /setup.jl: -------------------------------------------------------------------------------- 1 | push!(LOAD_PATH,pwd()) 2 | -------------------------------------------------------------------------------- /REQUIRE: -------------------------------------------------------------------------------- 1 | julia 0.4 2 | Cairo 3 | Docile 4 | IJulia 5 | Lexicon 6 | PyPlot 7 | Winston 8 | -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | This is some basic Julia code for simple discrete probability table manipulation. 2 | 3 | This code was developed for Julia version 0.5.0 4 | 5 | To run the demos (see the Demos folder): 6 | start julia and then from within julia type: 7 | julia> cd("directory where this README file is") 8 | julia> include("setup.jl") 9 | julia> using brml 10 | julia> demoBurglar() 11 | julia> demoHMMinferenceSimple() 12 | 13 | and similarly for the other demos. 14 | -------------------------------------------------------------------------------- /README.txt~: -------------------------------------------------------------------------------- 1 | This is some basic Julia code for simple discrete probability table 2 | manipulation. 3 | 4 | This code was developed for Julia version 0.5.0 5 | 6 | To run the demos (see the Demos folder): 7 | start julia and then from within julia type: 8 | julia> cd("directory where this README file is") 9 | julia> include("setup.jl") 10 | julia> using brml 11 | julia> demoBurglar() 12 | julia> demoHMMInferenceSimple() 13 | 14 | and similarly for the other demos. 15 | -------------------------------------------------------------------------------- /src/HMM/HMM.jl: -------------------------------------------------------------------------------- 1 | module HMM 2 | importall BrmlFunctions 3 | #@reexport using BrmlFunctions, HMM, GraphPlot, Winston ## CHECK!!! 
4 | 5 | export HMMforward 6 | include("HMMforward.jl") 7 | 8 | export HMMbackward 9 | include("HMMbackward.jl") 10 | 11 | export HMMgamma 12 | include("HMMgamma.jl") 13 | 14 | export HMMsmooth 15 | include("HMMsmooth.jl") 16 | 17 | export HMMviterbi 18 | include("HMMviterbi.jl") 19 | 20 | export HMMem 21 | include("HMMem.jl") 22 | 23 | import PyPlot 24 | export PyPlot 25 | 26 | end 27 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | *.sln merge=union 7 | *.csproj merge=union 8 | *.vbproj merge=union 9 | *.fsproj merge=union 10 | *.dbproj merge=union 11 | 12 | # Standard to msysgit 13 | *.doc diff=astextplain 14 | *.DOC diff=astextplain 15 | *.docx diff=astextplain 16 | *.DOCX diff=astextplain 17 | *.dot diff=astextplain 18 | *.DOT diff=astextplain 19 | *.pdf diff=astextplain 20 | *.PDF diff=astextplain 21 | *.rtf diff=astextplain 22 | *.RTF diff=astextplain 23 | -------------------------------------------------------------------------------- /brml.jl: -------------------------------------------------------------------------------- 1 | module brml 2 | 3 | push!(LOAD_PATH, joinpath(pwd(), "src")) 4 | 5 | # push all subdirectories from src 6 | map(d -> push!(LOAD_PATH, joinpath(pwd(), "src", d)), 7 | filter(d -> isdir(joinpath("src", d)), readdir("src"))) 8 | 9 | push!(LOAD_PATH, joinpath(pwd(), "Demos")) 10 | 11 | println("\nAvailable Demos:\n") 12 | demos=readdir("Demos") 13 | for i=1:length(demos) 14 | try 15 | if demos[i][1:4]=="demo" && demos[i][end-2:end]==".jl" 16 | println(demos[i]) 17 | reload(demos[i]) 18 | end 19 | end 20 | end 21 | 22 | using Reexport 23 | 24 | @reexport using BrmlFunctions, HMM, PotentialInference, GraphPlot 25 | 26 | end 27 | -------------------------------------------------------------------------------- /Reexport.jl: -------------------------------------------------------------------------------- 1 | module Reexport 2 | 3 | macro reexport(ex) 4 | isa(ex, Expr) && (ex.head == :module || 5 | ex.head == :using || 6 | (ex.head == :toplevel && 7 | all(e->isa(e, Expr) && e.head == :using, ex.args))) || 8 | error("@reexport: syntax error") 9 | 10 | if ex.head == :module 11 | modules = {ex.args[2]} 12 | ex = Expr(:toplevel, ex, Expr(:using, :., ex.args[2])) 13 | elseif ex.head == :using 14 | modules = {ex.args[end]} 15 | else 16 | modules = {e.args[end] for e in ex.args} 17 | end 18 | 19 | esc(Expr(:toplevel, ex, 20 | [:(eval(Expr(:export, names($(mod))...))) for mod in modules]...)) 21 | end 22 | 23 | export @reexport 24 | 25 | end # module 26 | -------------------------------------------------------------------------------- /Demos/demoPotLogArray.jl: -------------------------------------------------------------------------------- 1 | function demoPotLogArray() 2 | 3 | 4 | M1=rand(2,2) 5 | M2=rand(2,2) 6 | p1=PotArray([1 2],M1) 7 | p2=PotArray([2 3],M2) 8 | lp1=PotLogArray([1 2],log(M1)) 9 | lp2=PotLogArray([2 3],log(M2)) 10 | lA=FactorGraph([lp1 lp2]) 11 | A=FactorGraph([p1 p2]) 12 | 13 | marg,mess,normconst=sumprod([p1 p2],A) 14 | lmarg,lmess,lnormconst=sumprod([lp1 lp2],lA) 15 | 16 | println("Marginal table computed using Factor Graph running on standard PotArray, versus marginal table computed using Factor Graph running on LogPotArray:\n") 17 | 18 | println("log(PotArray) \t\t PotLogArray") 19 | for 
i=1:length(marg) 20 | println("\nvariable $i :") 21 | println("$([log(marg[i].content) lmarg[i].content])") 22 | end 23 | 24 | end 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /Demos/#demoMaxProd.jl#: -------------------------------------------------------------------------------- 1 | function demoMaxProd() 2 | #DEMOMAXPROD Max-Product algorithm test : 3 | # Variable order is arbitary 4 | variables=1:5 5 | a, b, c, d, e=variables 6 | nstates=round(Int64,3*rand(1,5)+2) # random number of states for each variable 7 | 8 | pot=Array(PotArray,5) 9 | pot[1]=PotArray([a b],rand(nstates[[a b]]...)); 10 | pot[2]=PotArray([b c d],rand(nstates[[b c d]]...)); 11 | pot[3]=PotArray([c],rand(nstates[[c]]...)); 12 | pot[4]=PotArray([e d],rand(nstates[[e d]]...)); 13 | pot[5]=PotArray([d],rand(nstates[[d]]...)); 14 | 15 | A = FactorGraph(pot) 16 | maxstate, maxval, mess=maxprod(pot,A) 17 | 18 | # check if this is correct by brute force max: 19 | jointpot =prod(pot); V=length(pot) 20 | maxval2,maxstate2=max(jointpot,variables,ReturnState=true,Ind2Sub=true) 21 | println("\t\t maxprod \t brute") 22 | for i=1:V 23 | println("Variable $i \t $(maxstate[i]...)\t\t $(maxstate2[i])") 24 | end 25 | 26 | 27 | 28 | 29 | 30 | end 31 | -------------------------------------------------------------------------------- /Demos/demoMaxProd.jl: -------------------------------------------------------------------------------- 1 | function demoMaxProd() 2 | #DEMOMAXPROD Max-Product algorithm test : 3 | # Variable order is arbitary 4 | variables=1:5 5 | a, b, c, d, e=variables 6 | nstates=round(Int64,3*rand(1,5)+2) # random number of states for each variable 7 | 8 | pot=Array(PotArray,5) 9 | pot[1]=PotArray([a b],rand(nstates[[a b]]...)); 10 | pot[2]=PotArray([b c d],rand(nstates[[b c d]]...)); 11 | pot[3]=PotArray([c],rand(nstates[[c]]...)); 12 | pot[4]=PotArray([e d],rand(nstates[[e d]]...)); 13 | pot[5]=PotArray([d],rand(nstates[[d]]...)); 14 | 15 | A = FactorGraph(pot) 16 | maxstate, maxval, mess=maxprod(pot,A) 17 | 18 | # check if this is correct by brute force max: 19 | jointpot =prod(pot); V=length(pot) 20 | maxval2,maxstate2=max(jointpot,variables,ReturnState=true,Ind2Sub=true) 21 | println("\t\t maxprod \t brute") 22 | for i=1:V 23 | println("Variable $i \t $(maxstate[i]...)\t\t $(maxstate2[i])") 24 | end 25 | 26 | 27 | 28 | 29 | 30 | end 31 | -------------------------------------------------------------------------------- /src/HMM/HMMbackward.jl: -------------------------------------------------------------------------------- 1 | function HMMbackward(v,phghm,pvgh) 2 | #%HMMBACKWARD HMM Backward Pass 3 | #% beta=HMMbackward(v,phghm,pvgh) 4 | #% 5 | #% Inputs: 6 | #% v : visible (observation) sequence being a vector v=[2 1 3 3 1 ...] 
7 | #% phghm : homogeneous transition distribution phghm(i,j)=p(h(t)=i|h(t-1)=j) 8 | #% pvgh : homogeneous emission distribution pvgh(i,j)=p(v(t)=i|h(t)=j) 9 | #% 10 | #% Outputs: 11 | #% beta: beta messages: \propto p(v(t+1:T)|h(t)) 12 | # p(v(t:T)|h(t)) = sum_{h(t)} {p(v(t)| h(t)) * p(h(t)|h(t-1)) * p(v(t+1:T)|h(t))} 13 | # beta(t-1) = p(v(t:T)|h(t) 14 | # beta(t) = p(v(t+1:T)|h(t) 15 | #% See also HMMbackward.m, HMMviterbi.m, demoHMMinference.m 16 | 17 | T=length(v); H=size(phghm,1) 18 | 19 | pvghtrans=pvgh' 20 | phghmtrans=phghm' 21 | beta=ones(H,T) 22 | for t=T:-1:2 23 | tmp = phghmtrans * (beta[:,t] .* pvghtrans[:,v[t]]) 24 | beta[:,t-1] = tmp ./ sum(tmp) 25 | end 26 | return beta 27 | end 28 | 29 | -------------------------------------------------------------------------------- /src/HMM/HMMgamma.jl: -------------------------------------------------------------------------------- 1 | function HMMgamma(alpha,phghm) 2 | #HMMGAMMA HMM Posterior smoothing using the Rauch-Tung-Striebel correction method 3 | # gamma=HMMbackward(alpha,phghm) 4 | # 5 | # Inputs: 6 | # alpha : alpha forward messages (see HMMforward.m) 7 | # phghm : transition distribution in a matrix 8 | # 9 | # Outputs: gamma(i,t) is p(h(t)=i|v(1:T)) 10 | # See also HMMbackward.m, HMMviterbi.m, demoHMMinference.m 11 | 12 | T=size(alpha,2); H=size(phghm, 1); 13 | # gamma recursion 14 | gamma=zeros(size(alpha)) 15 | gamma[:,T]=alpha[:,T]; 16 | for t=T-1:-1:1 17 | phghp=condp(phghm'.*repmat(alpha[:,t],1,H)); 18 | gamma[:,t]=condp(phghp*gamma[:,t+1]); 19 | end 20 | 21 | if 1==0 # gamma recursion: More human readable 22 | gamma[:, T]=alpha[:, T]./sum(alpha[:, T]) 23 | for t = T-1:-1:1 24 | phghp=phghm'.*repmat(alpha[:,t],1,H) 25 | phghp=phghp./repmat(sum(phghp, 1),H,1) 26 | gamma[:,t]=phghp*gamma[:,t+1] 27 | end 28 | end 29 | return gamma 30 | end 31 | -------------------------------------------------------------------------------- /Demos/demoSumProd.jl: -------------------------------------------------------------------------------- 1 | function demoSumProd() 2 | #DEMOSUMPROD Sum-Product algorithm test : 3 | # Variable order is arbitrary 4 | variables=1:5 5 | a, b, c, d, e=variables 6 | nstates=round(Int64,3*rand(1,5)+2) # random number of states for each variable 7 | 8 | pot=Array(PotArray,5) 9 | pot[1]=PotArray([a b],rand(nstates[[a b]]...)); 10 | pot[2]=PotArray([b c d],rand(nstates[[b c d]]...)); 11 | pot[3]=PotArray([c],rand(nstates[[c]]...)); 12 | pot[4]=PotArray([e d],rand(nstates[[e d]]...)); 13 | pot[5]=PotArray([d],rand(nstates[[d]]...)); 14 | 15 | A = FactorGraph(pot); 16 | marg, mess, normconst=sumprod(pot,A) 17 | 18 | # check if this is correct: 19 | jointpot =prod(pot); V=length(pot); 20 | for i=1:V 21 | mtable=[condpot(marg[i]).content condpot(jointpot,i).content] 22 | println("marginal of variable $i:\n Factor Graph\t\t Raw summation"); 23 | println(mtable) 24 | end 25 | tmp=sum(prod(pot)) 26 | println("Normalisation constant:\nFactorGraph\t=$(normconst.content)\nRaw summation\t=$(tmp.content)") 27 | 28 | end 29 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 David Barber 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | # ========================= 3 | Thumbs.db 4 | ehthumbs.db 5 | 6 | # Folder config file 7 | # ========================= 8 | Desktop.ini 9 | 10 | # Recycle Bin used on file shares 11 | # =============================== 12 | $RECYCLE.BIN/ 13 | 14 | # Windows Installer files 15 | # ========================= 16 | *.cab 17 | *.msi 18 | *.msm 19 | *.msp 20 | 21 | # ========================= 22 | # Operating System Files 23 | # ========================= 24 | 25 | # OSX 26 | # ========================= 27 | 28 | .DS_Store 29 | .AppleDouble 30 | .LSOverride 31 | 32 | # Icon must end with two \r 33 | # ========================= 34 | Icon 35 | 36 | 37 | # Thumbnails 38 | # ========================= 39 | ._* 40 | 41 | # Files that might appear on external disk 42 | # ========================= 43 | .Spotlight-V100 44 | .Trashes 45 | 46 | # Directories potentially created on remote AFP share 47 | # ========================= 48 | .AppleDB 49 | .AppleDesktop 50 | Network Trash Folder 51 | Temporary Items 52 | .apdisk 53 | 54 | # IPython/Jupyter 55 | # ========================= 56 | notebooks/.ipynb_checkpoints 57 | 58 | # Emacs 59 | # ========================= 60 | *.jl~ 61 | -------------------------------------------------------------------------------- /Demos/demoPlotDAG.jl: -------------------------------------------------------------------------------- 1 | function demoPlotDAG() 2 | # try 3 | # variables: 4 | D=Dict{Integer,VariablePlot}() 5 | D[1]=VariablePlot("1") 6 | D[2]=VariablePlot("2") 7 | D[3]=VariablePlot("3") 8 | D[4]=VariablePlot("4") 9 | # D[5]=VariablePlot("5") 10 | # D[6]=VariablePlot("6") 11 | 12 | # connections: 13 | L=Dict{Integer,Array{Integer}}() 14 | 15 | if false 16 | L[1]=[2,4] 17 | L[2]=[3,5] 18 | L[3]=[6] 19 | end 20 | 21 | L[1]=[2,3] 22 | L[4]=[3] 23 | L[3]=[4] 24 | 25 | 26 | if false 27 | L[1]=[2,5,4] # connection from variable 1 to 2 28 | L[2]=[1,5,3] 29 | L[3]=[2,5,4] 30 | L[4]=[3,5,1] 31 | L[5]=[1,2,3,4] 32 | end 33 | 34 | 35 | 36 | if false 37 | L[1]=[2,3,4] # connection from variable 1 to 2 38 | L[2]=[4] 39 | L[3]=[4] 40 | L[4]=[5] 41 | L[5]=[2,1] 42 | end 43 | 44 | PlotGraph(PlaceVertices(D,L,scale=2),L) 45 | #PlotGraph(PlaceVertices(D,L,scale=2,UseEnergyApproach=true),L,arrowsize=0.02) # rerun this if the vertex placement is not good 46 | # end 47 | end 48 | -------------------------------------------------------------------------------- /src/HMM/HMMforward.jl: 
-------------------------------------------------------------------------------- 1 | function HMMforward(v,phghm,ph1,pvgh;UseLogArray=true) 2 | #HMMFORWARD HMM Forward Pass 3 | # [alpha,loglik]=HMMforward(v,phghm,ph1,pvgh) 4 | # 5 | # Inputs: 6 | # v : visible (observation) sequence being a vector v=[2 1 3 3 1 ...] 7 | # phghm : homogeneous transition distribution phghm(i,j)=p(h(t)=i|h(t-1)=j) 8 | # ph1 : initial distribution 9 | # pvgh : homogeneous emission distribution pvgh(i,j)=p(v(t)=i|h(t)=j) 10 | # 11 | # Outputs: 12 | # alpha : p(h(t)|v(1:t)) 13 | # p(h(t)|v(1:t)) = p(v(t)|h(t)) * sum_{h(t-1)} {p(h(t)|h(t-1)) * p(h(t-1)|v(1:t-1))} 14 | # alpha(t) = p(h(t)|v(1:t)) 15 | # alpha(t-1) = p(h(t-1)|v(1:t-1)) 16 | # loglik : sequence log likelihood log p(v(1:T)) 17 | 18 | T=length(v); H=length(ph1); 19 | 20 | pvghtrans=pvgh' 21 | z=zeros(1,T) # local normalisation factors 22 | alpha=zeros(H,T) 23 | alpha[:,1] = pvghtrans[:,v[1]].*ph1 24 | z[1]=sum(alpha[:,1]) 25 | alpha[:,1]=alpha[:,1]./z[1] 26 | for t=2:T 27 | alpha[:,t]=pvghtrans[:,v[t]].*(phghm*alpha[:,t-1]) 28 | z[t]=sum(alpha[:,t]) 29 | alpha[:,t]=alpha[:,t]./z[t] 30 | end 31 | loglik = sum(log(z)) # log likelihood 32 | 33 | return alpha, loglik 34 | 35 | end 36 | 37 | -------------------------------------------------------------------------------- /Demos/demoChainIndepRational.jl: -------------------------------------------------------------------------------- 1 | function demoChainIndepRational() 2 | 3 | println("In this demo we consider the directed graph A->B->C") 4 | println("The chain is such that A and B are dependent, B and C are dependent, yet A and C are independent.") 5 | println("To show this we will define the table entries using rationals. Julia will then use rational arithmetic for the subsequent calculations.") 6 | 7 | A,B,C=1,2,3 8 | pA=PotArray(A,[3//5, 2//5]) 9 | pBgA=PotArray([B A],[1//4 15//40; 1//12 1//8; 2//3 1//2]) 10 | pCgB=PotArray([C B],[1//3 1//2 15//40; 2//3 1//2 5//8]) 11 | pABC=pCgB*pBgA*pA 12 | pAC=sum(pABC,B) 13 | pA=sum(pAC,C) 14 | pC=sum(pAC,A) 15 | 16 | println("pAC-pA*pC=") 17 | println(pAC-pA*pC) 18 | println("This shows that A and C are independent.\n") 19 | 20 | println("Contrast this with the same calculation using floating point arithmetic:") 21 | 22 | 23 | pA=PotArray(A,[3/5, 2/5]) 24 | pBgA=PotArray([B A],[1/4 15/40; 1/12 1/8; 2/3 1/2]) 25 | pCgB=PotArray([C B],[1/3 1/2 15/40; 2/3 1/2 5/8]) 26 | pABC=pCgB*pBgA*pA 27 | pAC=sum(pABC,B) 28 | pA=sum(pAC,C) 29 | pC=sum(pAC,A) 30 | 31 | println("pAC-pA*pC=") 32 | println(pAC-pA*pC) 33 | println("The loss of numerical accuracy means that we cannot confidently declare A and C are independent, even though they actually are.") 34 | 35 | 36 | end 37 | -------------------------------------------------------------------------------- /Demos/#demoChainIndepRational.jl#: -------------------------------------------------------------------------------- 1 | function demoChainIndepRational() 2 | 3 | println("In this demo we consider the directed graph A->B->C") 4 | println("The chain is such that A and B are dependent, B and C are dependent, yet A and C are independent.") 5 | println("To show this we will define the table entries using rationals. 
Julia will then use rational arithmetic for the subsequent calculations.") 6 | 7 | A,B,C=1,2,3 8 | pA=PotArray(A,[3//5, 2//5]) 9 | pBgA=PotArray([B A],[1//4 15//40; 1//12 1//8; 2//3 1//2]) 10 | pCgB=PotArray([C B],[1//3 1//2 15//40; 2//3 1//2 5//8]) 11 | pABC=pCgB*pBgA*pA 12 | pAC=sum(pABC,B) 13 | pA=sum(pAC,C) 14 | pC=sum(pAC,A) 15 | 16 | println("pAC-pA*pC=") 17 | println(pAC-pA*pC) 18 | println("This shows that A and C are independent.\n") 19 | 20 | println("Contrast this with the same calculation using floating point arithmetic:") 21 | 22 | 23 | pA=PotArray(A,[3/5, 2/5]) 24 | pBgA=PotArray([B A],[1/4 15/40; 1/12 1/8; 2/3 1/2]) 25 | pCgB=PotArray([C B],[1/3 1/2 15/40; 2/3 1/2 5/8]) 26 | pABC=pCgB*pBgA*pA 27 | pAC=sum(pABC,B) 28 | pA=sum(pAC,C) 29 | pC=sum(pAC,A) 30 | 31 | println("pAC-pA*pC=") 32 | println(pAC-pA*pC) 33 | println("The loss of numerical accuracy means that we cannot confidently declare A and C are independent, even though they actually are.") 34 | 35 | 36 | end 37 | -------------------------------------------------------------------------------- /src/HMM/HMMviterbi.jl: -------------------------------------------------------------------------------- 1 | function HMMviterbi(v,phghm,ph1,pvgh) 2 | #HMMVITERBI Viterbi most likely joint hidden state of a HMM 3 | # [maxstate logprob]=HMMviterbi(v,phghm,ph1,pvgh) 4 | # 5 | # Inputs: 6 | # v : visible (obervation) sequence being a vector v=[2 1 3 3 1 ...] 7 | # phghm : homogeneous transition distribution phghm(i,j)=p(h(t)=i|h(t-1)=j) 8 | # ph1 : initial distribution 9 | # pvgh : time-independent emission distribution pvgh(i,j)=p(v(t)=i|h(t)=j) 10 | # 11 | # Outputs: 12 | # maxstate : most likely joint hidden (latent) state sequence 13 | # logprob : associated log probability of the most likely hidden sequence 14 | 15 | T=length(v); H=size(phghm,1) 16 | mu=zeros(H,T) 17 | mu[:,T]=ones(H,1) 18 | hs=zeros(Int64,1,T) 19 | pvghtrans=pvgh' 20 | for t=T:-1:2 21 | tmp = repmat(pvghtrans[:,v[t]].*mu[:,t],1,H).*phghm 22 | tmp = maximum(tmp,1)' 23 | mu[:,t-1]= tmp./sum(tmp) # normalise to avoid underflow 24 | end 25 | 26 | # backtrack: 27 | val, hs[1]=findmax(ph1.*pvghtrans[:,v[1]].*mu[:,1]) 28 | for t=2:T 29 | tmp = pvghtrans[:,v[t]].*phghm[:,hs[t-1]] 30 | val, hs[t]=findmax(tmp.*mu[:,t]) 31 | end 32 | 33 | logprob=log(ph1[hs[1]])+log(pvgh[v[1],hs[1]]) 34 | for t=2:T 35 | logprob=logprob+log(phghm[hs[t],hs[t-1]])+log(pvgh[v[t],hs[t]]) 36 | end 37 | return hs, logprob 38 | end 39 | -------------------------------------------------------------------------------- /src/GraphPlot/make_layout.jl: -------------------------------------------------------------------------------- 1 | function make_layout(adj) 2 | #function [x, y] = make_layout(adj) 3 | #% MAKE_LAYOUT Creates a layout from an adjacency matrix 4 | #% 5 | #% [X, Y] = MAKE_LAYOUT(ADJ) 6 | #% 7 | #% Inputs : 8 | #% ADJ = adjacency matrix (source, sink) 9 | #% 10 | #% Outputs : 11 | #% X, Y : Positions of nodes 12 | #% 13 | #% Usage Example : [X, Y] = make_layout(adj); 14 | #% 15 | #% 16 | #% Note : Uses some very simple heuristics, so any other 17 | #% algorithm would create a nicer layout 18 | #% 19 | #% See also 20 | # 21 | # % Uses :# 22 | # 23 | #% Change History : 24 | #% Date Time Prog Note 25 | #% 13-Apr-2000 8:25 PM ATC Created under MATLAB 5.3.1.29215a (R11.1) 26 | # 27 | #% ATC = Ali Taylan Cemgil, 28 | #% SNN - University of Nijmegen, Department of Medical Physics and Biophysics 29 | #% e-mail : cemgil@mbfys.kun.nl 30 | 31 | N = size(adj,1) 32 | tps = toposort(adj) 33 | 34 | 
if !isempty(tps) # is directed ? 35 | level = zeros(1,N) 36 | for i=tps 37 | idx = find(adj[:,i]) 38 | if !isempty(idx) 39 | l = maximum(level[idx]) 40 | level[i]=l+1 41 | end 42 | end 43 | else 44 | level = poset(adj,1)-1 45 | end 46 | 47 | y = (level+1)./(maximum(level)+2) 48 | y = 1-y 49 | x = zeros(size(y)) 50 | for i=0:maximum(level) 51 | idx = find(level.==i); 52 | offset = (rem(i,2)-0.5)/10 53 | x[idx] = (1:length(idx))./(length(idx)+1)+offset 54 | end 55 | return x,y 56 | 57 | end 58 | -------------------------------------------------------------------------------- /src/GraphPlot/poset.jl: -------------------------------------------------------------------------------- 1 | function poset(adj, root) 2 | #function [depth] = poset(adj, root) 3 | #% POSET Identify a partial ordering among the nodes of a graph 4 | #% 5 | #% [DEPTH] = POSET(ADJ,ROOT) 6 | #% 7 | #% Inputs : 8 | #% ADJ : Adjacency Matrix 9 | #% ROOT : Node to start with 10 | #% 11 | #% Outputs : 12 | #% DEPTH : Depth of the Node 13 | #% 14 | #% Usage Example : [depth] = poset(adj,12); 15 | #% 16 | #% 17 | #% Note : All Nodes must be connected 18 | #% See also # 19 | # 20 | #% Uses : 21 | # 22 | #% Change History : 23 | #% Date Time Prog Note 24 | #% 17-Jun-1998 12:01 PM ATC Created under MATLAB 5.1.0.421 25 | 26 | #% ATC = Ali Taylan Cemgil, 27 | #% SNN - University of Nijmegen, Department of Medical Physics and Biophysics 28 | #% e-mail : cemgil@mbfys.kun.nl 29 | 30 | adj = adj+adj' 31 | 32 | N = size(adj,1) 33 | depth = zeros(N,1) 34 | depth[root] = 1 35 | queue=[] 36 | queue = vec(vcat(queue,root)) 37 | 38 | while true 39 | if isempty(queue) 40 | if all(depth.>0) 41 | break 42 | else 43 | root = find(depth.==0) 44 | root = root[1] 45 | depth[root] = 1 46 | queue=[] 47 | queue = vec(vcat(queue,root)) 48 | end 49 | end 50 | r = queue[1] 51 | queue=vec(queue) 52 | shift!(queue) 53 | idx = find(adj[r,:]) 54 | idx2 = find(depth[idx].==1); 55 | idx = idx[idx2] 56 | queue = vec(vcat(queue, idx)) 57 | depth[idx] = depth[r]+1 58 | end 59 | return depth 60 | end 61 | 62 | -------------------------------------------------------------------------------- /src/HMM/HMMbackward_log.jl: -------------------------------------------------------------------------------- 1 | function HMMbackward(v,phghm,pvgh;UseLogArray=true) 2 | #%HMMBACKWARD HMM Backward Pass 3 | #% logbeta=HMMbackward(v,phghm,pvgh) 4 | #% 5 | #% Inputs: 6 | #% v : visible (observation) sequence being a vector v=[2 1 3 3 1 ...] 
7 | #% phghm : homogeneous transition distribution phghm(i,j)=p(h(t)=i|h(t-1)=j) 8 | #% pvgh : homogeneous emission disrtribution pvgh(i,j)=p(v(t)=i|h(t)=j) 9 | #% 10 | #% Outputs: 11 | #% logbeta: log beta messages: log p(v(t+1:T)|h(t)) 12 | # p(v(t:T)|h(t)) = sum_{h(t)} {p(v(t)| h(t)) * p(h(t)|h(t-1)) * p(v(t+1:T)|h(t))} 13 | # beta(t-1) = p(v(t:T)|h(t) 14 | # beta(t) = p(v(t+1:T)|h(t) 15 | #% See also HMMbackward.m, HMMviterbi.m, demoHMMinference.m 16 | 17 | T=length(v); H=size(phghm,1) 18 | 19 | if !UseLogArray # beta recursion (not recommended due to numerical underflow) 20 | beta=zeros(H,T) 21 | beta[:,T]=ones(H,1) 22 | for t=T:-1:2 23 | beta[:,t-1]=phghm'*(beta[:,t].*pvgh[v[t],:]') 24 | end 25 | return log(beta) 26 | end 27 | 28 | logpvgh=LogArray(log(pvgh)) 29 | logphghm=LogArray(log(phghm)) 30 | logbetaoutput=zeros(H,T) 31 | logbeta = LogArray(zeros(H,1)) 32 | for t=2:T 33 | logbeta=logphghm.'*(logbeta.*logpvgh[v[t],:].') 34 | logbetaoutput[:,t]=(logbeta).content 35 | end 36 | 37 | return logbetaoutput 38 | 39 | end 40 | 41 | -------------------------------------------------------------------------------- /Demos/demoSumProdStandardiseVariables.jl: -------------------------------------------------------------------------------- 1 | function demoSumProdStandardiseVariables() 2 | #DEMOSUMPROD Sum-Product algorithm test : 3 | # Variable order is arbitary 4 | r=randperm(10) 5 | variables=r[1:5] # In this code, the variables do not need to be numbered 1:end 6 | a, b, c, d, e=variables 7 | nstates=round(Int64,3*rand(1,5)+2) # random number of states for each variable 8 | 9 | pot=Array(PotArray,5) 10 | pot[1]=PotArray([a b],rand(nstates[memberinds([a b],variables)]...)) 11 | pot[2]=PotArray([b c d],rand(nstates[memberinds([b c d],variables)]...)); 12 | pot[3]=PotArray([c],rand(nstates[memberinds([c],variables)]...)); 13 | pot[4]=PotArray([e d],rand(nstates[memberinds([e d],variables)]...)); 14 | pot[5]=PotArray([d],rand(nstates[memberinds([d],variables)]...)); 15 | 16 | newpot,originalvariables=standardisevariables(pot) # translate the variables into 1:end form 17 | A = FactorGraph(newpot); 18 | newmarg, mess, normconst=sumprod(newpot,A) 19 | 20 | marg=returnvariables(newmarg,originalvariables) # translate back to orginal variables 21 | # check if this is correct: 22 | jointpot =prod(pot); V=length(pot); 23 | for i=variables 24 | mtable=[condpot(marg[whichpot(marg,i)]).content condpot(jointpot,i).content] 25 | println("marginal of variable $i:\n Factor Graph\t\t Raw summation"); 26 | println(mtable) 27 | end 28 | tmp=sum(prod(pot)) 29 | println("Normalisation constant:\nFactorGraph\t=$(normconst.content)\nRaw summation\t=$(tmp.content)") 30 | 31 | end 32 | -------------------------------------------------------------------------------- /src/GraphPlot/toposort.jl: -------------------------------------------------------------------------------- 1 | function toposort(adj) 2 | #function [seq] = toposort(adj) 3 | #% TOPOSORT A Topological ordering of nodes in a directed graph 4 | #% 5 | #% [SEQ] = TOPOSORT(ADJ) 6 | #% 7 | #% Inputs : 8 | #% ADJ : Adjacency Matrix. 9 | #% ADJ(i,j)==1 ==> there exists a directed edge 10 | #% from i to j 11 | #% 12 | #% Outputs : 13 | #% SEQ : A topological ordered sequence of nodes. 14 | #% empty matrix if graph contains cycles. 
15 | #% 16 | #% Usage Example : 17 | #% N=5; 18 | #% [l,u] = lu(rand(N)); 19 | #% adj = ~diag(ones(1,N)) & u>0.5; 20 | #% seq = toposort(adj); 21 | #% 22 | #% 23 | #% Note : 24 | #% See also 25 | ## 26 | #% Uses : 27 | # 28 | #% Change History : 29 | #% Date Time Prog Note 30 | #% 18-May-1998 4:44 PM ATC Created under MATLAB 5.1.0.421 31 | # 32 | #% ATC = Ali Taylan Cemgil, 33 | #% SNN - University of Nijmegen, Department of Medical Physics and Biophysics 34 | #% e-mail : cemgil@mbfys.kun.nl 35 | 36 | N = size(adj,1) 37 | indeg = sum(adj,1) 38 | outdeg = sum(adj,2) 39 | seq = [] 40 | 41 | for i=1:N, 42 | # Find nodes with indegree 0 43 | idx = find(indeg.==0) 44 | # If can't find than graph contains a cycle 45 | if isempty(idx) 46 | seq = []; 47 | break 48 | end 49 | # Remove the node with the max number of connections 50 | idx2 = indmax(outdeg[idx]) 51 | indx = idx[idx2] 52 | seq = vcat(seq, indx) 53 | indeg[indx]=-1 54 | idx = find(adj[indx,:]) 55 | indeg[idx] = indeg[idx]-1 56 | end 57 | return seq 58 | end 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /src/PotentialInference/Potentials/PotConst.jl: -------------------------------------------------------------------------------- 1 | function *(A::Const,B::PotLogArray) 2 | L=PotLogArray(B.variables,log(A.content)+B.content) 3 | return L 4 | end 5 | 6 | # Not sure I understand this but Julia automatically exports *,+,-,/ 7 | #function *(A::Const,B::Const) 8 | # C=Const(A.content*B.content) 9 | #end 10 | # must be a smarter way to do the following using macros:! 11 | function *(A::Const,B::Const) 12 | return Const(A.content*B.content) 13 | end 14 | function /(A::Const,B::Const) 15 | return Const(A.content/B.content) 16 | end 17 | function +(A::Const,B::Const) 18 | return Const(A.content+B.content) 19 | end 20 | function -(A::Const,B::Const) 21 | return Const(A.content-B.content) 22 | end 23 | function *(P::PotArray,C::Const) 24 | p = deepcopy(P); p.content=C.content*p.content 25 | return p 26 | end 27 | 28 | 29 | 30 | function *(C::Const,P::PotArray) 31 | p = deepcopy(P); p.content=C.content*p.content 32 | return p 33 | end 34 | function /(P::PotArray,C::Const) 35 | p = deepcopy(P); p.content=p.content/C.content 36 | return p 37 | end 38 | function +(P::PotArray,C::Const) 39 | p = deepcopy(P); p.content=C.content+p.content 40 | return p 41 | end 42 | function +(C::Const,P::PotArray) 43 | p = deepcopy(P); p.content=C.content+p.content 44 | return p 45 | end 46 | function -(P::PotArray,C::Const) 47 | p = deepcopy(P); p.content=C.content-p.content 48 | return p 49 | end 50 | function -(C::Const,P::PotArray) 51 | p = deepcopy(P); p.content=C.content-p.content 52 | return p 53 | end 54 | 55 | 56 | function convert(::Type{PotArray},c::Const) 57 | return PotArray([],c.content) 58 | end 59 | 60 | -------------------------------------------------------------------------------- /src/HMM/HMMsmooth.jl: -------------------------------------------------------------------------------- 1 | function HMMsmooth(v,phghm,ph1,pvgh,alpha=[],beta=[];ReturnPairwiseMarginals=false,ReturnLogLikelihood=false) 2 | #HMMSMOOTH Smoothing for a Hidden Markov Model (HMM) 3 | # [phtgV1T,phthtpgV1T]=HMMsmooth(v,phghm,ph1,pvgh) 4 | # Return the HMM pointwise p(h(t)|v(1:T)) and pairwise posterior p(h(t),h(t+1)|#v(1:T)). 
5 | # 6 | # Inputs: 7 | # v : visible (observation) sequence 8 | # phghm : transition distribution 9 | # ph1 : initial distribution 10 | # pvgh : emission distribution p(v|h) 11 | # 12 | # Outputs: 13 | # phtgV1T : smoothed posterior p(h(t)|v(1:T)) 14 | # phthtpgV1T : smoothed pair p(h(t),h(t+1)|v(1:T)) 15 | T=length(v); H=size(phghm,1); 16 | 17 | if isempty(alpha) 18 | alpha,loglik=HMMforward(v,phghm,ph1,pvgh) 19 | end 20 | if isempty(beta) 21 | beta=HMMbackward(v,phghm,pvgh) 22 | end 23 | 24 | # smoothed posteriors: pointwise marginals: 25 | phtgV1T=zeros(H,T) 26 | for t=1:T 27 | tmp=alpha[:,t].*beta[:,t] 28 | phtgV1T[:,t]=tmp./sum(tmp) 29 | end 30 | 31 | if ReturnPairwiseMarginals 32 | # smoothed posteriors: pairwise marginals p(h(t),h(t+1)|v(1:T)): 33 | phthtpgV1T=zeros(H,H,T-1) 34 | for t=1:T-1 35 | atmp=alpha[:,t] 36 | btmp=beta[:,t+1] 37 | #ctmp = repmat(atmp,1,H).*phghm'.*repmat(pvgh[v[t+1],:].*btmp',H,1) 38 | ctmp = repmat(atmp,1,H).*phghm'.*repmat(pvgh[v[t+1],:]'.*btmp',H,1) 39 | phthtpgV1T[:,:,t]=ctmp./sum(ctmp) 40 | end 41 | if ReturnLogLikelihood 42 | return phtgV1T, phthtpgV1T, loglik 43 | end 44 | return phtgV1T, phthtpgV1T 45 | end 46 | 47 | if ReturnLogLikelihood 48 | return phtgV1T,loglik 49 | end 50 | return phtgV1T 51 | end 52 | -------------------------------------------------------------------------------- /Demos/demoBurglarSimple.jl: -------------------------------------------------------------------------------- 1 | function demoBurglarSimple() 2 | # This demo is the same as demoBurglar.jl, except that here we do not make use of the available potential indexing methods and also a potential dictionary (as in demoBurglarDictPot). 3 | 4 | burglar,earthquake,alarm,radio=1,2,3,4 # variables must start from 1 and be contiguous 5 | yes, no= 1,2 # define states, starting from 1. These do need to be contiguous, starting from 1, since currently the probability tables are defined by arrays of contiguous states. 6 | 7 | # This defines the meaning of the variables and their states. 
8 | # It is not necessary for the computation, but is useful for 9 | # printing the results in a meaningful form: 10 | DictVariable=Dict{Integer,DiscreteVariable}() 11 | DictVariable[burglar]=DiscreteVariable("burglar",["yes","no"]) 12 | DictVariable[earthquake]=DiscreteVariable("earthquake",["yes","no"]) 13 | DictVariable[alarm]=DiscreteVariable("alarm",["yes","no"]) 14 | DictVariable[radio]=DiscreteVariable("radio",["yes","no"]) 15 | 16 | 17 | # Define the discrete probability tables: 18 | PotBurglar=PotArray(burglar,[0.01 0.99]) 19 | PotEarthquake=PotArray(earthquake,[0.000001 1-0.000001]) 20 | 21 | table=zeros(2,2) 22 | table[yes,yes]=1 23 | table[no,yes]=0 24 | table[yes,no]=0 25 | table[no,no]=1 26 | PotRadio=PotArray([radio earthquake],table) 27 | 28 | table=zeros(2,2,2) 29 | table[yes,yes,yes]=0.9999 30 | table[yes,yes,no]=0.99 31 | table[yes,no,yes]=0.99 32 | table[yes,no,no]=0.0001 33 | table[no,:,:]=1-table[yes,:,:] 34 | PotAlarm=PotArray([alarm burglar earthquake],table) 35 | 36 | jointpot=prod([PotBurglar PotRadio PotAlarm PotEarthquake]) 37 | 38 | println("p(burglar|alarm=yes):") 39 | show(condpot(setpot(jointpot,alarm,yes),burglar),DictVariable) 40 | 41 | println("p(burglar|alarm=yes,radio=yes):") 42 | show(condpot(setpot(jointpot,[alarm radio],[yes yes]),burglar),DictVariable) 43 | 44 | end 45 | -------------------------------------------------------------------------------- /src/PotentialInference/Potentials/ArrayPotential.jl: -------------------------------------------------------------------------------- 1 | 2 | import Base.max 3 | function max(pot::ArrayPotential,variables;MaxOver=true,ReturnState=false,Ind2Sub=false) 4 | #MAX Maximise a multi-dimensional array over a set of dimenions 5 | # maxval=max(x,variables) 6 | # find the values and states that maximize the multidimensional array x 7 | # over the dimensions in maxover 8 | # 9 | if !MaxOver 10 | variables = setdiff(pot.variables,variables) 11 | end 12 | 13 | newvars=setdiff(pot.variables,variables) 14 | max_variables=memberinds(variables,pot.variables) 15 | 16 | maxval,maxind=findmax(pot.content,max_variables) ## PROBABLY SHOULD CHANGE TO THE MAX FUNCTION -- NEED TO ENSURE THIS CAN WORK FOR ANY POTENTIAL 17 | 18 | #maxval=squeeze(maxval,find(memberinds(size(maxval),1))) 19 | maxval=squeeze(maxval,tuple(find(memberinds(size(maxval),1))...)) 20 | 21 | outpot=deepcopy(pot) 22 | outpot.variables=newvars; outpot.content=maxval 23 | 24 | if !ReturnState 25 | return outpot 26 | end 27 | 28 | if !Ind2Sub 29 | return outpot, maxind 30 | end 31 | 32 | s=ind2sub(size(pot.content),maxind[:]) # return in more readable form 33 | maxstate=zeros(Int64,length(s[1]),length(s)) 34 | for i=1:length(pot.variables) 35 | maxstate[:,i]=s[i] 36 | end 37 | 38 | return outpot, maxstate 39 | 40 | 41 | 42 | end 43 | 44 | ################################################################## 45 | # PotArray type functions: 46 | 47 | export numstates 48 | function numstates{T<:ArrayPotential}(A::T) 49 | 50 | if isavector(A.content) 51 | n=prod(size(A.content)); 52 | else 53 | n=size(A.content); 54 | end 55 | return n 56 | end 57 | 58 | 59 | 60 | export standardise 61 | function standardise{T<:ArrayPotential}(A::T) 62 | # If the ArrayPotential is a vector, make this a column vector, otherwise leave unchanged 63 | if isavector(A.content) 64 | A.content=vec(A.content) 65 | end 66 | return A 67 | end 68 | 69 | -------------------------------------------------------------------------------- /Demos/demoBurglarDictPot.jl: 
-------------------------------------------------------------------------------- 1 | function demoBurglarDictPot() 2 | # This demo is the same as demoBurglar.jl, except that here we use the ability to define non-contiguously indexed potentials and variables by demonstrating the use of the Dictionary of PotArray technique. 3 | 4 | burglar,earthquake,alarm,radio=10,2,30,5 # Variable order and indexing is arbitrary (thanks to using DictPot) and doesn't need to be contiguous 5 | yes, no= 1,2 # define states, starting from 1. These do need to be contiguous, starting from 1, since currently the probability tables are defined by arrays of contiguous states. 6 | 7 | # This defines the meaning of the variables and their states. 8 | # It is not necessary for the computation, but is useful for 9 | # printing the results in a meaningful form: 10 | DictVariable=Dict{Int,DiscreteVariable}() 11 | DictVariable[burglar]=DiscreteVariable("burglar",["yes","no"]) 12 | DictVariable[earthquake]=DiscreteVariable("earthquake",["yes","no"]) 13 | DictVariable[alarm]=DiscreteVariable("alarm",["yes","no"]) 14 | DictVariable[radio]=DiscreteVariable("radio",["yes","no"]) 15 | 16 | # Define the discrete probability tables: 17 | DictPot=Dict{Integer,PotArray}() 18 | DictPot[burglar]=PotArray(burglar,[0.01 0.99]) 19 | DictPot[earthquake]=PotArray(earthquake,[0.000001 1-0.000001]) 20 | 21 | table=zeros(2,2) 22 | table[yes,yes]=1 23 | table[no,yes]=0 24 | table[yes,no]=0 25 | table[no,no]=1 26 | DictPot[radio]=PotArray([radio earthquake],table) 27 | 28 | table=zeros(2,2,2) 29 | table[yes,yes,yes]=0.9999 30 | table[yes,yes,no]=0.99 31 | table[yes,no,yes]=0.99 32 | table[yes,no,no]=0.0001 33 | table[no,:,:]=1-table[yes,:,:] 34 | DictPot[alarm]=PotArray([alarm burglar earthquake],table) 35 | 36 | jointpot=prod(DictPot) 37 | 38 | println("p(burglar|alarm=yes):") 39 | show(condpot(setpot(jointpot,alarm,yes),burglar),DictVariable) 40 | 41 | println("p(burglar|alarm=yes,radio=yes):") 42 | show(condpot(setpot(jointpot,[alarm radio],[yes yes]),burglar),DictVariable) 43 | 44 | end 45 | -------------------------------------------------------------------------------- /src/HMM/HMMforward_log.jl: -------------------------------------------------------------------------------- 1 | function HMMforward(v,phghm,ph1,pvgh;UseLogArray=true) 2 | #function [logalpha,loglik]=HMMforward(v,phghm,ph1,pvgh) 3 | #%HMMFORWARD HMM Forward Pass 4 | #% [alpha,loglik]=HMMforward(v,phghm,ph1,pvgh) 5 | #% 6 | #% Inputs: 7 | #% v : visible (observation) sequence being a vector v=[2 1 3 3 1 ...] 
8 | #% phghm : homogeneous transition distribution phghm(i,j)=p(h(t)=i|h(t-1)=j) 9 | #% ph1 : initial distribution 10 | #% pvgh : homogeneous emission distribution pvgh(i,j)=p(v(t)=i|h(t)=j) 11 | #% 12 | #% Outputs: 13 | #% logalpha : log alpha messages: log p(h(t),v(1:t)) 14 | #% loglik : sequence log likelihood log p(v(1:T)) 15 | #% See also HMMbackward.m, HMMviterbi.m, HMMsmooth.m, demoHMMinference.m 16 | 17 | T=length(v); H=length(ph1); 18 | 19 | if !UseLogArray # alpha recursion (not recommended due to numerical underflow) 20 | z=zeros(1,T) 21 | alpha=zeros(H,T) 22 | alpha[:,1] = pvgh[v[1],:]'.*ph1 23 | z[1]=sum(alpha[:,1]) 24 | alpha[:,1]=condp(alpha[:,1]) 25 | for t=2:T 26 | alpha[:,t]=pvgh[v[t],:]'.*(phghm*alpha[:,t-1]) 27 | z[t]=sum(alpha[:,t]) 28 | alpha[:,t]=condp(alpha[:,t]) 29 | end 30 | loglik = sum(log(z)) # log likelihood 31 | end 32 | 33 | if UseLogArray 34 | logpvgh=LogArray(log(pvgh)) 35 | logphghm=LogArray(log(phghm)) 36 | logph1=LogArray(log(ph1)) 37 | logalpha=Dict{Integer,LogArray}() 38 | logalphaoutput=zeros(H,T) 39 | logalpha[1] = logpvgh[v[1],:].'.*logph1 40 | logalphaoutput[:,1]=(logalpha[1]).content 41 | for t=2:T 42 | logalpha[t]=logpvgh[v[t],:].'.*(logphghm*logalpha[t-1]) 43 | #if any(isnan(logalpha[t].content)) 44 | # return t,v,logpvgh,logphghm,logalpha 45 | #end 46 | 47 | logalphaoutput[:,t]=(logalpha[t]).content 48 | end 49 | loglik = (sum(logalpha[T])).content # log likelihood 50 | end 51 | 52 | return logalphaoutput, loglik 53 | 54 | end 55 | 56 | -------------------------------------------------------------------------------- /src/LogArrayFunctions.jl: -------------------------------------------------------------------------------- 1 | module LogArrayFunctions 2 | 3 | export LogArray 4 | type LogArray <: AbstractArray 5 | content 6 | function LogArray(content) 7 | new(content) 8 | end 9 | end 10 | 11 | import Base.display 12 | function display(L::LogArray) 13 | return display(L.content) 14 | end 15 | 16 | import Base.size 17 | function size(A::LogArray,I::Int64) 18 | return size(A.content,I) 19 | end 20 | 21 | function size(A::LogArray) 22 | return size(A.content) 23 | end 24 | 25 | import Base.permutedims 26 | function permutedims(A::LogArray,I::Array{Int64,1}) 27 | return permutedims(A.content,I) 28 | end 29 | 30 | function *(A::LogArray,B::LogArray) 31 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 32 | AA=exp(A.content-logprefactor) 33 | BB=exp(B.content-logprefactor) 34 | L=LogArray(1); 35 | L.content=2*logprefactor+log(AA*BB) 36 | return L 37 | end 38 | 39 | function +(A::LogArray,B::LogArray) 40 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 41 | AA=exp(A.content-logprefactor) 42 | BB=exp(B.content-logprefactor) 43 | L=LogArray(1); 44 | L.content=logprefactor+log(AA+BB) 45 | return L 46 | end 47 | 48 | import Base.sum 49 | function sum(A::LogArray,I...) 50 | logprefactor=maximum(A.content[:]) 51 | AA=exp(A.content-logprefactor) 52 | L=LogArray(1) 53 | L.content=logprefactor+log(sum(AA,I...)) 54 | return L 55 | end 56 | 57 | 58 | 59 | import Base.ndims 60 | function ndims(A::LogArray) 61 | return ndims(A.content) 62 | end 63 | 64 | import Base.getindex 65 | function getindex(A::LogArray, I...) 66 | tmp=getindex(A.content,I...) 
67 | return LogArray(tmp) 68 | end 69 | 70 | 71 | import Base.transpose 72 | function transpose(A::LogArray) 73 | return LogArray((A.content)') 74 | end 75 | 76 | function .*(A::LogArray,B::LogArray) 77 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 78 | AA=exp(A.content-logprefactor) 79 | BB=exp(B.content-logprefactor) 80 | L=LogArray(1); 81 | L.content=2*logprefactor+log(AA.*BB) 82 | return L 83 | end 84 | 85 | end #end module 86 | -------------------------------------------------------------------------------- /Demos/demoHMMinferenceSimple.jl: -------------------------------------------------------------------------------- 1 | function demoHMMinferenceSimple() 2 | 3 | V = 5 # number of visible states 4 | H = 10 # number of hidden states 5 | T = 20 # number of timesteps 6 | 7 | h = zeros(Integer,1,T) # holds the state value for the hidden variable at a specific timestep 8 | v = zeros(Integer,1,T) # holds the state value for the visible variable at a specific timestep 9 | 10 | ph1=condp(rand(H,1)) # probabilities for the states of the hidden variable at timestep 1 11 | 12 | phghm=condp(eye(H,H)) # transition matrix with sum(phghm, 1) = 1 with phghm[i, j] = p(hg=i | hm=j) 13 | 14 | # shuffle the column data in phghm while keeping sum(phghm, 1) = 1 15 | # below no 2 columns have value 1.0 on the same row 16 | phghmtmp=condp(eye(H,H)) 17 | r=randperm(H) 18 | for i=1:H 19 | phghm[:,i]=phghmtmp[:,r[i]] 20 | end 21 | 22 | pvgh=condp((rand(V,H))) # emision matrix with sum(pvgh, 1) = 1 with pvgh[i, j] = p(vg = i | h = j) 23 | 24 | h[1]=randgen(ph1) # initialize the hidden variable h(t=1) with a random state based on ph1 distribution 25 | v[1]=randgen(pvgh[:,h[1]]) # initialize the visible variable v(t=1) with a random state based on pvgh(vg | h) 26 | 27 | for t=2:T 28 | h[t]=randgen(phghm[:,h[t-1]]) # set the hidden variable state h(t) based on h(t-1) using the transition matrix 29 | v[t]=randgen(pvgh[:,h[t]]) # set the visible variable state v(t) based on h(t) using the emission matrix 30 | end 31 | 32 | alpha,loglik=HMMforward(v,phghm,ph1,pvgh) # filtering 33 | 34 | gamma = HMMsmooth(v,phghm,ph1,pvgh,alpha) # smoothing 35 | 36 | maxstate,logprob = HMMviterbi(v,phghm,ph1,pvgh) 37 | 38 | println("Inference log likelihood = $loglik\n") 39 | println("most likely path (viterbi):\n") 40 | println(maxstate) 41 | println("original path (hidden states):") 42 | println(h) 43 | println("original path (visible states):") 44 | println(v) 45 | 46 | PyPlot.figure() 47 | ax = PyPlot.subplot(2, 1, 1) 48 | ax[:set_title]("filtering") 49 | PyPlot.pcolor(alpha) 50 | ax = PyPlot.subplot(2, 1, 2) 51 | ax[:set_title]("smoothing") 52 | PyPlot.pcolor(gamma) 53 | 54 | end 55 | -------------------------------------------------------------------------------- /Demos/demoBurglar.jl: -------------------------------------------------------------------------------- 1 | function demoBurglar() 2 | 3 | burglar,earthquake,alarm,radio=1,2,3,4 # Variable order is arbitary 4 | yes,no= 1,2 # define states, starting from 1 5 | 6 | # This defines the meaning of the variables and their states. 
7 | # It is not necessary for the computation, but is useful for 8 | # printing the results in a meaningful form: 9 | DictVariable=Dict{Integer,DiscreteVariable}() 10 | DictVariable[burglar]=DiscreteVariable("burglar",["yes","no"]) 11 | DictVariable[earthquake]=DiscreteVariable("earthquake",["yes","no"]) 12 | DictVariable[alarm]=DiscreteVariable("alarm",["yes","no"]) 13 | DictVariable[radio]=DiscreteVariable("radio",["yes","no"]) 14 | 15 | # Define the discrete probability tables: 16 | pot=Array(PotArray,4) # there are 4 discrete potentials (arrays) 17 | pot[burglar]=PotArray(burglar,[0.01 0.99]) 18 | pot[earthquake]=PotArray(earthquake,[0.000001 1-0.000001]) 19 | 20 | table=zeros(2,2) 21 | table[yes,yes]=1 22 | table[no,yes]=0 23 | table[yes,no]=0 24 | table[no,no]=1 25 | pot[radio]=PotArray([radio earthquake],table) 26 | 27 | table=zeros(2,2,2) 28 | table[yes,yes,yes]=0.9999 29 | table[yes,yes,no]=0.99 30 | table[yes,no,yes]=0.99 31 | table[yes,no,no]=0.0001 32 | table[no,:,:]=1-table[yes,:,:] 33 | pot[alarm]=PotArray([alarm burglar earthquake],table) 34 | 35 | jointpot=prod(pot) 36 | 37 | println("p(burglar|alarm=yes):") 38 | show(condpot(setpot(jointpot,alarm,yes),burglar),DictVariable) 39 | 40 | println("p(burglar|alarm=yes,radio=yes):") 41 | show(condpot(setpot(jointpot,[alarm radio],[yes yes]),burglar),DictVariable) 42 | 43 | L=dag(pot) 44 | #try 45 | # This is just for plotting the DAG: 46 | DictPlot=Dict{Integer,VariablePlot}() 47 | DictPlot[burglar]=VariablePlot("Burglar",x=0,y=1,nodesize=0.3) 48 | DictPlot[earthquake]=VariablePlot("Earthquake",x=3,y=1,nodesize=0.35) 49 | DictPlot[alarm]=VariablePlot("Alarm",x=0,y=0,nodesize=0.2) 50 | DictPlot[radio]=VariablePlot("Radio",x=3,y=0,nodesize=0.2) 51 | 52 | PlotGraph(DictPlot,L) 53 | 54 | #PlotGraph(PlaceVertices(DictPlot,L,scale=5),L,arrowsize=0.2) # automatic node placement 55 | 56 | #end 57 | 58 | end 59 | -------------------------------------------------------------------------------- /src/PotentialInference/PotentialInference.jl: -------------------------------------------------------------------------------- 1 | module PotentialInference 2 | 3 | importall BrmlFunctions 4 | 5 | # Type definitions: 6 | 7 | export Potential, DiscretePotential, ArrayPotential 8 | abstract Potential 9 | abstract DiscretePotential <: Potential 10 | abstract ArrayPotential <: DiscretePotential 11 | 12 | 13 | # Type Unions: 14 | export DictOrArray 15 | DictOrArray=Union{Dict,Array} 16 | 17 | 18 | # Variable Types: 19 | export Variable 20 | abstract Variable 21 | 22 | export DiscreteVariable 23 | type DiscreteVariable <: Variable 24 | name 25 | state 26 | function DiscreteVariable(varname,varstates) 27 | new(varname,varstates) 28 | end 29 | end 30 | 31 | 32 | # Potential Types 33 | 34 | export PotArray 35 | type PotArray <: ArrayPotential 36 | variables 37 | content 38 | function PotArray(variables, content) 39 | content=standardise(content) 40 | if length(variables)!= length(mysize(content)) 41 | error("number of variables does not match the size of the potential") 42 | end 43 | new(vec(variables),content) 44 | end 45 | end 46 | 47 | export Const 48 | type Const <: Potential 49 | variables 50 | content 51 | function Const(content) 52 | if !isa(content,Number) 53 | error("Content must be a numerical scalar") 54 | end 55 | new([],content) 56 | end 57 | end 58 | 59 | export PotLogArray 60 | type PotLogArray <: ArrayPotential 61 | variables 62 | content 63 | function PotLogArray(variables, content) 64 | content=standardise(content) 65 | if 
length(variables)!= length(mysize(content)) 66 | error("number of variables does not match the size of the potential") 67 | end 68 | new(vec(variables),content) 69 | end 70 | end 71 | 72 | try 73 | current_path = joinpath((pwd()), "src", "PotentialInference") 74 | PotentialsPath = joinpath(current_path, "Potentials") 75 | AlgorithmsPath = joinpath(current_path, "Algorithms") 76 | include(joinpath(PotentialsPath, "ArrayPotential.jl")) 77 | include(joinpath(PotentialsPath, "Potential.jl")) 78 | include(joinpath(PotentialsPath, "PotArray.jl")) 79 | include(joinpath(PotentialsPath, "PotConst.jl")) 80 | include(joinpath(PotentialsPath, "PotLogArray.jl")) 81 | include(joinpath(AlgorithmsPath, "FactorGraph.jl")) 82 | end 83 | 84 | end # module 85 | -------------------------------------------------------------------------------- /Demos/demoHMMinferenceFG.jl: -------------------------------------------------------------------------------- 1 | function demoHMMinferenceFG() 2 | 3 | H = 6 # number of Hidden states 4 | V = 2 # number of Visible states 5 | T = 5 # length of the time-series 6 | # setup the HMM 7 | phghm = rand(H,H); phghm = phghm./repmat(sum(phghm,1),H,1);# transition distribution p(h(t)|h(t-1)) 8 | pvgh = rand(V,H); pvgh = pvgh./repmat(sum(pvgh,1),V,1);# emission distribution p(v(t)|h(t)) 9 | ph1 = rand(H,1); ph1=ph1./sum(ph1); # initial p(h) 10 | # generate some fake data 11 | h=Array(Int64,1,T) 12 | v=Array(Int64,1,T) 13 | h[1] = randgen(ph1); v[1]=randgen(pvgh[:,h[1]]); 14 | for t=2:T 15 | h[t]=randgen(phghm[:,h[t-1]]); v[t]=randgen(pvgh[:,h[t]]); 16 | end 17 | # Perform Inference tasks: 18 | #[logalpha,loglik]=HMMforward(v,phghm,ph1,pvgh); % forward 19 | #logbeta=HMMbackward(v,phghm,pvgh); % backward 20 | #% smoothed posteriors: 21 | #[phtgV1T,phthtpgV1T]=HMMsmooth(logalpha,logbeta,pvgh,phghm,v); 22 | #gamma=HMMgamma(logalpha,phghm); % alternative alpha-gamma (RTS) method 23 | #[viterbimaxstate logprob]=HMMviterbi(v,phghm,ph1,pvgh); % most likely joint state 24 | 25 | # Factor graph approach: 26 | ht=1:T; vt=T+1:2*T; # assign numbers to variables 27 | pot=Array(PotArray,2*T) 28 | pot[ht[1]]=PotArray(ht[1],ph1); 29 | pot[vt[1]]=PotArray([vt[1] ht[1]], pvgh); 30 | for t=2:T 31 | pot[vt[t]]=PotArray([vt[t] ht[t]],pvgh); 32 | pot[ht[t]]=PotArray([ht[t] ht[t-1]],phghm); 33 | end 34 | 35 | newpot=Array(PotArray,T) 36 | for t=1:T; newpot[t]=multpots([setpot(pot[vt[t]],vt[t],v[t]) pot[ht[t]]]); end 37 | 38 | AFG = FactorGraph(newpot) 39 | marg, mess=sumprod(newpot,AFG) 40 | # likelihood 41 | dum1, fact2var, dum2=FactorConnectingVariable(ht[1],AFG); # can choose any of the variable nodes 42 | tmpmess = multpots(mess[fact2var]) 43 | FGloglik = log(sum(tmpmess.content)) 44 | 45 | # check: 46 | println("\nLog likelihood computed by SumProd Algorithm and Raw Summation:") 47 | println([FGloglik log((sum(prod(newpot))).content)]) 48 | 49 | println("\nSmoothed Posterior marginal inference (smoothing) computed by SumProd verus Raw Summation:") 50 | 51 | for ht=1:T 52 | println("\nVariable $ht:") 53 | rawmarg=condpot(prod(newpot),ht) 54 | sumprodmarg=condpot(marg[ht],ht) 55 | println([sumprodmarg.content rawmarg.content]) 56 | end 57 | 58 | end 59 | -------------------------------------------------------------------------------- /src/HMM/HMMem.jl: -------------------------------------------------------------------------------- 1 | # @doc """ 2 | # EM algorithm for HMM 3 | # phghm, ph1, pvgh, loglikelihood = HMMem(v, H, V; MaxIterations=10, PlotProgress=false) 4 | # 5 | # ##### Inputs: 6 | # * `v`: 
cell array containing sequences, so v{3}(5) is the 3rd sequence, 5th timestep 7 | # * `H`: number of hidden states 8 | # * `V`: number of visible (observation) states 9 | # 10 | # ##### Outputs: 11 | # * `phghm`: learned transition p(h(t)|h(t-1)) 12 | # * `ph1`: learned initial distribution p(h(1)) 13 | # * `pvgh`: learned emission p(v|h) 14 | # * `loglikelihood`: log likelihood of the sequences 15 | # """ -> 16 | function HMMem(v::Array{Any,1}, H, V; MaxIterations=10, PlotProgress=false) 17 | N = length(v) # number of sequences 18 | 19 | # random initialisation: 20 | phghm = condp(rand(H,H)) # transition distribution p(h(t)|h(t-1)) 21 | pvgh = condp(rand(V,H)) # emission distribution p(v(t)|h(t)) 22 | ph1 = condp(rand(H,1)) # initial p(h(1)) 23 | llik = zeros(MaxIterations, N) 24 | loglik = zeros(N) 25 | 26 | if PlotProgress 27 | PyPlot.figure() 28 | ax=PyPlot.subplot(1,1,1) 29 | end 30 | 31 | for emloop = 1:MaxIterations 32 | A = zeros(H, H) 33 | a = zeros(H, 1) 34 | B = zeros(V, H) 35 | for n = 1:N 36 | 37 | # Perform Inference tasks (E step): 38 | alpha, loglik = HMMforward(v[n], phghm, ph1, pvgh) 39 | llik[emloop, :] = llik[emloop, :] + loglik 40 | beta = HMMbackward(v[n], phghm, pvgh) 41 | phtgV1T, phthtpgV1T = HMMsmooth(v[n], phghm, ph1, pvgh, alpha, beta, ReturnPairwiseMarginals=true) 42 | 43 | # Collect the statistics for the M-step: 44 | A = A + sum(phthtpgV1T, 3)[:, :, 1] 45 | a = a + phtgV1T[:, 1] 46 | for t = 1:length(v[n]) 47 | i = v[n][t] 48 | #B[i, :] = B[i, :] + phtgV1T[:, t]' 49 | B[i, :] = B[i, :] + phtgV1T[:, t] 50 | end 51 | end 52 | # Perform M-step 53 | ph1 = condp(a) 54 | phghm = condp(A') 55 | pvgh = condp(B) 56 | end 57 | totalLogLik=sum(llik,2) 58 | if PlotProgress 59 | PyPlot.plot(totalLogLik); PyPlot.suptitle("log likelihood") 60 | println(minimum(totalLogLik)) 61 | ax[:set_ylim](totalLogLik[2],totalLogLik[end]) 62 | end 63 | loglikelihood = loglik[end] 64 | return phghm, ph1, pvgh, loglikelihood 65 | end 66 | -------------------------------------------------------------------------------- /src/PotentialInference/Potentials/PotLogArray.jl: -------------------------------------------------------------------------------- 1 | function *(A::PotLogArray,B::PotLogArray) 2 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 3 | AA=PotArray(A.variables,exp(A.content-logprefactor)) 4 | BB=PotArray(B.variables,exp(B.content-logprefactor)) 5 | AB=AA*BB 6 | L=PotLogArray(AB.variables,2*logprefactor+log(AB.content)) 7 | return L 8 | end 9 | 10 | function /(A::PotLogArray,B::PotLogArray) 11 | BB=deepcopy(B) 12 | BB.content=-BB.content 13 | return A*BB 14 | end 15 | 16 | 17 | 18 | function +(A::PotLogArray,B::PotLogArray) 19 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 20 | AA=PotArray(A.variables,exp(A.content-logprefactor)) 21 | BB=PotArray(B.variables,exp(B.content-logprefactor)) 22 | AB=AA+BB 23 | L=PotLogArray(AB.variables,logprefactor+log(AB.content)) 24 | return L 25 | end 26 | 27 | function -(A::PotLogArray,B::PotLogArray) 28 | logprefactor=maximum([maximum(A.content[:]) maximum(B.content[:])]); 29 | AA=PotArray(A.variables,exp(A.content-logprefactor)) 30 | BB=PotArray(B.variables,exp(B.content-logprefactor)) 31 | AB=AA-BB 32 | L=PotLogArray(AB.variables,logprefactor+log(AB.content)) 33 | return L 34 | end 35 | 36 | 37 | function *(A::PotArray,B::PotLogArray) 38 | logprefactor=maximum([maximum(log(A.content[:])) maximum(B.content[:])]); 39 | AA=PotArray(A.variables,exp(log(A.content)-logprefactor)) 40 | 
BB=PotArray(B.variables,exp(B.content-logprefactor)) 41 | AB=AA*BB 42 | L=PotLogArray(AB.variables,2*logprefactor+log(AB.content)) 43 | return L 44 | end 45 | 46 | 47 | function *(A::PotLogArray,B::Const) 48 | return B*A 49 | end 50 | 51 | function *(A::PotLogArray,B::PotArray) 52 | return B*A 53 | end 54 | 55 | 56 | 57 | function sum(A::PotLogArray,variables=A.variables;SumOver::Any=true) 58 | # Sum a PotLogArray over specified variables 59 | # eg sum(pot,[1 2]) 60 | # sum(pot,[1 2],false) sums over all variables in pot except for [1 2] 61 | 62 | A=standardise(A) # makes A a vector potential if A is 1 D array 63 | 64 | logprefactor=maximum(A.content[:]); 65 | pA=PotArray(A.variables,exp(A.content-logprefactor)) 66 | 67 | if !SumOver 68 | variables = setdiff(A.variables,variables); 69 | end 70 | 71 | newvars=setdiff(A.variables,variables) 72 | table_variables=memberinds(variables,A.variables) 73 | tmp=sum(pA.content,table_variables) 74 | #tmp=squeeze(tmp,find(memberinds(size(tmp),1))) 75 | tmp=squeeze(tmp,tuple(find(memberinds(size(tmp),1))...)) 76 | 77 | return PotLogArray(newvars,log(tmp)+logprefactor) 78 | end 79 | 80 | 81 | -------------------------------------------------------------------------------- /Demos/demoHMMlearn.jl: -------------------------------------------------------------------------------- 1 | function demoHMMlearn() 2 | V = 2 # number of visible states 3 | H = 3 # number of hidden states 4 | N = 1000 # number of sequences to analyze 5 | 6 | ph1_true = condp(rand(H, 1)) # probabilities for the states of the hidden variable at timestep 1 7 | 8 | phghm_true = condp(rand(H, H)) # transition matrix with sum(phghm, 1) = 1 with phghm[i, j] = p(hg=i | hm=j), hg@t, hm@t-1 9 | pvgh_true = condp((rand(V, H))) # emision matrix with sum(pvgh, 1) = 1 with pvgh[i, j] = p(vg = i | h = j), vg@t h@t 10 | 11 | # generate training data 12 | #h = cell(N) # array with several hidden states sequences of various lengths 13 | #v = cell(N) # array with corresponding visible states sequences; type: Array{Any,1} 14 | h = Array{Any}(N) # array with several hidden states sequences of various lengths 15 | v = Array{Any}(N) # array with corresponding visible states sequences; type: Array{Any,1} 16 | T = zeros(Int, N) # vector with number of timesteps per sequence 17 | 18 | # generate N sequence pairs of hidden and visible states 19 | for n = 1:N 20 | T[n] = 20 + ceil(10 * rand()) # length in timesteps of the current sequence 21 | # initialize the current sequences 22 | h[n] = zeros(Int, T[n]) # type: Array{Int64,1} 23 | v[n] = zeros(Int, T[n]) 24 | 25 | # populate the current sequences with random states based on transition and emission probabilities 26 | h[n][1] = randgen(ph1_true) 27 | v[n][1] = randgen(pvgh_true[:, h[n][1]]) 28 | for t = 2:T[n] 29 | h[n][t] = randgen(phghm_true[:, h[n][t - 1]]) 30 | v[n][t] = randgen(pvgh_true[:, h[n][t]]) 31 | end 32 | end 33 | 34 | # EM algorithm (see if we can recover the true HMM parameters): 35 | phghm, ph1, pvgh, loglik = HMMem(v, H, V, MaxIterations=50, PlotProgress=true) 36 | println(loglik) 37 | 38 | # visualise the results 39 | # get sorting indices for initial probabilities 40 | hord_true = sortperm(ph1_true[:,1]) 41 | hord = sortperm(ph1[:,1]) 42 | 43 | PyPlot.figure() 44 | ax = PyPlot.subplot(1, 2, 1) 45 | ax[:set_title]("learned initial probabilities") 46 | PyPlot.bar([1,2,3], sort(ph1[hord]), color=[0,1,1]) 47 | ax = PyPlot.subplot(1, 2, 2) 48 | ax[:set_title]("true initial probabilities") 49 | PyPlot.bar([1,2,3], sort(ph1_true[hord_true]), 
color=[0,1,0]) 50 | 51 | PyPlot.figure() 52 | ax = PyPlot.subplot(1, 2, 1) 53 | ax[:set_title]("learned transition") 54 | #PyPlot.imshow(phghm[hord,hord]) 55 | PyPlot.pcolor(phghm[hord,hord]) 56 | ax = PyPlot.subplot(1, 2, 2) 57 | ax[:set_title]("true transition") 58 | #PyPlot.imshow(phghm_true[hord_true,hord_true]) 59 | PyPlot.pcolor(phghm_true[hord_true,hord_true]) 60 | 61 | PyPlot.figure() 62 | ax = PyPlot.subplot(1, 2, 1) 63 | ax[:set_title]("learned emission") 64 | #PyPlot.imshow(pvgh[:,hord]) 65 | PyPlot.pcolor(pvgh[:,hord]) 66 | ax = PyPlot.subplot(1, 2, 2) 67 | ax[:set_title]("true emission") 68 | #PyPlot.imshow(pvgh_true[:,hord_true]) 69 | PyPlot.pcolor(pvgh_true[:,hord_true]) 70 | end 71 | -------------------------------------------------------------------------------- /src/PotentialInference/Potentials/Potential.jl: -------------------------------------------------------------------------------- 1 | export condpot 2 | function condpot{T<:Potential}(p::T,x::IntOrIntArray=[],y::IntOrIntArray=[]) 3 | # compute p(x|y)=p(x,y)/p(y) 4 | if isempty(x) &&!isempty(y) 5 | error("first argument of condpot is empty and second argument is non-empty") 6 | end 7 | if isempty(x) && isempty(y) 8 | return p/sum(p) 9 | else 10 | if isempty(y) 11 | return(sum(p,x,SumOver=false)/sum(p)) 12 | else 13 | pxy=sum(p,[x y],SumOver=false) 14 | py=sum(pxy,y,SumOver=false) 15 | return pxy/py 16 | end 17 | end 18 | end 19 | 20 | 21 | import Base.sum 22 | function sum{T<:Potential,I<:Integer}(pots::Dict{I,T}) 23 | # Sum potentials into a single potential 24 | return sum(convert(Array{T},pots)) 25 | end 26 | export sum # is this needed? 27 | 28 | 29 | import Base.prod 30 | function prod{I<:Integer,P<:Potential}(D::Dict{I,P}) 31 | p=convert(Array{Potential},D) 32 | return prod(p) 33 | end 34 | 35 | function prod(P::Potential) 36 | return P 37 | end 38 | 39 | # converters : 40 | import Base.convert 41 | function convert{T<:Potential,DT<:Potential,I<:Integer}(::Type{Array{T}}, D::Dict{I,DT}) 42 | L=length(collect(keys(D))) 43 | pot=Array(Potential,L) 44 | ky=collect(keys(D)) 45 | for k=1:length(keys(D)) 46 | pot[k]=D[ky[k]] 47 | end 48 | return pot 49 | end 50 | export convert 51 | 52 | 53 | 54 | export multpots 55 | function multpots{P<:Potential}(pot::Array{P}) 56 | if length(pot)==1 57 | return pot[1] 58 | else 59 | ind=[] 60 | for i=1:length(pot) 61 | if isdefined(pot,i) # remove #undef entries (missing) 62 | ind=vcat(ind, i) 63 | end 64 | end 65 | if length(ind)==1 66 | return pot[ind] 67 | else 68 | return prod(pot[ind]) 69 | end 70 | end 71 | end 72 | 73 | export standardisevariables 74 | #function standardisevariables{DictOrArray{T<:Potential}}(inpot::T) 75 | #function standardisevariables{T<:Potential}(inpot::DictOrArray{T}) 76 | # FIXME! 
NEED TO FIGURE OUT HOW TO DISPATCH ON ONLY DICTORARRY(subtype of Potential) 77 | function standardisevariables(inpot) 78 | # This just relabels the first from starting from 1 79 | pot=DictToArray(deepcopy(inpot)) 80 | variables=potvariables(pot) 81 | for i=1:length(pot) 82 | pot[i].variables=memberinds(pot[i].variables,variables) 83 | end 84 | return pot, variables 85 | end 86 | 87 | export returnvariables 88 | #function returnvariables{DictOrArray{T<:Potential}}(inpot::T,originalvariables) 89 | #function returnvariables{T<:Potential}(inpot::DictOrArray{T},originalvariables) 90 | function returnvariables(inpot,originalvariables) 91 | pot=DictToArray(deepcopy(inpot)) 92 | variables=potvariables(pot) 93 | for i=1:length(pot) 94 | pot[i].variables=originalvariables[memberinds(pot[i].variables,variables)] 95 | end 96 | return pot 97 | end 98 | 99 | export potvariables 100 | #function [variables nstates con convec]=potvariables(inpot) 101 | function potvariables(inpot) 102 | # NEED TO EXTEND THIS TO RETURN ADDITIONAL INFO, LIKE THE MATLAB FUNCTION 103 | #POTVARIABLES Returns information about all variables in a set of potentials 104 | # [variables nstates con convec]=potvariables(pot) 105 | # 106 | # return the variables and their number of states 107 | # If there is a dimension mismatch in the tables then return con=0 108 | # convec(i)=0 reports that variable i has conflicting dimension 109 | 110 | pot=deepcopy(inpot) # don't want to change anything in caller 111 | pot=DictToArray(pot) 112 | 113 | if isempty(pot);return;end 114 | 115 | if !isa(pot,Array) 116 | tmp=deepcopy(pot) 117 | pot=Array(Potential,1) 118 | pot[1]=tmp 119 | end 120 | 121 | variables=[]; 122 | for p=1:length(pot) 123 | variables=union(variables,pot[p].variables) 124 | end 125 | 126 | return variables 127 | 128 | end 129 | 130 | export dag 131 | function dag(pot) 132 | #DAG Return the Dictionary list of edge for a Belief Network 133 | # Assumes that pot[i] contains the distribution p(i|pa(i)) 134 | 135 | L=Dict{Integer,Array{Integer}}() 136 | vars=[]; 137 | for p=1:length(pot) 138 | vars=vcat(vars, pot[p].variables) 139 | end 140 | N=maximum(vars) 141 | A=zeros(N,N); 142 | #for p=1:length(pot) 143 | # L[p]=setdiff(pot[p].variables,p) 144 | #end 145 | for p=1:length(pot) ## might be better to insist that pot is itself a Dict 146 | pa=setdiff(pot[p].variables,p) 147 | for i=1:length(pa) 148 | if haskey(L,pa[i]) 149 | L[pa[i]]=union(L[pa[i]],p) 150 | else 151 | L[pa[i]]=union([],p) 152 | end 153 | end 154 | end 155 | 156 | return L 157 | 158 | end 159 | 160 | 161 | 162 | export whichpot 163 | function whichpot(pot,variables,n=1) 164 | #%WHICHPOT Returns potential that contain a set of variables 165 | #% potnum = whichpot(pot,variables,) 166 | #% 167 | #% Return potential numbers that contain all the specified variables 168 | #% The cliques are returned with those containing the smallest number of 169 | #% variables first. 
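#% For example (illustrative only), whichpot(pot,[1 2]) returns the index of the
#% potential with the fewest variables whose variable set contains both 1 and 2.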
170 | #% If optional n is used, returns at most n potential number 171 | potnum=[]; nvars=[]; 172 | for p=1:length(pot) 173 | # find the potential that contains variables 174 | if prod(memberinds(variables,pot[p].variables))>0 175 | nvars=vcat(nvars,length(pot[p].variables)); 176 | potnum=vcat(potnum,p); 177 | end 178 | end 179 | val=sort(nvars) 180 | ind=memberinds(val,nvars) 181 | potnum=potnum[ind[1:n]] 182 | if n==1 183 | potnum=potnum[1] 184 | end 185 | 186 | return potnum 187 | end 188 | 189 | 190 | export numvariables 191 | function numvariables{T<:Potential}(A::T) 192 | length(A.variables) 193 | end 194 | 195 | 196 | 197 | 198 | -------------------------------------------------------------------------------- /Demos/demoHMMburglar.jl: -------------------------------------------------------------------------------- 1 | function demoHMMburglar() 2 | 3 | # setup a grid representation of the rooom 4 | Gx = 5 # two dimensional grid size 5 | Gy = 5 6 | H = Gx * Gy # number of states on grid 7 | 8 | # matrix representing the possible states of the system 9 | st = reshape(1:H, Gx, Gy) # assign each grid point a state 10 | 11 | # make a deterministic state transition matrix HxH on a 2D grid: 12 | phgh = zeros(H, H) # transition from state j to state i 13 | for x = 1:Gx 14 | for y = 1:Gy 15 | # from the current state-cell (j coord in state transition matrix) 16 | # to next state-cell on (i coord in state transition matrix): 17 | # the next row, same column 18 | if validgridposition(x + 1, y, Gx, Gy) # sample for x = 1, y = 2 19 | phgh[st[x + 1, y], st[x, y]] = 1 # 2,2=7 1,2=6 20 | end 21 | # the previous row, same column 22 | if validgridposition(x - 1, y, Gx, Gy) 23 | phgh[st[x - 1, y], st[x, y]] = 1 # 0,2 1,2 24 | end 25 | # the same row, next column 26 | if validgridposition(x, y + 1, Gx, Gy) 27 | phgh[st[x, y + 1], st[x, y]] = 1 # 1,3=11 1,2=6 28 | end 29 | # the same row, previous column 30 | if validgridposition(x, y - 1, Gx, Gy) 31 | phgh[st[x, y - 1], st[x, y]] = 1 # 1,1=1 1,2=6 32 | end 33 | end 34 | end 35 | # conditional distribution from state transition matrix 36 | phghm = condp(phgh) # matrix with sum(phghm, 1) = 1 with phghm[i, j] = p(hg=i | hm=j) 37 | ph1=condp(ones(H,1)) # initialise probabilities for the states of the hidden variable at timestep 1 38 | pvgh=zeros(4,H) # initialise emision matrix 39 | 40 | pv1gh = 0.01 * ones(1,H); r = randperm(H); pv1gh[r[1:10]] = 0.9; # Creaks in 10 randomly chosen cells 41 | pv2gh = 0.01 * ones(1,H); r = randperm(H); pv2gh[r[1:10]] = 0.9; # Bumps in 10 randomly chosen cells 42 | 43 | PyPlot.figure() 44 | axc = PyPlot.subplot(2, 1, 1) 45 | axc[:set_title]("creaks layout") 46 | PyPlot.imshow(reshape(pv1gh, Gx, Gy), cmap="bone"); 47 | axb = PyPlot.subplot(2, 1, 2) 48 | axb[:set_title]("bumps layout") 49 | PyPlot.imshow(reshape(pv2gh, Gx, Gy), cmap="bone"); 50 | map([axb, axc]) do axesfig 51 | PyPlot.setp(axesfig[:get_xticklines](), visible=false) 52 | PyPlot.setp(axesfig[:get_xticklabels](), visible=false) 53 | PyPlot.setp(axesfig[:get_yticklines](), visible=false) 54 | PyPlot.setp(axesfig[:get_yticklabels](), visible=false) 55 | end 56 | 57 | # Form the joint distribution p(v|h)=p(v1|h)p(v2|h) 58 | # v = (v1, v2) and v1 and v2 are independent given h 59 | vv = zeros(4, 2) 60 | for i = 1:4 61 | pvgh[1, :] = pv1gh .* pv2gh; vv[1, :] = [1 1]; # p(v1=1|h)*p(v2=1|h) 62 | pvgh[2, :] = pv1gh.*(1-pv2gh); vv[2, :] = [1 2]; # p(v1=1|h)*p(v2=1|h) 63 | pvgh[3, :] = (1-pv1gh).*pv2gh; vv[3, :] = [2 1]; # p(v1=1|h)*p(v2=1|h) 64 | pvgh[4, :] = 
(1-pv1gh).*(1-pv2gh); vv[4, :] = [2 2]; # p(v1=1|h)*p(v2=1|h) 65 | end 66 | 67 | # draw some random samples: 68 | T=10 69 | h = zeros(Integer, 1, T) # holds the state value for the hidden variable at a specific timestep 70 | v = zeros(Integer, 1, T) # holds the state value for the visible variable at a specific timestep 71 | 72 | h[1]=randgen(ph1) # initialize the hidden variable @t=1 with a random state based on ph1 distribution 73 | v[1]=randgen(pvgh[:, h[1]]) # initialize the visible variable @t=1 with a random state based on pvgh( vg | h@t=1) 74 | 75 | for t=2:T 76 | h[t]=randgen(phghm[:, h[t-1]]) # set the hidden variable state @t based on h@t-1 using the transition matrix 77 | v[t]=randgen(pvgh[:, h[t]]) # set the visible variable state @t based on h@t using the emission matrix 78 | end 79 | 80 | # Perform inference based on the observed v: 81 | (alpha, loglik) = HMMforward(v, phghm, ph1, pvgh) # filtering 82 | phtgV1t = alpha # filtered posterior - infer the current hidden state p(ht | v_1:t) 83 | 84 | phtgV1T = HMMgamma(alpha, phghm) # Smoothed Burglar distribution 85 | maxstate, logprob = HMMviterbi(v, phghm, ph1, pvgh) # Most likely Burglar path 86 | 87 | PyPlot.figure() 88 | for t = 1:T 89 | axg = PyPlot.subplot(5, T, t); PyPlot.imshow(repmat(vv[v[t], :], 2, 1), cmap="bone"); 90 | if t == 2 # used t == 1 or t == 2 for title alignment only 91 | axg[:set_title]("Creaks and Bumps") 92 | end 93 | # add Filtering data row of T images from the previous row offset 94 | axf = PyPlot.subplot(5, T, T+t); PyPlot.imshow(reshape(phtgV1t[:, t], Gx, Gy), cmap="bone"); 95 | if t == 1 96 | axf[:set_title]("Filtering") 97 | end 98 | # add Smoothing data row of T images from the previous row offset 99 | axs = PyPlot.subplot(5, T, 2*T+t); PyPlot.imshow(reshape(phtgV1T[:, t], Gx, Gy), cmap="bone"); 100 | if t == 1 101 | axs[:set_title]("Smoothing") 102 | end 103 | z=zeros(H,1); z[maxstate[t]]=1; 104 | # add Viterbi data row of T images from the previous row offset 105 | axv = PyPlot.subplot(5,T,3*T+t); PyPlot.imshow(reshape(z,Gx,Gy), cmap="bone") 106 | if t == 1 107 | axv[:set_title]("Viterbi") 108 | end 109 | z = zeros(H,1); z[h[t]] = 1; 110 | # add true data row of T images from the previous row offset 111 | axt = PyPlot.subplot(5,T,4*T+t); PyPlot.imshow(reshape(z,Gx,Gy), cmap="bone") 112 | if t == 2 113 | axt[:set_title]("True Burglar position") 114 | end 115 | map([axg, axf, axs, axv, axt]) do axesfig 116 | PyPlot.setp(axesfig[:get_xticklines](), visible=false) 117 | PyPlot.setp(axesfig[:get_xticklabels](), visible=false) 118 | PyPlot.setp(axesfig[:get_yticklines](), visible=false) 119 | PyPlot.setp(axesfig[:get_yticklabels](), visible=false) 120 | end 121 | end 122 | 123 | end 124 | -------------------------------------------------------------------------------- /src/GraphPlot/GraphPlot.jl: -------------------------------------------------------------------------------- 1 | module GraphPlot 2 | 3 | 4 | importall BrmlFunctions 5 | using PyPlot 6 | 7 | export VariablePlot 8 | type VariablePlot 9 | text 10 | x 11 | y 12 | nodetype 13 | nodecolor 14 | nodesize 15 | function VariablePlot(text;x=0,y=0,nodetype="circle",nodecolor="black",nodesize=0.05) 16 | new(text,x,y,nodetype,nodecolor,nodesize) 17 | end 18 | end 19 | 20 | export PlotGraph 21 | function PlotGraph{I<:Number}(D::Dict{Integer,VariablePlot},LL::Dict{Integer,Array{I,}};arrowsize=0.1,arrowwidthscale=0.25,arrowcolor="black",PlotBidirectionalEdgeAsUndirected=true) # D is the Variable Dictionary and LL is the Dictionary List of directed 
connections (parent=>child) 22 | 23 | if false 24 | L=Dict{Integer,Array{Integer}}() 25 | # first switch to child=>parent list: 26 | for p=keys(LL) 27 | ch=LL[p] 28 | for c=ch 29 | if haskey(L,c) 30 | L[c]=union(L[c],p) 31 | else 32 | L[c]=union([],p) 33 | end 34 | end 35 | end 36 | end 37 | L=deepcopy(LL) 38 | 39 | an=linspace(0,2*pi,100) 40 | for i=1:length(D) 41 | xx=D[i].x; yy=D[i].y 42 | annotate(D[i].text,xy=[xx;yy],horizontalalignment="center",verticalalignment="center") 43 | if D[i].nodetype=="circle" 44 | r=D[i].nodesize 45 | plot(xx+r*cos(an),yy+r*sin(an),color=D[i].nodecolor) 46 | end 47 | end 48 | axis("equal") 49 | 50 | ky=collect(keys(L)) 51 | lambda=linspace(0,1,100) 52 | for i=ky 53 | for j=L[i] 54 | xstart=D[i].x; xend=D[j].x 55 | ystart=D[i].y; yend=D[j].y 56 | 57 | if D[j].nodetype=="circle" && D[i].nodetype=="circle" 58 | rstart=D[i].nodesize 59 | rend=D[j].nodesize 60 | e=zeros(2,1) 61 | e[1]=xend-xstart 62 | e[2]=yend-ystart 63 | xvec=zeros(2,1) 64 | xvec[1]=xstart 65 | xvec[2]=ystart 66 | yvec=zeros(2,1) 67 | yvec[1]=xend 68 | yvec[2]=yend 69 | e=e./sqrt(e'*e) 70 | 71 | xxvec=xvec+rstart*e 72 | yyvec=yvec-rend*e 73 | xxstart=xxvec[1] 74 | yystart=xxvec[2] 75 | xxend=yyvec[1] 76 | yyend=yyvec[2] 77 | 78 | plot(lambda*xxstart+(1-lambda)*xxend,lambda*yystart+(1-lambda)*yyend,color=arrowcolor) 79 | 80 | if haskey(L,j) && any(L[j].==i) 81 | bidirectionaledge=true 82 | else 83 | bidirectionaledge=false 84 | end 85 | 86 | if !bidirectionaledge | (bidirectionaledge && !PlotBidirectionalEdgeAsUndirected) 87 | # add an arrow: 88 | eb=zeros(2,1); eb[1]=e[2]; eb[2]=-e[1]; 89 | avec=yvec-(rend+arrowsize)*e; 90 | Bvec=avec+arrowwidthscale*arrowsize*eb; 91 | Bx=Bvec[1]; By=Bvec[2]; 92 | Cvec=avec-arrowwidthscale*arrowsize*eb; 93 | Cx=Cvec[1]; Cy=Cvec[2]; 94 | Ax=xxend; Ay=yyend; 95 | 96 | plot(lambda*Ax+(1-lambda)*Bx,lambda*Ay+(1-lambda)*By,color=arrowcolor) 97 | plot(lambda*Ax+(1-lambda)*Cx,lambda*Ay+(1-lambda)*Cy,color=arrowcolor) 98 | plot(lambda*Bx+(1-lambda)*Cx,lambda*By+(1-lambda)*Cy,color=arrowcolor) 99 | end 100 | end 101 | end 102 | end 103 | 104 | end 105 | 106 | include("make_layout.jl") 107 | include("poset.jl") 108 | include("toposort.jl") 109 | 110 | 111 | export PlaceVertices 112 | function PlaceVertices(D,L;UseEnergyApproach=false,scale=4,crosspenalty=0.5,proximitypenalty=2,linelengthpenalty=0.5,attempts=200) 113 | 114 | 115 | if UseEnergyApproach 116 | ## This is just a quick attempt to places vertices such that they prefer not to be too close and also for connecting lines not to cross. Needs more work! 
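    ## In outline (the intended scheme, as far as the code below suggests): a random-restart
    ## search that re-draws all vertex positions, then repeatedly proposes a new random
    ## position for a single vertex, scores the candidate layout with DAGenergy (crossing,
    ## proximity and line-length penalties) and keeps the lowest-energy layout found.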
117 | 118 | Ebest=1000000000 119 | DDD=deepcopy(D) 120 | for goes=1:attempts 121 | V=length(D) # number of variables 122 | an=linspace(0,2pi,V) 123 | DD=deepcopy(D) 124 | for i=1:V 125 | #DD[i].x=cos(an[i]); DD[i].y=sin(an[i]) 126 | DD[i].x=scale*rand(); DD[i].y=scale*rand() 127 | end 128 | 129 | E=DAGenergy(DD,L,scale,crosspenalty,proximitypenalty,linelengthpenalty) 130 | for loop=1:10 131 | for v=randperm(V) 132 | Dcand=deepcopy(DD) 133 | Dcand[v].x=scale*rand() 134 | Dcand[v].y=scale*rand() 135 | Ecand=DAGenergy(DD,L,scale,crosspenalty,proximitypenalty,linelengthpenalty) 136 | if Ecand0.5*scale 185 | E=E+1 186 | end 187 | end 188 | end 189 | E=proximitypenalty*E 190 | end 191 | if true 192 | for a=collect(keys(L)) 193 | for b=L[a] 194 | aa=vec([points[a].x points[a].y]) 195 | bb=vec([points[b].x points[b].y]) 196 | E=E+linelengthpenalty*sqrt(dot( bb-aa,bb-aa)) # encourage lines to not be too long 197 | 198 | for c=setdiff(collect(keys(L)),[a b]) 199 | for d=setdiff(L[c],[a b]) 200 | E=E+crosspenalty*linescross(points[a],points[b],points[c],points[d]) 201 | end 202 | end 203 | end 204 | end 205 | end 206 | return E 207 | end 208 | 209 | 210 | export linescross 211 | function linescross(line1start,line1end,line2start,line2end) 212 | # returns true is line1 crosses line2 213 | a=vec([line1start.x line1start.y]) 214 | b=vec([line1end.x line1end.y]) 215 | c=vec([line2start.x line2start.y]) 216 | d=vec([line2end.x line2end.y]) 217 | 218 | g=a-c;e=b-a;f=d-c; 219 | 220 | alpha=dot(e,e); beta=-dot(f,e); theta=-dot(g,e); 221 | gamma=dot(e,f); delta=-dot(f,f); phi=-dot(g,f); 222 | mu=(alpha*phi-theta*gamma)/(delta*alpha-beta*gamma) 223 | lambda=(theta-mu*beta)/alpha 224 | v=g+lambda*e-mu*f 225 | E=dot(v,v) 226 | 227 | if 0<= lambda <=1 && 0<=mu<=1 && E<0.0001 228 | return true 229 | else 230 | return false 231 | end 232 | end 233 | 234 | 235 | end 236 | 237 | -------------------------------------------------------------------------------- /src/PotentialInference/Algorithms/FactorGraph.jl: -------------------------------------------------------------------------------- 1 | 2 | export mess2var 3 | function mess2var(vars,FG;ShowFactors=false) 4 | #MESS2VAR Returns the message numbers that connect into variables v 5 | # [messnum fact]=mess2var(vars,FG) 6 | # vars is a vector of variables 7 | # 8 | # messnum(i) is the message number from factor fact(i) to vars(i) 9 | # FG is a Factor Graph 10 | # see also mess2fact.m, FactorConnectingVariable.m, FactorGraph.m 11 | 12 | V=minimum(find(FG[1,:]))-1; # variables are first in the order 13 | messnum=[]; fact=[]; 14 | for v=vars 15 | tmp=vec(full(FG[:,v])); 16 | tmp2=tmp.>0; 17 | messnum = vcat(messnum,tmp[tmp2]); 18 | fact=vcat(fact, find(tmp2)-V); 19 | end 20 | if ShowFactors 21 | return messnum, fact 22 | else 23 | return messnum 24 | end 25 | end 26 | 27 | 28 | export sumprod 29 | function sumprod(pot,A;InitialMessages=[]) 30 | 31 | variables=potvariables(pot) 32 | V=length(variables); N=size(A,1) 33 | fnodes=zeros(Int64,1,N); fnodes[V+1:end]=1:N-V; # factor nodes 34 | vnodes=zeros(Int64,1,N); vnodes[1:V]=1:V; # variable nodes 35 | nmess=maxarray(A); 36 | mess=Array(Potential,nmess) 37 | InitialMessages=[] 38 | if !isempty(InitialMessages); mess=InitialMessages; end 39 | if isempty(InitialMessages) # message initialisation 40 | for mcount=1:nmess 41 | mess[mcount]=Const(1) 42 | FGnodeA, FGnodeB=findone(A,mcount); 43 | if fnodes[FGnodeA].>0 # factor to variable message: 44 | # if the factor is at the edge (simplical), need to set message to the factor 
potentials 45 | if length(find(A[FGnodeA,:]))==1 46 | mess[mcount]=pot[fnodes[FGnodeA]]; 47 | end 48 | end 49 | end 50 | end 51 | 52 | # Do the message passing: 53 | for mcount=1:length(mess) 54 | FGnodeA, FGnodeB=findone(A,mcount); 55 | FGparents=setdiff(find(A[FGnodeA,:]),FGnodeB) # FG parent nodes of FGnodeA 56 | if !isempty(FGparents) 57 | # tmpmess = prod(mess[A[FGparents,FGnodeA]]) # product of incoming messages 58 | # At the moment (0.3.0 rc2) Julia assumes prod(single element) is an error. I don't know how to dispatch on the size of an input, so unfortunately need to define my own prod function: 59 | tmpmess = multpots(mess[A[FGparents,FGnodeA]]) # product of incoming messages 60 | mfactor=fnodes[FGnodeA] 61 | if mfactor==0 # variable to factor message: 62 | mess[mcount]=tmpmess 63 | else # factor to variable message: 64 | tmpmess = multpots([tmpmess pot[mfactor]]) 65 | mess[mcount] = sum(tmpmess,FGnodeB,SumOver=false) 66 | end 67 | end 68 | end 69 | # Get all the marginals: variable nodes are first in the ordering, so 70 | marg=Array(Potential,V) 71 | for i=1:V 72 | dum1, dum2, incoming=findnz(A[:,i]'); 73 | tmpmess = multpots(mess[incoming]); 74 | marg[i]=tmpmess; 75 | end 76 | normconstpot=sum(multpots(mess[mess2var(1,A)])); 77 | return marg, mess, normconstpot 78 | end 79 | 80 | 81 | 82 | export maxprod 83 | function maxprod(pot,A;InitialMessages=[]) 84 | 85 | variables=potvariables(pot) 86 | V=length(variables); N=size(A,1) 87 | fnodes=zeros(Int64,1,N); fnodes[V+1:end]=1:N-V; # factor nodes 88 | vnodes=zeros(Int64,1,N); vnodes[1:V]=1:V; # variable nodes 89 | nmess=maxarray(A); 90 | mess=Array(Potential,nmess) 91 | InitialMessages=[] 92 | if !isempty(InitialMessages); mess=InitialMessages; end 93 | if isempty(InitialMessages) # message initialisation 94 | for mcount=1:nmess 95 | mess[mcount]=Const(1) 96 | FGnodeA, FGnodeB=findone(A,mcount); 97 | if fnodes[FGnodeA].>0 # factor to variable message: 98 | # if the factor is at the edge (simplical), need to set message to the factor potentials 99 | if length(find(A[FGnodeA,:]))==1 100 | mess[mcount]=pot[fnodes[FGnodeA]]; 101 | end 102 | end 103 | end 104 | end 105 | 106 | # Do the message passing: 107 | for mcount=1:length(mess) 108 | FGnodeA, FGnodeB=findone(A,mcount); 109 | FGparents=setdiff(find(A[FGnodeA,:]),FGnodeB) # FG parent nodes of FGnodeA 110 | if !isempty(FGparents) 111 | tmpmess = multpots(mess[A[FGparents,FGnodeA]]) # product of incoming messages 112 | mfactor=fnodes[FGnodeA] 113 | if mfactor==0 # variable to factor message: 114 | mess[mcount]=tmpmess 115 | else # factor to variable message: 116 | tmpmess = multpots([tmpmess pot[mfactor]]) 117 | mess[mcount] = max(tmpmess,FGnodeB,MaxOver=false) 118 | end 119 | end 120 | end 121 | 122 | # now find the maximum states: variable nodes are first in the ordering: 123 | maxstate=Dict{Integer,Any}() 124 | maxval=[] # ensure that is in scope 125 | for i=1:V 126 | #dum1, dum2, incoming=findnz(A[:,i]) 127 | dum1, incoming=findnz(A[:,i]) 128 | tmpmess = multpots(mess[incoming]) 129 | tmpmess,maxstate[i]=max(tmpmess,i,ReturnState=true) 130 | maxval=tmpmess.content 131 | end 132 | 133 | return maxstate, maxval, mess 134 | end 135 | 136 | 137 | #function [f fact2varmess var2factmess] = FactorConnectingVariable(vars,A) 138 | export FactorConnectingVariable 139 | function FactorConnectingVariable(vars,A) 140 | #%FACTORCONNECTINGVARIABLE Factor nodes connecting to a set of variables 141 | #% [f fact2varmess var2factmess] = FactorConnectingVariable(vars,A) 142 | #% find the intersection 
of factor indices that connect to variables vars. 143 | #% fact2varmess are the message indices connecting the factors f to the variable 144 | #% var2factmess are the message indices from the variables to the factors f 145 | #% A is a Factor Graph 146 | #% vars are the variables 147 | #% see also FactorGraph.m, demoSumProd.m, mess2fact.m, fact2mess.m 148 | c=1 149 | for v=vars 150 | if c==1 151 | f = find(A[v,:]); 152 | else 153 | f = intersect(f,find(A[v,:])); 154 | end 155 | c=c+1 156 | end 157 | fact2varmess=[];var2factmess=[]; 158 | for fn=f 159 | fact2varmess=full(union(fact2varmess,A[fn,vars])); 160 | var2factmess=full(union(var2factmess,A[vars,fn])); 161 | end 162 | V=minimum(find(A[1,:]))-1; # variables are first in the order 163 | f=full(f-V) 164 | return f, fact2varmess, var2factmess 165 | end 166 | 167 | export FactorGraph 168 | function FactorGraph(pot::DictOrArray) 169 | #FACTORGRAPH Returns a Factor Graph adjacency matrix based on a cell of potentials. 170 | # A = FactorGraph(pot) 171 | # 172 | # pot is a set of potentials and the routine returns the Factor Graph 173 | # (sparse) adjacency matrix A. The size of A is equal to 174 | # (V+F)*(V+F) where V are the total number of variables and F the total 175 | # number of factors. A(1:V,1:V) and A(V+1:end,V+1:end) are empty; 176 | # A(1:V,V+1:end) contains the variable to factor message indices and 177 | # A(V+1:end,1:V) contains the factor to variable message indices 178 | # If the set of potentials is not singly-connected, all message indices are -1 179 | # 180 | # 181 | # A(i,j)=k means that message number k is from FGnodei -> FGnodej 182 | # Going through the messages in sequence corresponds to a valid 183 | # forward-backward procedure over all variable nodes in the Factor Graph. 184 | # See also FactorConnectingVariable.m, VariableConnectingFactor.m 185 | # Note that the variables in pot must be numbered 1,2,3,... 186 | # 187 | # See also demoSumProd.jl 188 | 189 | F=length(pot) # number of factors (potentials in distribution) 190 | variables=potvariables(pot) 191 | if variables[end]!=length(variables); 192 | error("potential variables used are not numbered 1:end. Use standarisevariables.jl. 
See demoSumProdStandariseVariables"); end 193 | 194 | V=length(variables) 195 | N=V+F # all nodes in factor graph 196 | 197 | vnodes=zeros(Int64,1,N); vnodes[1:V]=1:V # variable nodes 198 | fnodes=zeros(Int64,1,N); fnodes[V+1:end]=1:F # factor nodes 199 | A = SparseIntMatrix(N,N) 200 | for f=1:length(pot) 201 | FGnodeA=find(fnodes.==f) 202 | FGnodeB=vec(memberinds(pot[f].variables,variables)) 203 | fillmatrix!(A,FGnodeA,FGnodeB[:],1) 204 | fillmatrix!(A,FGnodeB[:],FGnodeA,1) 205 | end 206 | # get a message passing sequence and initialise the messages 207 | #tree, elimseq, forwardschedule=istree(full(A),ReturnElimSeq=true); ## change to sparse 208 | tree, elimseq, forwardschedule=istree(A,ReturnElimSeq=true); ## change to sparse 209 | #reverseschedule=flipud(fliplr(forwardschedule)); 210 | reverseschedule=flipdim(flipdim(forwardschedule,2),1); 211 | schedule=vcat(forwardschedule,reverseschedule); 212 | 213 | if tree 214 | for count=1:size(schedule,1) 215 | # setup the structure for a message from FGnodeA -> FGnodeB 216 | FGnodeA, FGnodeB = schedule[count,:] 217 | A[FGnodeA,FGnodeB]=count; # store the FG adjacency matrix with mess number on edge 218 | end 219 | else 220 | A = replace(A,1,-1); 221 | end 222 | 223 | 224 | return A 225 | 226 | end 227 | 228 | 229 | 230 | -------------------------------------------------------------------------------- /src/PotentialInference/Potentials/PotArray.jl: -------------------------------------------------------------------------------- 1 | import Base.* 2 | function *(A::PotArray,B::PotArray) 3 | # multiply two PotArray potentials 4 | # eg pot[1]*pot[2], or p*q 5 | 6 | A = standardise(A) 7 | B = standardise(B) 8 | 9 | allvars=sort(union(A.variables,B.variables)) 10 | 11 | nvars=length(allvars) 12 | inds=ones(Int64,nvars,1) 13 | inds[1:nvars]=1:nvars 14 | 15 | ncvarsA=nvars-length(A.variables) 16 | 17 | if ncvarsA>0 18 | DsingularA=ones(Int64,1,nvars) 19 | DsingularA[ncvarsA+1:end]=[numstates(A.content)...] 20 | singularA=ones(typeof(A.content[1]),DsingularA...) 21 | singularA[1,:]=A.content 22 | else 23 | singularA=A.content 24 | end 25 | 26 | ncvarsB=nvars-length(B.variables) 27 | if ncvarsB>0 28 | DsingularB=ones(Int64,1,nvars); 29 | DsingularB[ncvarsB+1:end]=[numstates(B.content)...] 30 | singularB=ones(typeof(B.content[1]),DsingularB...) 31 | singularB[1,:]=B.content 32 | else 33 | singularB=B.content 34 | end 35 | 36 | 37 | tmpindA=memberinds(A.variables,allvars) 38 | singularAinds=myvcat(setdiff(inds,tmpindA),tmpindA) 39 | 40 | singularA=myipermutedims(singularA,singularAinds) 41 | 42 | tmpindB=memberinds(B.variables,allvars) 43 | singularBinds=myvcat(setdiff(inds,tmpindB),tmpindB) 44 | singularB=myipermutedims(singularB,singularBinds) 45 | 46 | 47 | return PotArray(allvars,broadcast(*,singularA,singularB)) 48 | end 49 | 50 | import Base./ 51 | function /(A::PotArray,B::PotArray) 52 | # divide two PotArray potentials 53 | 54 | # need to decide at some point whether to add epsilon to the denominator table 55 | 56 | A = standardise(A) 57 | B = standardise(B) 58 | 59 | allvars=sort(union(A.variables,B.variables)) 60 | 61 | nvars=length(allvars) 62 | inds=ones(Int64,nvars,1) 63 | inds[1:nvars]=1:nvars 64 | 65 | ncvarsA=nvars-length(A.variables) 66 | 67 | 68 | if ncvarsA>0 69 | DsingularA=ones(Int64,1,nvars) 70 | DsingularA[ncvarsA+1:end]=[numstates(A.content)...] 71 | singularA=ones(typeof(A.content[1]),DsingularA...) 
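    # pad A's table with one leading singleton dimension for each variable it lacks from the
    # union of variables; after the permutation below, both tables are laid out over the same
    # sorted variable ordering, so broadcast(/,...) aligns and divides them element-wise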
72 | singularA[1,:]=A.content 73 | else 74 | singularA=A.content 75 | end 76 | 77 | ncvarsB=nvars-length(B.variables) 78 | if ncvarsB>0 79 | DsingularB=ones(Int64,1,nvars); 80 | DsingularB[ncvarsB+1:end]=[numstates(B.content)...] 81 | singularB=ones(typeof(B.content[1]),DsingularB...) 82 | singularB[1,:]=B.content 83 | else 84 | singularB=B.content 85 | end 86 | 87 | tmpindA=memberinds(A.variables,allvars) 88 | singularAinds=myvcat(setdiff(inds,tmpindA),tmpindA) 89 | 90 | singularA=myipermutedims(singularA,singularAinds) 91 | 92 | tmpindB=memberinds(B.variables,allvars) 93 | singularBinds=myvcat(setdiff(inds,tmpindB),tmpindB) 94 | singularB=myipermutedims(singularB,singularBinds) 95 | 96 | return PotArray(allvars,broadcast(/,singularA,singularB)) 97 | end 98 | 99 | 100 | import Base.+ 101 | function +(A::PotArray,B::PotArray) 102 | # Add two PotArray potentials 103 | 104 | A = standardise(A) 105 | B = standardise(B) 106 | 107 | allvars=sort(union(A.variables,B.variables)) 108 | 109 | nvars=length(allvars) 110 | inds=ones(Int64,nvars,1) 111 | inds[1:nvars]=1:nvars 112 | 113 | ncvarsA=nvars-length(A.variables) 114 | 115 | if ncvarsA>0 116 | DsingularA=ones(Int64,1,nvars) 117 | DsingularA[ncvarsA+1:end]=[numstates(A.content)...] 118 | singularA=ones(typeof(A.content[1]),DsingularA...) 119 | singularA[1,:]=A.content 120 | else 121 | singularA=A.content 122 | end 123 | 124 | ncvarsB=nvars-length(B.variables) 125 | if ncvarsB>0 126 | DsingularB=ones(Int64,1,nvars); 127 | DsingularB[ncvarsB+1:end]=[numstates(B.content)...] 128 | singularB=ones(typeof(B.content[1]),DsingularB...) 129 | singularB[1,:]=B.content 130 | else 131 | singularB=B.content 132 | end 133 | tmpindA=memberinds(A.variables,allvars) 134 | 135 | singularAinds=myvcat(setdiff(inds,tmpindA),tmpindA) 136 | 137 | singularA=myipermutedims(singularA,singularAinds) 138 | 139 | tmpindB=memberinds(B.variables,allvars) 140 | singularBinds=myvcat(setdiff(inds,tmpindB),tmpindB) 141 | singularB=myipermutedims(singularB,singularBinds) 142 | 143 | return PotArray(allvars,broadcast(+,singularA,singularB)) 144 | end 145 | 146 | import Base.- 147 | function -(A::PotArray,B::PotArray) 148 | # subtract two PotArray potentials 149 | A = standardise(A) 150 | B = standardise(B) 151 | 152 | allvars=sort(union(A.variables,B.variables)) 153 | 154 | nvars=length(allvars) 155 | inds=ones(Int64,nvars,1) 156 | inds[1:nvars]=1:nvars 157 | 158 | ncvarsA=nvars-length(A.variables) 159 | 160 | if ncvarsA>0 161 | DsingularA=ones(Int64,1,nvars) 162 | DsingularA[ncvarsA+1:end]=[numstates(A.content)...] 163 | singularA=ones(typeof(A.content[1]),DsingularA...) 164 | singularA[1,:]=A.content 165 | else 166 | singularA=A.content 167 | end 168 | 169 | ncvarsB=nvars-length(B.variables) 170 | if ncvarsB>0 171 | DsingularB=ones(Int64,1,nvars); 172 | DsingularB[ncvarsB+1:end]=[numstates(B.content)...] 173 | singularB=ones(typeof(B.content[1]),DsingularB...) 174 | singularB[1,:]=B.content 175 | else 176 | singularB=B.content 177 | end 178 | tmpindA=memberinds(A.variables,allvars) 179 | 180 | singularAinds=myvcat(setdiff(inds,tmpindA),tmpindA) 181 | 182 | singularA=myipermutedims(singularA,singularAinds) 183 | 184 | tmpindB=memberinds(B.variables,allvars) 185 | singularBinds=myvcat(setdiff(inds,tmpindB),tmpindB) 186 | singularB=myipermutedims(singularB,singularBinds) 187 | 188 | return PotArray(allvars,broadcast(-,singularA,singularB)) 189 | end 190 | 191 | 192 | 193 | # this is automatically exported (not sure I understand why!) 
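
# Usage sketch for the PotArray arithmetic defined above (illustrative values only,
# not taken from the library's demos):
#   p = PotArray([1 2], rand(2,2))   # a table over variables 1 and 2
#   q = PotArray([2 3], rand(2,2))   # a table over variables 2 and 3
#   r = p*q                          # r.variables == [1,2,3]; r.content has size (2,2,2)
#   s = p+q; d = p-q                 # likewise defined over the union of the variables
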
194 | import Base.sum # need this since sum is part of Base 195 | 196 | function sum(A::PotArray,variables=A.variables;SumOver::Any=true) 197 | # Sum a PotArray over specified variables 198 | # eg sum(pot,[1 2]) 199 | # sum(pot,[1 2],false) sums over all variables in pot except for [1 2] 200 | 201 | A=standardise(A) # makes A a vector potential if A is 1 D array 202 | 203 | if !SumOver 204 | variables = setdiff(A.variables,variables); 205 | end 206 | 207 | newvars=setdiff(A.variables,variables) 208 | table_variables=memberinds(variables,A.variables) 209 | tmp=sum(A.content,table_variables) 210 | #tmp=squeeze(tmp,find(memberinds(size(tmp),1))) 211 | tmp=squeeze(tmp,tuple(find(memberinds(size(tmp),1))...)) 212 | return PotArray(newvars,tmp) 213 | end 214 | 215 | 216 | export setpot 217 | function setpot(A::PotArray,variables,evidstates) 218 | # Set the value of a PotArray potential 219 | # eg setpot(pot,[1 2],[3 4]) sets variable 1 to state 3 and variable 2 to state 4 220 | ns=numstates(A) 221 | m=memberinds(variables,A.variables) 222 | for i=1:length(variables) 223 | tmp=zeros(1,ns[m[i]]) 224 | tmp[evidstates[i]]=1 225 | q=PotArray(variables[i],tmp) 226 | A=A*q 227 | end 228 | A=sum(A,variables) 229 | end 230 | 231 | 232 | 233 | export setstate 234 | function setstate(A::PotArray,variables,evidstates,val) 235 | # Set the value of a particular state in a potential 236 | # eg setstate(pot,[1 2],[3 4],0.5) sets all states of the potential that match variable 1 being in state 3 and variable 2 being in state 4 to the value 0.5 237 | 238 | # do this as ind(x==1)ind(y==1)*phi(1,1)+(1-ind(x==1)*ind(y==1))*phi(x,y) 239 | ns=numstates(A) 240 | m=memberinds(variables,A.variables) 241 | 242 | z=zeros(typeof(A.content[1]),1,ns[m[1]]) 243 | z[evidstates[1]]=1 244 | d=PotArray(variables[1],z) 245 | for i=2:length(variables) 246 | z=zeros(typeof(A.content[1]),1,ns[m[i]]) 247 | z[evidstates[i]]=1 248 | d=d*PotArray(variables[i],z) 249 | end 250 | dbar=PotArray(d.variables,ones(typeof(A.content[1]),numstates(d))) 251 | dbar=dbar-d 252 | d.content=d.content*val 253 | return d+A*dbar 254 | end 255 | 256 | 257 | export show 258 | import Base.show 259 | function show{I<:Integer}(p::PotArray,v::Dict{I,DiscreteVariable}) 260 | # Display the values of a PotArray potential 261 | # eg show(pot,DictVariable) where DictVariable is Dictionary of variable information 262 | # see demoBurglar.jl 263 | # 264 | # disp() is equivalent to show() 265 | 266 | vars=p.variables 267 | ns=numstates(p) 268 | #st=states([ns...]) 269 | if length(ns)>1 270 | st=states(collect(ns)') 271 | else 272 | st=1:ns 273 | end 274 | t=p.content 275 | for i=1:prod(ns) 276 | s="" 277 | for j=1:length(ns) 278 | s=s*"$(v[vars[j]].name)=$(v[vars[j]].state[st[i,j]])\t" 279 | end 280 | println(s*"$(t[i])") 281 | end 282 | end 283 | 284 | export disp 285 | disp=show 286 | 287 | 288 | 289 | import Base.findmax 290 | function findmax(A::PotArray,variables;MaxOver=true,Ind2Sub=false) 291 | #MAX Maximise a multi-dimensional array over a set of dimenions 292 | # [maxval maxstate]=max(x,variables) 293 | # find the values and states that maximize the multidimensional array x 294 | # over the dimensions in maxover 295 | # 296 | 297 | maxval,maxind=findmax(A.content,variables) 298 | if !Ind2Sub 299 | return maxval, maxind 300 | end 301 | 302 | s=ind2sub(size(A),maxind[:]) # compatabilty with matlab BRML 303 | maxstate=zeros(Int64,length(s[1]),length(s)) 304 | for i=1:size(A,2) 305 | maxstate[:,i]=s[i] 306 | end 307 | return PotArray(maxval, maxstate) 308 | 309 | 
end 310 | 311 | 312 | 313 | 314 | 315 | -------------------------------------------------------------------------------- /src/BrmlFunctions.jl: -------------------------------------------------------------------------------- 1 | 2 | module BrmlFunctions 3 | # General helper functions: 4 | 5 | #VERSION < v"0.4-" && using Docile 6 | #@docstrings 7 | 8 | import Base: vec, findmax 9 | 10 | # Type Unions: 11 | export IntOrIntArray 12 | IntOrIntArray=Union{Integer,Array{Integer},Array{}} # include also the ranges and vectors 13 | 14 | 15 | export vec 16 | @doc """ 17 | Creates a vector with 1 element of value v 18 | """ -> 19 | function vec(v::Integer) 20 | return vec([v]) 21 | end 22 | 23 | export maxarray 24 | @doc """ 25 | Finds the largest non-zero value of matrix **`A`** 26 | """ -> 27 | function maxarray(A) 28 | p,q,r=findnz(A) 29 | return maximum(r) 30 | end 31 | 32 | export SparseIntMatrix 33 | @doc """ 34 | Initialises a new matrix of size **M x N** 35 | 36 | * `M`: number of rows 37 | * `N`: number of columns 38 | """ -> 39 | function SparseIntMatrix(M,N) 40 | #return Int(spzeros(M,N)) 41 | return round(Int64,spzeros(M,N)) 42 | end 43 | 44 | export fillmatrix! 45 | @doc """ 46 | Initialises a subarray of **`A`** with value **`val`** 47 | 48 | * `A`: array to fill 49 | * `indsi`: vector/range of row indeces 50 | * `indsj`: vector/range of column indeces 51 | * `val`: value to fill with the elements 52 | """ -> 53 | function fillmatrix!(A,indsi,indsj,val) 54 | for i=indsi 55 | for j=indsj 56 | A[i,j]=val 57 | end 58 | end 59 | return A 60 | end 61 | 62 | export states 63 | @doc """ 64 | Enumerates all states given the number of states of each variable from a given set similar to MATLAB _ind2subv_ 65 | 66 | * `ns`: vector with the total number of states for each variable 67 | """ -> 68 | function states(ns) 69 | # enumerate all states eg states([2 2 3]) (like ind2subv.m) 70 | # first index changes the most (like matlab) 71 | #ns=fliplr(ns) 72 | ns=flipdim(ns,2) 73 | n=length(ns) 74 | nstates=prod(vec(ns)) 75 | c=cumprod(vec(ns[end:-1:1])) 76 | c=vcat(1,c[1:end-1]) 77 | s=Array(Int64,nstates,n) 78 | for i=1:nstates 79 | tmp=i-1 80 | for j=n:-1:1 81 | s[i,n+1-j]=floor(tmp/c[j]) 82 | tmp=tmp-s[i,n+1-j]*c[j] 83 | end 84 | end 85 | #return fliplr(s.+1) 86 | return flipdim(s.+1,2) 87 | end 88 | 89 | export standardise 90 | @doc """ 91 | Transforms an one-dimensional array **`A`** of numerical values into a column vector, 92 | otherwise it leaves the input unchanged 93 | """ -> 94 | function standardise{T<:Number}(A::Array{T,}) 95 | # If the array is a vector, make this a column vector, otherwise leave unchanged 96 | if isavector(A) 97 | A=vec(A) 98 | end 99 | return A 100 | end 101 | 102 | export mysize 103 | @doc """ 104 | Returns the size for each dimension of a vector or matrix 105 | 106 | * `a`: vector or matrix to analyse 107 | """ -> 108 | function mysize(a) 109 | a=standardise(a) 110 | s=size(a) 111 | if isavector(a) 112 | sz=s[1] 113 | else 114 | sz=s[1:end] 115 | end 116 | return sz 117 | end 118 | 119 | export memberinds 120 | @doc """ 121 | Returns a vector mapping the indices of **`x`** and **`y`** for which the corresponding elements have the same value 122 | 123 | * `x`: item to compare 124 | * `y`: item to compare 125 | """ -> 126 | function memberinds(x,y) 127 | ind=zeros(Int64,length(x),1) 128 | for i=1:length(x) 129 | for j=1:length(y) 130 | if y[j]==x[i] 131 | ind[i]=j; 132 | end 133 | end 134 | end 135 | return ind 136 | end 137 | 138 | export myvcat 139 | @doc """ 140 | 
Wrapper for standard **`vcat`** used for 141 | backward compatibility from porting the MATLAB version of BRML code 142 | """ -> 143 | function myvcat(x,y) 144 | if length(x)==0 145 | return y 146 | end 147 | 148 | if length(y)==0 149 | return x 150 | end 151 | 152 | return vcat(x,y) 153 | end 154 | 155 | export isavector 156 | @doc """ 157 | Verifies if matrix **`A`** has one of the dimensions of size **1** 158 | """ -> 159 | function isavector(A) 160 | 161 | if length(size(A))>2 162 | return false 163 | elseif (size(A,1)==1 && size(A,2)>1) || (size(A,2)==1 && size(A,1)>1) 164 | return true 165 | else 166 | return false 167 | end 168 | 169 | end 170 | 171 | export numstates 172 | @doc """ 173 | Returns the number of elements of matrix **`A`** 174 | """ -> 175 | function numstates(A) 176 | if isavector(A) 177 | n=prod(size(A)); 178 | else 179 | n=size(A); 180 | end 181 | return n 182 | end 183 | 184 | export replace 185 | @doc """ 186 | Finds the elements having value **`f`** in matrix **`A`** and replaces their value with **`r`** 187 | """ -> 188 | function replace!(A,f,r) 189 | A[find(A.==f)]=r 190 | end 191 | 192 | export findone 193 | @doc """ 194 | Returns the indices of the first element in matrix **`A`** that has the value **`c`** 195 | """ -> 196 | function findone(A,c) 197 | A, B=findn(A.==c) 198 | A=A[1]; B=B[1] 199 | return A,B 200 | end 201 | 202 | 203 | # Basic graph routines 204 | 205 | export neighboursize 206 | @doc """ 207 | Returns the number of neighbours in an graph 208 | 209 | nsize = neighboursize(A,) 210 | 211 | * `A`: adjacency matrix of the graph 212 | * `node`: (_optional_) the node for which to find the number of neighbours 213 | 214 | If **`node`** is missing, return the neighbour sizes (including self) of each node. 215 | If **`A`** is directed, returns the number of parents of the specified node. 216 | """ -> 217 | function neighboursize(A,nodes=[]) 218 | if isempty(nodes) 219 | nsize=sum(A,1); 220 | else 221 | nsize=sum(A[:,vec(nodes)],1); 222 | end 223 | return nsize 224 | end 225 | 226 | export istree 227 | @doc """ 228 | Checks if the input graph is singly-connected (a polytree) 229 | 230 | tree, elimseq, schedule = istree(A, =[]; =false) 231 | 232 | ##### Input : 233 | * `A`: graph's adjacency matrix (zeros on diagonal) 234 | * `root`: (_optional_) root node of the graph 235 | 236 | ##### Outputs: 237 | * `tree`: _true_ if graph is singly connected, otherwise _false_ 238 | * `elimseq`: a variable elimination sequence in which simplical nodes of the tree are listed, 239 | as each simplical node is removed from the tree. 240 | * `schedule`: the sequence of messages from node to node corresponding to elimseq 241 | 242 | If **`A`** is a directed graph, the elimination schedule begins with the nodes with no children. 243 | If root is specified, the last node eliminated is root. 244 | 245 | If the graph is connected and the number of edges is less than the number of nodes, 246 | it must be a tree. However, to deal with the general case in which it is unknown if the graph 247 | is connected w check using elimination. 248 | 249 | A tree/singly-connected graph must admit a recursive simplical node elimination. That is at 250 | any stage in the elimination there must be a node with either 0 or 1 neighbours in the remaining graph. 
251 | """ -> 252 | function istree(A,root=[];ReturnElimSeq=false) 253 | C = size(A,1); # number of nodes in the graph 254 | schedule=zeros(Int,C,2); 255 | tree=true; # assume A is singly connected 256 | AA=copy(A); # adjacency of the eliminated graph 257 | elimseq=[]; # set of variables eliminated (in sequence) 258 | 259 | for node=1:C 260 | # now find the number of neighbours: 261 | nn=(C+1)*ones(1,C); # ensures that we don't pick eliminated nodes 262 | s=1:C; r=zeros(1,C); 263 | r[elimseq]=1; # need to check this 264 | s=s[find(r.==0)]; 265 | nn[s]=neighboursize(AA',s) 266 | if !isempty(root) 267 | nn[root]=C+1 # ensures we don't pick root 268 | end 269 | 270 | val, elim=findmin(nn) # find node with least number of neighbours 271 | neigh = find(AA[:,vec(elim)]) # find the non zero elements 272 | 273 | if length(neigh)>1; # if least has more than 1 neighbour, cannot be a tree 274 | tree=false 275 | break; 276 | end 277 | 278 | AA[vec(elim),:]=0; AA[:,vec(elim)]=0; # eliminate node from graph 279 | elimseq=[elimseq... elim]; # add eliminated node to elimination set 280 | 281 | if isempty(neigh); schedule[node,:]=[elim elim]; 282 | else 283 | schedule[node,:]=[elim neigh]; 284 | end 285 | 286 | end 287 | if !tree; 288 | if ReturnElimSeq 289 | return false, [],[] 290 | else 291 | return false 292 | end 293 | end 294 | 295 | c=[] 296 | for i=1:size(schedule,1) 297 | if schedule[i,1]!=schedule[i,2] # remove self elimination 298 | c=[c... i] 299 | end 300 | end 301 | if ReturnElimSeq 302 | return tree, elimseq, schedule[vec(c),:] 303 | else 304 | return tree 305 | end 306 | end 307 | 308 | export findmax 309 | @doc """ 310 | Maximises a multi-dimensional array over a set of dimensions 311 | 312 | [maxval maxstate] = findmax(A, variables) 313 | 314 | * `A`: array to fill 315 | * `variables`: 316 | 317 | Finds the values and states that maximize the multi-dimensional 318 | array **`A`** over the dimensions in maxover 319 | """ -> 320 | function findmax(A,variables;Ind2Sub=false) 321 | 322 | maxval,maxind=findmax(A,variables) 323 | if !Ind2Sub 324 | return maxval, maxind 325 | end 326 | 327 | s=ind2sub(size(A),maxind[:]) # compatibilty with matlab BRML 328 | maxstate=zeros(Int64,length(s[1]),length(s)) 329 | for i=1:length(s) 330 | maxstate[:,i]=s[i] 331 | end 332 | return maxval, maxstate 333 | 334 | end 335 | 336 | export randgen 337 | @doc """ 338 | Returns a random value from distribution **`p`** 339 | """ -> 340 | function randgen(p) 341 | p=p./sum(p) 342 | f=find(rand(). 
351 | function myipermutedims{T<:AbstractArray}(A::T,d) 352 | if length(d)==1 && d[1]==1 353 | return A 354 | else 355 | ipermutedims(A,d) 356 | end 357 | end 358 | 359 | 360 | export DictToArray 361 | @doc """ 362 | Copies the dictionary **D** values into a new vector 363 | with the size equal to the number of keys in the dictionary 364 | """ -> 365 | function DictToArray(D) 366 | if isa(D,Dict) 367 | L=length(collect(keys(D))) 368 | pot=Array(Any,L) 369 | ky=collect(keys(D)) 370 | for k=1:length(keys(D)) 371 | pot[k]=D[ky[k]] 372 | end 373 | return pot 374 | else 375 | return D 376 | end 377 | end 378 | 379 | #export iskey 380 | #function iskey(k::Any,D::Dict) 381 | # allkeys=collect(keys(D)) 382 | # return any(k.==allkeys) 383 | #end 384 | 385 | export condp 386 | @doc """ 387 | Creates a conditional distribution from an array 388 | 389 | pnew = condp(pin, varargin) 390 | 391 | ##### Input: 392 | * `pin`: a positive matrix 393 | * `varargin`: optional input specifying which indices form the distribution variables 394 | 395 | ##### Output: 396 | * `pnew`: a new matrix with **`sum(pnew, 1) = ones(1, size(p, 2))`** 397 | 398 | ##### Example: 399 | r = rand(4, 2, 3); 400 | p = condp(r, DistributionIndices=[3, 1]); 401 | 402 | **`p`** is now an array of the same size as **`r`**, but with **`sum(sum(p,3),1) = 1`** for each of the dimensions of the 2nd index. 403 | """ -> 404 | function condp(p; DistributionIndices::IntOrIntArray=[]) ## FIXME! This doesn't work when p is more than an 2D array 405 | p=p+realmin(); # in case all unnormalised probabilities are zero 406 | 407 | if isempty(DistributionIndices) 408 | p=p./repmat(sum(p,1),size(p,1),1) 409 | return p 410 | else 411 | if DistributionIndices==0 412 | #p=p./sum(p[:]) 413 | p=p./sum(p) 414 | return p 415 | end 416 | allvars=1:length(size(p)) 417 | sizevars=size(p) 418 | condvars=setdiff(allvars,DistributionIndices) 419 | newp=deepcopy(permutedims(p,vcat(DistributionIndices,condvars))) 420 | newp=reshape(newp,prod(sizevars[DistributionIndices]),prod(sizevars[condvars])) 421 | newp=newp./repmat(sum(newp,1),size(newp,1),1) 422 | pnew=reshape(newp,sizevars[vcat(DistributionIndices,condvars)]) 423 | pnew=ipermutedims(pnew,vcat(DistributionIndices,condvars)) 424 | return pnew 425 | end 426 | end 427 | 428 | 429 | export normp 430 | @doc """ 431 | Creates a normalised array from an array 432 | 433 | pnew = normp(pin) 434 | 435 | ##### Input: 436 | * `pin`: an array 437 | 438 | 439 | ##### Output: 440 | * `pnew`: a new array with **`sum(pnew) =1`** 441 | 442 | ##### Example: 443 | r = rand(4, 2, 3); 444 | p = normp(r); 445 | 446 | **`p`** is now an array of the same size as **`r`**, but with **`sum(p) = 1`** 447 | """ -> 448 | function normp(p) ## FIXME! This doesn't work when p is more than an 2D array 449 | p=p+realmin(); # in case all unnormalised probabilities are zero 450 | p=p./sum(p) 451 | return p 452 | end 453 | 454 | 455 | 456 | #function condp(p, DistributionIndices=[]) ## FIXME! 
This doesn't work when p is more than an 2D array 457 | 458 | # if isempty(DistributionIndices) 459 | # p=p./repmat(sum(p,1),size(p,1),1) 460 | # return p 461 | # else 462 | # if DistributionIndices==0 463 | # p=p./sum(p[:]) 464 | # return p 465 | # end 466 | # p=p+realmin(); # in case all unnormalised probabilities are zero 467 | # allvars=1:length(size(p)) 468 | # sizevars=size(p) 469 | # condvars=setdiff(allvars,DistributionIndices) 470 | # newp=deepcopy(permutedims(p,vcat(DistributionIndices,condvars))) 471 | # newp=reshape(newp,prod(sizevars[DistributionIndices]),prod(sizevars[condvars])) 472 | # newp=newp./repmat(sum(newp,1),size(newp,1),1) 473 | # pnew=reshape(newp,sizevars[vcat(DistributionIndices,condvars)]) 474 | # pnew=ipermutedims(pnew,vcat(DistributionIndices,condvars)) 475 | # return pnew 476 | # end 477 | #end 478 | 479 | 480 | 481 | export condexp 482 | @doc """ 483 | Computes **`p`** proportional to **`exp(log p)`** 484 | """ -> 485 | function condexp(logp) 486 | pmax=maximum(logp,1) 487 | P =size(logp,1) 488 | return condp(exp(logp-repmat(pmax,P,1))) 489 | end 490 | 491 | export validgridposition 492 | @doc """ 493 | Returns `true` if point (x, y) is on a defined grid (1:Gx, 1:Gy) 494 | 495 | v = validgridposition(x, y, Gx, Gy) 496 | """ -> 497 | function validgridposition(x, y, Gx, Gy) 498 | if x > Gx || x < 1 499 | return false 500 | end 501 | if y > Gy || y < 1 502 | return false 503 | end 504 | return true 505 | end 506 | 507 | end #module 508 | -------------------------------------------------------------------------------- /docs/BrmlFunctions.md: -------------------------------------------------------------------------------- 1 | # BrmlFunctions 2 | 3 | ## Exported 4 | 5 | --- 6 | 7 | 8 | #### DictToArray(D) [¶](#method__dicttoarray.1) 9 | Copies the dictionary **D** values into a new vector 10 | with the size equal to the number of keys in the dictionary 11 | 12 | 13 | *source:* 14 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:357](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L357) 15 | 16 | --- 17 | 18 | 19 | #### SparseIntMatrix(M, N) [¶](#method__sparseintmatrix.1) 20 | Initialises a new matrix of size **M x N** 21 | 22 | * `M`: number of rows 23 | * `N`: number of columns 24 | 25 | 26 | *source:* 27 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:34](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L34) 28 | 29 | --- 30 | 31 | 32 | #### condexp(logp) [¶](#method__condexp.1) 33 | Computes **`p`** proportional to **`exp(log p)`** 34 | 35 | 36 | *source:* 37 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:427](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L427) 38 | 39 | --- 40 | 41 | 42 | #### condp(p) [¶](#method__condp.1) 43 | Creates a conditional distribution from an array 44 | 45 | pnew = condp(pin, varargin) 46 | 47 | ##### Input: 48 | * `pin`: a positive matrix 49 | * `varargin`: optional input specifying which indices form the distribution variables 50 | 51 | ##### Output: 52 | * `pnew`: a new matrix with **`sum(pnew, 1) = ones(1, size(p, 2))`** 53 | 54 | ##### Example: 55 | r = rand(4, 2, 3); 56 | p = condp(r, DistributionIndices=[3 1]); 57 | 58 | **`p`** is now an array of the same size as **`r`**, but with 
**`sum(sum(p,3),1) = 1`** for each of the dimensions of the 2nd index. 59 | 60 | *Note:* 61 | 62 | **`p=condp(r,0)`** returns a normalised array **`p = r./sum(r(:))`** 63 | 64 | 65 | *source:* 66 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:400](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L400) 67 | 68 | --- 69 | 70 | 71 | #### fillmatrix!(A, indsi, indsj, val) [¶](#method__fillmatrix.1) 72 | Initialises a subarray of **`A`** with value **`val`** 73 | 74 | * `A`: array to fill 75 | * `indsi`: vector/range of row indeces 76 | * `indsj`: vector/range of column indeces 77 | * `val`: value to fill with the elements 78 | 79 | 80 | *source:* 81 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:47](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L47) 82 | 83 | --- 84 | 85 | 86 | #### findmax(A, variables) [¶](#method__findmax.1) 87 | Maximises a multi-dimensional array over a set of dimensions 88 | 89 | [maxval maxstate] = findmax(A, variables) 90 | 91 | * `A`: array to fill 92 | * `variables`: 93 | 94 | Finds the values and states that maximize the multi-dimensional 95 | array **`A`** over the dimensions in maxover 96 | 97 | 98 | *source:* 99 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:312](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L312) 100 | 101 | --- 102 | 103 | 104 | #### findone(A, c) [¶](#method__findone.1) 105 | Returns the indices of the first element in matrix **`A`** that has the value **`c`** 106 | 107 | 108 | *source:* 109 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:188](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L188) 110 | 111 | --- 112 | 113 | 114 | #### isavector(A) [¶](#method__isavector.1) 115 | Verifies if matrix **`A`** has one of the dimensions of size **1** 116 | 117 | 118 | *source:* 119 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:151](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L151) 120 | 121 | --- 122 | 123 | 124 | #### istree(A) [¶](#method__istree.1) 125 | Checks if the input graph is singly-connected (a polytree) 126 | 127 | [tree, elimseq, schedule] = istree(A, =[]; =false) 128 | 129 | ##### Input : 130 | * `A`: graph's adjacency matrix (zeros on diagonal) 131 | * `root`: (_optional_) root node of the graph 132 | 133 | ##### Outputs: 134 | * `tree`: _true_ if graph is singly connected, otherwise _false_ 135 | * `elimseq`: a variable elimination sequence in which simplical nodes of the tree are listed, 136 | as each simplical node is removed from the tree. 137 | * `schedule`: the sequence of messages from node to node corresponding to elimseq 138 | 139 | If **`A`** is a directed graph, the elimination schedule begins with the nodes with no children. 140 | If root is specified, the last node eliminated is root. 141 | 142 | If the graph is connected and the number of edges is less than the number of nodes, 143 | it must be a tree. However, to deal with the general case in which it is unknown if the graph 144 | is connected w check using elimination. 
145 | 146 | A tree/singly-connected graph must admit a recursive simplicial node elimination. That is, at 147 | any stage in the elimination there must be a node with either 0 or 1 neighbours in the remaining graph. 148 | 149 | 150 | *source:* 151 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:244](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L244) 152 | 153 | --- 154 | 155 |  156 | #### istree(A, root) [¶](#method__istree.2) 157 | Checks if the input graph is singly-connected (a polytree) 158 | 159 | [tree, elimseq, schedule] = istree(A, =[]; =false) 160 | 161 | ##### Input: 162 | * `A`: graph's adjacency matrix (zeros on diagonal) 163 | * `root`: (_optional_) root node of the graph 164 | 165 | ##### Outputs: 166 | * `tree`: _true_ if the graph is singly connected, otherwise _false_ 167 | * `elimseq`: a variable elimination sequence in which simplicial nodes of the tree are listed, 168 | as each simplicial node is removed from the tree. 169 | * `schedule`: the sequence of messages from node to node corresponding to `elimseq` 170 | 171 | If **`A`** is a directed graph, the elimination schedule begins with the nodes with no children. 172 | If root is specified, the last node eliminated is root. 173 | 174 | If the graph is connected and the number of edges is less than the number of nodes, 175 | it must be a tree. However, to deal with the general case in which it is unknown whether the graph 176 | is connected, we check using elimination. 177 | 178 | A tree/singly-connected graph must admit a recursive simplicial node elimination. That is, at 179 | any stage in the elimination there must be a node with either 0 or 1 neighbours in the remaining graph. 180 | 181 | 182 | *source:* 183 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:244](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L244) 184 | 185 | --- 186 | 187 |  188 | #### maxarray(A) [¶](#method__maxarray.1) 189 | Finds the largest non-zero value of matrix **`A`** 190 | 191 | 192 | *source:* 193 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:22](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L22) 194 | 195 | --- 196 | 197 |  198 | #### memberinds(x, y) [¶](#method__memberinds.1) 199 | Returns a vector mapping the indices of **`x`** and **`y`** for which the corresponding elements have the same value 200 | 201 | * `x`: item to compare 202 | * `y`: item to compare 203 | 204 | 205 | *source:* 206 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:118](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L118) 207 | 208 | --- 209 | 210 |  211 | #### myipermutedims{T<:AbstractArray{T, N}}(A::T<:AbstractArray{T, N}, d) [¶](#method__myipermutedims.1) 212 | Wrapper for **`ipermutedims`** used for backward compatibility 213 | from porting the MATLAB version of BRML code 214 | 215 | 216 | *source:* 217 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:343](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L343) 218 | 219 | --- 220 | 221 |  222 | #### mysize(a) [¶](#method__mysize.1) 223 | Returns the size for each dimension of a vector 
or matrix 224 | 225 | * `a`: vector or matrix to analyse 226 | 227 | 228 | *source:* 229 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:100](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L100) 230 | 231 | --- 232 | 233 |  234 | #### myvcat(x, y) [¶](#method__myvcat.1) 235 | Wrapper for standard **`vcat`** used for 236 | backward compatibility from porting the MATLAB version of BRML code 237 | 238 | 239 | *source:* 240 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:135](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L135) 241 | 242 | --- 243 | 244 |  245 | #### neighboursize(A) [¶](#method__neighboursize.1) 246 | Returns the number of neighbours in a graph 247 | 248 | nsize = neighboursize(A,) 249 | 250 | * `A`: adjacency matrix of the graph 251 | * `node`: (_optional_) the node for which to find the number of neighbours 252 | 253 | If **`node`** is missing, returns the neighbour sizes (including self) of each node. 254 | If **`A`** is directed, returns the number of parents of the specified node. 255 | 256 | 257 | *source:* 258 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:209](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L209) 259 | 260 | --- 261 | 262 |  263 | #### neighboursize(A, nodes) [¶](#method__neighboursize.2) 264 | Returns the number of neighbours in a graph 265 | 266 | nsize = neighboursize(A,) 267 | 268 | * `A`: adjacency matrix of the graph 269 | * `node`: (_optional_) the node for which to find the number of neighbours 270 | 271 | If **`node`** is missing, returns the neighbour sizes (including self) of each node. 272 | If **`A`** is directed, returns the number of parents of the specified node. 
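For the undirected case, the neighbour sizes described above can be checked by hand from the adjacency matrix; a sketch of that computation (for illustration only, not the library implementation):

    A = [0 1 0; 1 0 1; 0 1 0]       # undirected chain 1-2-3
    nsize = vec(sum(A, 1)) .+ 1     # column sums give the neighbour counts; +1 counts the node itself
    # expected: nsize == [2, 3, 2]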
273 | 274 | 275 | *source:* 276 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:209](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L209) 277 | 278 | --- 279 | 280 | 281 | #### numstates(A) [¶](#method__numstates.1) 282 | Returns the number of elements of matrix **`A`** 283 | 284 | 285 | *source:* 286 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:167](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L167) 287 | 288 | --- 289 | 290 | 291 | #### randgen(p) [¶](#method__randgen.1) 292 | Returns a random value from distribution **`p`** 293 | 294 | 295 | *source:* 296 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:332](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L332) 297 | 298 | --- 299 | 300 | 301 | #### standardise{T<:Number}(A::Array{T<:Number, N}) [¶](#method__standardise.1) 302 | Transforms an one-dimensional array **`A`** of numerical values into a column vector, 303 | otherwise it leaves the input unchanged 304 | 305 | 306 | *source:* 307 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:86](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L86) 308 | 309 | --- 310 | 311 | 312 | #### states(ns) [¶](#method__states.1) 313 | Enumerates all states given the number of states of each variable from a given set similar to MATLAB _ind2subv_ 314 | 315 | * `ns`: vector with the total number of states for each variable 316 | 317 | 318 | *source:* 319 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:62](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L62) 320 | 321 | --- 322 | 323 | 324 | #### vec(v::Integer) [¶](#method__vec.1) 325 | Creates a vector with 1 element of value v 326 | 327 | 328 | *source:* 329 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:14](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L14) 330 | 331 | ## Internal 332 | 333 | --- 334 | 335 | 336 | #### replace!(A, f, r) [¶](#method__replace.1) 337 | Finds the elements having value **`f`** in matrix **`A`** and replaces their value with **`r`** 338 | 339 | 340 | *source:* 341 | [/Users/elfflorin/Documents/Projects/julia.hw/jpie/src/BrmlFunctions.jl:180](https://github.com/davidbarber/JuliaProbabilisticInferenceEngine/tree/b469ac67586c10247ab2baeeb0aeda089f041694/src/BrmlFunctions.jl#L180) 342 | 343 | -------------------------------------------------------------------------------- /notebooks/LearningNotesBrml.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "# cd(\"/Users/elfflorin/Documents/Projects/julia.hw/jpie-v0.4\"); include(\"setup.jl\"); using brml" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "###condp() functionality" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": { 25 | 
"collapsed": false 26 | }, 27 | "outputs": [ 28 | { 29 | "data": { 30 | "text/plain": [ 31 | "4x2x3 Array{Float64,3}:\n", 32 | "[:, :, 1] =\n", 33 | " 1.0 5.0\n", 34 | " 2.0 6.0\n", 35 | " 3.0 7.0\n", 36 | " 4.0 8.0\n", 37 | "\n", 38 | "[:, :, 2] =\n", 39 | " 11.0 15.0\n", 40 | " 12.0 16.0\n", 41 | " 13.0 17.0\n", 42 | " 14.0 18.0\n", 43 | "\n", 44 | "[:, :, 3] =\n", 45 | " 201.0 205.0\n", 46 | " 202.0 206.0\n", 47 | " 203.0 207.0\n", 48 | " 204.0 208.0" 49 | ] 50 | }, 51 | "execution_count": 2, 52 | "metadata": {}, 53 | "output_type": "execute_result" 54 | } 55 | ], 56 | "source": [ 57 | "# generate a toy matrix to follow its cells movements\n", 58 | "N = 4; M = 2; L = 3\n", 59 | "p = zeros(N, M, L)\n", 60 | "for z = 1:L\n", 61 | " for y = 1:M\n", 62 | " for x = 1:N\n", 63 | " p[x, y, z] = x + (y - 1) * N + (z - 1) * 10^(z - 1)\n", 64 | " end\n", 65 | " end\n", 66 | "end\n", 67 | "p" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 3, 73 | "metadata": { 74 | "collapsed": false 75 | }, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | " " 82 | ] 83 | }, 84 | { 85 | "data": { 86 | "text/plain": [ 87 | "3x4x2 Array{Float64,3}:\n", 88 | "[:, :, 1] =\n", 89 | " 1.0 2.0 3.0 4.0\n", 90 | " 11.0 12.0 13.0 14.0\n", 91 | " 201.0 202.0 203.0 204.0\n", 92 | "\n", 93 | "[:, :, 2] =\n", 94 | " 5.0 6.0 7.0 8.0\n", 95 | " 15.0 16.0 17.0 18.0\n", 96 | " 205.0 206.0 207.0 208.0" 97 | ] 98 | }, 99 | "execution_count": 3, 100 | "metadata": {}, 101 | "output_type": "execute_result" 102 | } 103 | ], 104 | "source": [ 105 | "@time permutedims(p, (3, 1, 2))" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 4, 111 | "metadata": { 112 | "collapsed": false 113 | }, 114 | "outputs": [ 115 | { 116 | "data": { 117 | "text/plain": [ 118 | "3x4x2 Array{Float64,3}:\n", 119 | "[:, :, 1] =\n", 120 | " 1.0 4.0 7.0 12.0\n", 121 | " 2.0 5.0 8.0 13.0\n", 122 | " 3.0 6.0 11.0 14.0\n", 123 | "\n", 124 | "[:, :, 2] =\n", 125 | " 15.0 18.0 203.0 206.0\n", 126 | " 16.0 201.0 204.0 207.0\n", 127 | " 17.0 202.0 205.0 208.0" 128 | ] 129 | }, 130 | "execution_count": 4, 131 | "metadata": {}, 132 | "output_type": "execute_result" 133 | } 134 | ], 135 | "source": [ 136 | "@time reshape(p, (3, 4, 2))" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 5, 142 | "metadata": { 143 | "collapsed": false 144 | }, 145 | "outputs": [ 146 | { 147 | "name": "stdout", 148 | "output_type": "stream", 149 | "text": [ 150 | "0.117024 seconds (55.74 k allocations: 2.873 MB)\n", 151 | " 0.033074 seconds (20.04 k allocations: 1.005 MB)\n", 152 | "Array{" 153 | ] 154 | }, 155 | { 156 | "data": { 157 | "text/plain": [ 158 | "2-element Array{Int64,1}:\n", 159 | " 3\n", 160 | " 1" 161 | ] 162 | }, 163 | "execution_count": 5, 164 | "metadata": {}, 165 | "output_type": "execute_result" 166 | } 167 | ], 168 | "source": [ 169 | "# COMMA makes a difference\n", 170 | "println(typeof([3, 1]), \" - size: \" , size([3, 1]), \" - 2x1 column vector\")\n", 171 | "[3, 1]" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 6, 177 | "metadata": { 178 | "collapsed": false 179 | }, 180 | "outputs": [ 181 | { 182 | "name": "stdout", 183 | "output_type": "stream", 184 | "text": [ 185 | "Int64,1} - size: (2,) - 2x1 column vector\n", 186 | "Array{" 187 | ] 188 | }, 189 | { 190 | "data": { 191 | "text/plain": [ 192 | "1x2 Array{Int64,2}:\n", 193 | " 3 1" 194 | ] 195 | }, 196 | "execution_count": 6, 197 | "metadata": {}, 198 | 
"output_type": "execute_result" 199 | } 200 | ], 201 | "source": [ 202 | "println(typeof([3 1]), \" - size: \" , size([3 1]), \" - 1x2 row vector\")\n", 203 | "[3 1]" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 7, 209 | "metadata": { 210 | "collapsed": false 211 | }, 212 | "outputs": [ 213 | { 214 | "name": "stdout", 215 | "output_type": "stream", 216 | "text": [ 217 | "Int64,2} - size: (1,2) - 1x2 row vector\n" 218 | ] 219 | }, 220 | { 221 | "data": { 222 | "text/plain": [ 223 | "1x2 Array{Int64,2}:\n", 224 | " 3 1" 225 | ] 226 | }, 227 | "execution_count": 7, 228 | "metadata": {}, 229 | "output_type": "execute_result" 230 | } 231 | ], 232 | "source": [ 233 | "# indices for dimensions to condition on\n", 234 | "dindices = [3 1]" 235 | ] 236 | }, 237 | { 238 | "cell_type": "code", 239 | "execution_count": 8, 240 | "metadata": { 241 | "collapsed": false 242 | }, 243 | "outputs": [ 244 | { 245 | "data": { 246 | "text/plain": [ 247 | "Tuple{Int64,Int64,Int64}" 248 | ] 249 | }, 250 | "execution_count": 8, 251 | "metadata": {}, 252 | "output_type": "execute_result" 253 | } 254 | ], 255 | "source": [ 256 | "# how many dimensions are in p? allvars\n", 257 | "# size() returns a vector with length of each dimension\n", 258 | "allvars = 1: length(size(p))\n", 259 | "sizevars = size(p)\n", 260 | "typeof(sizevars)" 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": 9, 266 | "metadata": { 267 | "collapsed": false 268 | }, 269 | "outputs": [ 270 | { 271 | "data": { 272 | "text/plain": [ 273 | "1-element Array{Int64,1}:\n", 274 | " 2" 275 | ] 276 | }, 277 | "execution_count": 9, 278 | "metadata": {}, 279 | "output_type": "execute_result" 280 | } 281 | ], 282 | "source": [ 283 | "# what are the dimensions not included in conditioning\n", 284 | "condvars = setdiff(allvars, dindices)" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "####_transform the raw data into probability distributions using 2 sets: conditioning (dindices) and conditioned (condvars)_" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 10, 297 | "metadata": { 298 | "collapsed": false 299 | }, 300 | "outputs": [ 301 | { 302 | "data": { 303 | "text/plain": [ 304 | "3x4x2 Array{Float64,3}:\n", 305 | "[:, :, 1] =\n", 306 | " 1.0 2.0 3.0 4.0\n", 307 | " 11.0 12.0 13.0 14.0\n", 308 | " 201.0 202.0 203.0 204.0\n", 309 | "\n", 310 | "[:, :, 2] =\n", 311 | " 5.0 6.0 7.0 8.0\n", 312 | " 15.0 16.0 17.0 18.0\n", 313 | " 205.0 206.0 207.0 208.0" 314 | ] 315 | }, 316 | "execution_count": 10, 317 | "metadata": {}, 318 | "output_type": "execute_result" 319 | } 320 | ], 321 | "source": [ 322 | "# create new matrix by swapping the matrix elements based on \n", 323 | "# the permutation of the dimension indices \n", 324 | "# in this case from NxMxL to LxNxM\n", 325 | "# grouping the elements in LxN sets along the M dimension\n", 326 | "newp = deepcopy(permutedims(p, [dindices condvars]))" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": 11, 332 | "metadata": { 333 | "collapsed": false 334 | }, 335 | "outputs": [ 336 | { 337 | "data": { 338 | "text/plain": [ 339 | "12x2 Array{Float64,2}:\n", 340 | " 1.0 5.0\n", 341 | " 11.0 15.0\n", 342 | " 201.0 205.0\n", 343 | " 2.0 6.0\n", 344 | " 12.0 16.0\n", 345 | " 202.0 206.0\n", 346 | " 3.0 7.0\n", 347 | " 13.0 17.0\n", 348 | " 203.0 207.0\n", 349 | " 4.0 8.0\n", 350 | " 14.0 18.0\n", 351 | " 204.0 208.0" 352 | ] 353 | }, 354 | "execution_count": 11, 
355 | "metadata": {}, 356 | "output_type": "execute_result" 357 | } 358 | ], 359 | "source": [ 360 | "# reshape the new matrix for calculating the elements sum\n", 361 | "# for the condvars (M = 2 in this case) sets of elements \n", 362 | "# in column vector format with condvars columns\n", 363 | "# use prod() to use the cardinality for each set as dimension\n", 364 | "# in this case (L*N) for each of the condvars (M) columns\n", 365 | "# each of the condvars (M) sets is normalized as a distribution\n", 366 | "newp = reshape(newp, prod(sizevars[dindices]), prod(sizevars[condvars]))" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": 12, 372 | "metadata": { 373 | "collapsed": false 374 | }, 375 | "outputs": [ 376 | { 377 | "data": { 378 | "text/plain": [ 379 | "12x2 Array{Float64,2}:\n", 380 | " 870.0 918.0\n", 381 | " 870.0 918.0\n", 382 | " 870.0 918.0\n", 383 | " 870.0 918.0\n", 384 | " 870.0 918.0\n", 385 | " 870.0 918.0\n", 386 | " 870.0 918.0\n", 387 | " 870.0 918.0\n", 388 | " 870.0 918.0\n", 389 | " 870.0 918.0\n", 390 | " 870.0 918.0\n", 391 | " 870.0 918.0" 392 | ] 393 | }, 394 | "execution_count": 12, 395 | "metadata": {}, 396 | "output_type": "execute_result" 397 | } 398 | ], 399 | "source": [ 400 | "# calculate the sum of all elements in the \n", 401 | "# condvars (M=2) sets; replicate\n", 402 | "# the results in a matrix similar to current data matrix\n", 403 | "sump = repmat(sum(newp, 1), size(newp, 1), 1)" 404 | ] 405 | }, 406 | { 407 | "cell_type": "code", 408 | "execution_count": 13, 409 | "metadata": { 410 | "collapsed": false 411 | }, 412 | "outputs": [ 413 | { 414 | "name": "stderr", 415 | "output_type": "stream", 416 | "text": [ 417 | "WARNING: int(x::AbstractFloat) is deprecated, use round(Int,x) instead.\n" 418 | ] 419 | }, 420 | { 421 | "data": { 422 | "text/plain": [ 423 | "1" 424 | ] 425 | }, 426 | "execution_count": 13, 427 | "metadata": {}, 428 | "output_type": "execute_result" 429 | } 430 | ], 431 | "source": [ 432 | "int(sum(newp[:,2] ./ sump[:, 2]))" 433 | ] 434 | }, 435 | { 436 | "cell_type": "code", 437 | "execution_count": 14, 438 | "metadata": { 439 | "collapsed": false 440 | }, 441 | "outputs": [ 442 | { 443 | "data": { 444 | "text/plain": [ 445 | "12x2 Array{Float64,2}:\n", 446 | " 0.00114943 0.00544662\n", 447 | " 0.0126437 0.0163399 \n", 448 | " 0.231034 0.223312 \n", 449 | " 0.00229885 0.00653595\n", 450 | " 0.0137931 0.0174292 \n", 451 | " 0.232184 0.224401 \n", 452 | " 0.00344828 0.00762527\n", 453 | " 0.0149425 0.0185185 \n", 454 | " 0.233333 0.22549 \n", 455 | " 0.0045977 0.0087146 \n", 456 | " 0.016092 0.0196078 \n", 457 | " 0.234483 0.22658 " 458 | ] 459 | }, 460 | "execution_count": 14, 461 | "metadata": {}, 462 | "output_type": "execute_result" 463 | }, 464 | { 465 | "name": "stderr", 466 | "output_type": "stream", 467 | "text": [ 468 | " in depwarn at deprecated.jl:73\n", 469 | " in int at deprecated.jl:50\n", 470 | " in include_string at loading.jl:266\n", 471 | " in execute_request_0x535c5df2 at /Users/elfflorin/.julia/v0.4/IJulia/src/execute_request.jl:177\n", 472 | " in eventloop at /Users/elfflorin/.julia/v0.4/IJulia/src/IJulia.jl:141\n", 473 | " in anonymous at task.jl:447\n", 474 | "while loading In[13], in expression starting on line 1\n" 475 | ] 476 | } 477 | ], 478 | "source": [ 479 | "# normalize the condvars (M) sets in the current data matrix\n", 480 | "probp = newp ./ sump" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": 15, 486 | "metadata": { 487 | "collapsed": false 488 
| }, 489 | "outputs": [ 490 | { 491 | "data": { 492 | "text/plain": [ 493 | "1x2 Array{Float64,2}:\n", 494 | " 1.0 1.0" 495 | ] 496 | }, 497 | "execution_count": 15, 498 | "metadata": {}, 499 | "output_type": "execute_result" 500 | } 501 | ], 502 | "source": [ 503 | "# check each set is a probability distribution\n", 504 | "sum(probp, 1)" 505 | ] 506 | }, 507 | { 508 | "cell_type": "markdown", 509 | "metadata": {}, 510 | "source": [ 511 | "####_rollback the operations to bring the data back into its original matrix format NxMxL_" 512 | ] 513 | }, 514 | { 515 | "cell_type": "code", 516 | "execution_count": 16, 517 | "metadata": { 518 | "collapsed": false 519 | }, 520 | "outputs": [ 521 | { 522 | "data": { 523 | "text/plain": [ 524 | "3x4x2 Array{Float64,3}:\n", 525 | "[:, :, 1] =\n", 526 | " 0.00114943 0.00229885 0.00344828 0.0045977\n", 527 | " 0.0126437 0.0137931 0.0149425 0.016092 \n", 528 | " 0.231034 0.232184 0.233333 0.234483 \n", 529 | "\n", 530 | "[:, :, 2] =\n", 531 | " 0.00544662 0.00653595 0.00762527 0.0087146\n", 532 | " 0.0163399 0.0174292 0.0185185 0.0196078\n", 533 | " 0.223312 0.224401 0.22549 0.22658 " 534 | ] 535 | }, 536 | "execution_count": 16, 537 | "metadata": {}, 538 | "output_type": "execute_result" 539 | } 540 | ], 541 | "source": [ 542 | "# reshape to previous reshaped format \n", 543 | "# in this case from (L*N)x(1*M) to LxNxM\n", 544 | "probp = reshape(probp, sizevars[[dindices condvars]])" 545 | ] 546 | }, 547 | { 548 | "cell_type": "code", 549 | "execution_count": 17, 550 | "metadata": { 551 | "collapsed": false 552 | }, 553 | "outputs": [ 554 | { 555 | "data": { 556 | "text/plain": [ 557 | "4x2x3 Array{Float64,3}:\n", 558 | "[:, :, 1] =\n", 559 | " 0.00114943 0.00544662\n", 560 | " 0.00229885 0.00653595\n", 561 | " 0.00344828 0.00762527\n", 562 | " 0.0045977 0.0087146 \n", 563 | "\n", 564 | "[:, :, 2] =\n", 565 | " 0.0126437 0.0163399\n", 566 | " 0.0137931 0.0174292\n", 567 | " 0.0149425 0.0185185\n", 568 | " 0.016092 0.0196078\n", 569 | "\n", 570 | "[:, :, 3] =\n", 571 | " 0.231034 0.223312\n", 572 | " 0.232184 0.224401\n", 573 | " 0.233333 0.22549 \n", 574 | " 0.234483 0.22658 " 575 | ] 576 | }, 577 | "execution_count": 17, 578 | "metadata": {}, 579 | "output_type": "execute_result" 580 | } 581 | ], 582 | "source": [ 583 | "probp = ipermutedims(probp, [dindices condvars])" 584 | ] 585 | }, 586 | { 587 | "cell_type": "code", 588 | "execution_count": 18, 589 | "metadata": { 590 | "collapsed": false 591 | }, 592 | "outputs": [ 593 | { 594 | "data": { 595 | "text/plain": [ 596 | "4x2x1 Array{Float64,3}:\n", 597 | "[:, :, 1] =\n", 598 | " 0.244828 0.245098\n", 599 | " 0.248276 0.248366\n", 600 | " 0.251724 0.251634\n", 601 | " 0.255172 0.254902" 602 | ] 603 | }, 604 | "execution_count": 18, 605 | "metadata": {}, 606 | "output_type": "execute_result" 607 | } 608 | ], 609 | "source": [ 610 | "testp = sum(probp, 3)" 611 | ] 612 | }, 613 | { 614 | "cell_type": "code", 615 | "execution_count": 19, 616 | "metadata": { 617 | "collapsed": false 618 | }, 619 | "outputs": [ 620 | { 621 | "data": { 622 | "text/plain": [ 623 | "1x2x1 Array{Float64,3}:\n", 624 | "[:, :, 1] =\n", 625 | " 1.0 1.0" 626 | ] 627 | }, 628 | "execution_count": 19, 629 | "metadata": {}, 630 | "output_type": "execute_result" 631 | } 632 | ], 633 | "source": [ 634 | "# final check\n", 635 | "sum(testp, 1)" 636 | ] 637 | }, 638 | { 639 | "cell_type": "markdown", 640 | "metadata": {}, 641 | "source": [ 642 | "### iu-hu-huu!!!" 
643 | ] 644 | }, 645 | { 646 | "cell_type": "code", 647 | "execution_count": null, 648 | "metadata": { 649 | "collapsed": true 650 | }, 651 | "outputs": [], 652 | "source": [] 653 | } 654 | ], 655 | "metadata": { 656 | "kernelspec": { 657 | "display_name": "Julia 0.4.0", 658 | "language": "julia", 659 | "name": "julia-0.4" 660 | }, 661 | "language_info": { 662 | "file_extension": ".jl", 663 | "mimetype": "application/julia", 664 | "name": "julia", 665 | "version": "0.4.0" 666 | } 667 | }, 668 | "nbformat": 4, 669 | "nbformat_minor": 0 670 | } 671 | -------------------------------------------------------------------------------- /notebooks/demoHMMinferenceSimple.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [ 10 | { 11 | "name": "stdout", 12 | "output_type": "stream", 13 | "text": [ 14 | "\n", 15 | "Available Demos:\n", 16 | "\n", 17 | "demoBurglar.jl\n", 18 | "demoBurglarDictPot.jl\n", 19 | "demoBurglarSimple.jl\n", 20 | "demoChainIndepRational.jl\n", 21 | "demoHMMburglar.jl\n", 22 | "demoHMMinferenceFG.jl\n", 23 | "demoHMMinferenceSimple.jl\n", 24 | "demoHMMlearn.jl\n", 25 | "demoMaxProd.jl\n", 26 | "demoPlotDAG.jl\n", 27 | "demoPotLogArray.jl\n", 28 | "demoSumProd.jl\n", 29 | "demoSumProdStandardiseVariables.jl\n" 30 | ] 31 | } 32 | ], 33 | "source": [ 34 | "cd(\"/Users/elfflorin/Documents/Projects/julia.hw/jpie-v0.4\")\n", 35 | "include(\"setup.jl\")\n", 36 | "using brml" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | "The domain for visible states: $V = \\{1, 2, 3, 4, 5\\}$
\n", 44 | "The domain for hidden states: $H = \\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10\\}$
\n", 45 | "The number of timesteps: $T = 20$" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 2, 51 | "metadata": { 52 | "collapsed": false 53 | }, 54 | "outputs": [ 55 | { 56 | "data": { 57 | "text/plain": [ 58 | "20" 59 | ] 60 | }, 61 | "execution_count": 2, 62 | "metadata": {}, 63 | "output_type": "execute_result" 64 | } 65 | ], 66 | "source": [ 67 | "V = 5 # number of visible states\n", 68 | "H = 10 # number of hidden states\n", 69 | "T = 20 # number of timesteps" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "Initialize the time-series vectors **h, v** for the hidden and visible variables" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": 3, 82 | "metadata": { 83 | "collapsed": false 84 | }, 85 | "outputs": [ 86 | { 87 | "data": { 88 | "text/plain": [ 89 | "1x20 Array{Integer,2}:\n", 90 | " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0" 91 | ] 92 | }, 93 | "execution_count": 3, 94 | "metadata": {}, 95 | "output_type": "execute_result" 96 | } 97 | ], 98 | "source": [ 99 | "h = zeros(Integer,1,T) # holds the state value for the hidden variable at a specific timestep\n", 100 | "v = zeros(Integer,1,T) # holds the state value for the visible variable at a specific timestep" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "The probabilities for the hidden variable states at timestep 1: $p(h_{1})$ " 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 4, 113 | "metadata": { 114 | "collapsed": false 115 | }, 116 | "outputs": [ 117 | { 118 | "data": { 119 | "text/plain": [ 120 | "10x1 Array{Float64,2}:\n", 121 | " 0.119668 \n", 122 | " 0.115798 \n", 123 | " 0.113779 \n", 124 | " 0.00521127\n", 125 | " 0.111174 \n", 126 | " 0.0551323 \n", 127 | " 0.016168 \n", 128 | " 0.103553 \n", 129 | " 0.190585 \n", 130 | " 0.168931 " 131 | ] 132 | }, 133 | "execution_count": 4, 134 | "metadata": {}, 135 | "output_type": "execute_result" 136 | } 137 | ], 138 | "source": [ 139 | "ph1 = condp(rand(H,1)) # probabilities for the states of the hidden variable at timestep 1" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "Initialize the transition matrix **$phghm_{10x10}$** with\n", 147 | "$$\\begin{eqnarray}\n", 148 | " phghm(i, j) = p(h_t = i | h_{t-1} = j) \\\\\n", 149 | " \\sum_{i = 1}^{10} phghm(i, j) = 1\n", 150 | " \\end{eqnarray}\n", 151 | "$$" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 5, 157 | "metadata": { 158 | "collapsed": false 159 | }, 160 | "outputs": [ 161 | { 162 | "data": { 163 | "text/plain": [ 164 | "10x10 Array{Float64,2}:\n", 165 | " 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0\n", 166 | " 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n", 167 | " 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0\n", 168 | " 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n", 169 | " 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0\n", 170 | " 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n", 171 | " 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0\n", 172 | " 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0\n", 173 | " 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0\n", 174 | " 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0" 175 | ] 176 | }, 177 | "execution_count": 5, 178 | "metadata": {}, 179 | "output_type": "execute_result" 180 | } 181 | ], 182 | "source": [ 183 | "phghm=condp(eye(H,H)) # transition matrix with sum(phghm, 1) = 1 with phghm[i, j] = p(hg=i | hm=j)\n", 184 | "# shuffle the column data in phghm while keeping 
sum(phghm, 1) = 1\n", 185 | "# done as below no 2 columns have value 1.0 on the same row\n", 186 | "phghmtmp=condp(eye(H,H))\n", 187 | "r = randperm(H)\n", 188 | "for (i, value) in enumerate(r)\n", 189 | " phghm[:, i] = phghmtmp[:, value]\n", 190 | "end\n", 191 | "phghm" 192 | ] 193 | }, 194 | { 195 | "cell_type": "markdown", 196 | "metadata": {}, 197 | "source": [ 198 | "Initialize the emission matrix **$pvgh \\in R^{5x10}$** with\n", 199 | "$$\\begin{eqnarray}\n", 200 | " pvgh(i, j) = p(v_t = i | h_{t} = j) \\\\\n", 201 | " \\sum_{i = 1}^{5} pvgh(i, j) = 1\n", 202 | " \\end{eqnarray}\n", 203 | "$$" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 6, 209 | "metadata": { 210 | "collapsed": false 211 | }, 212 | "outputs": [ 213 | { 214 | "data": { 215 | "text/plain": [ 216 | "5x10 Array{Float64,2}:\n", 217 | " 0.0497173 0.00971331 0.375005 … 0.287064 0.13402 0.089965\n", 218 | " 0.223909 0.34456 0.181126 0.145269 0.256912 0.225578\n", 219 | " 0.215437 0.220242 0.205583 0.0751164 0.268326 0.211817\n", 220 | " 0.24717 0.0528517 0.0226471 0.338549 0.336161 0.367161\n", 221 | " 0.263767 0.372633 0.215639 0.154002 0.00458047 0.105478" 222 | ] 223 | }, 224 | "execution_count": 6, 225 | "metadata": {}, 226 | "output_type": "execute_result" 227 | } 228 | ], 229 | "source": [ 230 | "pvgh=condp((rand(V,H))) # emision matrix with sum(pvgh, 1) = 1 with pvgh[i, j] = p(vg = i | h = j)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 7, 236 | "metadata": { 237 | "collapsed": false 238 | }, 239 | "outputs": [], 240 | "source": [ 241 | "h[1]=randgen(ph1) # initialize the hidden variable @t=1 with a random state based on ph1 distribution\n", 242 | "v[1]=randgen(pvgh[:,h[1]]) # initialize the visible variable @t=1 with a random state based on pvgh( vg | h@t=1)\n", 243 | "\n", 244 | "for t=2:T\n", 245 | " h[t]=randgen(phghm[:,h[t-1]]) # set the hidden variable state @t based on h@t-1 using the transition matrix\n", 246 | " v[t]=randgen(pvgh[:,h[t]]) # set the visible variable state @t based on h@t using the emission matrix\n", 247 | "end" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 8, 253 | "metadata": { 254 | "collapsed": false 255 | }, 256 | "outputs": [ 257 | { 258 | "name": "stdout", 259 | "output_type": "stream", 260 | "text": [ 261 | "Log Likelihood = -28.374464749133278\n" 262 | ] 263 | } 264 | ], 265 | "source": [ 266 | "(alpha, loglik) = HMMforward(v, phghm, ph1, pvgh); # filtering\n", 267 | "println(\"Log Likelihood = $loglik\")" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": 9, 273 | "metadata": { 274 | "collapsed": false 275 | }, 276 | "outputs": [ 277 | { 278 | "data": { 279 | "text/plain": [ 280 | "10x20 Array{Float64,2}:\n", 281 | " 4.3189e-5 1.24647e-7 8.06147e-6 … 8.06147e-6 1.39984e-6 \n", 282 | " 1.39984e-6 4.3189e-5 1.24647e-7 1.24647e-7 8.06147e-6 \n", 283 | " 0.99957 3.34372e-9 7.70298e-8 0.99957 3.34372e-9 \n", 284 | " 7.70298e-8 0.99957 3.34372e-9 7.70298e-8 0.99957 \n", 285 | " 3.34372e-9 7.70298e-8 0.99957 3.34372e-9 7.70298e-8 \n", 286 | " 8.06147e-6 1.39984e-6 4.3189e-5 … 4.3189e-5 1.24647e-7 \n", 287 | " 4.0098e-7 0.000203892 4.0098e-7 4.0098e-7 0.000203892\n", 288 | " 0.000203892 4.0098e-7 0.000203892 0.000203892 4.0098e-7 \n", 289 | " 1.24647e-7 8.06147e-6 1.39984e-6 1.39984e-6 4.3189e-5 \n", 290 | " 0.00017332 0.00017332 0.00017332 0.00017332 0.00017332 " 291 | ] 292 | }, 293 | "execution_count": 9, 294 | "metadata": {}, 295 | "output_type": "execute_result" 
296 | } 297 | ], 298 | "source": [ 299 | "gamma = HMMsmooth(v, phghm, ph1, pvgh, alpha); # smoothing" 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": 10, 305 | "metadata": { 306 | "collapsed": false 307 | }, 308 | "outputs": [ 309 | { 310 | "name": "stdout", 311 | "output_type": "stream", 312 | "text": [ 313 | "most likely path (viterbi):\n", 314 | "[3 4 5 3 4 5 3 4 5 3 4 5 3 4 5 3 4 5 3 4]\n" 315 | ] 316 | } 317 | ], 318 | "source": [ 319 | "(maxstate, logprob) = HMMviterbi(v, phghm, ph1, pvgh)\n", 320 | "println(\"most likely path (viterbi):\")\n", 321 | "println(maxstate)" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": 11, 327 | "metadata": { 328 | "collapsed": false 329 | }, 330 | "outputs": [ 331 | { 332 | "name": "stdout", 333 | "output_type": "stream", 334 | "text": [ 335 | "original path (hidden states):\n", 336 | "Integer[3 4 5 3 4 5 3 4 5 3 4 5 3 4 5 3 4 5 3 4]\n" 337 | ] 338 | } 339 | ], 340 | "source": [ 341 | "println(\"original path (hidden states):\")\n", 342 | "println(h)" 343 | ] 344 | }, 345 | { 346 | "cell_type": "code", 347 | "execution_count": 12, 348 | "metadata": { 349 | "collapsed": false 350 | }, 351 | "outputs": [ 352 | { 353 | "name": "stdout", 354 | "output_type": "stream", 355 | "text": [ 356 | "original path (visible states):\n", 357 | "Integer[5 4 3 2 4 2 5 3 1 5 4 1 5 3 2 3 3 3 1 5]\n" 358 | ] 359 | } 360 | ], 361 | "source": [ 362 | "println(\"original path (visible states):\")\n", 363 | "println(v)" 364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": 17, 369 | "metadata": { 370 | "collapsed": false 371 | }, 372 | "outputs": [ 373 | { 374 | "data": { 375 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqEAAAIhCAYAAAB+EmOGAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAPYQAAD2EBqD+naQAAIABJREFUeJzs3Xl4FeXB/vF7ThKSAEkgYUtAwi5CoSQFBFQEVDYJKIL5URFZhNaCCFrFrZBKoaDwiuiriAtgsW5BVPAFFExAZQkgIFUpoS1LFnYMEZNAwvz+kJxyTBDQOc9wyPdzXecCnpkz9xOYjLczkzmWbdu2AAAAAIM8bk8AAAAAFQ8lFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRTAZWX27Nlq0aKFwsPD5fF49Mwzz8jj8ahr164+66WkpMjj8WjNmjUuzfQH8+fPl8fj0YIFC1ydBwCYFuz2BADAKW+++abGjRunxMRE3X///QoNDVWHDh0kSZZlXdA2PB6Prr/+eqWlpflzql6WZXlfAFCRUEIBXDaWLl3q/bVOnTre8R07dqhy5coXvB2ThfDWW29Vx44dfeYLABUBJRTAZSMnJ0eWZZUpdM2aNXNpRucXGRmpyMhIt6cBAMZxTyiAgFd6f2d6erps25bH4/G+JJV7T+iPld6bKUnp6ek+2/jzn//ss+6GDRs0YMAA1alTR6Ghoapfv75+//vfKzc3t8x2u3TpIo/Ho1OnTumJJ57QlVdeqbCwMA0bNswn98f3hDZo0EANGzbU999/rwcffFD169dXWFiYmjZtqieffLLcr8G2bT3zzDPee2Lr1aune++9V3l5ed7tAcClgjOhAAJe165dZVmW5s+frz179iglJaXMOue7xJ6QkKBJkybpz3/+sxo0aKChQ4d6l3Xp0sX7+1dffVWjRo1SeHi4+vbtqyuuuEI7d+7Uyy+/rCVLlmj9+vW64oorymy/f//+2rRpk3r37q3+/furVq1aPzk/y7J06tQpde/eXbm5ubr55psVHBysxYsX6+GHH1ZhYaEmTpzo857Ro0drzpw5qlu3rn73u98pJCREH3zwgTIyMlRcXKxKlSr95N8BABhlA8Bl4vrrr7c9Hk+Zccuy7K5du/qMTZo0ybYsy169evV51y31z3/+0w4JCbGbNm1q5+Tk+CxbtWqVHRQUZN96661l5mRZlv3rX//aPnLkSJltzps3z7Ysy16wYIHPeHx8vG1Zln3zzTfbhYWF3vGDBw/a1apVs6tVq2afOnXKO75mzRrbsiy7efPmdl5ennf85MmTdufOnW3LsuyGDRuW+3UBgBu4HA8AF+iFF15QcXGxnnnmGcXGxvos69atm5KSkrRkyRKdOHGizHsnT56s6Ojoi8qzLEuzZ89WaGiod6xmzZrq27ev8vLytHPnTu946eX8xx57zOce05CQEP31r3+9qFwAMIHL8QBwgdatWyfph3tGN2zYUGb5wYMHVVJSon/+859KTEz0jluWpfbt2190XlRUlBo1alRmvPRy/7Fjx7xjW7ZskWVZuvbaa8usf/XVVysoKOii8wHAnyihAHCBjhw5Ikl66qmnzrmOZVnlngmtXbv2RedVq1at3PHg4B8O3SUlJd6xvLy8c+YEBQUpJibmovMBwJ8ooQBwgaKiomRZlvLy8lS1alW3p+Oj9BL8/v37y/wUfElJiY4cOXJRz0oFAH/jnlAAOItlWT5nGM/WsWNH2bbt+kd9licxMVG2beuzzz4rs2z9+vXn/JoAwC2UUAA4S0xMjPbt21fusjFjxigkJETjx49XZmZmmeUnT57Up59+6u8plmvIkCGSpClTpuj48eM+c3r00UddmRMA/BQuxwO4rNi2/Yvef
+ONN+rNN99U3759lZCQoJCQEF1//fW67rrrdOWVV+rVV1/V8OHD1bJlS/Xs2VNNmzbVqVOntHfvXn366aeqXbu2vv76a0fndCE6d+6sUaNGae7cuWrZsqX69++vkJAQLVmyRNWrV1dcXJz3YfwAcCm44CPSiRMnNGnSJPXs2VPR0dHlfsJHqW+++UY9e/ZURESEYmJiNGTIEB0+fNixSQNAeSzLuuDPfT/Xus8884wGDRqkjIwMTZkyRZMmTVJaWpp3+R133KHNmzfrjjvu0Jdffqn//d//1d///nf9+9//1u23367nn3/+ouZ0ruU/5z0vvPCC/ud//kdVq1bViy++qDfffFPdu3fXRx99pLy8PD4eFMAlxbIv8H/Rd+/erUaNGik+Pl4NGzZUenq65s+f770EVCorK0sJCQmqXr26xo4dq/z8fM2YMUP169dXRkaGQkJC/PKFAADKl5mZqSuvvFKDBg3S66+/7vZ0AEDSRVyOj4uL0/79+1WrVi1t3rxZ7dq1K3e9qVOnqqCgQFu2bFG9evUkSe3bt9dNN92k+fPna+TIkc7MHADg48CBA6pZs6bPZffvv/9e48aNkyTdeuutbk0NAMq44BJaqVIl72cd/9TJ00WLFqlPnz7eAipJN9xwg5o1a6a3336bEgoAfvL000/rjTfeUNeuXVWnTh3t379fq1atUnZ2tnr37q0BAwa4PUUA8HL0B5Oys7N16NAhtW3btsyydu3aadmyZU7GAQDO0r17d3355Zf66KOPdPToUYWEhKhZs2YaN26c92woAFwqHC2hubm5klTmM5VLx44ePapTp05xXygA+EG3bt3UrVs3t6cBABfE0RJaUFAgSQoNDS2zLCwszLvOj0vo4cOHtWLFCjVo0EDh4eFOTgkAAAAOKCgo0O7du9WjRw/VqFHjF2/P0RJaWiCLiorKLCssLPRZ52wrVqzQ4MGDnZwKAAAA/GDhwoW64447fvF2HC2hpZfhSy/Lny03N1cxMTHlXopv0KDBmd89KOkKJ6d0Adx8Xv8/XMwudjHbbcsl9XR7EqgQ2NdgCvsaTDgs6d2zetsv42gDq1u3rmrWrKmNGzeWWZaRkaE2bdqU+77/nh29QlITJ6d0Ady8P/Woi9kVuYSGSYpzexKoENjXYAr7Gsxx6tZJxz/D7bbbbtPSpUuVlZXlHVu1apUyMzM1cOBAp+MAAAAQgC7qTOhzzz2nb7/9Vjk5OZKkDz74QHv37pUkjR07VpGRkXr00Uf1zjvvqGvXrrrvvvuUn5+vp556Sq1bt9awYcOc/woAAAAQcC6qhM6cOVN79uyR9MNnFy9evFjvvvuuLMvSkCFDFBkZqXr16mn16tW6//779fDDDys0NFR9+vTRzJkzeTQTAAAAJF1kCf3Pf/5zQeu1aNFCy5cv/1kTAvyvldsTQIXBvgZT2NcQeBy/JxS49HGwhinsazCFfQ2BhxIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOOC3Z6ArxJJxYYzuxjOO9uNLma/7mJ2oovZ77qWbLd63LVsa/t/XMuWFriYHe9itulj2dmyXcx2U08Xs5e7lvwfpbiW3dDF7Ior3MXsMEe3xplQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGOeXErpp0yb169dPcXFxqlKliq666ipNnjxZBQUF/ogDAABAgAl2eoPbt2/Xtddeq7i4OI0bN07R0dFau3atJk2apM2bN+u9995zOhIAAAABxvES+uabb+rkyZP68MMPddVVV0mS7r77bp0+fVqvvfaa8vLyFBUV5XQsAAAAAojjl+PDw8MlSbVq1fIZr1OnjoKCglSpUiWnIwEAABBgHC+hw4cPV+3atTVixAht27ZN+/bt01tvvaU5c+Zo7Nix3pIKAACAisvxy/FxcXH6/PPP1bt3byUkJHjHH3/8cT3xxBNOxwEAACAAOV5CDxw4oF69ekmSXnrpJcXExGjp0qWaMmWKateurdGjRzsdCQAAgADjeAmdPHmysrOztXPnTsXFxUmSbrnlFp0+fVoTJkzQoEGDFB0dfY53vyypyo/GrpN0vdPTBAAAwDltPfM6W6GjCY6X0M8++0wJCQneAloqKSlJ8+fP19atW9WtW7dzvPtVSQnnWOYvXxjOO8u9V7uX/Wyia9Ev2E+6ln2P9XvXsq3txa5lS8tczD7X/3SasMfFbMcPrxehiYvZu1xLTlMH17K7KsW17IYuZrur1vlX8ZuDLmabeub6lWdeZ8uRNNexBMd/MOnUqVMqKSkpd1ySiovd/A8xAAAALgWOl9DExER98cUXyszM9Bl/4403FBQUpNatWzsdCQAAgADj+PWiBx98UIsWLdJ1112nMWPGKDo6WkuXLtXy5cs1cuRI1alTx+lIAAAABBjHS2jr1q2Vnp6uSZMm6amnnlJhYaEaNWqkqVOn6qGHHnI6DgAAAAHIL3fOt2/fXsuWufmDEAAAALiUOX5PKAAAAHA+lFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAccFuT8BHULBkhZjNLC42m3e2Z5e5l61eriXfY93oWra9ooNr2VaPN13Llna4mB3tYnaEi9n5LmbvcjG7vWvJXZXiWvZCF7MHu5jtroMuZlfUY4uzOBMKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4/xWQr/44gv17dtXMTExqlKlilq1aqVnn33WX3EAAAAIIMH+2OhHH32kpKQk/eY3v9HEiRNVtWpV7dq1S9nZ2f6IAwAAQIBxvIQeP35cQ4YMUVJSklJTU53ePAAAAC4Djl+O//vf/66DBw9qypQpkqQTJ07o9OnTTscAAAAggDleQleuXKnIyEjt27dPV155pSIiIhQVFaU//OEPKioqcjoOAAAAAcjxEpqZmani4mLdcsst6tWrl959910NHz5cc+bM0bBhw5yOAwAAQABy/J7Q7777Tt9//73uuecezZo1S5J0yy236OTJk3rxxRf1xBNPqEmTJk7HAgAAIIA4fiY0PDxckjRo0CCf8dI/r1+/3ulIAAAABBjHz4TGxcXp66+/Vu3atX3Ga9WqJUk6duzYud9cMk5S5I8GkyX9P0fn6OPGa/y37fOI/fg/rmXnWgWuZUu7XEu2evzFteyy+7ZJPV3MXu1itl+eQnfJ+0wprmVfq+muZbtpsNw8ttQ+/yp+U1Efvejm8TzfUM72M6+zFTqa4PgR
um3btlq5cqWysrLUtGlT73hOTo4kqWbNmj/x7hmSEpyeEgAAAC5KqzOvs+VImutYguOX42+//XZJ0iuvvOIz/vLLLyskJERdunRxOhIAAAABxvEzoW3atNHw4cP16quvqri4WJ07d1Z6erpSU1P16KOPqk6dOk5HAgAAIMD45YapOXPmqH79+po3b54WL16sBg0aaNasWRo7dqw/4gAAABBg/FJCg4ODNXHiRE2cONEfmwcAAECAc/yeUAAAAOB8KKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMC7Y7Qn4CpYUYjZy5Z/N5p0l14pwLVu6x73o8EnuZRfMdy9b213MLnAt2Y5JcS3bOjLTtWw3XauXXEw/5WK2e/u5VOxidk8Xs19xLXm6UlzLnuBi9uWEM6EAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwzu8ldMqUKfJ4PGrVqpW/owAAABAg/FpCs7KyNHXqVFWpUkWWZfkzCgAAAAEk2J8b/+Mf/6hOnTqpuLhYhw8f9mcUAAAAAojfzoSuWbNGixYt0qxZs2TbNmdCAQAA4OWXElpSUqJ7771XI0eOVMuWLf0RAQAAgADml8vxc+bM0d69e/XJJ5/4Y/MAAAAIcI6fCT1y5IgmTpyoiRMnKiYmxunNAwAA4DLg+JnQxx9/XDVq1NC99977M949XlLUj8YGnXkBAADAjO1nXmcrdDTB0RKamZmpl156SbNmzVJWVpZ3vLCwUCdPntSePXsUGRmp6tWrl7+Bx56W6ic6OaXz+91DZvN8/I+L2c+6F11wq3vZqudidoGL2eGuJVtHUlzL/kwPuJZ9rWa7li1lu5jt3r4m1XIx+6CL2a+4mN3cteQJSnEtO6VCZLc68zpbjqS5jiU4ejk+Oztbp0+f1tixY9WoUSPvKyMjQzt37lTDhg01efJkJyMBAAAQgBw9E9qqVSstXrzY53FMtm3r8ccf13fffadnnnlGjRs3djISAAAAAcjREhoTE6N+/fqVGX/66aclSX379nUyDgAAAAHK758dL0mWZfGwegAAAHj59WM7S6WlpZmIAQAAQIAwciYUAAAAOBslFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRQAAADGBbs9AR9TPpd0wGzmFb3M5p1t342uRd9rr3Et+1lPU9eyZf/dvWyFu5gdUSGzr9VLrmW7++991LXkNE1wLburUlzLdpeb/ynPdzHbPSmu7mvNXcwOdXRrnAkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAY53gJ3bhxo8aMGaOWLVuqatWqio+PV3JysjIzM52OAgAAQIAKdnqD06dP17p16zRw4EC1bt1aubm5eu6555SYmKj169erZcuWTkcCAAAgwDheQh944AG1a9dOwcH/3XRycrJatWqladOm6W9/+5vTkQAAAAgwjpfQjh07lhlr0qSJWrRooR07djgdBwAAgABk5AeTbNvWgQMHVKNGDRNxAAAAuMQZKaGvv/66cnJylJycbCIOAAAAlzi/l9AdO3Zo9OjR6tSpk+666y5/xwEAACAA+LWE7t+/XzfffLOqV6+u1NRUWZblzzgAAAAECMd/MKlUXl6eevXqpePHj+vTTz9VnTp1LuBdMySF/Wis1ZmXn+zz36bPr4lryc9a41zLll53Mft2F7Pfdi3ZvnuCa9nWy9tcy5YWu5hdq0Jmd1WKa9nuau9idoZryfM00rXsYRV2XzP1Q97bz7zOVuhogl9KaGFhoZKSkrRr1y6tXLlSzZs3v8B39pQU548pAQAA4IKVdxIwR9JcxxIcL6ElJSVKTk7Whg0b9P777+vqq692OgIAAAABzi8Pq1+yZImSkpJ0+PBhLVy40Gf54MGDnY4EAABAgHG8hG7btk2WZWnJkiVasmSJzzLLsiihAAAAcL6EpqWlOb1JAAAAXGaMPKweAAAAOBslFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRQAAADGBbs9AR+1fydVSjSbebfZOB+TjroYPtXFbDd3u2tczG7uWrL18kuuZUsrXcx2z2b9wbXs32ima9nucvPYssvFbPcMU4qL6bVczD7oWvJMF//OsyQ97eD2OBMKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4/xSQouKijRhwgTFxcWpcuXK6tChg1auXOmPKAAAAAQgv5TQoUOH6umnn9add96p2bNnKygoSL1799bnn3/ujzgAAAAEGMdLaEZGht566y1NmzZN06dP1913361PPvlE8fHxeuihh5yOA36GD9yeACqI5W5PABXIdrcnAFw0x0toamqqgoODNWrUKO9YaGioRowYoXXr1ik7O9vpSOAiUUJhxgq3J4AKhBKKwON4Cd2yZYuaNWumqlWr+oy3a9dOkrR161anIwEAABBgHC+hubm5io2NLTNeOpaTk+N0JAAAAAKM4yW0oKBAoaGhZcbDwsK8ywEAAFCxBTu9wfDwcBUVFZUZLyws9C7/MW8xPfWN09M5P1dPzOa5mO3mF+74bncR/iEp/8yvpv3HhcxSJ8g27Bv9sKe5cFSTlOVK6g/c/Pd289ji5vE8T1KhXP4PmgsKXcw+6lqym9/dB8786tQJRce/Y2NjY8u95J6bmytJiouLK7Ns9+7dP/zm6GCnp3N+L5qPhJueP/NrkquzwOVv8I9+NetpV1LhtrluTwAGXArf3bt379Y111zzi7fjeAlNSEhQenq68vPzFRER4R3fsGGDJKlNmzZl3tOjRw8tXLhQDRo0KPdMKQAAANxVUFCg3bt3q0ePHo5sz7Jt23ZkS2dkZGSoQ4cOeuqpp/TAAw9I+uETlH71q1+pZs2aWrt2rZNxAAAACECOnwlt3769Bg4cqEceeUQHDx5U48aNtWDBAu3du1fz5s1zOg4AAAAByPEzodIPZz7/9Kc/aeHChTp27Jh+/etfa/LkybrpppucjgIAAEAA8ksJBQAAAH6K488JBQAAAM7H1RJaVFSkCRMmKC4uTpUrV1aHDh20cuVKN6eEy1B6ero8Hk+5r4yMDLenhwB14sQJTZo0ST1
79lR0dLQ8Ho8WLFhQ7rrffPONevbsqYiICMXExGjIkCE6fPiw4RkjUF3ovjZ06NByj3NXXXWVC7NGoNm4caPGjBmjli1bqmrVqoqPj1dycrIyMzPLrOvUMc3NJ/tq6NChWrRokcaPH6+mTZtq3rx56t27t9LS0hx5/hRwtvvuu0/t2rXzGWvcuLFLs0GgO3TokCZPnqz4+Hi1adNG6enpsiyrzHpZWVnq3Lmzqlevrr/+9a/Kz8/XjBkztH37dmVkZCgkJMSF2SOQXOi+JkmhoaF65ZVXfMaioqJMTBMBbvr06Vq3bp0GDhyo1q1bKzc3V88995wSExO1fv16tWzZUpLDxzTbJRs2bLAty7JnzpzpHSssLLSbNGlid+rUya1p4TKUlpZmW5ZlL1q0yO2p4DJSVFRkHzhwwLZt2960aZNtWZa9YMGCMuvdc889dpUqVex9+/Z5x1auXGlblmXPnTvX2HwRuC50X7vrrrvsiIgI09PDZWLt2rX2qVOnfMYyMzPtsLAwe/Dgwd4xJ49prl2OT01NVXBwsEaNGuUdCw0N1YgRI7Ru3TplZ2e7NTVcpmzbVn5+voqLi92eCi4DlSpVUq1atST9sG+dy6JFi9SnTx/Vq1fPO3bDDTeoWbNmevvtt/0+TwS+C93XSpefPn1ax48fNzE1XEY6duyo4GDfC+RNmjRRixYttGPHDu+Yk8c010roli1b1KxZM1WtWtVnvPRy6datW92YFi5jw4YNU1RUlMLDw9WtWzdt3rzZ7SnhMpedna1Dhw6pbdu2ZZa1a9dOW7ZscWFWuJx9//33ioyMVLVq1RQTE6MxY8boxIkTbk8LAcq2bR04cEA1atSQ5PwxzbV7QnNzcxUbG1tmvHSsvM+fB36O0NBQDRgwQL1791aNGjX01VdfacaMGbruuuu0du3acj9KFnBCbm6uJJ3zWHf06FGdOnWK+0LhiLi4OE2YMEGJiYk6ffq0li1bpueff17btm1Tenq6goKC3J4iAszrr7+unJwc/eUvf5Hk/DHNtRJaUFCg0NDQMuNhYWHe5YATOnbsqI4dO3r/3KdPHw0YMECtW7fWI488omXLlrk4O1zOSo9j5zvWUULhhKlTp/r8+fbbb1ezZs302GOPKTU1VcnJyS7NDIFox44dGj16tDp16qS77rpLkvPHNNcux4eHh6uoqKjMeGFhoXc54C+NGzdWv379lJaWdt57rICfq/Q4xrEObhk/frw8Ho9WrVrl9lQQQPbv36+bb75Z1atXV2pqqvdpDE4f01wrobGxseVeci891RsXF2d6Sqhg6tWrp5MnT3K/FPym9JJV6XHtbLm5uYqJieEsKPwqLCxM0dHROnr0qNtTQYDIy8tTr169dPz4cS1fvlx16tTxLnP6mOZaCU1ISNDOnTuVn5/vM75hwwZJ4j49+N2///1vhYeHl/nhOMApdevWVc2aNbVx48YyyzIyMjjOwe/y8/N1+PBh1axZ0+2pIAAUFhYqKSlJu3bt0tKlS9W8eXOf5U4f01wroQMGDFBJSYnmzp3rHSsqKtK8efPUoUMH1a1b162p4TJz6NChMmPbtm3TBx98oO7du7swI1Qkt912m5YuXaqsrCzv2KpVq5SZmamBAwe6ODNcToqKisqc1JGkyZMnS5J69uxpekoIMCUlJUpOTtaGDRv0zjvv6Oqrry53PSePaZbt4g1xycnJWrx4scaPH6/GjRtrwYIF2rRpk1atWqVrr73WrWnhMtOtWzdVrlxZHTt2VK1atfT1119r7ty5Cg0N1bp163TllVe6PUUEqOeee07ffvutcnJyNGfOHPXv3997JmDs2LGKjIxUVlaWEhISVK1aNd13333Kz8/XU089pfr162vjxo1cjscFOd++dvToUSUkJOi3v/2t95i2YsUKLVu2TL169dKHH37o5vQRAMaNG6fZs2crKSmp3DI5ePBgSXL2mPbznqvvjMLCQvvBBx+0Y2Nj7bCwMPvqq6+2P/roIzenhMvQ7Nmz7auvvtqOiYmxQ0JC7Lp169pDhgyx//Wvf7k9NQS4Bg0a2JZl2ZZl2R6Px/Z4PN4M51I4AAAgAElEQVTf79mzx7veV199Zffo0cOuUqWKHR0dbd955532wYMHXZw5As359rVvv/3WvvPOO+2mTZvaVapUscPCwuxWrVrZ06ZNs4uLi92ePgJAly5dvPvVj18ej8dnXaeOaa6eCQUAAEDF5No9oQAAAKi4KKEA4LIuXbrI47m4w7HH41HXrl39NCMA8D9KKAD42dChQ+XxeLR3795zrlP6MOiL8XPeAwCXCtc+thMAKhKnC+OOHTtUuXJlR7cJACZRQgHAANu2Hf2I2GbNmjm2LQBwA5fjAVxSPvjgA91www2KjY1VWFiY6tatqy5duuiFF17wrlN6D2VxcbGeeOIJNW7cWOHh4WrevLleeukl73rPP/+8WrVqpcqVK+uKK65QSkrKOYvg22+/rc6dOysqKkqVK1dW69atNW3aNJ08ebLc9Tdv3qzbbrtNtWrVUlhYmBo0aKDRo0dr//79Put5PB699tprkqSGDRvK4/HI4/GoYcOGZbZZUlKiqVOnqmnTpgoLC1P9+vX18MMP69SpU2XWLe+e0JSUFHk8Hq1evVqpqalq3769qlSpopiYGA0aNKjcj0qWpI0bN6p79+6KiIhQVFSUbrrpJq1fv967vTVr1pT7PgD4JTgTCuCSMXfuXP3+979XbGys+vXrpxo1aujgwYPatm2b5s+fr3vuucdn/eTkZGVkZOjmm29WSEiI3nnnHf3ud79TUFCQtm3bptdff11JSUm66aab9P777+uJJ55Q5cqV9dBDD/ls59FHH9W0adNUs2ZNDR48WFWrVtX//d//6dFHH9WKFSv00Ucf+TyAeenSpbrttttkWZYGDBig+Ph4bdq0SS+88ILef/99ffbZZ2rQoIEkadKkSXrvvfe0bds2jRs3TtWqVZMk76+lbNvWoEGD9Nlnn6l3796KjIzUhx9+qCeffFIHDx7Uq6++Wubv61yX+J9//nl98MEH6tevn7p27ar169frrbfe0rZt27R161ZVqlTJu+6aNWvUvXt32bat/v37q3Hjxvryyy/VtWtXdevW7cL/8QDgYv3ip5sCgEMSExPtsLAw+9ChQ2WWHTlyxPv766+/3rYsy27fvr2dl5fnHf/3v/9tV6pUyY6MjLQbNWpk5+TkeJd9++23do0aNeyaNWv6PLx77dq1tmVZdnx8vH3gwAHveHFxsZ2UlGRblmVPnTrVO56fn29HR0fbwcHB9meffeYzx+nTp9uWZdndu3f3Gb/rrrtsy7J8HmB/ttKvp23btvaxY8e84ydOnLCbNGliBwUF2fv37/d5j2VZdteuXX3GJk2aZFuWZUdFRdn/+Mc/fJb99re/tS3Lst9++23vWElJid2kSRPb4/HYy5cv91l/zpw53odUr169utx5A8AvweV4AJeUoKAgBQeXvUgTHR1dZmzatGmKjIz0/rlhw4a65pprlJ+frz/96U+KjY31LouKilKfPn10+PBhn8
vSpWcYH3/8cdWqVctnHjNnzpTH49HLL7/sHX///fd17NgxJScn65prrvGZzwMPPKD4+Hh9/PHH2rdv30V/7dOnT/c5Q1q5cmXdcccdOn36tDZv3nzB2xk7dqxatmzpMzZy5EhJP1x6L7V27Vr961//UteuXdWjRw+f9UeNGqVmzZo5eh8rAJyNEgrgkjF48GB9//33atGihe6//3699957OnToULnrWpaltm3blhmPi4uTJP3mN78ps6xu3bqSfvjs41JffPGFLMsq99Jz06ZNVbduXe3evVv5+fne9SWVu35QUJA6d+4sSdqyZctPfq0X+vXUq1dPknTs2LEL3taFbqd0jtdee2258+nYseMFZwLAxaKEArhkjB8/XgsWLFB8fLxmz56t/v37q06dOurWrVu5ZwIjIiLKjJWeRY2KijrnsrN/0CcvL0+SfM6ani02Nla2bevbb7+94PXPXu9inH1W98dzLikpueDt/Ph+03Ntp3SOtWvXLnc75xoHACdQQgFcUu68806tW7dOR44c0YcffqgRI0ZozZo16tGjhw4fPux4XmlZzc3NLXd5bm6uLMvyrlf6649/Cv7s9c9e71JWWnoPHDhQ7vJzjQOAEyihAC5JUVFR6tWrl+bOnauhQ4fq6NGj+vTTTx3PSUxMlG3bSk9PL7Ns165dysrKUsOGDb2FLTExUZKUlpZWZv3i4mJ9+umnsizLu570w2V66eLOZppQOsfy/l5Pnz6ttWvXmp4SgAqEEgrgklFesZP+e0bOH58QNHz4cEnSX/7yF58zrSUlJfrjH/8o27Y1YsQI7/gtt9yi6OhovfHGG9qwYYPPtmbNmqXdu3frxhtv9N6DKUkxMTGSpD179jg+/1/immuuUePGjZWWlqbly5f7LJs7d64yMzP5aFAAfsNzQgFcMm699VZFRESoQ4cOio+Pl23b+vTTT7Vp0ya1bdtWN954o3ddp35qu2PHjnrooYf05JNP6le/+pUGDBigypUra9myZfrqq6903XXX6cEHH/SuX6VKFb366qsaOHCgrr/+eg0cOFBXXHGFNm/erI8//lixsbF68cUXfTJuvPFGzZgxQyNHjlT//v0VERGh6tWra/To0Y5/PRfDsiy9/PLL6tmzp/r27avbbrtNjRo10pdffqmVK1eqV69eWrZsmTwezlcAcN4FH1lOnDihSZMmqWfPnoqOjpbH49GCBQvKXfebb75Rz549FRERoZiYGA0ZMsQv93IBuLxMnz5d7dq10xdffKEXXnhB8+fPV0lJiZ588kmlpaV5L2tblnXOM3Q/Z9m0adP0xhtvqGnTpnrttdf07LPPSpKmTJmijz/+uMwjo/r27avPP/9cvXv31ooVKzRz5kz985//1D333KPNmzd7H1Rfqnv37po5c6ZCQkL0zDPPaOLEiZo5c+YvmvMvXbfU9ddfr9WrV6tLly768MMP9eyzz6qoqEjp6eneT3Uq7wemAOCXsuwL/N/v3bt3q1GjRoqPj1fDhg2Vnp6u+fPna8iQIT7rZWVlKSEhQdWrV9fYsWOVn5+vGTNmqH79+srIyPD51BEAwKXrmmuu0caNG5WXl6fw8HC3pwPgMnPBl+Pj4uK0f/9+1apVS5s3b1a7du3KXW/q1KkqKCjQli1bvPdEtW/fXjfddJPmz5/vfWAyAMB9BQUFKioqKvNYp/nz52vdunXq3bs3BRSAX1xwCa1UqZL300R+6uTpokWL1KdPH5+b8m+44QY1a9ZMb7/9NiUUAC4he/bsUUJCgrp3767GjRuruLhYW7Zs0eeff67q1av73DYAAE5y9AeTsrOzdejQoXI/raNdu3ZatmyZk3EAgF+oTp06Gjx4sFavXq20tDQVFRUpNjZWw4cP12OPPea9LxQAnOZoCS19SHN5nyQSGxuro0eP6tSpU9wXCgCXiGrVqumll15yexoAKiBHS2hBQYEkKTQ0tMyysLAw7zo/LqGHDx/WihUr1KBBA+49AgAAuAQVFBRo9+7d6tGjh2rUqPGLt+doCS0tkEVFRWWWFRYW+qxzthUrVmjw4MFOTgUAAAB+sHDhQt1xxx2/eDuOltDSy/DlfQZzbm6uYmJiyr0U/99n6vWX9MubNfDTlkvq6fYkUCGwr8EU9jWYcFjSu2WehfxzOVpC69atq5o1a2rjxo1llmVkZKhNmzblvu+/Z0drSIpzckpAOcLEfgYz2NdgCvsazHHq1knHP4vttttu09KlS5WVleUdW7VqlTIzMzVw4ECn4wAAABCALupM6HPPPadvv/1WOTk5kqQPPvhAe/fulSSNHTtWkZGRevTRR/XOO++oa9euuu+++5Sfn6+nnnpKrVu31rBhw5z/CgAAABBwLqqEzpw5U3v27JH0w2cUL168WO+++64sy9KQIUMUGRmpevXqafXq1br//vv18MMPKzQ0VH369PF+bjIAAABwUSX0P//5zwWt16JFCy1fvvxnTQjwv1ZuTwAVBvsaTGFfQ+Bx/J5Q4NLHwRqmsK/BFPY1BB5KKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACMC3Z7Au4LdzG7wMVsNzV3MXuHa8kpSqmQ2RUXxxbzOLZUpOyKy81jS5ijW+NMKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDi/lNBNmzapX79+iouLU5UqVXTVVVdp8uTJKigo8EccAAAAAkyw0xvcvn27rr32WsXFxWncuHGKjo7W2rVrNWnSJG3evFnvvfee05EAAAAIMI6X0DfffFMnT57Uhx9+qKuuukqSdPfdd+v06dN67bXXlJeXp6ioKKdjAQAAEEAcvxwfHh4uSapVq5bPeJ06dRQUFKRKlSo5HQkAAIAA43gJHT58uGrXrq0RI0Zo27Zt2rdvn9566y3NmTNHY8eO9ZZUAAAAVFyOX46Pi4vT559/rt69eyshIcE7/vjjj+uJJ55wOg4AAAAByPESeuDAAfXq1UuS9NJLLykmJkZLly7VlClTVLt2bY0ePdrpSAAAAAQYx0vo5MmTlZ2drZ07dyouLk6SdMstt+j06dOaMGGCBg0apOjoaKdjAQAAEEAcL6GfffaZEhISvAW0VFJSkubPn6+tW7eqW7du53j3cklhPxprdeblL24+u7S5i9k7XEtO0f9zMTulQma7y837wN38/ubYYhrHloqGY4t/bT/zOluhowmOl9BTp06ppKSk3HFJKi4u/ol395QU9xPLAQAA4H/lnQTMkTTXsQTHfzo+MTFRX3zxhTIzM33G33jjDQUFBal169ZORwIAACDAOH4m9MEHH9SiRYt03XXXacyYMYqOjtbSpUu1fPlyjRw5UnXq1HE6EgAAAAHG8RLaunVrpaena9KkSXrqqadUWFioRo0aaerUqXrooYecj
gMAAEAAcryESlL79u21bNkyf2waAAAAlwHH7wkFAAAAzocSCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjgt2egK8wSeGGMwsM551th4vZzV1LTlEK2RWKm99jpo8nZ+PYYlpF/f7m2OKGinpscRZnQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGAcJRQAAADGUUIBAABgHCUUAAAAxlFCAQAAYBwlFAAAAMZRQgEAAGCc30roF198ob59+yomJkZVqlRRq1at9Oyzz/orDgAAAAEk2B8b/eijj5SUlKTf/OY3mjhxoqpWrapdu3YpOzvbH3EAAAAIMI6X0OPHj2vIkCFKSkpSamqq05sHAADAZcDxy/F///vfdfDgQU2ZMkWSdOLECZ0+fdrpGAAAAAQwx0voypUrFRkZqX379unKK69URESEoqKi9Ic//EFFRUVOxwEAACAAOV5CMzMzVVxcrFtuuUW9evXSu+++q+HDh2vOnDkaNmyY03EAAAAIQI7fE/rdd9/p+++/1z333KNZs2ZJkm655RadPHlSL774op544gk1adLE6VgAAAAEEMfPhIaHh0uSBg0a5DNe+uf169c7HQkAAIAA4/iZ0Li4OH399deqXbu2z3itWrUkSceOHfuJd38sqfKPxjpJusbJKf7IDj9u+6elKKVCZrvJ3a873MXsAhez3RTvYjbHloqEY0tFUxGOLdvPvM5W6GiC4yW0bdu2WrlypbKystS0aVPveE5OjiSpZs2aP/HuIZIaOj0lAAAAXJRWZ15ny5E017EExy/H33777ZKkV155xWf85ZdfVkhIiLp06eJ0JAAAAAKM42dC27Rpo+HDh+vVV19VcXGxOnfurPT0dKWmpurRRx9VnTp1nI4EAABAgPHLx3bOmTNH9evX17x587R48WI1aNBAs2bN0tixY/0RBwAAgADjlxIaHBysiRMnauLEif7YPAAAAAKc4/eEAgAAAOdDCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAccFuT8DXfyQVuT0JY1KU4mJ6uIvZBS5muynexewdriW7uZ+7+z3mHo4tFQ3HloqUfTnhTCgAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACM83sJnTJlijwej1q1auXvKAAAAAQIv5bQrKwsTZ06VVWqVJFlWf6MAgAAQAAJ9ufG//jHP6pTp04qLi7W4cOH/RkFAACAAOK3M6Fr1qzRokWLNGvWLNm2zZlQAAAAePmlhJaUlOjee+/VyJEj1bJlS39EAAAAIID55XL8nDlztHfvXn3yySf+2DwAAAACnONnQo8cOaKJEydq4sSJiomJcXrzAAAAuAw4fib08ccfV40aNXTvvff+jHcvlxT2o7FWZ14AAAAwY/uZ19kKHU1wtIRmZmbqpZde0qxZs5SVleUdLyws1MmTJ7Vnzx5FRkaqevXq5b5/lPYqrszoTkmLnJymjxSl+G3bl7YCF7PDXcx28+ve4WJ2c9eS3fweq6jZ7uLYYh7HFrL9obyTgDmS5jqW4Ojl+OzsbJ0+fVpjx45Vo0aNvK+MjAzt3LlTDRs21OTJk52MBAAAQABy9Exoq1attHjxYp/HMdm2rccff1zfffednnnmGTVu3NjJSAAAAAQgR0toTEyM+vXrV2b86aefliT17dvXyTgAAAAEKL9/drwkWZbFw+oBAADg5deP7SyVlpZmIgYAAAABwsiZUAAAAOBslFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAccFuT+BsczVKUpzh1OaG8862w7XkFKVUyOyKa4/bE3CFu/sax5aKlF1xcWwxz81jS6ijW+NMKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIyjhAIAAMA4SigAAACMo4QCAADAOEooAAAAjKOEAgAAwDhKKAAAAIxzvIRu3LhRY8aMUcuWLVW1alXFx8crOTlZmZmZTkcBAAAgQAU7vcHp06dr3bp1GjhwoFq3bq3c3Fw999xzSkxM1Pr169WyZUunIwEAABBgHC+hDzzwgNq1a6fg4P9uOjk5Wa1atdK0adP0t7/9zelIAAAABBjHS2jHjh3LjDVp0kQtWrTQjh07nI4DAABAADLyg0m2bevAgQOqUaOGiTgAAABc4oyU0Ndff105OTlKTk42EQcAAIBLnN9L6I4dOzR69Gh16tRJd911l7/jAAAAEAD8WkL379+vm2++WdWrV1dqaqosy/JnHAAAAAKE4z+YVCovL0+9evXS8ePH9emnn6pOnToX8K7lksJ+NNbqzMtf3PxhqXDXklOU4lq2u5q7mO3evpaiCS5mp7iW7S6OLRULxxbz2SmuZbvL1L/39jOvsxU6muCXElpYWKikpCTt2rVLK1euVPPmF/rN2VNSnD+mBAAAgAtW3knAHElzHUtwvISWlJQoOTlZGzZs0Pvvv6+rr77a6QgAAAAEOL88rH7JkiVKSkrS4cOHtXDhQp/lgwcPdjoSAAAAAcbxErpt2zZZlqUlS5ZoyZIlPsssy6KEAgAAwPkSmpaW5vQmAQAAcJkx8rB6AAAA4GyUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYRwkFAACAcZRQAAAAGEcJBQAAgHGUUAAAABhHCQUAAIBxlFAAAAAYF+z2BNyWopQKmV1x7XF7Aq5wd19r7mL2DteSObZUNBxbzOPYYlqOpLkObo8zoQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADCOEgoAAADjKKEAAAAwjhIKAAAA4yihAAAAMI4SCgAAAOMooQAAADDOLyW0qKhIEyZMUFxcnCpXrqwOHTpo5cqV/ogCAABAAPJLCR06dKiefvpp3XnnnZo9e7aCgoLUu3dvff755/6IAy7SVrcngApiu9sTQAXC3obA43gJzcjI0FtvvaVp06Zp+vTpuvvuu/XJJ58oPj5eDz30kNNxwM9ACYUZ1AKYw96GwON4CU1NTVVwcLBGjRrlHQsNDdWIESO0bt06ZWdnOx0JAACAAON4Cd2yZYuaNWumqlWr+oy3a9dOkrR1K2ehAAAAKjrHS2hubq5iY2PLjJeO5eTkOB0JAACAABPs9AYL
CgoUGhpaZjwsLMy7vLz3/OCw09M5L3crMYXcvDBJhZLcuC2k0IXMS0HZ44E57n2P5eiHf3F3ZsCxxbwwF7ML5ebe5p6Ke2xxS2lLK6/L/RyOl9Dw8HAVFRWVGS8sLPQu/7Hdu3ef+d27Tk/nvOYaT7x00iu22W5PAJe5uT/61Z10VCz8u1cEl8K/8u7du3XNNdf84u04XkJjY2PLveSem5srSYqLiyuzrEePHlq4cKEaNGhQbkkFAACAuwoKCrR792716NHDke05XkITEhKUnp6u/Px8RUREeMc3bNggSWrTpk2Z99SoUUN33HGH01MBAACAg5w4A1rK8R9MGjBggEpKSjR37n9PGBcVFWnevHnq0KGD6tat63QkAAAAAozjZ0Lbt2+vgQMH6pFHHtHBgwfVuHFjLViwQHv37tW8efOcjgMAAEAAsmzbtp3eaFFRkf70pz9p4cKFOnbsmH79619r8uTJuummm5yOAgAAQADySwkFAAAAforj94QCAAAA5+NqCS0qKtKECRMUFxenypUrq0OHDlq5cqWbU8JlKD09XR6Pp9xXRkaG29NDgDpx4oQmTZqknj17Kjo6Wh6PRwsWLCh33W+++UY9e/ZURESEYmJiNGTIEB0+bP7DORCYLnRfGzp0aLnHuauuusqFWSPQbNy4UWPGjFHLli1VtWpVxcfHKzk5WZmZmWXWdeqY5vgPJl2MoUOHatGiRRo/fryaNm2qefPmqXfv3kpLS3P0EQCAJN13331q166dz1jjxo1dmg0C3aFDhzR58mTFx8erTZs2Sk9Pl2VZZdbLyspS586dVb16df31r39Vfn6+ZsyYoe3btysjI0MhISEuzB6B5EL3NUkKDQ3VK6+84jMWFRVlYpoIcNOnT9e6des0cOBAtW7dWrm5uXruueeUmJio9evXq2XLlpIcPqbZLtmwYYNtWZY9c+ZM71hhYaHdpEkTu1OnTm5NC5ehtLQ027Ise9GiRW5PBZeRoqIi+8CBA7Zt2/amTZtsy7LsBQsWlFnvnnvusatUqWLv27fPO7Zy5Urbsix77ty5xuaLwHWh+9pdd91lR0REmJ4eLhNr1661T5065TOWmZlph4WF2YMHD/aOOXlMc+1yfGpqqoKDgzVq1CjvWGhoqEaMGKF169YpO9uNz/bG5cy2beXn56u4uNjtqeAyUKlSJdWqVUvSD/vWuSxatEh9+vRRvXr1vGM33HCDmjVrprffftvv80Tgu9B9rXT56dOndfz4cRNTw2WkY8eOCg72vUDepEkTtWjRQjt27PCOOXlMc62EbtmyRc2aNVPVqlV9xksvl27dutWNaeEyNmzYMEVFRSk8PFzdunXT5s2b3Z4SLnPZ2dk6dOiQ2rZtW2ZZu3bttGXLFhdmhcvZ999/r8jISFWrVk0xMTEaM2aMTpw44fa0EKBs29aBAwdUo0YNSc4f01y7JzQ3N1exsbFlxkvHyvv8eeDnCA0N1YABA9S7d2/VqFFDX331lWbMmKHrrrtOa9euLfejZAEn5ObmStI5j3VHjx7VqVOnuC8UjoiLi9OECROUmJio06dPa9myZXr++ee1bds2paenKygoyO0pIsC8/vrrysnJ0V/+8hdJzh/TXCuhBQUFCg0NLTMeFhbmXQ44oWPHjurYsaP3z3369NGAAQPUunVrPfLII1q2bJmLs8PlrPQ4dr5jHSUUTpg6darPn2+//XY1a9ZMjz32mFJTU5WcnOzSzBCIduzYodGjR6tTp0666667JDl/THPtcnx4eLiKiorKjBcWFnqXA/7SuHFj9evXT2lpaee9xwr4uUqPYxzr4Jbx48fL4/Fo1apVbk8FAWT//v26+eabVb16daWmpnqfxvD/27l7l/TaOI7jH4XIEiIUIzMiiKilwcmChmioHFx6QIjsT+hh6C9oCdqkIVyiuSEIowdwDrOloYaeIJCCCrFcNJLrNyV3d8Hv5sbOwXq/QJDrnOEzHL58uM6l1Z5ptpVQv9//5Sv3963etrY2qyPhl2lvb9fr6yvnpfBt3l9Zvc+1f7q/v5fX62UXFN/K5XLJ4/Eol8vZHQU14vn5WeFwWC8vL9rf31dra2vlWrVnmm0lNBgM6uLiQoVC4cN6Op2WJM7p4dvd3NyooaHh04/jgGoJBALy+XzKZDKfrh0fHzPn8O0KhYKenp7k8/nsjoIaUCwWFYlEdHV1pWQyqd7e3g/Xqz3TbCuhk5OTKpfLSiQSlbVSqaSNjQ319/crEAjYFQ0/zOPj46e109NT7ezsaGRkxIZE+E0mJiaUTCaVzWYra6lUSpeXl5qamrIxGX6SUqn0aVNHkpaXlyVJY2NjVkdCjSmXy4pGo9xpsloAAAG5SURBVEqn09ra2lIoFPryvmrONIex8UBcNBrV9va2FhcX1dXVpc3NTZ2cnCiVSmlwcNCuWPhhhoeH1djYqIGBAbW0tOj8/FyJREL19fU6OjpST0+P3RFRo9bW1pTP53V3d6f19XWNj49XdgLm5ubU1NSkbDarYDCo5uZmzc/Pq1AoaHV1VR0dHcpkMryOx3/yt2ctl8spGAxqenq6MtMODg60t7encDis3d1dO+OjBiwsLCgejysSiXxZJmdmZiSpujPt//2vfnUUi0WztLRk/H6/cblcJhQKmcPDQzsj4QeKx+MmFAoZr9dr6urqTCAQMLOzs+b6+truaKhxnZ2dxuFwGIfDYZxOp3E6nZXvt7e3lfvOzs7M6OiocbvdxuPxmFgsZh4eHmxMjlrzt2ctn8+bWCxmuru7jdvtNi6Xy/T19ZmVlRXz9vZmd3zUgKGhocpz9e+P0+n8cG+1ZpqtO6EAAAD4nWw7EwoAAIDfixIKAAAAy1FCAQAAYDlKKAAAACxHCQUAAIDlKKEAAACwHCUUAAAAlqOEAgAAwHKUUAAAAFiOEgoAAADLUUIBAABgOUooAAAALPcHjvrLwCRcjHcAAAAASUVORK5CYII=", 376 | "text/plain": [ 377 | "Figure(PyObject )" 378 | ] 379 | }, 380 | "metadata": {}, 381 | "output_type": "display_data" 382 | }, 383 | { 384 | "data": { 385 | "text/plain": [ 386 | "PyObject " 387 | ] 388 | }, 389 | "execution_count": 17, 390 | "metadata": {}, 391 | "output_type": "execute_result" 392 | } 393 | ], 394 | "source": [ 395 | "PyPlot.figure()\n", 396 | "ax = PyPlot.subplot(2, 1, 1)\n", 397 | "ax[:set_title](\"filtering\")\n", 398 | "PyPlot.pcolor(alpha)\n", 399 | "ax = PyPlot.subplot(2, 1, 2)\n", 400 | "ax[:set_title](\"smoothing\")\n", 401 | 
"PyPlot.pcolor(gamma)" 402 | ] 403 | }, 404 | { 405 | "cell_type": "code", 406 | "execution_count": null, 407 | "metadata": { 408 | "collapsed": true 409 | }, 410 | "outputs": [], 411 | "source": [] 412 | } 413 | ], 414 | "metadata": { 415 | "kernelspec": { 416 | "display_name": "Julia 0.4.0", 417 | "language": "julia", 418 | "name": "julia-0.4" 419 | }, 420 | "language_info": { 421 | "file_extension": ".jl", 422 | "mimetype": "application/julia", 423 | "name": "julia", 424 | "version": "0.4.0" 425 | } 426 | }, 427 | "nbformat": 4, 428 | "nbformat_minor": 0 429 | } 430 | --------------------------------------------------------------------------------