├── README.md ├── example ├── j1.jpg ├── j2.jpg ├── j3.jpg ├── j4.jpg └── j5.jpg ├── main.py ├── req.txt ├── test_data ├── B21.jpg ├── B22.jpg ├── B23.jpg ├── B24.jpg ├── B25.jpg ├── __init__.py ├── __init__.pyc ├── d1.txt ├── d2.txt ├── d3.txt ├── d4.txt ├── dataMergeTest.py ├── dataMergeTest.pyc ├── glumpyTest.py ├── graphsMergeStart.pkl ├── mergeTest.py └── varProp.txt └── utils ├── __init__.py ├── __init__.pyc ├── bundleAjust.py ├── bundleAjust.pyc ├── dense.py ├── dense.pyc ├── fundamental.py ├── fundamental.pyc ├── getPose.py ├── getPose.pyc ├── graph.py ├── graph.pyc ├── loadDatacsv.py ├── loadDatacsv.pyc ├── matchSift.py ├── matchSift.pyc ├── mergeGraph.py ├── mergeGraph.pyc ├── paresDescript.py ├── paresDescript.pyc ├── parser.py └── parser.pyc /README.md: -------------------------------------------------------------------------------- 1 | # Structure-from-motion-python 2 | Implementation based on SFMedu, Princeton COS429: Computer Vision (http://vision.princeton.edu/courses/SFMedu/), but in Python + NumPy. 3 | 4 | The objective of this project was to understand the structure-from-motion problem, so I took the MATLAB code from http://vision.princeton.edu/courses/SFMedu/ 5 | and translated it into Python + NumPy. The initial version is a literal translation of the MATLAB code to Python, so expect long run times; if you want fast and easy-to-use software, see http://ccwu.me/vsfm/ 6 | 7 | Requirements: 8 | NumPy, OpenCV (cv2), and plyfile (https://github.com/dranjan/python-plyfile); see req.txt for pinned versions. 9 | 10 | For an example, just run main.py without any changes. It will generate a point cloud of a giraffe from the 5 images included in the example folder and write it to a .ply file that can be opened with MeshLab (this can take up to 30 minutes). 11 | -------------------------------------------------------------------------------- /example/j1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/example/j1.jpg -------------------------------------------------------------------------------- /example/j2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/example/j2.jpg -------------------------------------------------------------------------------- /example/j3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/example/j3.jpg -------------------------------------------------------------------------------- /example/j4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/example/j4.jpg -------------------------------------------------------------------------------- /example/j5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/example/j5.jpg -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import numpy as np 4 | from utils.bundleAjust import bundleAdjustment 5 | from utils.dense import denseMatch, denseReconstruction, outputPly 6 | from 
utils.fundamental import default, implementacionRansac 7 | from utils.getPose import getPose 8 | from utils.graph import createGraph, triangulateGraph, showGraph, visualizeDense 9 | from utils.mergeGraph import mergeG, removeOutlierPts 10 | from utils.paresDescript import getPairSIFT 11 | 12 | #Creditos a % SFMedu: Structrue From Motion for Education Purpose 13 | # % Written by Jianxiong Xiao (MIT License) el codigo se base en este 14 | 15 | 16 | def mergeAllGraph(gL,imsize): 17 | graphMerged = gL[0] 18 | # merge de vistas parciales 19 | for i in range(len(gL) - 1): 20 | graphMerged = updateMerged(graphMerged, gL[i+1],imageSize) 21 | return graphMerged 22 | def updateMerged(gA,gB,imsize): 23 | gt = mergeG(gA, gB) 24 | gt = triangulateGraph(gt, imsize) 25 | gt = bundleAdjustment(gt, False) 26 | gt = removeOutlierPts(gt, 10) 27 | gt = bundleAdjustment(gt) 28 | return gt 29 | 30 | if __name__ == "__main__": 31 | 32 | #---------------------------SET PARAMETERS 33 | maxSize = 640 #maxima resolucion de imagen 34 | carpetaImagenes = 'example/' 35 | debug = False 36 | outName = "jirafa" #out name for ply file (open with mesh lab to see poitn cloud) 37 | validFile = ['jpg','png','JPG'] #tipos validos de imagenes 38 | # Intentar conseguir la distancia focal 39 | # TODO agregar calculo este valor deberia funcionar con imagenes 480x640 focalLen 4mm 40 | f = 719.5459 41 | 42 | # ---------------------------SET PARAMETERS 43 | 44 | 45 | algoMatrizFundamental = implementacionRansac 46 | 47 | graphList = [] 48 | 49 | #Cargar imagenes 50 | listaArchivos = os.listdir(carpetaImagenes) 51 | listaImages = filter(lambda x : x.split('.')[-1] in validFile,listaArchivos ) 52 | 53 | 54 | 55 | 56 | #Carga las imagenes 57 | listaImages = map(lambda x : cv2.imread(carpetaImagenes+x),listaImages) 58 | 59 | imageSize = listaImages[0].shape 60 | print "Dimensiones originales ",imageSize 61 | #todo Escala la imagen si pasa de maxSize 62 | if imageSize[0] > maxSize: 63 | print "Escalando" 64 | print "Size image ",imageSize," max size ",maxSize 65 | #480 640 funciona 66 | listaImages = map(lambda x: np.transpose(cv2.resize(x,(640,480)),axes=[1,0,2]), listaImages) 67 | imageSize = listaImages[0].shape 68 | print "Result size ",imageSize 69 | 70 | #calculo de matriz K 71 | K = np.eye(3) 72 | K[0][0] = f 73 | K[1][1] = f 74 | 75 | graphList = [0 for i in range(len(listaImages)-1)] 76 | #calcula pares a partir de SIFT u otro descriptor local 77 | #Se calculan como imagenes sucesivas 78 | print "Inicia calculo de pares SIFT" 79 | for i in range(len(listaImages)-1): 80 | keypointsA,keypointsB = getPairSIFT(listaImages[i],listaImages[i+1],show=debug) 81 | 82 | 83 | #Calcular la matriz fundamental o la matriz escencial 84 | #TODO conseguir las demas 85 | if type(keypointsA[0]) == np.ndarray: 86 | assert(len(keypointsA.shape) == 2) 87 | assert (len(keypointsB.shape) == 2) 88 | pointsA = keypointsA 89 | pointsB = keypointsB 90 | else: 91 | pointsA = np.array([(keypointsA[idx].pt) for idx in range(len(keypointsA))]).reshape(-1, 1, 2) 92 | pointsB = np.array([(keypointsB[idx].pt) for idx in range(len(keypointsB))]).reshape(-1, 1, 2) 93 | pointsA = pointsA[:,[1,0]] 94 | pointsB = pointsB[:, [1, 0]] 95 | 96 | F = np.array(algoMatrizFundamental(pointsA,pointsB)) 97 | Fmat = F[0] 98 | K = np.array(K) 99 | E = np.dot(np.transpose(K),np.dot(Fmat,K)) 100 | 101 | # Conseguir pose de las camaras 102 | Rtbest = getPose(E,K, np.hstack([pointsA,pointsB]),imageSize) 103 | 104 | #Crear grafico 105 | graphList[i] = createGraph(i,i+1,K, pointsA, 
pointsB, Rtbest, f) 106 | 107 | #Triangular 108 | graphList[i] = triangulateGraph(graphList[i],imageSize) 109 | 110 | #visualizar grafico 111 | # showGraph(graphList[i],imageSize) 112 | 113 | #Bundle ajustement 114 | graphList[i]=bundleAdjustment(graphList[i]) 115 | 116 | #Visualiza con mejoras 117 | # showGraph(graphList[i], imageSize) 118 | 119 | gM = mergeAllGraph(graphList,imageSize) 120 | print "Merge de grafos finalizado" 121 | #Visualizar resultado parcial 122 | showGraph(gM,imageSize) 123 | #Dense matching 124 | for i in range(len(listaImages)-1): 125 | graphList[i] = denseMatch(graphList[i],listaImages[i], 126 | listaImages[i+1], imageSize, imageSize) 127 | 128 | print "Dense match finalizado" 129 | print "Inicializando dense Triangulation" 130 | #Dense reconstruction 131 | for i in range(len(listaImages) - 1): 132 | graphList[i] = denseReconstruction(graphList[i], gM,K,imageSize) 133 | print "Dense reconstruct finalizado" 134 | data = visualizeDense(graphList, gM, imageSize) 135 | 136 | outputPly(data,outName) 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | -------------------------------------------------------------------------------- /req.txt: -------------------------------------------------------------------------------- 1 | matplotlib==1.5.3 2 | numpy==1.11.1 3 | opencv-contrib-python==3.4.2.16 4 | plyfile==0.7 5 | scipy==1.2.1 6 | 7 | -------------------------------------------------------------------------------- /test_data/B21.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/B21.jpg -------------------------------------------------------------------------------- /test_data/B22.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/B22.jpg -------------------------------------------------------------------------------- /test_data/B23.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/B23.jpg -------------------------------------------------------------------------------- /test_data/B24.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/B24.jpg -------------------------------------------------------------------------------- /test_data/B25.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/B25.jpg -------------------------------------------------------------------------------- /test_data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/__init__.py -------------------------------------------------------------------------------- /test_data/__init__.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/__init__.pyc -------------------------------------------------------------------------------- /test_data/dataMergeTest.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/dataMergeTest.pyc -------------------------------------------------------------------------------- /test_data/glumpyTest.py: -------------------------------------------------------------------------------- 1 | # ----------------------------------------------------------------------------- 2 | # Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved. 3 | # Distributed under the (new) BSD License. 4 | # ----------------------------------------------------------------------------- 5 | import numpy as np 6 | from glumpy import app 7 | from glumpy.graphics.collections import PointCollection 8 | 9 | window = app.Window(1024,1024, color=(1,1,1,1)) 10 | points = PointCollection("agg", color="local", size="local") 11 | 12 | @window.event 13 | def on_draw(dt): 14 | window.clear() 15 | points.draw() 16 | if len(points) < 100000: 17 | points.append(np.random.normal(0.0,0.5,(1,3)), 18 | color = np.random.uniform(0,1,4), 19 | size = np.random.uniform(1,24,1)) 20 | 21 | window.attach(points["transform"]) 22 | window.attach(points["viewport"]) 23 | app.run() -------------------------------------------------------------------------------- /test_data/graphsMergeStart.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/test_data/graphsMergeStart.pkl -------------------------------------------------------------------------------- /test_data/mergeTest.py: -------------------------------------------------------------------------------- 1 | from main import updateMerged 2 | from utils.graph import showGraph 3 | from utils.mergeGraph import invertRt, catRt, dictToGraph 4 | from utils.parser import * 5 | from test.dataMergeTest import * 6 | 7 | def inverseTest(debug): 8 | if debug: 9 | print "----------Test Inverse ---------" 10 | 11 | Rtstr = "[0.980636494759034 -0.0848521010283081 -0.176499818973837 0.916986481160674;0.0852828285792061 0.996343432040814 -0.00515796264643823 -0.0706249414988878;0.176292099358551 -0.00999431739610548 0.984287157959272 -3.44237398527039]" 12 | RtInvStr = "[0.980636494759034 0.0852828285792061 0.176292099358551 -0.286343977206979;-0.0848521010283081 0.996343432040814 -0.00999431739610548 0.113770747936833;-0.176499818973837 -0.00515796264643823 0.984287157959272 3.54976817371088]" 13 | 14 | mat = pst(Rtstr) 15 | expected = pst(RtInvStr) 16 | result = invertRt(mat) 17 | 18 | assert(fullTest(expected, result,debug)) 19 | 20 | def cancatenateRtTest(debug): 21 | if debug: 22 | print "----------Test Concat ---------" 23 | rtStr1 = "[0.980636494759034 0.0852828285792061 0.176292099358551 -0.286343977206979;-0.0848521010283081 0.996343432040814 -0.00999431739610548 0.113770747936833;-0.176499818973837 -0.00515796264643823 0.984287157959272 3.54976817371088]" 24 | rtStr2 = "[0.999973627905639 -0.00211608063595445 0.00694735172399251 -0.189212428340614;0.00201958191092069 0.999901798484945 0.0138677566850883 -0.0264935267310269;-0.00697601477491304 -0.0138533602174298 0.999879702578538 
2.10847679899011]" 25 | catStr = "[0.979553052688945 0.0807571098228607 0.184266400032201 -0.102444231255662;-0.0827679456224583 0.996563598373194 0.00323443579447091 0.0823563823207143;-0.183371982988695 -0.0184196528341118 0.982871015059588 5.65864742178288]" 26 | 27 | mat1 = pst(rtStr1) 28 | mat2 = pst(rtStr2) 29 | expected = pst(catStr) 30 | result = catRt(mat1,mat2) 31 | 32 | assert (fullTest(expected, result, debug)) 33 | 34 | def testFullMergeProcess(): 35 | # mergeAllGraph(gL, imsize) 36 | graphL = [g1,g2,g3,g4] 37 | graphL= [dictToGraph(parseStruct(st)) for st in graphL] 38 | expectedAtIt = [m12,m23,m34] 39 | expectedAtIt = [dictToGraph(parseStruct(st)) for st in expectedAtIt] 40 | 41 | imsize = (640, 480, 3) 42 | 43 | tempResult = graphL[0] 44 | # merge de vistas parciales 45 | 46 | #Aqui lamentablemtne la optimizacion tira valores algo distintos 47 | #Pero los errores se mantienen a 0.001 de distancia asi que se esperan 48 | #buenos resultaods. 49 | 50 | for i in range(len(graphL) - 1): 51 | tempResult = updateMerged(tempResult, graphL[i + 1], imsize) 52 | tempResult.closeEnought(expectedAtIt[i],0.5) 53 | showGraph(tempResult, imsize) 54 | 55 | if __name__ == "__main__": 56 | debug = True 57 | testFullMergeProcess() 58 | inverseTest(debug) 59 | cancatenateRtTest(debug) 60 | 61 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/__init__.py -------------------------------------------------------------------------------- /utils/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/__init__.pyc -------------------------------------------------------------------------------- /utils/bundleAjust.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.optimize import least_squares 3 | def bundleAdjustment(graph, adjustFocalLength=False): 4 | nCamaras = graph.mot.shape[2] 5 | mot = np.zeros((3,2,nCamaras)) 6 | 7 | for i in range(nCamaras): 8 | mot[:,0,i] = rotationMatrix2angleaxis(graph.mot[:,0:3,i]) 9 | mot[:,1,i] = graph.mot[:,3,i] 10 | 11 | stre = graph.str 12 | 13 | #Se asume px,py = 0 14 | 15 | px,py =0,0 16 | f = graph.f 17 | 18 | # t = packmotst(mot,stre) 19 | # unpackMotStr(t, nCamaras) 20 | 21 | res = reprojectionResidual(graph.ObsIdx,graph.obsVal,px,py,f,mot,stre) 22 | 23 | error = lambda x : 2* np.sqrt( np.sum(np.power(x,2)) / x.shape[0] ) 24 | 25 | 26 | print "Error inicial de ",error(res) 27 | #Realizar optimizacion de valores 28 | #Quiero conseguir minimizar la norma del vector reprojectionResidual 29 | fun = lambda x : wrapperFuntionStrMot(x,nCamaras,graph.ObsIdx,graph.obsVal,px,py,f) 30 | sol = least_squares(fun, packmotst(mot,stre), method='lm',max_nfev=1000) 31 | resultM, resultS = unpackMotStr(sol.x,nCamaras,graph.ObsIdx.shape[0]) 32 | print "Error despues de optimizar de ", error(sol.fun) 33 | 34 | if adjustFocalLength: 35 | # Realizar optimizacion de valores 36 | # Quiero conseguir minimizar la norma del vector reprojectionResidual 37 | fun = lambda x: wrapperFuntionStrMotF(x, nCamaras, graph.ObsIdx, graph.obsVal, px, py) 38 | sol = least_squares(fun, packMSF(mot, stre,f), method='lm') 39 | resultM, 
resultS,resultF = unpackMotStrf(sol.x, nCamaras) 40 | print "Error despues de optimizar de ", error(sol.fun) 41 | graph.focal = np.eye(3) * resultF 42 | graph.f = resultF 43 | 44 | for i in range(nCamaras): 45 | graph.mot[:,:, i] = np.hstack([AngleAxis2RotationMatrix(resultM[:, 0, i]) , resultM[:,1,i].reshape((3,1))]) 46 | graph.str = resultS 47 | return graph 48 | 49 | 50 | 51 | 52 | 53 | def packMSF(mot,st,f): 54 | a=mot.flatten(order='F') 55 | b=st.flatten(order='F') 56 | return np.concatenate((f,a,b)) 57 | def packmotst(mot,st): 58 | return np.concatenate((mot.flatten(order='F'), st.flatten(order='F'))) 59 | 60 | def wrapperFuntionStrMot(x,ncam,ObsIdx,ObsVal,px,py,f): 61 | mot, st = unpackMotStr(x,ncam,ObsIdx.shape[0]) 62 | return reprojectionResidual(ObsIdx, ObsVal, px, py, f, mot,st) 63 | def wrapperFuntionStrMotF(x,ncam,ObsIdx,ObsVal,px,py): 64 | mot, st,f = unpackMotStrf(x,ncam,ObsIdx.shape[0]) 65 | return reprojectionResidual(ObsIdx, ObsVal, px, py, f, mot,st) 66 | 67 | def rotationMatrix2angleaxis(R): 68 | #El problema ocurre que hay valores muy pequenos asi que se sigue el sigueinte proceso 69 | 70 | ax = [0,0,0] 71 | ax[0] = R[2,1] - R[1,2] 72 | ax[1] = R[0,2] - R[2,0] 73 | ax[2] = R[1,0] - R[0,1] 74 | ax = np.array(ax) 75 | 76 | 77 | costheta = max( (R[0,0] + R[1,1] + R[2,2] - 1.0) / 2.0 , -1.0) 78 | costheta = min(costheta, 1.0) 79 | 80 | sintheta = min(np.linalg.norm(ax) * 0.5 , 1.0) 81 | theta = np.arctan2(sintheta, costheta) 82 | 83 | #TODO (esto tenia problemas de precision en matlba nose si se mantienen) 84 | #por seguridad copie la version que arregla estos problemas 85 | 86 | kthreshold = 1e-12 87 | if (sintheta > kthreshold) or (sintheta < -kthreshold): 88 | r = theta / (2.0 *sintheta) 89 | ax = r * ax 90 | return ax 91 | else: 92 | if (costheta > 0.0): 93 | ax = ax *0.5 94 | return ax 95 | inv_one_minus_costheta = 1.0 / (1.0 - costheta) 96 | 97 | for i in range(3): 98 | ax[i] = theta * np.sqrt((R(i, i) - costheta) * inv_one_minus_costheta) 99 | cond1 = ((sintheta < 0.0) and (ax[i] > 0.0)) 100 | cond2 = ((sintheta > 0.0) and (ax[i] < 0.0)) 101 | if cond1 or cond2: 102 | ax[i] = -ax[i] 103 | return ax 104 | pass 105 | 106 | 107 | # La funcion reprojectionResidual tiene 3 usos. 1 Evaluar unn modelo, 2. 
Optimizar dado [Mot[:] ; Str[:]] 108 | # Y 3 optimizar daod [f; Mot(:); Str(:)] Por eso existen estas funciones unpack 109 | def unpackMotStr(vect,ncam,n): 110 | cut = 3 * 2 * ncam 111 | mot = np.reshape(vect[0:cut], (3, 2,ncam),order='F') 112 | st = np.reshape(vect[cut:], (n,3),order='F' ) 113 | return mot,st 114 | 115 | def unpackMotStrf(vect,ncam,n): 116 | cut = 1+3*2*ncam 117 | f = vect[0] 118 | mot = np.reshape(vect[1:cut], (3, 2,ncam),order='F') 119 | st = np.reshape(vect[cut:], (n,3),order='F' ) 120 | return f,mot,st 121 | 122 | def reprojectionResidual(ObsIdx,ObsVal,px,py,f,Mot,Str): 123 | nCam = len(ObsIdx[0]) 124 | 125 | residuals = np.zeros((0,0)) 126 | for i in range(nCam): 127 | 128 | validsPts = ObsIdx[:,i] != -1 129 | valIndexs = ObsIdx[validsPts,i] 130 | 131 | validMot = Mot[:,0,i] 132 | validStr = Str[validsPts,:] 133 | 134 | RP = AngleAxisRotatePts(validMot, validStr) 135 | 136 | TRX = RP[:,0] + Mot[0, 1, i] 137 | TRY = RP[:,1] + Mot[1, 1, i] 138 | TRZ = RP[:,2] + Mot[2, 1, i] 139 | 140 | TRXoZ = TRX / TRZ 141 | TRYoZ = TRY / TRZ 142 | 143 | x = f * TRXoZ + px 144 | y = f * TRYoZ + py 145 | 146 | ox = ObsVal[ valIndexs.astype('int'),0] 147 | oy = ObsVal[valIndexs.astype('int'),1] 148 | 149 | step = np.vstack([(x-ox),(y-oy)]) 150 | 151 | if i ==0: 152 | residuals = step 153 | else: 154 | residuals = np.hstack([residuals, step]) 155 | return residuals.flatten() 156 | 157 | def AngleAxisRotatePts(validMot, validStr): 158 | validStr=np.transpose(validStr) 159 | angle_axis = np.reshape(validMot[0:3], (1,3)) 160 | theta2 = np.inner(angle_axis, angle_axis) 161 | 162 | if (theta2 > 0.0): 163 | theta = np.sqrt(theta2) 164 | w = (1.0/theta) * angle_axis 165 | 166 | costheta = np.cos(theta) 167 | sintheta = np.sin(theta) 168 | 169 | w_cross_pt = np.dot(xprodmat(w),validStr) 170 | 171 | w_dot_pt = np.dot(w,validStr) 172 | t1= (validStr * costheta) 173 | t2 = (w_cross_pt * sintheta) 174 | t3 = np.dot( (1 - costheta) * np.transpose(w),w_dot_pt) 175 | result = t1 + t2 + t3 176 | 177 | else: 178 | w_cross_pt = np.dot(xprodmat(angle_axis),validStr) 179 | result = validStr + w_cross_pt 180 | return np.transpose(result) 181 | 182 | def xprodmat(a): 183 | assert(a.shape[0] == 1 and a.shape[1] ==3) 184 | ax=a[0,0] 185 | ay=a[0,1] 186 | az=a[0,2] 187 | A=np.array([[0, -az,ay],[az,0,-ax],[-ay,ax,0]]) 188 | return A 189 | 190 | 191 | def AngleAxis2RotationMatrix(angle_axis): 192 | R= np.zeros((3,3)) 193 | theta2 = np.inner(angle_axis, angle_axis) 194 | if (theta2 > 0.0): 195 | theta = np.sqrt(theta2) 196 | wx = angle_axis[0] / theta 197 | wy = angle_axis[1] / theta 198 | wz = angle_axis[2] / theta 199 | costheta = np.cos(theta) 200 | sintheta = np.sin(theta) 201 | 202 | R[0,0] = costheta + wx * wx * (1 - costheta) 203 | R[1,0] = wz * sintheta + wx * wy * (1 - costheta) 204 | R[2,0] = -wy * sintheta + wx * wz * (1 - costheta) 205 | R[0,1] = wx * wy * (1 - costheta) - wz * sintheta 206 | R[1,1] = costheta + wy * wy * (1 - costheta) 207 | R[ 2, 1] = wx * sintheta + wy * wz * (1 - costheta) 208 | R[0,2] = wy * sintheta + wx * wz * (1 - costheta) 209 | R[1,2] = -wx * sintheta + wy * wz * (1 - costheta) 210 | R[2,2] = costheta + wz * wz * (1 - costheta) 211 | else: 212 | R[0, 0] = 1 213 | R[1, 0] = -angle_axis[2] 214 | R[2, 0] = angle_axis[1] 215 | R[0, 1] = angle_axis[2] 216 | R[1,1] = 1 217 | R[2,1] = -angle_axis[0] 218 | R[0,2] = -angle_axis[1] 219 | R[1,2] = angle_axis[0] 220 | R[2,2] = 1 221 | return R 222 | if __name__ == '__main__': 223 | pass 224 | 225 | 226 | 
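[Editor's note] bundleAdjustment above follows one pattern throughout: pack the camera motions (Mot, angle-axis rotation plus translation) and the 3D structure (Str) into a single flat vector, evaluate reprojectionResidual, and let scipy's Levenberg-Marquardt solver shrink the residual norm. The standalone sketch below is editorial and not part of the repository; the synthetic data and every helper name in it are invented for illustration. It shows the same pack / residual / least_squares pattern on a single camera pose with known 3D points, using only NumPy and SciPy from req.txt.

# Editorial sketch only -- not repository code. Minimal pose refinement with the
# same ingredients bundleAdjustment uses: an angle-axis rotation, a pinhole
# projection, a flat parameter vector, and scipy.optimize.least_squares (method='lm').
import numpy as np
from scipy.optimize import least_squares

def rotate(aa, pts):
    # Rodrigues' formula: rotate Nx3 points by the angle-axis vector aa (shape (3,))
    theta = np.linalg.norm(aa)
    if theta < 1e-12:
        return pts
    k = aa / theta
    return (pts * np.cos(theta)
            + np.cross(k, pts) * np.sin(theta)
            + np.outer(pts.dot(k), k) * (1 - np.cos(theta)))

def project(aa, t, X, f):
    # rotate + translate into the camera frame, then apply a pinhole projection
    Xc = rotate(aa, X) + t
    return f * Xc[:, :2] / Xc[:, 2:3]

f = 719.5459                                        # focal length assumed in main.py
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (30, 3)) + [0.0, 0.0, 6.0]   # synthetic points in front of the camera
aa_true, t_true = np.array([0.02, -0.03, 0.01]), np.array([0.1, -0.05, 0.2])
obs = project(aa_true, t_true, X, f) + rng.normal(0, 0.3, (30, 2))  # noisy observations

def residual(p):
    # p packs [angle-axis (3), translation (3)]; residuals are reprojection errors
    return (project(p[:3], p[3:], X, f) - obs).ravel()

rms = lambda r: 2 * np.sqrt(np.sum(r ** 2) / r.shape[0])   # same error measure bundleAdjustment prints
sol = least_squares(residual, np.zeros(6), method='lm', max_nfev=1000)
print("error before refinement: %.3f" % rms(residual(np.zeros(6))))
print("error after refinement:  %.3f" % rms(sol.fun))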
-------------------------------------------------------------------------------- /utils/bundleAjust.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/bundleAjust.pyc -------------------------------------------------------------------------------- /utils/dense.py: -------------------------------------------------------------------------------- 1 | from test_data.dataMergeTest import * 2 | from utils.getPose import vgg_X_from_xP_nonlin 3 | from utils.graph import visualizeDense 4 | from utils.mergeGraph import dictToGraph 5 | from utils.parser import parseStruct, pst, fullTest 6 | import cv2 7 | import numpy as np 8 | import pylab as plt 9 | import heapq 10 | from plyfile import PlyData, PlyElement 11 | 12 | class dummyHeap: 13 | def __init__(self,n): 14 | self.list = [] 15 | def push(self,idx,val): 16 | heapq.heappush(self.list,(-val,idx)) 17 | def pop(self): 18 | val,idx = heapq.heappop(self.list) 19 | return (-val,idx) 20 | def size(self): 21 | return len(self.list) 22 | 23 | def denseMatch(graph,imA,imB,imSA,imSB): 24 | 25 | if imA.shape != imSA: 26 | cv2.resize(imA,imSA) 27 | if imB.shape != imSB: 28 | cv2.resize(imB,imSB) 29 | 30 | halfsizeprop = 2 31 | tempIm = [imA,imB] 32 | matchs = [] 33 | znccs = [] 34 | for i in range(2): 35 | #Aqui deberian ser imagenes en rango 0 1 (cv2 deja en 0-256) 36 | imt = tempIm[i].astype(np.float64) / 256 37 | if imt.shape[2] == 3: 38 | imt = toGrayScale(imt) 39 | matchs.append(reliableArea(imt)) 40 | znccs.append(ZNCCpath_all(imt,halfsizeprop)) 41 | hh = imA.shape[0] * 0.5 42 | hw = imA.shape[1] * 0.5 43 | points = int(graph.matches.shape[0]) 44 | x1p = graph.matches[:,0].reshape((points,1)) 45 | y1p = graph.matches[:,1].reshape((points,1)) 46 | x2p = graph.matches[:,2].reshape((points,1)) 47 | y2p = graph.matches[:,3].reshape((points,1)) 48 | 49 | initMatch = np.round(np.hstack([ 50 | hh - y1p -1, 51 | hw - x1p -1, 52 | hh - y2p -1, 53 | hw - x2p -1, 54 | np.zeros((points,1)) 55 | ])) 56 | matched_pair = propagate(initMatch,matchs[0],matchs[1],znccs[0],znccs[1],halfsizeprop) 57 | graph.denseMatch = matched_pair[0] 58 | graph.denseMatch[:,0] = (imA.shape[0] * 0.5) - graph.denseMatch[:,0] 59 | graph.denseMatch[:,1] = (imA.shape[1] * 0.5) - graph.denseMatch[:,1] 60 | graph.denseMatch[:,2] = (imB.shape[0] * 0.5) - graph.denseMatch[:,2] 61 | graph.denseMatch[:,3] = (imB.shape[1] * 0.5) - graph.denseMatch[:,3] 62 | 63 | graph.denseMatch = np.transpose(graph.denseMatch) 64 | graph.denseMatch = graph.denseMatch[[1,0,3,2,4],:] 65 | return graph 66 | 67 | def propagate(i_m,mim_i,mim_j,zncc_i,zncc_j,winhalfsize): 68 | """ 69 | Please cite this paper if you use this code. 70 | J. Xiao, J. Chen, D.-Y. Yeung, and L. 
Quan 71 | Learning Two-view Stereo Matching 72 | Proceedings of the 10th European Conference on Computer Vision (ECCV2008) 73 | Springer Lecture Notes in Computer Science (LNCS), Pages 15-27 74 | """ 75 | # testMat = None 76 | # with open('test/varProp.txt','r') as f: 77 | # testMat=pst(f.read()) 78 | # testMat[:,0] = testMat[:,0] - 1 79 | # testMat[:, 1] = testMat[:, 1] - 1 80 | # testMat[:, 2] = testMat[:, 2] - 1 81 | # testMat[:, 3] = testMat[:, 3] - 1 82 | 83 | 84 | hi = mim_i.shape[0] 85 | wi = mim_i.shape[1] 86 | hj = mim_j.shape[0] 87 | wj = mim_j.shape[1] 88 | 89 | max_cost = 0.5 90 | outIm_i = np.zeros((mim_i.shape[0],mim_i.shape[1],2)) 91 | outIm_j = np.zeros((mim_j.shape[0], mim_j.shape[1], 2)) 92 | 93 | outIm_i[:,:,0] = mim_i - 2 94 | outIm_i[:, :, 1] = outIm_i[:,:,0] 95 | outIm_j[:,:,0] = mim_j - 2 96 | outIm_j[:, :, 1] = outIm_j[:,:,0] 97 | 98 | elementosI = mim_i.shape[0]*mim_i.shape[1] 99 | elementosJ = mim_j.shape[0]*mim_j.shape[1] 100 | maxMatchingNo = min(elementosI , elementosJ) 101 | maxIndexValid = max(elementosI,elementosJ) 102 | nbMaxStart = maxIndexValid + 5*5*9 103 | 104 | #Crear priority queue de tamano 105 | maxSizeHeap = nbMaxStart*25 + maxIndexValid 106 | heap = dummyHeap(maxSizeHeap) 107 | 108 | 109 | match_pair = i_m 110 | match_pair_size=0 111 | for match_pair_size in range(match_pair.shape[0]): 112 | e1 = zncc_i[match_pair[match_pair_size,0],match_pair[match_pair_size,1],:] 113 | e2 = zncc_j[match_pair[match_pair_size,2],match_pair[match_pair_size,3],:] 114 | val = np.sum(e1*e2) 115 | match_pair[match_pair_size,4] = val 116 | heap.push(match_pair_size,val) 117 | 118 | while (maxMatchingNo >= 0 and heap.size() > 0): 119 | bestPri,bestInd = heap.pop() 120 | 121 | x0 = match_pair[bestInd,0] 122 | y0 = match_pair[bestInd,1] 123 | x1 = match_pair[bestInd,2] 124 | y1 = match_pair[bestInd,3] 125 | 126 | xMin0 = int(max(winhalfsize,x0-winhalfsize)) 127 | xMax0 = int(min(hi-winhalfsize-1,x0+winhalfsize+1)) 128 | yMin0 = int(max(winhalfsize,y0-winhalfsize)) 129 | yMax0 = int(min(wi-winhalfsize-1,y0+winhalfsize+1)) 130 | 131 | xMin1 = int(max(winhalfsize,x1-winhalfsize)) 132 | xMax1 = int(min(hj-winhalfsize-1,x1+winhalfsize+1)) 133 | yMin1 = int(max(winhalfsize,y1-winhalfsize)) 134 | yMax1 = int(min(wj-winhalfsize-1,y1+winhalfsize+1)) 135 | 136 | localH = [] 137 | for yy0 in range(yMin0,yMax0+1): 138 | for xx0 in range(xMin0,xMax0+1): 139 | if outIm_i[xx0,yy0,0] == -1: 140 | xx = int(xx0 + x1 - x0) 141 | yy = int(yy0 + y1 - y0) 142 | for yy1 in range(max(yMin1,yy-1), min(yMax1,yy+2)+1): 143 | for xx1 in range(max(xMin1,xx-1),min(xMax1,xx+2)+1): 144 | if outIm_j[xx1,yy1,0] == -1: 145 | auxCost =np.sum(zncc_i[xx0,yy0,:] * zncc_j[xx1,yy1,:]) 146 | if (1-auxCost) <= max_cost: 147 | localH.append([xx0,yy0,xx1,yy1,auxCost]) 148 | 149 | if len(localH) > 0: 150 | localH.sort(key=lambda x: x[4],reverse=True) 151 | for elem in localH: 152 | xx0, yy0, xx1, yy1, auxCost = elem 153 | if (outIm_i[xx0,yy0,0] < 0) and (outIm_j[xx1,yy1,0] < 0): 154 | outIm_i[xx0,yy0,:] = [xx1,yy1] 155 | outIm_j[xx1,yy1,:] = [xx0,yy0] 156 | #probablemente necesitare vstack 157 | match_pair_size+=1 158 | match_pair = np.vstack([match_pair,elem]) 159 | # cond = np.allclose(testMat[match_pair_size, :], 160 | # match_pair[match_pair_size, :]) 161 | # if not cond: 162 | # print "hi" 163 | heap.push(match_pair_size,auxCost) 164 | maxMatchingNo-=1 165 | match_pair = match_pair[i_m.shape[0]:, :] 166 | return match_pair,outIm_j,outIm_j 167 | 168 | def ZNCCpath_all(im,half_size_prop): 169 | 
dim3=np.power(2*half_size_prop + 1,2) 170 | zncc = np.zeros((im.shape[0],im.shape[1],dim3)) 171 | k=0 172 | for i in range(-half_size_prop,half_size_prop+1): 173 | for j in range(-half_size_prop, half_size_prop + 1): 174 | x0 = half_size_prop 175 | xf = zncc.shape[0] - half_size_prop 176 | y0 = half_size_prop 177 | yf = zncc.shape[1] - half_size_prop 178 | zncc[x0:xf,y0:yf,k] = im[x0+i:xf+i , y0+j:yf+j] 179 | k+=1 180 | zncc_m = np.mean(zncc,2) 181 | 182 | t=(2*half_size_prop+1) * zncc_m 183 | desv = np.sqrt( np.sum(zncc*zncc,axis=2) - (t*t) ) 184 | extMean = np.repeat(zncc_m.reshape((zncc_m.shape[0],zncc_m.shape[1],1)),dim3,axis=2) 185 | extDesv = np.repeat(desv.reshape((desv.shape[0],desv.shape[1],1)),dim3,axis=2) 186 | zncc = (zncc-extMean) / extDesv 187 | return zncc 188 | 189 | def reliableArea(im): 190 | permARow = range(1,im.shape[0]) + [0] 191 | permACol = range(1,im.shape[1]) + [0] 192 | permBRow = [im.shape[0] - 1] + range(0,im.shape[0]-1) 193 | permBCol = [im.shape[1] - 1] + range(0, im.shape[1] - 1) 194 | 195 | t1= np.maximum( np.abs( im - im[permARow,:] ) ,np.abs( im - im[permBRow,:] )) 196 | t2= np.maximum( np.abs( im - im[:,permACol] ) ,np.abs( im - im[:,permBCol] )) 197 | 198 | rim = np.maximum(t1, t2) 199 | rim[0,:] = 0 200 | rim[-1, :] = 0 201 | rim[:,0] = 0 202 | rim[:, -1] = 0 203 | rim = (rim < 0.01) 204 | return (1-rim).astype(np.float64) 205 | 206 | 207 | def toGrayScale(im): 208 | return (30 * im[:,:,0] + 150 * im[:,:,1] + 76 * im[:,:,2]) / 256 209 | def denseReconstruction(graph,merged,Kmat,ims): 210 | idFrame = graph.frames[0] 211 | p1 = np.dot(Kmat, merged.mot[:,:,idFrame]) 212 | p2 = np.dot(Kmat, merged.mot[:,:,idFrame+1]) 213 | p = np.zeros((p1.shape[0],p1.shape[1],2)) 214 | p[:,:,0] = p1 215 | p[:,:,1] = p2 216 | X = np.zeros((4,graph.denseMatch.shape[1])) 217 | colIms = np.array([ims[1], ims[0]]).reshape((2, 1)) 218 | imsize = colIms.repeat(2, axis=1) 219 | 220 | # import pickle 221 | # with open('testX.pkl','rb') as f: 222 | # X = pickle.load(f) 223 | for i in range(graph.denseMatch.shape[1]): 224 | X[:,i] = vgg_X_from_xP_nonlin(graph.denseMatch[0:4,i].reshape((2,2)),p,imsize,X=None) 225 | X=X[0:3,:] / X[[3,3,3],:] 226 | x1 = np.dot(p1, np.vstack([X, np.ones((1,X.shape[1]))])) 227 | x2 = np.dot(p2, np.vstack([X, np.ones((1, X.shape[1]))])) 228 | x1 = x1[0:2,:] / x1[[2,2],:] 229 | x2 = x2[0:2,:] / x2[[2,2],:] 230 | graph.denseX = X 231 | temp = np.vstack([x1+1, x2+1]) - graph.denseMatch[0:4,:] 232 | graph.denseRepError = np.sum(temp*temp,axis=0) 233 | 234 | #Consigue las camaras segun merged 235 | rt1 = merged.mot[:,:,idFrame] 236 | rt2 = merged.mot[:,:,idFrame+1] 237 | c1 = - np.dot( np.transpose(rt1[0:3,0:3]) , rt1[:,3]) 238 | c2 = - np.dot(np.transpose(rt2[0:3, 0:3]), rt2[:, 3]) 239 | 240 | nPo = graph.denseMatch.shape[1] 241 | 242 | t1 = np.repeat(c1.reshape((3,1)), nPo, axis=1) 243 | t2 = np.repeat(c2.reshape((3, 1)), nPo, axis=1) 244 | view_dirs_1 = X - t1 245 | view_dirs_2 = X - t2 246 | temp =(1.0 / np.sqrt(np.sum(view_dirs_1 * view_dirs_1,axis = 0))) 247 | t1= np.repeat(temp.reshape((1,nPo)), 3,axis=0) 248 | view_dirs_1 = view_dirs_1 * t1 249 | 250 | temp = (1.0 / np.sqrt(np.sum(view_dirs_2 * view_dirs_2,axis = 0))) 251 | t2= np.repeat(temp.reshape((1,nPo)), 3,axis=0) 252 | view_dirs_2 = view_dirs_2 * t2 253 | 254 | graph.cos_angles = np.sum(view_dirs_1 * view_dirs_2) 255 | c_dir1 = np.transpose(rt1[2,0:3]) 256 | c_dir2 = np.transpose(rt2[2,0:3]) 257 | 258 | t1= np.repeat(c_dir1.reshape((3,1)), nPo,axis=1) 259 | t2 = np.repeat(c_dir2.reshape((3,1)), 
nPo,axis=1) 260 | bt1=(np.sum(view_dirs_1 * t1,axis=0) > 0) 261 | bt2=(np.sum(view_dirs_2 * t2,axis=0) > 0) 262 | graph.visible = np.bitwise_and(bt1,bt2) 263 | 264 | return graph 265 | 266 | 267 | 268 | def outputPly(data,name): 269 | d2 = np.empty(data.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) 270 | for i in range(data.shape[0]): 271 | d2[i] = tuple(data[i]) 272 | el = PlyElement.describe(d2, 'vertex') 273 | PlyData([el]).write(name+'.ply') 274 | 275 | 276 | def testDenseTriangulation(): 277 | import pickle 278 | #Cargo grafos esperados del resultado 279 | listGAfter = ['test/d1.txt','test/d2.txt','test/d3.txt','test/d4.txt'] 280 | for ind,filepat in enumerate(listGAfter): 281 | with open(filepat,'r') as f: 282 | listGAfter[ind] = f.read() 283 | listGAfter = [dictToGraph(parseStruct(st)) for st in listGAfter] 284 | #carga el grafo merged de prueba 285 | mer = dictToGraph(parseStruct(m34)) 286 | imsize = (640, 480, 3) 287 | derp = "[719.5459 0 0;0 719.5459 0;0 0 1]" 288 | 289 | gRes = denseReconstruction(listGAfter[0], mer, pst(derp), (imsize[0],imsize[1])) 290 | 291 | data = visualizeDense([gRes], mer, imsize) 292 | 293 | outputPly(data,"ble") 294 | 295 | #Agarrar la lista de grafos en dense match 296 | #Agarrar lista de reesultados en dense match 297 | def testFullDenseMatch(): 298 | import pickle 299 | imsize = (640,480,3) 300 | 301 | #Cargo imagenes 302 | imlist=["B21.jpg","B22.jpg","B23.jpg","B24.jpg","B25.jpg"] 303 | imlist = map(lambda x : cv2.imread("test/"+x), imlist) 304 | 305 | #Cargo grafos originales antes de dense 306 | with open("test/graphsMergeStart.pkl",'rb') as f: 307 | listGbefore = pickle.load(f) 308 | for i in range(len(listGbefore)): 309 | listGbefore[i].matches = np.transpose(listGbefore[i].matches) 310 | #Cargo grafos esperados del resultado 311 | listGAfter = ['test/d1.txt','test/d2.txt','test/d3.txt','test/d4.txt'] 312 | for ind,filepat in enumerate(listGAfter): 313 | with open(filepat,'r') as f: 314 | listGAfter[ind] = f.read() 315 | listGAfter = [dictToGraph(parseStruct(st)) for st in listGAfter] 316 | 317 | for ind,graph in enumerate(listGbefore): 318 | t=denseMatch(graph, imlist[ind], imlist[ind+1], imsize, imsize) 319 | assert(fullTest(t.denseMatch,listGAfter[ind].denseMatch,debug=True)) 320 | print "Iteracion ",ind," OK" 321 | 322 | if __name__ == "__main__": 323 | testDenseTriangulation() 324 | # testFullDenseMatch() 325 | -------------------------------------------------------------------------------- /utils/dense.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/dense.pyc -------------------------------------------------------------------------------- /utils/fundamental.py: -------------------------------------------------------------------------------- 1 | 2 | import cv2 3 | import numpy as np 4 | 5 | def default(pointsA,pointsB): 6 | return cv2.findFundamentalMat(pointsA, pointsB,param1=0.002) 7 | 8 | 9 | def implementacionRansac(pointsA,pointsB): 10 | t = 0.002 #Distancia a la cual se considera outlier 11 | F, inliers = ransacfitfundmatrix(pointsA, pointsB, t ) 12 | print "Inliners ",len(inliers) 13 | print 'Puntos totales ',len(pointsA) 14 | print 'Porcentaje de INLIERS ', len(inliers)*1.0 / len(pointsA) 15 | 16 | 17 | return F,inliers 18 | 19 | def ransacfitfundmatrix(pA,pB,tolerancia): 20 | assert(pA.shape == pB.shape) 21 | 22 | #Normalizar de forma que el origen es cnetroide y 
distancia media del origen es sqrt(2) 23 | #Ademas se asegura que el parametro de escala es 1 24 | na,Ta = normalizeHomogeneous(pA) 25 | nb,Tb = normalizeHomogeneous(pB) 26 | 27 | #Puntos para realizar la estimacion de fundamental matrix 28 | s = 8 29 | 30 | #Mandar al algoritmo RANSAC (conseguir modelo con mas inliners) 31 | modeloF = fundamentalFit 32 | distFun = distanceModel 33 | isdegenerate = lambda x : False #Nada es degenerado 34 | 35 | #Agregar a la hstack en cada fila x1,x2 3+3 6 elementos por fila 36 | dataset = np.hstack([na,nb]) 37 | inliners,M = ransac(dataset,modeloF,distFun,isdegenerate,s,tolerancia) 38 | 39 | F = fundamentalFit(np.hstack([na[inliners,:],nb[inliners,:]])) 40 | 41 | F = np.dot(np.dot(Tb, F), np.transpose(Ta)) 42 | 43 | return F,inliners 44 | def fundamentalFit(data): 45 | assert(data.shape[1] == 6 ) 46 | 47 | p1,p2 = data[:,0:3],data[:,3:] 48 | n, d = p1.shape 49 | 50 | na,Ta = normalizeHomogeneous(p1) 51 | nb,Tb = normalizeHomogeneous(p2) 52 | 53 | p2x1p1x1 = nb[:,0] * na[:,0] 54 | p2x1p1x2 = nb[:,0] * na[:,1] 55 | p2x1 = nb[:, 0] 56 | p2x2p1x1 = nb[:,1] * na[:,0] 57 | p2x2p1x2 = nb[:,1] * na[:,1] 58 | p2x2 = nb[:,1] 59 | p1x1 = na[:,0] 60 | p1x2 = na[:,1] 61 | ones = np.ones((1,p1.shape[0])) 62 | 63 | A = np.vstack([p2x1p1x1,p2x1p1x2,p2x1,p2x2p1x1,p2x2p1x2,p2x2,p1x1,p1x2,ones]) 64 | A = np.transpose(A) 65 | 66 | u, D, v = np.linalg.svd(A) 67 | vt = v.T 68 | 69 | F = vt[:, 8].reshape(3,3) #Conseguir el vector con menor valor propio y eso es F 70 | 71 | #Como la matriz fundamental es de rango 2 hay que volver a hacer svd y reconstruir 72 | #A partir de rango 2 73 | u, D, v = np.linalg.svd(F) 74 | F=np.dot(np.dot(u, np.diag([D[0], D[1], 0])), v) 75 | 76 | F= np.dot(np.dot(Tb,F),np.transpose(Ta)) 77 | 78 | return F 79 | 80 | pass 81 | def distanceModel(F, x, t): 82 | p1, p2 = x[:, 0:3], x[:, 3:] 83 | 84 | x2tFx1 = np.zeros((p1.shape[0],1)) 85 | 86 | x2ftx1 = [np.dot(np.dot(p2[i], F), np.transpose(p1[i])) for i in range(p1.shape[0])] 87 | 88 | ft1 = np.dot(F,np.transpose(p1)) 89 | ft2 = np.dot(F.T,np.transpose(p2)) 90 | 91 | bestInliers = None 92 | bestF = None 93 | 94 | sumSquared = (np.power(ft1[0, :], 2) + 95 | np.power(ft1[1, :], 2)) + \ 96 | (np.power(ft2[0, :], 2) + 97 | np.power(ft2[1, :], 2)) 98 | d34 = np.power(x2ftx1, 2) / sumSquared 99 | bestInliers = np.where(np.abs(d34) < t)[0] 100 | bestF = F 101 | return bestInliers,bestF 102 | 103 | def ransac(x, fittingfn, distfn, degenfn, s, t): 104 | maxTrials = 2000 105 | maxDataTrials = 200 106 | p=0.99 107 | 108 | bestM = None 109 | trialCount = 0 110 | maxInlinersYet = 0 111 | N=1 112 | maxN = 120 113 | n, d = x.shape 114 | 115 | M = None 116 | bestInliners = None 117 | while N > trialCount: 118 | degenerate = 1 119 | degenerateCount = 1 120 | while degenerate: 121 | inds = np.random.choice(range(n),s,replace=False) 122 | sample = x[inds,:] 123 | degenerate = degenfn(sample) 124 | 125 | if not degenerate: 126 | M = fittingfn(sample) 127 | if M is None: 128 | degenerate = 1 129 | degenerateCount +=1 130 | if degenerateCount > maxDataTrials: 131 | raise Exception("Error muchas sample degeneradas saliendo") 132 | #Evaluar modelo 133 | inliners,M = distfn(M,x,t) 134 | nInliners = len(inliners) 135 | 136 | if maxInlinersYet < nInliners: 137 | maxInlinersYet = nInliners 138 | bestM = M 139 | bestInliners = inliners 140 | 141 | #Estimacion de probabilidad trials hasta conseguri 142 | eps = 0.000001 143 | fractIn = nInliners*1.0/n 144 | pNoOut = 1 - fractIn*fractIn 145 | pNoOut = max(eps,pNoOut) #Evitar division 
por 0 146 | N = np.log(1-p) / np.log(pNoOut) 147 | N = max(N,maxN) 148 | 149 | trialCount +=1 150 | if trialCount > maxTrials: 151 | print("Se alcanzo maxima iteracion saliendo") 152 | break 153 | if M is None: 154 | raise Exception("Error no se encontro el modelo") 155 | print "Se realizacion ",trialCount,' itentos' 156 | return bestInliners,bestM 157 | 158 | 159 | 160 | 161 | def normalizeHomogeneous(points): 162 | normPoints = [] 163 | if points.shape[1] == 2: 164 | # Agrego factor de escala (concadenar columna con 1) 165 | points = np.hstack([points, np.ones((points.shape[0],1))]) 166 | 167 | n = points.shape[0] 168 | d = points.shape[1] 169 | #Deja en escala 1 170 | factores = np.repeat((points[:, -1].reshape(n, 1)), d, axis=1) 171 | points = points / factores #NOTAR QUE ESTO ES POR ELEMENTO 172 | 173 | prom = np.mean(points[:,:-1],axis=0) 174 | newP = np.zeros(points.shape) 175 | #Dejar todas las dimensiones en promedio 0 (menos la de escala) 176 | newP[:,:-1] = points[:,:-1] - np.vstack([prom for i in range(n)]) 177 | 178 | #Calcular distancia promedio 179 | dist = np.sqrt(np.sum(np.power(newP[:,:-1],2),axis=1)) 180 | meanDis = np.mean(dist) 181 | scale = np.sqrt(2)*1.0/ meanDis 182 | 183 | T = [[scale,0,-scale*prom[0]], 184 | [0, scale, -scale * prom[1]], 185 | [0, 0, 1] 186 | ] 187 | #ESTA ES LA VERSION ORIGINAL QUE SE USABA T*points 188 | #Esta asume puntos DxN como se usan puntos en formato NxD 189 | #Se usa el transpuesto 190 | T = np.transpose(np.array(T)) 191 | transformedPoints = np.dot(points,T) 192 | return transformedPoints,T 193 | 194 | pass -------------------------------------------------------------------------------- /utils/fundamental.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/fundamental.pyc -------------------------------------------------------------------------------- /utils/getPose.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def getPose(E,K,matches,ims): 4 | n,d = matches.shape 5 | 6 | U, s, V = np.linalg.svd(E, full_matrices=True) 7 | W = np.array([[0,-1,0],[1,0,0],[0,0,1]]) 8 | Z = np.array([[0,1,0],[-1,0,0],[0,0,0]]) 9 | 10 | S = np.dot(np.dot(U,Z),np.transpose(U)) 11 | 12 | R1 = np.dot(np.dot(U,W),V) 13 | R2 = np.dot(np.dot(U, np.transpose(W)), V) 14 | 15 | t1 = U[:,2] 16 | t2 = -1*U[:,2] 17 | 18 | if np.linalg.det(R1) < 0: 19 | print "Determinante negativo F1 multiplico por -1" 20 | R1 = -1*R1 21 | 22 | if np.linalg.det(R2) < 0: 23 | print "Determinante negativo R2 multiplico por -1" 24 | R2 = -1 * R2 25 | 26 | #Esto genera 4 posibles soluciones 27 | t1t = t1.reshape(3,1) 28 | t2t = t2.reshape(3,1) 29 | sols=[np.hstack((R1, t1t)), np.hstack((R1, t2t)), np.hstack((R2, t1t)), np.hstack((R2, t2t))] 30 | 31 | Rt = np.zeros((3,4,4)) 32 | Rt[:,:,0] = sols[0] 33 | Rt[:, :, 1] = sols[1] 34 | Rt[:, :, 2] = sols[2] 35 | Rt[:, :, 3] = sols[3] 36 | 37 | 38 | #Por cada solucion 39 | P0 = np.dot(K,np.hstack([np.eye(3),np.zeros((3,1))])) 40 | goodV = np.zeros((1,4)) 41 | for i in range(4): 42 | outX = np.zeros((n, 4)) 43 | P1 = np.dot(K,sols[i]) 44 | #Por cada par de puntos 2D 45 | for j in range(n): 46 | # aplicar vgg para calcular puntos 3D 47 | colIms = np.array([ims[1],ims[0]]).reshape((2,1)) 48 | imsize = colIms.repeat(2,axis=1) 49 | pt = np.zeros((P0.shape[0],P0.shape[1],2)) 50 | pt[:,:,0] = P0 51 | pt[:,:,1] = P1 52 | formatedMatched = 
np.reshape(matches[j,:],(2,2)) 53 | outX[j,:] = vgg_X_from_xP_nonlin(formatedMatched,pt,imsize) 54 | #Aplicar escala 55 | outX = outX[:,0:3] / outX[:,[3,3,3]] 56 | 57 | t = Rt[0:3, 3, i].reshape((3,1)) 58 | aux = np.transpose(outX[:,:]) - np.repeat(t, outX.shape[0], axis=1) 59 | t2 = Rt[2, 0:3, i].reshape((1,3)) 60 | dprd = np.dot(t2,aux) 61 | goodV[0,i] = np.sum([np.bitwise_and(outX[:,2] > 0,dprd > 0)]) 62 | 63 | 64 | 65 | 66 | #Calcular cual es mejor 67 | bestIndex = np.argmax(goodV) 68 | return Rt[:,:, bestIndex] 69 | 70 | 71 | def vgg_X_from_xP_nonlin(u,P,imsize=None,X=None): 72 | eps = 2.2204e-16 73 | 74 | K = P.shape[2] 75 | assert(K >= 2) 76 | 77 | #Primero consigo X si no fue proporcioando 78 | if (X is None): 79 | X = vgg_X_from_xP_lin(u, P, imsize) 80 | #Fixed el -1 ????? (en version antigua los indices diferenciaban de un numero) 81 | newu = u.copy()-1 82 | newP = P.copy() 83 | if not imsize is None: 84 | for i in range(K): 85 | H = np.array([[2.0/imsize[0,i],0 , -1], 86 | [0, 2.0/imsize[1,i], -1], 87 | [0,0,1]]) 88 | newP[:,:,i] = np.dot(H,newP[:,:,i]) 89 | newu[i, :] = np.dot(H[0:2,0:2], newu[i, :]) + H[0:2,2] 90 | 91 | T, s, U = np.linalg.svd(X.reshape((4,1))) 92 | lc = T.shape[1] 93 | T = T[:,[1,2,3,0]] 94 | Q = newP.copy() 95 | for i in range(K): 96 | Q[:, :, i] = np.dot(newP[:, :, i], T ) 97 | 98 | #DO THE NEWTON 99 | Y = np.zeros((3,1)) 100 | eprev = np.inf 101 | for i in range(10): 102 | e,j = resid(Y,newu,Q) 103 | if (1- (np.linalg.norm(e) / np.linalg.norm(eprev))) < 1000*eps: 104 | break 105 | eprev = e 106 | jj = np.dot(np.transpose(j),j) 107 | je = np.dot(np.transpose(j),e) 108 | Y = Y - np.linalg.solve(jj,je) 109 | X =np.dot(T,np.vstack([Y,1])) 110 | return X.flatten() 111 | 112 | def resid(Y,u,Q): 113 | K = Q.shape[2] 114 | 115 | q = Q[:, 0:3, 0] 116 | x0 = Q[:, 3, 0] 117 | x0 = x0.reshape((3, 1)) 118 | x = np.dot(q, Y) + x0 119 | 120 | tu = u[0,:].reshape((2,1)) 121 | e = x[0:2]/x[2]-tu 122 | 123 | t1 = x[2]* q[0, :] 124 | t2 = x[0]* q[2, :] 125 | t3 = x[2]* q[1, :] 126 | t4 = x[1]* q[2, :] 127 | aux = np.vstack([t1 - t2, t3 - t4]) 128 | J = (aux / (x[2] * x[2])) 129 | 130 | for i in range(1,K): 131 | q = Q[:,0:3,i] 132 | x0 = Q[:,3,i] 133 | x0 = x0.reshape((3,1)) 134 | x = np.dot(q,Y) + x0 135 | tu = u[i, :].reshape((2, 1)) 136 | e = np.vstack([e, x[0:2]/x[2]-tu ]) 137 | 138 | t1 = x[2] * q[0, :] 139 | t2 = x[0] * q[2, :] 140 | t3 = x[2] * q[1, :] 141 | t4 = x[1] * q[2, :] 142 | aux = np.vstack([t1-t2 , t3-t4 ]) 143 | J = np.vstack([J, (aux/(x[2]*x[2]))]) 144 | return e,J 145 | 146 | 147 | def vgg_X_from_xP_lin(u,P,imsize): 148 | K = P.shape[2] 149 | newu = u.copy() 150 | newP = P.copy() 151 | if not imsize is None: 152 | for i in range(K): 153 | H = np.array([[2.0/imsize[0,i],0 , -1], 154 | [0, 2.0/imsize[1,i], -1], 155 | [0,0,1]]) 156 | newP[:,:,i] = np.dot(H,newP[:,:,i]) 157 | newu[i, :] = np.dot(H[0:2,0:2], newu[i, :]) + H[0:2,2] 158 | 159 | A= np.dot( formatVgg(np.hstack([newu[0,:],1])), newP[:,:,0]) 160 | for i in range(1,K): 161 | newRow = np.dot( formatVgg(np.hstack([newu[i,:],1])), newP[:,:,i]) 162 | A = np.vstack([A, newRow ]) 163 | 164 | U, s, out = np.linalg.svd(A) 165 | out = out.T 166 | out = out[:,-1] 167 | s = np.dot( np.reshape(newP[2,:,:], (4,K)).T ,out) 168 | if np.any(s < 0): 169 | out = -out 170 | 171 | return out 172 | 173 | def formatVgg(X): #Debe ser vector de 3 elementos 174 | row = [0,X[2],-X[1]] 175 | row2 = [-X[2],0,X[0]] 176 | row3 = [X[1],-X[0],0] 177 | return np.array([row,row2,row3]) 178 | 179 | 
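[Editor's note] getPose above scores each of the four (R, t) candidates obtained from the essential matrix by triangulating the matches with vgg_X_from_xP_nonlin and keeping the pose that puts the most points in front of both cameras. The sketch below is editorial, not repository code; the two-camera setup and all names are invented for illustration. It shows the linear (DLT) triangulation that vgg_X_from_xP_lin performs and that the Newton iterations then refine: stack the cross-product constraints [x]_x P X = 0 from each view and take the SVD null vector.

# Editorial sketch only -- not repository code. Linear (DLT) triangulation of one
# point from two synthetic views, the starting estimate that vgg_X_from_xP_nonlin refines.
import numpy as np

def skew(v):
    # cross-product matrix: skew(v).dot(u) == np.cross(v, u)
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

def triangulate(xs, Ps):
    # xs: list of 2D image points, Ps: list of 3x4 projection matrices
    A = np.vstack([skew(np.append(x, 1.0)).dot(P) for x, P in zip(xs, Ps)])
    _, _, Vt = np.linalg.svd(A)
    X = Vt[-1]                      # null vector of the stacked constraints
    return X[:3] / X[3]             # back to inhomogeneous coordinates

def proj(P, X):
    x = P.dot(np.append(X, 1.0))
    return x[:2] / x[2]

# two cameras sharing the calibration matrix built in main.py; the second is translated along x
K = np.diag([719.5459, 719.5459, 1.0])
P0 = K.dot(np.hstack([np.eye(3), np.zeros((3, 1))]))
P1 = K.dot(np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])]))

Xtrue = np.array([0.2, -0.1, 4.0])
Xhat = triangulate([proj(P0, Xtrue), proj(P1, Xtrue)], [P0, P1])
print("recovered point: %s (expected %s)" % (Xhat, Xtrue))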
-------------------------------------------------------------------------------- /utils/getPose.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/getPose.pyc -------------------------------------------------------------------------------- /utils/graph.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import pylab 4 | from utils.getPose import vgg_X_from_xP_nonlin 5 | from utils.parser import fullTest, checkIfPerm 6 | 7 | 8 | class tempGraph: 9 | def __init__(self): 10 | self.frames=[] 11 | self.f = None 12 | self.mot = None 13 | self.str = None 14 | self.obsVal = None 15 | self.ObsIdx = None 16 | self.focal = np.array([1]) 17 | self.denseMatch = None 18 | self.matches=None 19 | pass 20 | def closeEnought(self,other,tol): 21 | t1 = (self.frames == other.frames) 22 | t2 = (other.f == self.f) 23 | t3 = fullTest(self.mot, other.mot,tol) 24 | t4 = fullTest(self.str, other.str,tol) 25 | 26 | # Permite que los valores esten en otro orden, mientras sea un 27 | # Re ordenamiento de las filas 28 | t5 = checkIfPerm(self.obsVal, other.obsVal) 29 | 30 | # ObsIdx solo contiene indices , si se reordena estos indices cambian 31 | # Pero debe mantenerse que Ga.vals[gA.ObsIdx] = GB.vals[gB.ObsIdx] 32 | # Aun asi pueden venir desordenadas 33 | # TODO INEFINCIENTE A MEDIDA QUE AUMENTAN VALORES 34 | AllValuesFromIndexsA = self.obsVal[self.ObsIdx.astype(np.int), :] 35 | AllValuesFromIndexsB = other.obsVal[other.ObsIdx.astype(np.int), :] 36 | t6 = True 37 | for matchA in AllValuesFromIndexsA: 38 | fail = True 39 | for matchB in AllValuesFromIndexsB: 40 | if checkIfPerm(matchA, matchB): 41 | fail = False 42 | break 43 | if fail: 44 | t6 = False 45 | t7 = fullTest(self.focal, other.focal) 46 | return (t1 and t2 and t3 and t4 and t5 and t6 and t7) 47 | def __eq__(self, other): 48 | return self.closeEnought(other, 1e-02) 49 | def createGraph(id1,id2,focal,pA,pB,Rt,f): 50 | graph = tempGraph() 51 | graph.frames = [id1,id2] 52 | graph.focal = focal 53 | graph.f = f 54 | graph.mot = np.zeros((3,4,2)) 55 | n = pA.shape[0] 56 | 57 | graph.mot[:,:,0] = np.hstack([np.eye(3),np.zeros((3,1))]) 58 | graph.mot[:,:,1] = Rt 59 | 60 | graph.str = np.zeros((n,3)) 61 | graph.matches = np.hstack([pA, pB]) 62 | graph.obsVal = np.vstack([pA,pB]) 63 | 64 | graph.ObsIdx = np.zeros((n,2)) 65 | graph.ObsIdx[:,0] = range(n) 66 | graph.ObsIdx[:,1] = range(n,2*n) 67 | 68 | return graph 69 | 70 | 71 | def triangulateGraph(graph,imagesize): 72 | newGraph = graph 73 | n = newGraph.str.shape[0] 74 | X = np.zeros((n,4)) 75 | colIms = np.array([imagesize[1], imagesize[0]]).reshape((2, 1)) 76 | imsize = colIms.repeat(len(graph.frames), axis=1) 77 | for i in range(n): 78 | validCamera = np.where(graph.ObsIdx[i] != -1)[0] 79 | P = np.zeros((3,4,validCamera.shape[0])) 80 | x= np.zeros((validCamera.shape[0],2)) 81 | cnt=0 82 | #Consigue los puntos en el plano de la camara y la matriz de proyeccion 83 | for ind in validCamera: 84 | x[cnt,:] = newGraph.obsVal[newGraph.ObsIdx[i][ind],:] 85 | P[:,:,cnt] = np.dot(newGraph.focal,newGraph.mot[:,:,ind]) 86 | cnt+=1 87 | 88 | X[i,:] = vgg_X_from_xP_nonlin(x,P,imsize,X=None) 89 | allscales = X[:,3].reshape((n, 1)) 90 | newGraph.str = X[:,0:3] / np.hstack([allscales,allscales,allscales]) 91 | return newGraph 92 | 93 | 94 | def visualizeDense(listG,merged,imsize): 95 
| #plotear merge 96 | 97 | 98 | ax = showGraph(merged, imsize,True) 99 | allPoints = np.empty((3,0)) 100 | #plotear dense 101 | for g in listG: 102 | goodPoint = g.denseRepError < 0.05; 103 | ax.scatter(g.denseX[0,goodPoint], g.denseX[1,goodPoint], g.denseX[2,goodPoint]) 104 | allPoints = np.hstack([allPoints,g.denseX[:, goodPoint]]) 105 | allPoints = np.hstack([allPoints, np.transpose(merged.str)]) 106 | allPoints = np.transpose(allPoints) 107 | plt.show() 108 | return allPoints 109 | 110 | def showGraph(graph,imsize,getAxis=False): 111 | from mpl_toolkits.mplot3d import Axes3D 112 | 113 | 114 | fig = pylab.figure() 115 | ax = fig.gca(projection='3d') 116 | 117 | #dibujar camaras 118 | for i in range(graph.mot.shape[2]): 119 | V = getCamera(graph.mot[:, :, i], imsize[1], imsize[0], graph.f, 0.001) 120 | xi,yi,zi = V[0, [0, 4]], V[1, [0, 4]], V[2, [0, 4]] 121 | ax.plot(xi,yi,zi) 122 | xi,yi,zi = V[0, [0, 5]], V[1, [0, 5]], V[2, [0, 5]] 123 | ax.plot(xi,yi,zi) 124 | xi,yi,zi = V[0, [0, 6]], V[1, [0, 6]], V[2, [0, 6]] 125 | ax.plot(xi,yi,zi) 126 | xi,yi,zi = V[0, [0, 7]], V[1, [0, 7]], V[2, [0, 7]] 127 | ax.plot(xi,yi,zi) 128 | ax.plot(V[0, [4, 5,6,7,4]], V[1, [4, 5,6,7,4]], V[2, [4, 5,6,7,4]]) 129 | 130 | ax.scatter(graph.str[:,0], graph.str[:,1], graph.str[:,2]) 131 | 132 | if getAxis: 133 | return ax 134 | else: 135 | plt.show() 136 | 137 | def getCamera(Rt, w, h, f, scale): 138 | V = np.array([ 139 | [0, 0, 0, f, -(w * 0.5), (w * 0.5), (w * 0.5), -(w * 0.5)], 140 | [0, 0, f, 0, -(h * 0.5), -(h * 0.5), (h * 0.5), (h * 0.5)], 141 | [0, f, 0, 0, f, f, f, f] 142 | ]) 143 | V = scale * V 144 | V = transformPtsByRt(V, Rt, True) 145 | return V 146 | 147 | 148 | def transformPtsByRt(X3D, Rt, isInverse=True): 149 | repMat = np.repeat(Rt[:, 3, np.newaxis], X3D.shape[1], axis=1) 150 | 151 | if isInverse: 152 | Y3D = np.dot( np.transpose(Rt[:,0:3]) , (X3D - repMat ) ) 153 | else: 154 | Y3D = np.dot( Rt[:,0:3] , X3D) + repMat 155 | return Y3D -------------------------------------------------------------------------------- /utils/graph.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/graph.pyc -------------------------------------------------------------------------------- /utils/loadDatacsv.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def load(csvFilename): 4 | with open(csvFilename,'r') as f: 5 | rawText = f.read() 6 | data = rawText.split('\n')[1:] #Saca el header en elemento 0 7 | rows = [] 8 | for textRow in data: 9 | tempRow = [] 10 | for elem in textRow.split(','): 11 | if elem != '': 12 | tempRow.append(float(elem)) 13 | if len(tempRow) != 0: 14 | rows.append(tempRow) 15 | matrix = np.array(rows) 16 | return matrix 17 | 18 | 19 | 20 | if __name__ == '__main__': 21 | load("data/descriptLocI.csv") -------------------------------------------------------------------------------- /utils/loadDatacsv.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/loadDatacsv.pyc -------------------------------------------------------------------------------- /utils/matchSift.py: -------------------------------------------------------------------------------- 1 | 2 | from scipy.spatial import KDTree 3 | import numpy as np 4 | def 
matchSIFTdescr(descri, descrj, tolerancia=0.36): 5 | #La entrada de datos es en formato N,K 6 | kdTreeI = KDTree(descri) 7 | kdTreeJ = KDTree(descrj) 8 | 9 | matchedI = [] 10 | matchedJ = [] 11 | for i in range(len(descri)): 12 | descriptor = descri[i] 13 | darrayJ,indexArrayJ = kdTreeJ.query(descriptor,k=2,p=2) 14 | realD1 = np.linalg.norm(descriptor-descrj[indexArrayJ[0]])**2 15 | realD2 = np.linalg.norm(descriptor - descrj[indexArrayJ[1]]) ** 2 16 | 17 | if realD1 < tolerancia*realD2: 18 | candidato = descrj[indexArrayJ[0]] 19 | darrayI, indexArrayI = kdTreeI.query(candidato, k=2,p=2) 20 | realD1 = np.linalg.norm(candidato - descri[indexArrayI[0]]) ** 2 21 | realD2 = np.linalg.norm(candidato - descri[indexArrayI[1]]) ** 2 22 | if (indexArrayI[0] == i) and (realD1 < tolerancia*realD2) : 23 | matchedI.append(i) 24 | matchedJ.append(indexArrayJ[0]) 25 | return matchedI,matchedJ 26 | -------------------------------------------------------------------------------- /utils/matchSift.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/matchSift.pyc -------------------------------------------------------------------------------- /utils/mergeGraph.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | import numpy as np 3 | from utils.graph import transformPtsByRt, tempGraph 4 | from utils.parser import parseStruct, getIndexOfRow 5 | from test_data.dataMergeTest import * 6 | 7 | def mergeG(gA, gB): 8 | # Como ejemplo sean frames A 1 2 y B 2 3 9 | # los frarmes son las camaras o fotos 10 | 11 | # Primero se calculan frames que hacen overlap 12 | comFram = list(set(gA.frames).intersection(gB.frames)) 13 | 14 | # Luego las que son propias de A y las propias de B (en ej A1 B3) 15 | propB = list(set(gB.frames).difference(gA.frames)) 16 | indpB = [gB.frames.index(a) for a in propB] 17 | 18 | # Si no hay comunes retorna error 19 | # Si las propias de B son ninguna tira error 20 | if len(comFram) == 0: 21 | raise Exception("Comunes vacio ") 22 | if len(propB) == 0: 23 | raise Exception("No hay propias de B") 24 | 25 | # Crear grafo mezclca igual a grafo A 26 | merged = deepcopy(gA) 27 | 28 | # Para el primer overlap (pueden existir muchos) 29 | firstOv = comFram[0] 30 | 31 | # Transforma B.mot b.str al mismo sistema de cordenadas de A 32 | commonA = gA.frames.index(firstOv) 33 | commonB = gB.frames.index(firstOv) 34 | # Consigue transformada rtB 35 | 36 | transf = catRt(invertRt(gA.mot[:, :, commonA]), gB.mot[:, :, commonB]) 37 | gB.str = transformPtsByRt(np.transpose(gB.str), transf, False) # Aplicar a str B 38 | 39 | # Mot ahora es la concadenacion de mot y el inverso RtB 40 | for i in range(len(gB.frames)): 41 | gB.mot[:, :, i] = catRt(gB.mot[:, :, i], invertRt(transf)) 42 | merged.frames = list(set(gA.frames).union(set(gB.frames))) 43 | newMot = np.zeros((3, 4, len(merged.frames))) 44 | newMot[:, :, np.array(range(len(gA.frames)))] = gA.mot 45 | newMot[:, :, np.array(range(len(gA.frames), len(merged.frames)))] = gB.mot[:, :, indpB] 46 | 47 | merged.mot = newMot 48 | # Agrega frames a grafico 49 | 50 | # Ahora caso common frames mas de una 51 | for fr in comFram: 52 | cA = gA.frames.index(fr) 53 | cB = gB.frames.index(fr) 54 | 55 | obsIndA = gA.ObsIdx[:, cA][gA.ObsIdx[:, cA] != -1] 56 | obsIndA = gA.ObsIdx[:, cA] 57 | valA = gA.obsVal[obsIndA.astype(np.int), :] 58 | 59 | obsIndB = 
--------------------------------------------------------------------------------
/utils/loadDatacsv.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/loadDatacsv.pyc
--------------------------------------------------------------------------------
/utils/matchSift.py:
--------------------------------------------------------------------------------
1 |
2 | from scipy.spatial import KDTree
3 | import numpy as np
4 | def matchSIFTdescr(descri, descrj, tolerancia=0.36):
5 |     #Input data is in N,K format (N descriptors of dimension K)
6 |     kdTreeI = KDTree(descri)
7 |     kdTreeJ = KDTree(descrj)
8 |
9 |     matchedI = []
10 |     matchedJ = []
11 |     for i in range(len(descri)):
12 |         descriptor = descri[i]
13 |         darrayJ,indexArrayJ = kdTreeJ.query(descriptor,k=2,p=2)
14 |         realD1 = np.linalg.norm(descriptor-descrj[indexArrayJ[0]])**2
15 |         realD2 = np.linalg.norm(descriptor - descrj[indexArrayJ[1]]) ** 2
16 |
17 |         if realD1 < tolerancia*realD2:
18 |             candidato = descrj[indexArrayJ[0]]
19 |             darrayI, indexArrayI = kdTreeI.query(candidato, k=2,p=2)
20 |             realD1 = np.linalg.norm(candidato - descri[indexArrayI[0]]) ** 2
21 |             realD2 = np.linalg.norm(candidato - descri[indexArrayI[1]]) ** 2
22 |             if (indexArrayI[0] == i) and (realD1 < tolerancia*realD2) :
23 |                 matchedI.append(i)
24 |                 matchedJ.append(indexArrayJ[0])
25 |     return matchedI,matchedJ
26 |
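matchSIFTdescr is a two-way ratio test: a pair (i, j) is kept only when j clearly beats the second-best neighbor of descriptor i (squared-distance ratio below tolerancia) and the same test, run backwards from j, lands on i again. A toy check with three 2-D "descriptors" is sketched below; it is not part of the repository.

```python
# Minimal sketch (not part of the repo): mutual ratio-test matching on toy data.
import numpy as np
from utils.matchSift import matchSIFTdescr

descA = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0]])
descB = np.array([[0.1, 0.0], [0.0, 10.1], [10.0, 0.2]])  # same points, shuffled

idxA, idxB = matchSIFTdescr(descA, descB)
print zip(idxA, idxB)   # expected: [(0, 0), (1, 2), (2, 1)]
```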
--------------------------------------------------------------------------------
/utils/matchSift.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/matchSift.pyc
--------------------------------------------------------------------------------
/utils/mergeGraph.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import numpy as np
3 | from utils.graph import transformPtsByRt, tempGraph
4 | from utils.parser import parseStruct, getIndexOfRow
5 | from test_data.dataMergeTest import *
6 |
7 | def mergeG(gA, gB):
8 |     # As an example, let the frames of A be 1 2 and the frames of B be 2 3
9 |     # frames are the cameras / photos
10 |
11 |     # First compute the overlapping frames
12 |     comFram = list(set(gA.frames).intersection(gB.frames))
13 |
14 |     # Then the frames exclusive to A and exclusive to B (in the example A: 1, B: 3)
15 |     propB = list(set(gB.frames).difference(gA.frames))
16 |     indpB = [gB.frames.index(a) for a in propB]
17 |
18 |     # If there are no common frames raise an error
19 |     # If B has no exclusive frames raise an error
20 |     if len(comFram) == 0:
21 |         raise Exception("No common frames")
22 |     if len(propB) == 0:
23 |         raise Exception("No frames exclusive to B")
24 |
25 |     # Create the merged graph as a copy of graph A
26 |     merged = deepcopy(gA)
27 |
28 |     # Take the first overlapping frame (there may be several)
29 |     firstOv = comFram[0]
30 |
31 |     # Transform B.mot and B.str into A's coordinate system
32 |     commonA = gA.frames.index(firstOv)
33 |     commonB = gB.frames.index(firstOv)
34 |     # Compute the relative transform rtB
35 |
36 |     transf = catRt(invertRt(gA.mot[:, :, commonA]), gB.mot[:, :, commonB])
37 |     gB.str = transformPtsByRt(np.transpose(gB.str), transf, False)  # apply it to B's structure
38 |
39 |     # Mot is now the concatenation of mot and the inverse of the transform
40 |     for i in range(len(gB.frames)):
41 |         gB.mot[:, :, i] = catRt(gB.mot[:, :, i], invertRt(transf))
42 |     merged.frames = list(set(gA.frames).union(set(gB.frames)))
43 |     newMot = np.zeros((3, 4, len(merged.frames)))
44 |     newMot[:, :, np.array(range(len(gA.frames)))] = gA.mot
45 |     newMot[:, :, np.array(range(len(gA.frames), len(merged.frames)))] = gB.mot[:, :, indpB]
46 |
47 |     merged.mot = newMot
48 |     # Add the frames to the graph
49 |
50 |     # Now handle the common frames (there can be more than one)
51 |     for fr in comFram:
52 |         cA = gA.frames.index(fr)
53 |         cB = gB.frames.index(fr)
54 |
55 |         obsIndA = gA.ObsIdx[:, cA][gA.ObsIdx[:, cA] != -1]
56 |         obsIndA = gA.ObsIdx[:, cA]
57 |         valA = gA.obsVal[obsIndA.astype(np.int), :]
58 |
59 |         obsIndB = gB.ObsIdx[:, cB][gB.ObsIdx[:, cB] != -1]
60 |         obsIndB = gB.ObsIdx[:, cB]
61 |         valB = gB.obsVal[obsIndB.astype(np.int), :]
62 |
63 |         iA = findInterIndexA(valA, valB)[0]
64 |         comunes = valA[iA]
65 |         iB = np.array([getIndexOfRow(valB, row)[0][0] for row in comunes])
66 |
67 |         iA,iB = deleteRepeated(iA.tolist(), iB.tolist(),valA,valB)
68 |         iA = np.array(iA)
69 |         iB = np.array(iB)
70 |
71 |         for i in range(iA.shape[0]):
72 |             # idA = obsIndA[iA[i]]
73 |             # idB = obsIndB[iB[i]]
74 |             for j in range(len(indpB)):
75 |                 bObbsIdx = gB.ObsIdx[iB[i], indpB[j]]
76 |                 # Append one element to obsVal and to ObsIdx
77 |                 merged.obsVal = np.vstack([merged.obsVal, gB.obsVal[bObbsIdx, :]])
78 |                 while merged.ObsIdx.shape[1] < (len(gA.frames) + j + 1):
79 |                     merged.ObsIdx = np.hstack([merged.ObsIdx, minus1((merged.ObsIdx.shape[0], 1))])
80 |                 merged.ObsIdx[iA[i], len(gA.frames) + j] = merged.obsVal.shape[0]-1
81 |
82 |         # Compute the set difference
83 |         diferentesB = setDif(valB, valA)
84 |         idB = np.array([getIndexOfRow(valB, row)[0][0] for row in diferentesB])
85 |
86 |         for i in range(idB.shape[0]):
87 |             bObbsIdx = gB.ObsIdx[idB[i], cB]
88 |             merged.obsVal = np.vstack([merged.obsVal, gB.obsVal[bObbsIdx, :]])
89 |             merged.ObsIdx = np.vstack([merged.ObsIdx, minus1((1,merged.ObsIdx.shape[1]))])
90 |             merged.ObsIdx[merged.ObsIdx.shape[0]-1, cA] = merged.obsVal.shape[0]-1
91 |             merged.str = np.vstack([ merged.str , gB.str[:,idB[i]].reshape((1,3)) ])
92 |             for j in range(len(indpB)):
93 |                 bObbsIdx = gB.ObsIdx[idB[i], indpB[j]]
94 |                 merged.obsVal = np.vstack([merged.obsVal, gB.obsVal[bObbsIdx, :]])
95 |                 while merged.ObsIdx.shape[1] < (len(gA.frames) + j + 1):
96 |                     merged.ObsIdx = np.hstack([merged.ObsIdx, minus1((merged.ObsIdx.shape[0], 1))])
97 |                 merged.ObsIdx[-1, len(gA.frames) + j] = merged.obsVal.shape[0]-1
98 |
99 |
100 |     #Check that no column was left without any value
101 |     #Select in ObsIdx the common and the exclusive frames of gB
102 |     #Make sure that no point satisfies A and B at the same time
103 |     #Where A = every common column has the value -1
104 |     #Where B = over the exclusive columns, the sum of the values greater than -1 is greater than 0
105 |     newB = np.zeros((1,len(gB.frames)))
106 |     newB[:,np.array(indpB)] = 1
107 |     A= (np.sum( gB.ObsIdx[:,np.bitwise_not(newB.astype(np.bool)).astype(int)[0]], axis=1) < 0 )
108 |     B= (np.sum( gB.ObsIdx[:,newB[0].astype(np.int)], axis=1) > 0 )
109 |     assert(not np.any(np.bitwise_and(A,B)))
110 |
111 |     return merged
112 |
113 | def deleteRepeated(indexsA,indexsB,vA,vB):
114 |     #Find the repeated indices in vA,vB
115 |     # filterRepetedA = lambda x: x not in uniqueRowsIndexs(vA)
116 |     # repeteA = filter(filterRepetedA, range(vA.shape[0]))
117 |
118 |     toDelete = []
119 |     for i in range(len(indexsA)):
120 |         for j in range(i+1,len(indexsA)):
121 |             if np.array_equal(vA[indexsA[i]],vA[indexsA[j]]):
122 |                 toDelete.append(indexsA[j])
123 |     toDelete = list(set(toDelete)) #Drop duplicates, otherwise it raises an error.
124 |     for e in toDelete:
125 |         ind = indexsA.index(e)
126 |         indexsA.remove(e)
127 |         del indexsB[ind]
128 |     assert(np.array_equal(vA[np.array(indexsA)],vB[np.array(indexsB)]))
129 |     assert (len(set(indexsA)) == len(indexsA))
130 |     assert (len(set(indexsB)) == len(indexsB))
131 |     assert (len(indexsB) == len(indexsA))
132 |     return indexsA,indexsB
133 |
134 | def uniqueRows(a):
135 |     b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
136 |     _, idx = np.unique(b, return_index=True)
137 |     return a[idx]
138 |
139 |
140 | def uniqueRowsIndexs(a):
141 |     b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
142 |     _, idx = np.unique(b, return_index=True)
143 |     return idx
144 |
145 |
146 | def minus1(shape):
147 |     return -1 * np.ones(shape)
148 |
149 |
150 | def setDif(a1, a2):
151 |     a1_rows = a1.view([('', a1.dtype)] * a1.shape[1])
152 |     a2_rows = a2.view([('', a2.dtype)] * a2.shape[1])
153 |     return np.setdiff1d(a1_rows, a2_rows).view(a1.dtype).reshape(-1, a1.shape[1])
154 |
155 |
156 | def findInterIndexA(x, y):
157 |     x = x.astype(np.float64)
158 |     y = y.astype(np.float64)
159 |     return np.nonzero(np.in1d(x.view('d,d').reshape(-1), y.view('d,d').reshape(-1)))
160 |
161 |
162 | def catRt(rt1, rt2):
163 |     temp = rt1[:, 0:3]
164 |     temp2 = rt1[:, 3].reshape(3, 1)
165 |     return np.hstack([np.dot(temp, rt2[:, 0:3]), np.dot(temp, rt2[:, 3]).reshape(3, 1) + temp2])
166 |
167 |
168 | def invertRt(rt):
169 |     temp = np.transpose(rt[0:3, 0:3])
170 |     return np.hstack([temp, np.dot(-temp, rt[0:3, 3]).reshape(3, 1)])
171 |
172 |
173 | def removeOutlierPts(g, th_pix=10):
174 |     sq_th_pix = th_pix * th_pix
175 |     td = 2
176 |     tincos = np.cos(np.pi * td *1.0 / 180 )
177 |     for i in range(g.ObsIdx.shape[1]):
178 |         X = np.dot(g.focal,transformPtsByRt(np.transpose(g.str),g.mot[:,:,i],False))
179 |         xy = X[0:2,:] / X[[2,2],:]
180 |         selector = np.where(g.ObsIdx[:,i] != -1)[0]
181 |
182 |         dif = xy[:,selector] - np.transpose(g.obsVal[g.ObsIdx[selector,i].astype(np.int)])
183 |         outliers = np.sum(dif*dif,axis=0) > sq_th_pix
184 |         cantB = np.sum(outliers)
185 |         if cantB > 0:
186 |             print "Removed ", cantB, " outliers out of ", outliers.shape[0], \
187 |                 " total points with sq_th_pix ", sq_th_pix
188 |             p2keep = np.ones((1,g.str.shape[0]))
189 |             p2keep[:, selector[outliers]] = False
190 |             p2keep = p2keep[0].astype(np.bool)
191 |             g.str = g.str[p2keep,:]
192 |             g.ObsIdx = g.ObsIdx[p2keep,:]
193 |
194 |     nF = len(g.frames)
195 |     pos = np.zeros((3,nF))
196 |
197 |     for ii in range(nF):
198 |         Rt = g.mot[:,:,ii]
199 |         pos[:,ii] = - np.dot( np.transpose(Rt[0:3,0:3]), Rt[:,3])
200 |
201 |     view_dirs = np.zeros((g.str.shape[0],3,nF))
202 |     for c in range(g.ObsIdx.shape[1]):
203 |         selector = np.where(g.ObsIdx[:,c] != -1)[0]
204 |         t=np.repeat(pos[:,c].reshape((1,3)),g.str[selector,:].shape[0],axis=0)
205 |         camera_v_d = g.str[selector,:] - t
206 |         d_lengh = np.sqrt(np.sum(camera_v_d * camera_v_d,axis=1))
207 |         dt = 1.0 / d_lengh
208 |         camera_v_d = camera_v_d * np.transpose(np.vstack([dt,dt,dt]))
209 |         view_dirs[selector,:,c] = camera_v_d
210 |     for c1 in range(g.ObsIdx.shape[1]):
211 |         for c2 in range(g.ObsIdx.shape[1]):
212 |             if c1 == c2:
213 |                 continue
214 |             selector = np.where( np.bitwise_and(g.ObsIdx[:,c1] != -1 , g.ObsIdx[:,c2] != -1 ) )[0]
215 |             v_d1 = view_dirs[selector,:,c1]
216 |             v_d2 = view_dirs[selector,:,c2]
217 |             cos_a = np.sum(v_d1 * v_d2,axis=1)
218 |             outliers = cos_a > tincos
219 |
220 |             cantB = np.sum(outliers)
221 |             if cantB > 0:
222 |                 print "Removed ",cantB," outliers out of ",outliers.shape[0],\
223 |                     " total points with cos threshold ",tincos
224 |                 p2keep = np.ones((1, g.str.shape[0]))
225 |                 p2keep[:,selector[outliers]] = False
226 |                 p2keep = p2keep[0].astype(np.bool)
227 |                 g.str = g.str[p2keep, :]
228 |                 g.ObsIdx = g.ObsIdx[p2keep, :]
229 |
230 |     return g
231 |
232 |
233 |
234 | def test3It():
235 |     Originals = [parseStruct(or1), parseStruct(or2), parseStruct(or3), parseStruct(or4)]
236 |
237 |     expectedMerged = [parseStruct(st1), parseStruct(st2), parseStruct(st3), parseStruct(st4)]
238 |
239 |     orgGraphs = [dictToGraph(g) for g in Originals]
240 |     expectedGraphs = [dictToGraph(g) for g in expectedMerged]
241 |
242 |     assert (expectedGraphs[0] == orgGraphs[0])
243 |     assert (orgGraphs[0] != orgGraphs[1])
244 |
245 |     graphMerged = orgGraphs[0]
246 |     # merge the partial views
247 |     for i in range(len(orgGraphs) - 1):
248 |         graphMerged = mergeG(graphMerged, orgGraphs[i + 1])
249 |         assert (graphMerged == expectedGraphs[i+1])
250 |     print "TEST MERGE OK"
251 |     return Originals
252 | def testOutliers():
253 |     gbefore = dictToGraph(parseStruct(beforeMerge))
254 |     expectedG = dictToGraph(parseStruct(expectedMerge))
255 |     result = removeOutlierPts(gbefore, 10)
256 |     assert(result == expectedG)
257 |     print "TEST OUTLIERS OK"
258 |     pass
259 |
260 |
261 | def dictToGraph(d):
262 |     # Transform a graph from the MATLAB format to the python one
263 |     graph = tempGraph()
264 |     graph.frames = (d['frames'] - 1).tolist()[0]
265 |     graph.f = d['f']
266 |     graph.focal = np.eye(3)*graph.f
267 |     graph.focal[2,2] = 1
268 |     graph.mot = d['Mot']
269 |     graph.str = np.transpose(d['Str'])
270 |     graph.obsVal = np.transpose(d['ObsVal'])
271 |     graph.ObsIdx = np.transpose(d['ObsIdx']) - 1
272 |
273 |     if d.has_key('matches'):
274 |         graph.matches = np.transpose(d['matches'])
275 |     if d.has_key('denseMatch'):
276 |         graph.denseMatch = d['denseMatch']
277 |         graph.denseMatch[0,:] = graph.denseMatch[0,:] + 1
278 |         graph.denseMatch[1,:] = graph.denseMatch[1,:] + 1
279 |         graph.denseMatch[2,:] = graph.denseMatch[2,:] + 1
280 |         graph.denseMatch[3,:] = graph.denseMatch[3,:] + 1
281 |
282 |     return graph
283 |
284 |
285 | if __name__ == '__main__':
286 |     testOutliers()
287 |     a = test3It()
288 |
289 |     # import pickle
290 |     #
291 |     # with open("f.pkl", 'rb') as f:
292 |     #     graphList = pickle.load(f)
293 |     #     graphMerged = graphList[0]
294 |     #     # merge the partial views
295 |     #     for i in range(len(graphList) - 1):
296 |     #         graphMerged = mergeG(graphMerged, graphList[i + 1])
297 |     #     pass
298 |
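catRt composes two 3x4 [R|t] blocks as [R1*R2 | R1*t2 + t1] and invertRt builds [R' | -R'*t] (R' being the transpose of R), so composing a pose with its own inverse must give the identity pose [I | 0]. The snippet below is only a sanity-check sketch, not part of the repository.

```python
# Minimal sketch (not part of the repo): catRt(invertRt(Rt), Rt) == [I | 0].
import numpy as np
from utils.mergeGraph import catRt, invertRt

theta = 0.5
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
Rt = np.hstack([R, np.array([[1.0], [2.0], [3.0]])])   # arbitrary 3x4 pose

identity = catRt(invertRt(Rt), Rt)
assert np.allclose(identity, np.hstack([np.eye(3), np.zeros((3, 1))]))
```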
outliers de ",outliers.shape[0],\ 223 | " puntos totales con cost_thr de ",tincos 224 | p2keep = np.ones((1, g.str.shape[0])) 225 | p2keep[:,selector[outliers]] = False 226 | p2keep = p2keep[0].astype(np.bool) 227 | g.str = g.str[p2keep, :] 228 | g.ObsIdx = g.ObsIdx[p2keep, :] 229 | 230 | return g 231 | 232 | 233 | 234 | def test3It(): 235 | Originals = [parseStruct(or1), parseStruct(or2), parseStruct(or3), parseStruct(or4)] 236 | 237 | expectedMerged = [parseStruct(st1), parseStruct(st2), parseStruct(st3), parseStruct(st4)] 238 | 239 | orgGraphs = [dictToGraph(g) for g in Originals] 240 | expectedGraphs = [dictToGraph(g) for g in expectedMerged] 241 | 242 | assert (expectedGraphs[0] == orgGraphs[0]) 243 | assert (orgGraphs[0] != orgGraphs[1]) 244 | 245 | graphMerged = orgGraphs[0] 246 | # merge de vistas parciales 247 | for i in range(len(orgGraphs) - 1): 248 | graphMerged = mergeG(graphMerged, orgGraphs[i + 1]) 249 | assert (graphMerged == expectedGraphs[i+1]) 250 | print "TEST MERGE OK" 251 | return Originals 252 | def testOutliers(): 253 | gbefore = dictToGraph(parseStruct(beforeMerge)) 254 | expectedG = dictToGraph(parseStruct(expectedMerge)) 255 | result = removeOutlierPts(gbefore, 10) 256 | assert(result == expectedG) 257 | print "TEST OUTLIERS OK" 258 | pass 259 | 260 | 261 | def dictToGraph(d): 262 | # Transform graph from matlab format to python 263 | graph = tempGraph() 264 | graph.frames = (d['frames'] - 1).tolist()[0] 265 | graph.f = d['f'] 266 | graph.focal = np.eye(3)*graph.f 267 | graph.focal[2,2] = 1 268 | graph.mot = d['Mot'] 269 | graph.str = np.transpose(d['Str']) 270 | graph.obsVal = np.transpose(d['ObsVal']) 271 | graph.ObsIdx = np.transpose(d['ObsIdx']) - 1 272 | 273 | if d.has_key('matches'): 274 | graph.matches = np.transpose(d['matches']) 275 | if d.has_key('denseMatch'): 276 | graph.denseMatch = d['denseMatch'] 277 | graph.denseMatch[0,:] = graph.denseMatch[0,:] + 1 278 | graph.denseMatch[1,:] = graph.denseMatch[1,:] + 1 279 | graph.denseMatch[2,:] = graph.denseMatch[2,:] + 1 280 | graph.denseMatch[3,:] = graph.denseMatch[3,:] + 1 281 | 282 | return graph 283 | 284 | 285 | if __name__ == '__main__': 286 | testOutliers() 287 | a = test3It() 288 | 289 | # import pickle 290 | # 291 | # with open("f.pkl", 'rb') as f: 292 | # graphList = pickle.load(f) 293 | # graphMerged = graphList[0] 294 | # # merge de vistas parciales 295 | # for i in range(len(graphList) - 1): 296 | # graphMerged = mergeG(graphMerged, graphList[i + 1]) 297 | # pass 298 | -------------------------------------------------------------------------------- /utils/mergeGraph.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/mergeGraph.pyc -------------------------------------------------------------------------------- /utils/paresDescript.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import pylab as plt 3 | from matplotlib import pyplot as plt 4 | 5 | from utils.matchSift import matchSIFTdescr 6 | from utils.loadDatacsv import load 7 | sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=0.04) 8 | 9 | import numpy as np 10 | 11 | def drawMatches(img1, kp1, img2, kp2, indicesI,indicesJ): 12 | 13 | # Create a new output image that concatenates the two images together 14 | # (a.k.a) a montage 15 | rows1 = img1.shape[0] 16 | cols1 = img1.shape[1] 17 | rows2 = img2.shape[0] 18 | cols2 = img2.shape[1] 
--------------------------------------------------------------------------------
/utils/paresDescript.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/paresDescript.pyc
--------------------------------------------------------------------------------
/utils/parser.py:
--------------------------------------------------------------------------------
1 | from Tkinter import Tk
2 | import numpy as np
3 | def p():
4 |     t=Tk().clipboard_get()
5 |     return pst(t)
6 | def pst(string):
7 |     if string.find('[') == -1:
8 |         return float(string)
9 |     if string.find("*") == -1:
10 |         string = string[1:-1]
11 |         matrix = []
12 |         for st in string.split(';'):
13 |             fila = []
14 |             for elem in st.split():
15 |                 fila.append(float(elem))
16 |             matrix.append(fila)
17 |         dimX = len(matrix)
18 |         dimY = len(matrix[0])
19 |         out = np.zeros((dimX, dimY))
20 |         out = 
np.array(matrix) 21 | return out 22 | else: 23 | mStr = string.split('*') 24 | listaMatrices = [] 25 | for m in mStr: 26 | m = m[1:-1] 27 | matrix = [] 28 | for st in m.split(';'): 29 | fila = [] 30 | for elem in st.split(): 31 | fila.append(float(elem)) 32 | matrix.append(fila) 33 | listaMatrices.append(matrix) 34 | dim3 = len(listaMatrices) 35 | dimX = len(listaMatrices[0]) 36 | dimY = len(listaMatrices[0][0]) 37 | out = np.zeros((dimX, dimY, dim3)) 38 | for i in range(dim3): 39 | out[:, :, i] = np.array(listaMatrices[i]) 40 | return out 41 | 42 | def cmp(toComp): 43 | return np.array_equal(toComp, p()) 44 | 45 | def cmpT(toComp): 46 | return np.array_equal(np.transpose(toComp), p()) 47 | def cmpC(toComp): 48 | return np.allclose(toComp, p(),rtol=1e-02) 49 | def cmpCT(toComp): 50 | return np.allclose(np.transpose(toComp), p()) 51 | 52 | def parseStruct(stringStr): 53 | newObj = {} 54 | tupes = stringStr.split("///") 55 | 56 | for stringT in tupes: 57 | if stringT == "": 58 | continue 59 | alm = stringT.split(":") 60 | newObj[alm[0]] = pst(alm[1]) 61 | return newObj 62 | 63 | def fullTest(ma1,ma2,tol=1e-02,debug=False): 64 | if ma1.shape != ma2.shape: 65 | return False 66 | t1 = np.array_equal(ma1, ma2) 67 | t2 = np.allclose(ma1, ma2,atol=tol) 68 | assert((not(t1) and not(t2)) or t1 or t2) 69 | if debug: 70 | print "Equals ",t1 71 | print "Close 1e-02 ",t2 72 | print "Max abs(a-n) ",np.max(np.abs(ma1-ma2)) 73 | print "Result ",(t1 or t2) 74 | return (t1 or t2) 75 | def getIndexOfRow(matrix, row): 76 | return np.where(np.all(matrix == row, axis=1)) 77 | def checkIfPerm(matrix1,matrix2): 78 | correspondencia = [getIndexOfRow(row, matrix1) for row in matrix2] 79 | sonEmpty = map(lambda x: len(x[0]), correspondencia) 80 | return reduce(lambda r1,r2 : r1 and r2, sonEmpty) 81 | 82 | #from parser import p,cmp,cmpT,cmpC,cmpCT 83 | """ 84 | Uso 85 | from utils.parser import * 86 | fullTest(p(),X) 87 | Donde se quiere comprobar que el valor copiado desde matlab(conseguido por p()) 88 | es igual a X 89 | """ 90 | 91 | if __name__ == "__main__": 92 | 93 | testStruct = "frames:[1 2]///matches:[104.55908203125 92.0968780517578 38.6970825195313 -24.7012939453125 -33.0030822753906 -35.4376831054688 -40.0213623046875 -112.054016113281 190.609760284424 76.6166076660156 72.4523162841797 23.0695953369141 -4.47067260742188 -13.7249603271484 -19.1539611816406 -34.3994750976563 -34.3994750976563 -40.783203125 -57.9983520507813 -78.2849426269531 -93.5857543945313 -136.96533203125 182.81184387207 116.188339233398 85.9875640869141 85.9875640869141 70.9907989501953 71.5550079345703 12.0646514892578 -5.68087768554688 -33.1934509277344 -81.1534118652344 -81.1534118652344 -6.27102661132813 -60.4061584472656 109.557891845703 12.2635498046875 -4.41680908203125 -4.41680908203125 -46.3473205566406 48.0803833007813 29.5469055175781 24.6024475097656 -21.559326171875;-169.703826904297 -210.972595214844 -214.204833984375 86.4859466552734 174.037139892578 -176.931030273438 -201.952514648438 -113.366912841797 296.570676803589 212.845184326172 134.630432128906 -193.782165527344 -216.070617675781 172.782928466797 149.472457885742 180.580520629883 180.580520629883 204.784965515137 -178.880767822266 -126.283264160156 -158.542205810547 -187.055725097656 301.770374298096 300.114555358887 -164.170379638672 -164.170379638672 168.964645385742 122.728820800781 159.390045166016 244.405693054199 153.297943115234 80.5236663818359 80.5236663818359 137.858520507813 -272.312622070313 -153.662170410156 171.588638305664 216.551078796387 
216.551078796387 116.824081420898 147.335647583008 138.101333618164 107.753677368164 166.391799926758;122.296630859375 122.853096008301 77.1413726806641 13.1478881835938 8.95669555664063 -5.11518859863281 0.473953247070313 -108.58642578125 41.9714813232422 105.570739746094 115.571823120117 57.2670745849609 39.31201171875 28.0209655761719 19.4946594238281 6.22746276855469 6.22746276855469 -1.57060241699219 -27.008544921875 -68.9511413574219 -69.8277282714844 -124.752655029297 34.7072143554688 -28.4292602539063 103.559158325195 103.559158325195 110.076217651367 113.026176452637 56.6191864013672 28.5378265380859 4.1578369140625 -51.7969665527344 -51.7969665527344 29.6095123291016 2.35060119628906 120.404357910156 56.6713714599609 32.8003845214844 32.8003845214844 -14.0986633300781 88.6640625 68.3035888671875 63.8470458984375 20.8484649658203;-154.039306640625 -195.681457519531 -206.924194335938 84.9102630615234 174.001586914063 -181.541473388672 -207.793701171875 -128.435668945313 307.746163368225 217.97972869873 140.422317504883 -189.694274902344 -216.906921386719 173.055801391602 150.450836181641 180.296676635742 180.296676635742 205.346969604492 -186.964080810547 -137.448791503906 -171.301025390625 -196.00732421875 312.666220188141 308.584372520447 -150.715698242188 -150.715698242188 174.385787963867 127.80012512207 160.913101196289 245.769630432129 151.675430297852 75.2618408203125 75.2618408203125 136.95751953125 -282.993469238281 -137.334350585938 173.808364868164 217.878456115723 217.878456115723 114.759887695313 150.981353759766 140.516159057617 110.540878295898 166.806503295898]///F:[-1.05410048833945e-08 1.56356056795821e-07 -0.000104727065457066;-1.49334752816059e-06 2.90199408818357e-07 -0.00559477766743047;0.00064672506157313 0.00561546246388563 -0.00420600192824178]///E:[-0.00545756629992145 0.0809527702336744 -0.0753559305686634;-0.773175160614827 0.150249670818307 -4.02569933201115;0.465348366482193 4.0405829924928 -0.00420600192824178]///Rt:[0.980507892602122 -0.0944431127590168 -0.172292689912637 0.99963836870063;0.0976425288006969 0.995169579044828 0.0101708167393987 -0.0186917502406422;0.17049988010069 -0.0267956600241344 0.984993291088585 -0.0193326225454708]///f:719.5459///Mot:[0.999879130339609 0.00880498301125505 -0.0128139371575655 0.0966678885506464;-0.00888685806898615 0.999940369930756 -0.00634667916355482 -0.069978450682811;0.0127572906593925 0.0064597876834239 0.999897756112152 -3.51577617509314]*[0.980636494759034 -0.0848521010283081 -0.176499818973837 0.916986481160674;0.0852828285792061 0.996343432040814 -0.00515796264643823 -0.0706249414988878;0.176292099358551 -0.00999431739610548 0.984287157959272 -3.44237398527039]///Str:[0.166697945999278 0.130788190103594 0.033005790385496 -0.0760660105597759 -0.0866487129237495 -0.0969424742146559 -0.101955131658391 -0.266975060520048 29.679212861703 0.0652305349526345 0.0558478543963418 0.00710883712614587 -0.0415269750760221 -0.0606512159394499 -0.068123540220618 -0.0887717021824449 -0.0887717021824241 -0.0974691534701273 -0.137253997194628 -0.19139502440855 -0.206879391091686 -0.311397147720851 20.8457202791373 8.4806488350039 0.132580558777547 0.132580558941708 0.0541129197148793 0.0564703870321124 -0.0259530906380897 -0.0506403496469039 -0.0880213860654382 -0.163561911443076 -0.163561911443204 -0.0501057704796098 -0.127475006868633 0.186120772473202 -0.0259514343028657 -0.0485894100586125 -0.0485894100662701 -0.108742411428512 0.0239717694729195 -0.000230130418343862 -0.00604526988793999 
-0.0711337828450526;-0.219043607438998 -0.271889416021196 -0.266597199966755 0.223256073932415 0.332996741988701 -0.219532526992627 -0.245361461838684 -0.141086502522958 45.7626592115044 0.397720737941238 0.280367625480153 -0.239640075909824 -0.262488285389485 0.331278969805976 0.306992716912077 0.342629366039846 0.342629366044183 0.374478215086936 -0.222618124712463 -0.159960652146311 -0.196979420516426 -0.277472375091828 34.084348228294 21.3228096152714 -0.210584776591545 -0.21058477675396 0.327977547933647 0.26656022641832 0.310906689164935 0.429472074090974 0.3121936540728 0.22112251438638 0.221122514384929 0.293496587131343 -0.321094469117112 -0.20043875278997 0.326335812496437 0.391142066415378 0.391142066397444 0.270388685317677 0.299769438930537 0.290652589824944 0.251080462821295 0.323014996190975;4.88303988000562 4.79358755951129 4.75178312108423 4.55886616603378 4.48655851162813 4.81392506359306 4.74239322569142 5.03455993593737 111.392497007316 4.52197690320794 4.48227407262891 4.77941771448688 4.71985998669983 4.4872479749785 4.51754089931871 4.49047408147902 4.49047408148802 4.48565757088674 4.80993375343906 4.98810763745147 4.85818058083656 5.00672821615129 82.4769476861183 53.3268324286072 4.89260620377052 4.89260620441218 4.48788973639764 4.4979488494814 4.47577948706275 4.49077589436965 4.52374813884181 4.61728275995637 4.61728275995084 4.53942020045568 4.62147342739383 4.94340644887821 4.47043239837595 4.48860400433432 4.48860400429316 4.57675689203552 4.49678835103818 4.51644739823863 4.53127591556444 4.48750468246158]///ObsVal:[104.55908203125 92.0968780517578 38.6970825195313 -24.7012939453125 -33.0030822753906 -35.4376831054688 -40.0213623046875 -112.054016113281 190.609760284424 76.6166076660156 72.4523162841797 23.0695953369141 -4.47067260742188 -13.7249603271484 -19.1539611816406 -34.3994750976563 -34.3994750976563 -40.783203125 -57.9983520507813 -78.2849426269531 -93.5857543945313 -136.96533203125 182.81184387207 116.188339233398 85.9875640869141 85.9875640869141 70.9907989501953 71.5550079345703 12.0646514892578 -5.68087768554688 -33.1934509277344 -81.1534118652344 -81.1534118652344 -6.27102661132813 -60.4061584472656 109.557891845703 12.2635498046875 -4.41680908203125 -4.41680908203125 -46.3473205566406 48.0803833007813 29.5469055175781 24.6024475097656 -21.559326171875 122.296630859375 122.853096008301 77.1413726806641 13.1478881835938 8.95669555664063 -5.11518859863281 0.473953247070313 -108.58642578125 41.9714813232422 105.570739746094 115.571823120117 57.2670745849609 39.31201171875 28.0209655761719 19.4946594238281 6.22746276855469 6.22746276855469 -1.57060241699219 -27.008544921875 -68.9511413574219 -69.8277282714844 -124.752655029297 34.7072143554688 -28.4292602539063 103.559158325195 103.559158325195 110.076217651367 113.026176452637 56.6191864013672 28.5378265380859 4.1578369140625 -51.7969665527344 -51.7969665527344 29.6095123291016 2.35060119628906 120.404357910156 56.6713714599609 32.8003845214844 32.8003845214844 -14.0986633300781 88.6640625 68.3035888671875 63.8470458984375 20.8484649658203;-169.703826904297 -210.972595214844 -214.204833984375 86.4859466552734 174.037139892578 -176.931030273438 -201.952514648438 -113.366912841797 296.570676803589 212.845184326172 134.630432128906 -193.782165527344 -216.070617675781 172.782928466797 149.472457885742 180.580520629883 180.580520629883 204.784965515137 -178.880767822266 -126.283264160156 -158.542205810547 -187.055725097656 301.770374298096 300.114555358887 -164.170379638672 -164.170379638672 
168.964645385742 122.728820800781 159.390045166016 244.405693054199 153.297943115234 80.5236663818359 80.5236663818359 137.858520507813 -272.312622070313 -153.662170410156 171.588638305664 216.551078796387 216.551078796387 116.824081420898 147.335647583008 138.101333618164 107.753677368164 166.391799926758 -154.039306640625 -195.681457519531 -206.924194335938 84.9102630615234 174.001586914063 -181.541473388672 -207.793701171875 -128.435668945313 307.746163368225 217.97972869873 140.422317504883 -189.694274902344 -216.906921386719 173.055801391602 150.450836181641 180.296676635742 180.296676635742 205.346969604492 -186.964080810547 -137.448791503906 -171.301025390625 -196.00732421875 312.666220188141 308.584372520447 -150.715698242188 -150.715698242188 174.385787963867 127.80012512207 160.913101196289 245.769630432129 151.675430297852 75.2618408203125 75.2618408203125 136.95751953125 -282.993469238281 -137.334350585938 173.808364868164 217.878456115723 217.878456115723 114.759887695313 150.981353759766 140.516159057617 110.540878295898 166.806503295898]///ObsIdx:[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44;45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88]///" 94 | t=parseStruct(testStruct) 95 | print "hi" 96 | 97 | pass -------------------------------------------------------------------------------- /utils/parser.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aferral/Structure-from-motion-python/374e2d13e0372ece081cfcd2e96a47664bfb3962/utils/parser.pyc --------------------------------------------------------------------------------
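parser.py is a small helper for comparing values against the original MATLAB code: pst() parses a matrix copied from the MATLAB console ("[rows separated by ';']", optionally several matrices separated by '*'), and parseStruct() splits "name:value" fields separated by "///" — this is how the large test strings used by the merge tests are read back. The snippet below is only an illustrative sketch, not part of the repository.

```python
# Minimal sketch (not part of the repo): parse a MATLAB-style matrix and struct.
import numpy as np
from utils.parser import pst, parseStruct

m = pst("[1 2 3;4 5 6]")
assert m.shape == (2, 3) and m[1, 0] == 4.0

s = parseStruct("f:719.5459///K:[1 0;0 1]///")
assert s['f'] == 719.5459
assert np.array_equal(s['K'], np.eye(2))
```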