├── .gitignore ├── dataset ├── ground-truth │ ├── .directory │ ├── 00.txt │ ├── 01.txt │ ├── 02.txt │ ├── 03.txt │ ├── 04.txt │ ├── 05.txt │ ├── 06.txt │ ├── 07.txt │ ├── 08.txt │ ├── 09.txt │ └── 10.txt ├── kitti.py ├── kitti_lstm.py └── main_generate_data.py ├── evaluation.sh ├── evaluation ├── __init__.py ├── cpp │ ├── .directory │ ├── evaluate_odometry.cpp │ ├── mail.h │ ├── matrix.cpp │ └── matrix.h ├── plot_loss.py └── plot_main.py ├── main.py ├── main_experiment2.py ├── net ├── cnn.py ├── cnn_increase_kernal_size.py ├── cnn_lstm.py ├── cnn_seperate_conv.py ├── cnn_seperate_conv_1.py └── cnn_tb.py ├── readme.md ├── rnn ├── modules.py └── rnn.py ├── test.sh └── utils ├── misc.py ├── post_process.py └── preprocess.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | -------------------------------------------------------------------------------- /dataset/ground-truth/.directory: -------------------------------------------------------------------------------- 1 | [Dolphin] 2 | Timestamp=2017,4,20,17,38,13 3 | Version=3 4 | ViewMode=1 5 | -------------------------------------------------------------------------------- /dataset/ground-truth/04.txt: -------------------------------------------------------------------------------- 1 | 1.000000e+00 1.197625e-11 1.704638e-10 -5.551115e-17 1.197625e-11 1.000000e+00 3.562503e-10 0.000000e+00 1.704638e-10 3.562503e-10 1.000000e+00 2.220446e-16 2 | 9.999996e-01 -9.035185e-04 -2.101169e-04 1.289128e-03 9.037964e-04 9.999987e-01 1.325646e-03 -1.821616e-02 2.089193e-04 -1.325834e-03 9.999991e-01 1.310643e+00 3 | 9.999985e-01 -1.646347e-03 -5.538205e-04 4.448326e-04 1.647375e-03 9.999969e-01 1.859719e-03 -4.203318e-02 5.507574e-04 -1.860627e-03 9.999981e-01 2.625299e+00 4 | 9.999939e-01 -3.413454e-03 -7.312197e-04 2.037581e-03 3.413847e-03 9.999940e-01 5.361920e-04 -6.302801e-02 7.293855e-04 -5.386841e-04 9.999996e-01 3.941730e+00 5 | 9.999932e-01 -3.635348e-03 -6.434003e-04 2.777892e-03 3.635872e-03 9.999930e-01 8.141188e-04 -8.483761e-02 6.404366e-04 -8.164517e-04 9.999995e-01 5.261017e+00 6 | 9.999922e-01 -3.925219e-03 -5.162240e-04 4.157928e-03 3.926062e-03 9.999909e-01 1.640787e-03 -1.014678e-01 5.097792e-04 -1.642800e-03 9.999985e-01 6.581638e+00 7 | 9.999848e-01 -5.504052e-03 -4.169862e-04 7.571022e-03 5.505158e-03 9.999812e-01 2.694454e-03 -1.165874e-01 4.021483e-04 -2.696707e-03 9.999963e-01 7.908775e+00 8 | 9.999864e-01 -5.184552e-03 -5.975596e-04 8.323818e-03 5.186662e-03 9.999801e-01 3.583038e-03 -1.328035e-01 5.789716e-04 -3.586087e-03 9.999934e-01 9.238962e+00 9 | 9.999889e-01 -4.630870e-03 -8.604706e-04 7.728339e-03 4.632472e-03 9.999875e-01 1.867105e-03 -1.497649e-01 8.518139e-04 -1.871070e-03 9.999979e-01 1.056963e+01 10 | 9.999977e-01 -1.900807e-03 -9.601046e-04 6.780918e-03 1.900055e-03 9.999979e-01 -7.842219e-04 -1.634547e-01 9.615936e-04 7.823964e-04 9.999992e-01 1.190426e+01 11 | 9.999986e-01 1.543588e-03 -6.218670e-04 3.419395e-03 -1.544659e-03 9.999973e-01 -1.724973e-03 -1.767480e-01 6.192031e-04 1.725931e-03 9.999983e-01 1.324208e+01 12 | 9.999920e-01 3.962554e-03 -5.304797e-04 -1.862182e-03 -3.963388e-03 9.999909e-01 -1.579139e-03 -1.959202e-01 5.242178e-04 1.581229e-03 9.999986e-01 1.458048e+01 13 | 9.999865e-01 5.189813e-03 -3.869434e-04 -5.851755e-03 -5.190150e-03 9.999861e-01 -8.728856e-04 -2.177602e-01 3.824083e-04 8.748826e-04 9.999996e-01 1.593066e+01 14 | 9.999955e-01 2.993090e-03 2.876761e-05 -4.267715e-03 -2.993137e-03 9.999939e-01 1.769063e-03 
-2.364140e-01 -2.347214e-05 -1.769141e-03 9.999984e-01 1.728222e+01 15 | 9.999996e-01 -8.993856e-04 2.524904e-04 4.827890e-04 8.983692e-04 9.999916e-01 3.998216e-03 -2.537475e-01 -2.560839e-04 -3.997986e-03 9.999920e-01 1.863469e+01 16 | 9.999934e-01 -3.638641e-03 -3.959215e-05 5.006086e-03 3.638708e-03 9.999917e-01 1.831392e-03 -2.699101e-01 3.292839e-05 -1.831523e-03 9.999983e-01 1.998670e+01 17 | 9.999869e-01 -5.119262e-03 -2.320356e-04 4.134686e-03 5.119134e-03 9.999867e-01 -5.539281e-04 -2.931092e-01 2.348686e-04 5.527336e-04 9.999998e-01 2.134127e+01 18 | 9.999959e-01 -2.848774e-03 4.262615e-04 -7.654745e-04 2.849650e-03 9.999938e-01 -2.066802e-03 -3.222297e-01 -4.203707e-04 2.068008e-03 9.999978e-01 2.270575e+01 19 | 9.999992e-01 1.424853e-04 1.245425e-03 -7.369324e-03 -1.415313e-04 9.999997e-01 -7.660729e-04 -3.468119e-01 -1.245534e-03 7.658967e-04 9.999989e-01 2.407210e+01 20 | 9.999971e-01 1.440634e-03 1.942058e-03 -9.719264e-03 -1.451156e-03 9.999842e-01 5.427244e-03 -3.687424e-01 -1.934208e-03 -5.430044e-03 9.999834e-01 2.544868e+01 21 | 9.999966e-01 -1.050891e-03 2.369238e-03 -5.061181e-03 1.037930e-03 9.999845e-01 5.465290e-03 -3.871399e-01 -2.374945e-03 -5.462811e-03 9.999823e-01 2.682030e+01 22 | 9.999839e-01 -5.339007e-03 1.951315e-03 1.962273e-03 5.331392e-03 9.999782e-01 3.887907e-03 -4.099385e-01 -1.972030e-03 -3.877439e-03 9.999905e-01 2.819472e+01 23 | 9.999804e-01 -5.933229e-03 1.984892e-03 5.281593e-03 5.930623e-03 9.999815e-01 1.317193e-03 -4.328977e-01 -1.992671e-03 -1.305395e-03 9.999972e-01 2.957012e+01 24 | 9.999781e-01 -6.177289e-03 2.396696e-03 7.939024e-03 6.179643e-03 9.999804e-01 -9.754601e-04 -4.591179e-01 -2.390623e-03 9.902499e-04 9.999967e-01 3.094984e+01 25 | 9.999909e-01 -2.864263e-03 3.173275e-03 6.827076e-03 2.868672e-03 9.999949e-01 -1.385713e-03 -4.850133e-01 -3.169290e-03 1.394804e-03 9.999940e-01 3.233531e+01 26 | 9.999918e-01 -1.916604e-03 3.569832e-03 8.305167e-03 1.929098e-03 9.999920e-01 -3.499497e-03 -5.122367e-01 -3.563096e-03 3.506355e-03 9.999875e-01 3.371780e+01 27 | 9.999934e-01 -1.021449e-03 3.498427e-03 1.017340e-02 1.035312e-03 9.999916e-01 -3.963092e-03 -5.431160e-01 -3.494350e-03 3.966688e-03 9.999860e-01 3.511083e+01 28 | 9.999937e-01 -9.562966e-04 3.408962e-03 1.211940e-02 9.619574e-04 9.999981e-01 -1.659242e-03 -5.746233e-01 -3.407369e-03 1.662511e-03 9.999928e-01 3.650789e+01 29 | 9.999955e-01 -1.889567e-04 2.986144e-03 1.511478e-02 1.891381e-04 1.000000e+00 -6.045352e-05 -5.999006e-01 -2.986132e-03 6.101873e-05 9.999955e-01 3.790589e+01 30 | 9.999925e-01 2.514536e-03 2.962671e-03 1.696851e-02 -2.513425e-03 9.999967e-01 -3.789502e-04 -6.238526e-01 -2.963614e-03 3.715016e-04 9.999955e-01 3.930156e+01 31 | 9.999824e-01 5.051551e-03 3.101410e-03 1.942949e-02 -5.049835e-03 9.999871e-01 -5.613031e-04 -6.437120e-01 -3.104205e-03 5.456323e-04 9.999950e-01 4.070437e+01 32 | 9.999692e-01 7.203652e-03 3.111047e-03 1.916344e-02 -7.203531e-03 9.999740e-01 -5.033623e-05 -6.707246e-01 -3.111328e-03 2.792485e-05 9.999952e-01 4.211100e+01 33 | 9.999615e-01 8.308311e-03 2.843621e-03 2.134431e-02 -8.308996e-03 9.999654e-01 2.285387e-04 -6.941320e-01 -2.841624e-03 -2.521567e-04 9.999959e-01 4.351858e+01 34 | 9.999707e-01 7.259716e-03 2.412259e-03 2.622364e-02 -7.259297e-03 9.999736e-01 -1.830541e-04 -7.178581e-01 -2.413524e-03 1.655381e-04 9.999971e-01 4.492821e+01 35 | 9.999779e-01 6.426238e-03 1.720933e-03 3.086036e-02 -6.424858e-03 9.999790e-01 -8.074730e-04 -7.390250e-01 -1.726085e-03 7.963989e-04 9.999982e-01 4.634127e+01 36 | 9.999829e-01 
5.800169e-03 7.616191e-04 3.408031e-02 -5.800122e-03 9.999832e-01 -6.598169e-05 -7.610315e-01 -7.619887e-04 6.156377e-05 9.999997e-01 4.775704e+01 37 | 9.999819e-01 6.010565e-03 -2.411952e-04 3.359221e-02 -6.010374e-03 9.999816e-01 7.926521e-04 -7.839856e-01 2.459554e-04 -7.911873e-04 9.999997e-01 4.917963e+01 38 | 9.999771e-01 6.648910e-03 -1.303817e-03 3.175532e-02 -6.647540e-03 9.999773e-01 1.052800e-03 -8.056350e-01 1.310788e-03 -1.044108e-03 9.999986e-01 5.060218e+01 39 | 9.999749e-01 6.638251e-03 -2.479480e-03 2.895795e-02 -6.636601e-03 9.999777e-01 6.739965e-04 -8.265408e-01 2.483900e-03 -6.575234e-04 9.999967e-01 5.202831e+01 40 | 9.999699e-01 6.882776e-03 -3.588301e-03 2.536977e-02 -6.875797e-03 9.999744e-01 1.954123e-03 -8.453836e-01 3.601659e-03 -1.929391e-03 9.999917e-01 5.345716e+01 41 | 9.999753e-01 5.497092e-03 -4.387406e-03 2.082938e-02 -5.484306e-03 9.999807e-01 2.921095e-03 -8.668316e-01 4.403379e-03 -2.896960e-03 9.999861e-01 5.488464e+01 42 | 9.999794e-01 4.003115e-03 -5.028578e-03 1.680870e-02 -3.988904e-03 9.999880e-01 2.833038e-03 -8.868800e-01 5.039859e-03 -2.812920e-03 9.999834e-01 5.631660e+01 43 | 9.999825e-01 1.719377e-03 -5.654443e-03 1.322910e-02 -1.705846e-03 9.999956e-01 2.396848e-03 -9.072442e-01 5.658540e-03 -2.387160e-03 9.999812e-01 5.774550e+01 44 | 9.999835e-01 3.749396e-04 -5.730769e-03 1.007535e-02 -3.587007e-04 9.999959e-01 2.834406e-03 -9.249469e-01 5.731809e-03 -2.832302e-03 9.999796e-01 5.917365e+01 45 | 9.999856e-01 -8.364906e-04 -5.300789e-03 6.524255e-03 8.549023e-04 9.999936e-01 3.472042e-03 -9.446035e-01 5.297852e-03 -3.476522e-03 9.999799e-01 6.060318e+01 46 | 9.999862e-01 -1.858791e-03 -4.917931e-03 2.764210e-03 1.878972e-03 9.999898e-01 4.102186e-03 -9.647484e-01 4.910257e-03 -4.111369e-03 9.999795e-01 6.202795e+01 47 | 9.999827e-01 -3.792549e-03 -4.488790e-03 -1.308243e-03 3.809033e-03 9.999860e-01 3.669129e-03 -9.878100e-01 4.474812e-03 -3.686163e-03 9.999832e-01 6.345198e+01 48 | 9.999780e-01 -5.296196e-03 -4.008198e-03 -2.873604e-03 5.307526e-03 9.999819e-01 2.820928e-03 -1.008175e+00 3.993186e-03 -2.842138e-03 9.999880e-01 6.487187e+01 49 | 9.999807e-01 -5.220464e-03 -3.359595e-03 -5.953934e-03 5.228046e-03 9.999838e-01 2.251723e-03 -1.029700e+00 3.347786e-03 -2.269242e-03 9.999918e-01 6.628940e+01 50 | 9.999907e-01 -3.482462e-03 -2.553135e-03 -1.002031e-02 3.488378e-03 9.999912e-01 2.316104e-03 -1.050203e+00 2.545047e-03 -2.324987e-03 9.999941e-01 6.770311e+01 51 | 9.999977e-01 -6.481361e-04 -2.068772e-03 -1.936440e-02 6.556852e-04 9.999931e-01 3.650429e-03 -1.076075e+00 2.066392e-03 -3.651776e-03 9.999912e-01 6.911593e+01 52 | 9.999946e-01 3.012865e-03 -1.287971e-03 -2.702275e-02 -3.006442e-03 9.999832e-01 4.960905e-03 -1.097831e+00 1.302897e-03 -4.957004e-03 9.999869e-01 7.053000e+01 53 | 9.999777e-01 6.659933e-03 -5.287413e-04 -3.513442e-02 -6.656920e-03 9.999626e-01 5.510824e-03 -1.118231e+00 5.654236e-04 -5.507180e-03 9.999847e-01 7.193978e+01 54 | 9.999378e-01 1.115147e-02 1.358833e-04 -4.214650e-02 -1.115202e-02 9.999247e-01 5.112813e-03 -1.135987e+00 -7.885743e-05 -5.114008e-03 9.999869e-01 7.333986e+01 55 | 9.998986e-01 1.421691e-02 8.503240e-04 -4.677589e-02 -1.422070e-02 9.998882e-01 4.621051e-03 -1.153311e+00 -7.845315e-04 -4.632673e-03 9.999890e-01 7.474245e+01 56 | 9.998649e-01 1.636598e-02 1.544958e-03 -5.327678e-02 -1.637256e-02 9.998565e-01 4.344501e-03 -1.177226e+00 -1.473634e-03 -4.369207e-03 9.999894e-01 7.614450e+01 57 | 9.998598e-01 1.663766e-02 1.883621e-03 -5.458511e-02 -1.664595e-02 9.998514e-01 4.472345e-03 
-1.197942e+00 -1.808931e-03 -4.503071e-03 9.999882e-01 7.754079e+01 58 | 9.998793e-01 1.540192e-02 2.040510e-03 -5.287325e-02 -1.541125e-02 9.998705e-01 4.635585e-03 -1.218035e+00 -1.968848e-03 -4.666470e-03 9.999872e-01 7.893496e+01 59 | 9.998967e-01 1.423630e-02 1.959369e-03 -4.954250e-02 -1.424345e-02 9.998917e-01 3.684603e-03 -1.236655e+00 -1.906701e-03 -3.712129e-03 9.999913e-01 8.032545e+01 60 | 9.999076e-01 1.348987e-02 1.677215e-03 -4.883212e-02 -1.349578e-02 9.999026e-01 3.560725e-03 -1.258287e+00 -1.629018e-03 -3.583030e-03 9.999923e-01 8.171095e+01 61 | 9.999074e-01 1.353416e-02 1.448463e-03 -4.838807e-02 -1.353963e-02 9.999010e-01 3.833191e-03 -1.278676e+00 -1.396440e-03 -3.852447e-03 9.999916e-01 8.309721e+01 62 | 9.999140e-01 1.308010e-02 9.085344e-04 -5.269073e-02 -1.308305e-02 9.999089e-01 3.317811e-03 -1.300031e+00 -8.650540e-04 -3.329411e-03 9.999941e-01 8.448367e+01 63 | 9.999097e-01 1.343478e-02 1.870473e-04 -6.072004e-02 -1.343539e-02 9.999029e-01 3.681202e-03 -1.320245e+00 -1.375727e-04 -3.683382e-03 9.999932e-01 8.586562e+01 64 | 9.999145e-01 1.305409e-02 -7.454492e-04 -7.518042e-02 -1.305069e-02 9.999051e-01 4.397269e-03 -1.343695e+00 8.027813e-04 -4.387163e-03 9.999901e-01 8.725015e+01 65 | 9.999385e-01 1.093064e-02 -1.877975e-03 -8.437620e-02 -1.092024e-02 9.999254e-01 5.458590e-03 -1.362982e+00 1.937501e-03 -5.437745e-03 9.999833e-01 8.862909e+01 66 | 9.999566e-01 8.790731e-03 -3.086647e-03 -9.813229e-02 -8.771544e-03 9.999424e-01 6.176185e-03 -1.382640e+00 3.140763e-03 -6.148840e-03 9.999762e-01 9.000634e+01 67 | 9.999700e-01 6.803260e-03 -3.711637e-03 -1.110475e-01 -6.779632e-03 9.999569e-01 6.342125e-03 -1.399781e+00 3.754624e-03 -6.316769e-03 9.999730e-01 9.137524e+01 68 | 9.999761e-01 5.509752e-03 -4.188780e-03 -1.283351e-01 -5.485989e-03 9.999689e-01 5.663659e-03 -1.418059e+00 4.219856e-03 -5.640542e-03 9.999752e-01 9.274128e+01 69 | 9.999809e-01 4.282217e-03 -4.468492e-03 -1.454603e-01 -4.259218e-03 9.999777e-01 5.143990e-03 -1.435964e+00 4.490421e-03 -5.124857e-03 9.999768e-01 9.410339e+01 70 | 9.999772e-01 4.941455e-03 -4.604768e-03 -1.697048e-01 -4.919521e-03 9.999765e-01 4.762664e-03 -1.456190e+00 4.628195e-03 -4.739901e-03 9.999781e-01 9.546076e+01 71 | 9.999787e-01 4.738575e-03 -4.487410e-03 -1.913409e-01 -4.720319e-03 9.999805e-01 4.070437e-03 -1.476213e+00 4.506611e-03 -4.049167e-03 9.999817e-01 9.681382e+01 72 | 9.999761e-01 5.462031e-03 -4.246260e-03 -2.120314e-01 -5.447632e-03 9.999794e-01 3.395396e-03 -1.496478e+00 4.264719e-03 -3.372181e-03 9.999852e-01 9.816545e+01 73 | 9.999676e-01 7.060840e-03 -3.876949e-03 -2.344056e-01 -7.047185e-03 9.999689e-01 3.524911e-03 -1.517683e+00 3.901718e-03 -3.497473e-03 9.999863e-01 9.951513e+01 74 | 9.999651e-01 7.529260e-03 -3.627207e-03 -2.485715e-01 -7.511447e-03 9.999598e-01 4.900143e-03 -1.534999e+00 3.663956e-03 -4.872725e-03 9.999814e-01 1.008603e+02 75 | 9.999550e-01 8.833811e-03 -3.458298e-03 -2.694021e-01 -8.811343e-03 9.999403e-01 6.459623e-03 -1.556018e+00 3.515155e-03 -6.428858e-03 9.999732e-01 1.022054e+02 76 | 9.999597e-01 8.302254e-03 -3.413289e-03 -2.867601e-01 -8.284616e-03 9.999524e-01 5.149958e-03 -1.575247e+00 3.455884e-03 -5.121471e-03 9.999809e-01 1.035459e+02 77 | 9.999610e-01 8.173871e-03 -3.338205e-03 -3.053256e-01 -8.165524e-03 9.999635e-01 2.507096e-03 -1.596046e+00 3.358576e-03 -2.479739e-03 9.999913e-01 1.048845e+02 78 | 9.999651e-01 7.633274e-03 -3.387356e-03 -3.153955e-01 -7.628806e-03 9.999700e-01 1.330447e-03 -1.613299e+00 3.397411e-03 -1.304558e-03 9.999934e-01 1.062208e+02 
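[Editor's note on the numbers above and below.] Each numbered row of 04.txt, and of every other file in dataset/ground-truth/, follows the KITTI odometry convention: 12 space-separated values that are the row-major flattening of a 3x4 pose matrix [R | t], i.e. the pose of the camera at that frame expressed in the coordinate frame of the sequence's first frame (row 1 is therefore essentially the identity). dataset/main_generate_data.py further below reshapes the rows exactly this way. A minimal sketch for recovering one frame's 4x4 homogeneous pose (the path is illustrative):

import numpy as np

poses = np.loadtxt('dataset/ground-truth/04.txt')       # shape (num_frames, 12)
T0 = np.vstack([poses[0].reshape(3, 4), [0, 0, 0, 1]])  # 4x4 pose of frame 0
x, y, z = T0[0, 3], T0[1, 3], T0[2, 3]                  # translation (last column)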
79 | 9.999796e-01 5.504928e-03 -3.239048e-03 -3.343086e-01 -5.496405e-03 9.999814e-01 2.634819e-03 -1.638432e+00 3.253493e-03 -2.616961e-03 9.999913e-01 1.075653e+02 80 | 9.999916e-01 3.142105e-03 -2.625883e-03 -3.508131e-01 -3.129622e-03 9.999838e-01 4.744784e-03 -1.661275e+00 2.640749e-03 -4.736525e-03 9.999853e-01 1.089064e+02 81 | 9.999973e-01 1.561465e-03 -1.709519e-03 -3.737761e-01 -1.552202e-03 9.999842e-01 5.406523e-03 -1.686739e+00 1.717934e-03 -5.403854e-03 9.999839e-01 1.102446e+02 82 | 9.999993e-01 6.761649e-04 -1.000188e-03 -3.943149e-01 -6.721988e-04 9.999919e-01 3.960469e-03 -1.710102e+00 1.002858e-03 -3.959792e-03 9.999917e-01 1.115766e+02 83 | 9.999966e-01 2.581200e-03 -3.337541e-04 -4.137129e-01 -2.580678e-03 9.999954e-01 1.556832e-03 -1.733997e+00 3.377714e-04 -1.555964e-03 9.999987e-01 1.129050e+02 84 | 9.999895e-01 4.532670e-03 7.482827e-04 -4.219683e-01 -4.532188e-03 9.999895e-01 -6.455749e-04 -1.753986e+00 -7.512008e-04 6.421774e-04 9.999995e-01 1.142286e+02 85 | 9.999690e-01 7.570197e-03 2.161282e-03 -4.295899e-01 -7.568322e-03 9.999710e-01 -8.754176e-04 -1.775004e+00 -2.167846e-03 8.590337e-04 9.999973e-01 1.155601e+02 86 | 9.999531e-01 8.996724e-03 3.596945e-03 -4.334779e-01 -9.001425e-03 9.999586e-01 1.292099e-03 -1.794230e+00 -3.585171e-03 -1.324415e-03 9.999927e-01 1.168930e+02 87 | 9.999479e-01 9.079888e-03 4.667313e-03 -4.334151e-01 -9.097321e-03 9.999516e-01 3.727156e-03 -1.811794e+00 -4.633246e-03 -3.769420e-03 9.999822e-01 1.182274e+02 88 | 9.999525e-01 8.077879e-03 5.449540e-03 -4.306137e-01 -8.102033e-03 9.999574e-01 4.424713e-03 -1.825847e+00 -5.413565e-03 -4.468654e-03 9.999754e-01 1.195613e+02 89 | 9.999422e-01 8.587162e-03 6.468939e-03 -4.334146e-01 -8.611020e-03 9.999562e-01 3.668979e-03 -1.846322e+00 -6.437149e-03 -3.724469e-03 9.999724e-01 1.209045e+02 90 | 9.999167e-01 1.035031e-02 7.709247e-03 -4.472932e-01 -1.037511e-02 9.999411e-01 3.183679e-03 -1.880929e+00 -7.675841e-03 -3.263396e-03 9.999652e-01 1.222684e+02 91 | 9.998842e-01 1.238024e-02 8.849720e-03 -4.560744e-01 -1.240543e-02 9.999191e-01 2.797299e-03 -1.910030e+00 -8.814373e-03 -2.906759e-03 9.999569e-01 1.236242e+02 92 | 9.998641e-01 1.313728e-02 9.963119e-03 -4.622549e-01 -1.315502e-02 9.999120e-01 1.716443e-03 -1.940325e+00 -9.939693e-03 -1.847274e-03 9.999489e-01 1.249835e+02 93 | 9.998377e-01 1.423201e-02 1.105094e-02 -4.641329e-01 -1.424547e-02 9.998979e-01 1.139845e-03 -1.966295e+00 -1.103359e-02 -1.297085e-03 9.999383e-01 1.263371e+02 94 | 9.998388e-01 1.339481e-02 1.195754e-02 -4.708495e-01 -1.340614e-02 9.999097e-01 8.676732e-04 -2.006026e+00 -1.194484e-02 -1.027837e-03 9.999281e-01 1.276925e+02 95 | 9.998362e-01 1.286455e-02 1.272995e-02 -4.718094e-01 -1.288658e-02 9.999156e-01 1.649653e-03 -2.038343e+00 -1.270765e-02 -1.813427e-03 9.999176e-01 1.290496e+02 96 | 9.998371e-01 1.188958e-02 1.357740e-02 -4.742800e-01 -1.193361e-02 9.999238e-01 3.166744e-03 -2.073189e+00 -1.353872e-02 -3.328255e-03 9.999028e-01 1.304070e+02 97 | 9.998441e-01 1.089170e-02 1.389623e-02 -4.701254e-01 -1.093766e-02 9.999349e-01 3.235384e-03 -2.099489e+00 -1.386008e-02 -3.386870e-03 9.998982e-01 1.317557e+02 98 | 9.998649e-01 8.812545e-03 1.387257e-02 -4.656937e-01 -8.863631e-03 9.999541e-01 3.625222e-03 -2.127702e+00 -1.383999e-02 -3.747692e-03 9.998972e-01 1.331097e+02 99 | 9.998827e-01 7.142773e-03 1.354834e-02 -4.651028e-01 -7.199653e-03 9.999654e-01 4.154062e-03 -2.162932e+00 -1.351820e-02 -4.251117e-03 9.998996e-01 1.344614e+02 100 | 9.998996e-01 5.636614e-03 1.300499e-02 -4.661197e-01 
-5.686390e-03 9.999766e-01 3.793553e-03 -2.198388e+00 -1.298330e-02 -3.867122e-03 9.999082e-01 1.358146e+02 101 | 9.999228e-01 4.048643e-03 1.174935e-02 -4.644953e-01 -4.092637e-03 9.999847e-01 3.722651e-03 -2.226309e+00 -1.173410e-02 -3.770448e-03 9.999241e-01 1.371606e+02 102 | 9.999391e-01 2.791727e-03 1.067837e-02 -4.645723e-01 -2.842435e-03 9.999847e-01 4.736388e-03 -2.254854e+00 -1.066498e-02 -4.766451e-03 9.999318e-01 1.385124e+02 103 | 9.999526e-01 1.228059e-03 9.656581e-03 -4.641807e-01 -1.276893e-03 9.999864e-01 5.052512e-03 -2.281749e+00 -9.650245e-03 -5.064602e-03 9.999406e-01 1.398626e+02 104 | 9.999650e-01 4.853852e-04 8.355191e-03 -4.674418e-01 -5.268857e-04 9.999875e-01 4.965534e-03 -2.314866e+00 -8.352676e-03 -4.969760e-03 9.999528e-01 1.412129e+02 105 | 9.999748e-01 2.292746e-04 7.102144e-03 -4.704574e-01 -2.612741e-04 9.999898e-01 4.505001e-03 -2.341886e+00 -7.101039e-03 -4.506741e-03 9.999646e-01 1.425622e+02 106 | 9.999816e-01 -1.461826e-04 6.061113e-03 -4.747020e-01 1.219015e-04 9.999919e-01 4.006239e-03 -2.367455e+00 -6.061650e-03 -4.005425e-03 9.999736e-01 1.439099e+02 107 | 9.999856e-01 -1.774247e-03 5.074856e-03 -4.789013e-01 1.762936e-03 9.999959e-01 2.232536e-03 -2.395024e+00 -5.078797e-03 -2.223556e-03 9.999846e-01 1.452578e+02 108 | 9.999656e-01 -7.298614e-03 3.947404e-03 -4.813925e-01 7.295542e-03 9.999730e-01 7.925721e-04 -2.426593e+00 -3.953082e-03 -7.637455e-04 9.999919e-01 1.466011e+02 109 | 9.998967e-01 -1.409568e-02 2.802842e-03 -4.844150e-01 1.408553e-02 9.998943e-01 3.608253e-03 -2.462969e+00 -2.853406e-03 -3.568400e-03 9.999896e-01 1.479538e+02 110 | 9.997630e-01 -2.173080e-02 1.320181e-03 -4.832843e-01 2.172460e-02 9.997536e-01 4.543876e-03 -2.489366e+00 -1.418598e-03 -4.514118e-03 9.999888e-01 1.493030e+02 111 | 9.996862e-01 -2.504947e-02 1.332437e-04 -4.887986e-01 2.504914e-02 9.996840e-01 2.113372e-03 -2.518617e+00 -1.861401e-04 -2.109370e-03 9.999978e-01 1.506481e+02 112 | 9.996841e-01 -2.513252e-02 3.807422e-04 -4.979854e-01 2.513320e-02 9.996823e-01 -1.884703e-03 -2.545628e+00 -3.332535e-04 1.893677e-03 9.999982e-01 1.519917e+02 113 | 9.997662e-01 -2.158749e-02 1.248822e-03 -5.139897e-01 2.159192e-02 9.997602e-01 -3.645129e-03 -2.581792e+00 -1.169833e-03 3.671241e-03 9.999926e-01 1.533373e+02 114 | 9.997705e-01 -2.133187e-02 2.001939e-03 -5.203405e-01 2.133904e-02 9.997657e-01 -3.630061e-03 -2.611885e+00 -1.924034e-03 3.671947e-03 9.999914e-01 1.546843e+02 115 | 9.997071e-01 -2.405938e-02 2.609289e-03 -5.188374e-01 2.406072e-02 9.997104e-01 -4.795238e-04 -2.641449e+00 -2.596996e-03 5.421654e-04 9.999965e-01 1.560329e+02 116 | 9.995597e-01 -2.951217e-02 3.081805e-03 -5.104870e-01 2.950365e-02 9.995608e-01 2.776577e-03 -2.666163e+00 -3.162394e-03 -2.684429e-03 9.999914e-01 1.573829e+02 117 | 9.994284e-01 -3.366699e-02 3.070718e-03 -4.996047e-01 3.365623e-02 9.994273e-01 3.494311e-03 -2.689150e+00 -3.186602e-03 -3.388964e-03 9.999892e-01 1.587319e+02 118 | 9.993665e-01 -3.546242e-02 3.006804e-03 -4.908894e-01 3.545071e-02 9.993639e-01 3.864764e-03 -2.711383e+00 -3.141945e-03 -3.755721e-03 9.999880e-01 1.600786e+02 119 | 9.994176e-01 -3.391111e-02 3.821913e-03 -4.848759e-01 3.389483e-02 9.994163e-01 4.248695e-03 -2.736780e+00 -3.963760e-03 -4.116675e-03 9.999837e-01 1.614290e+02 120 | 9.995379e-01 -2.997704e-02 5.043696e-03 -4.836516e-01 2.995749e-02 9.995435e-01 3.909706e-03 -2.760604e+00 -5.158595e-03 -3.756801e-03 9.999796e-01 1.627788e+02 121 | 9.996297e-01 -2.654347e-02 5.992326e-03 -4.767803e-01 2.653482e-02 9.996467e-01 1.518786e-03 
-2.787254e+00 -6.030523e-03 -1.359217e-03 9.999809e-01 1.641295e+02 122 | 9.996963e-01 -2.370307e-02 6.750227e-03 -4.687840e-01 2.369904e-02 9.997189e-01 6.776078e-04 -2.810667e+00 -6.764391e-03 -5.174273e-04 9.999770e-01 1.654807e+02 123 | 9.997390e-01 -2.170098e-02 7.145421e-03 -4.599264e-01 2.168704e-02 9.997627e-01 2.022974e-03 -2.835558e+00 -7.187626e-03 -1.867482e-03 9.999724e-01 1.668390e+02 124 | 9.997748e-01 -2.006189e-02 6.919389e-03 -4.494840e-01 2.004972e-02 9.997973e-01 1.825343e-03 -2.861986e+00 -6.954607e-03 -1.686199e-03 9.999744e-01 1.682000e+02 125 | 9.998149e-01 -1.813299e-02 6.434412e-03 -4.424438e-01 1.813719e-02 9.998353e-01 -5.943430e-04 -2.886560e+00 -6.422575e-03 7.109357e-04 9.999791e-01 1.695602e+02 126 | 9.998753e-01 -1.463466e-02 5.933089e-03 -4.389717e-01 1.465566e-02 9.998864e-01 -3.510450e-03 -2.914240e+00 -5.881041e-03 3.596966e-03 9.999762e-01 1.709198e+02 127 | 9.999362e-01 -9.733294e-03 5.740251e-03 -4.111543e-01 9.761015e-03 9.999407e-01 -4.820709e-03 -2.932731e+00 -5.692989e-03 4.876432e-03 9.999719e-01 1.722841e+02 128 | 9.999686e-01 -5.143146e-03 6.037007e-03 -3.881120e-01 5.167231e-03 9.999787e-01 -3.980535e-03 -2.960771e+00 -6.016406e-03 4.011605e-03 9.999739e-01 1.736528e+02 129 | 9.999768e-01 -1.828108e-03 6.567234e-03 -3.779301e-01 1.838035e-03 9.999972e-01 -1.505773e-03 -2.986448e+00 -6.564463e-03 1.517810e-03 9.999773e-01 1.750291e+02 130 | 9.999765e-01 -1.477610e-03 6.696021e-03 -3.588782e-01 1.474495e-03 9.999988e-01 4.701385e-04 -3.008539e+00 -6.696708e-03 -4.602534e-04 9.999775e-01 1.764086e+02 131 | 9.999731e-01 -4.339593e-03 5.909490e-03 -3.405308e-01 4.328926e-03 9.999890e-01 1.816912e-03 -3.013913e+00 -5.917309e-03 -1.791280e-03 9.999809e-01 1.777852e+02 132 | 9.999673e-01 -6.892799e-03 4.231974e-03 -3.214042e-01 6.885778e-03 9.999749e-01 1.671888e-03 -3.022953e+00 -4.243392e-03 -1.642692e-03 9.999897e-01 1.791607e+02 133 | 9.999709e-01 -7.309297e-03 2.170147e-03 -3.136545e-01 7.314585e-03 9.999703e-01 -2.437950e-03 -3.041123e+00 -2.152263e-03 2.453753e-03 9.999947e-01 1.805420e+02 134 | 9.999803e-01 -6.249158e-03 6.762724e-04 -3.089995e-01 6.253708e-03 9.999563e-01 -6.947885e-03 -3.062100e+00 -6.328241e-04 6.951976e-03 9.999756e-01 1.819214e+02 135 | 9.999989e-01 -1.472010e-03 7.625144e-05 -3.140564e-01 1.472565e-03 9.999679e-01 -7.868715e-03 -3.088158e+00 -6.466581e-05 7.868819e-03 9.999690e-01 1.833098e+02 136 | 9.999966e-01 2.593231e-03 -4.985571e-05 -3.186558e-01 -2.593500e-03 9.999756e-01 -6.476319e-03 -3.114466e+00 3.306026e-05 6.476426e-03 9.999790e-01 1.847039e+02 137 | 9.999907e-01 4.303533e-03 2.334260e-04 -3.141305e-01 -4.302714e-03 9.999849e-01 -3.405586e-03 -3.137555e+00 -2.480782e-04 3.404550e-03 9.999942e-01 1.860994e+02 138 | 9.999868e-01 5.091028e-03 6.512883e-04 -3.087277e-01 -5.090224e-03 9.999863e-01 -1.231497e-03 -3.164478e+00 -6.575487e-04 1.228166e-03 9.999990e-01 1.874988e+02 139 | 9.999898e-01 4.448722e-03 7.330863e-04 -3.089327e-01 -4.448418e-03 9.999900e-01 -4.171905e-04 -3.193945e+00 -7.349346e-04 4.139258e-04 9.999997e-01 1.889003e+02 140 | 9.999954e-01 2.954010e-03 6.755492e-04 -3.070211e-01 -2.954257e-03 9.999955e-01 3.634327e-04 -3.221858e+00 -6.744723e-04 -3.654260e-04 9.999997e-01 1.903048e+02 141 | 9.999997e-01 3.667254e-04 6.633030e-04 -3.030042e-01 -3.672563e-04 9.999996e-01 8.003941e-04 -3.250530e+00 -6.630089e-04 -8.006366e-04 9.999995e-01 1.917128e+02 142 | 9.999968e-01 -2.296786e-03 1.039530e-03 -3.007558e-01 2.295944e-03 9.999970e-01 8.102725e-04 -3.281162e+00 -1.041387e-03 -8.078824e-04 
9.999991e-01 1.931213e+02 143 | 9.999851e-01 -5.294323e-03 1.369557e-03 -2.979338e-01 5.291893e-03 9.999844e-01 1.772738e-03 -3.314135e+00 -1.378920e-03 -1.765463e-03 9.999975e-01 1.945397e+02 144 | 9.999546e-01 -9.368607e-03 1.771587e-03 -2.945301e-01 9.362221e-03 9.999497e-01 3.580279e-03 -3.344375e+00 -1.805040e-03 -3.563529e-03 9.999920e-01 1.959592e+02 145 | 9.999160e-01 -1.280487e-02 1.997357e-03 -2.955048e-01 1.279759e-02 9.999115e-01 3.617325e-03 -3.384557e+00 -2.043500e-03 -3.591458e-03 9.999915e-01 1.973809e+02 146 | 9.999376e-01 -1.094347e-02 2.233356e-03 -2.998637e-01 1.094485e-02 9.999399e-01 -6.040287e-04 -3.419365e+00 -2.226612e-03 6.284354e-04 9.999973e-01 1.988011e+02 147 | 9.999866e-01 -4.404760e-03 2.705777e-03 -3.150034e-01 4.421524e-03 9.999709e-01 -6.220738e-03 -3.471363e+00 -2.678297e-03 6.232618e-03 9.999770e-01 2.002242e+02 148 | 9.999803e-01 4.937679e-03 3.867005e-03 -3.321987e-01 -4.911558e-03 9.999652e-01 -6.735801e-03 -3.515079e+00 -3.900129e-03 6.716675e-03 9.999698e-01 2.016573e+02 149 | 9.998989e-01 1.319129e-02 5.303121e-03 -3.485808e-01 -1.317803e-02 9.999099e-01 -2.527297e-03 -3.555576e+00 -5.335981e-03 2.457157e-03 9.999828e-01 2.030948e+02 150 | 9.998740e-01 1.466149e-02 6.089232e-03 -3.518432e-01 -1.466092e-02 9.998925e-01 -1.386890e-04 -3.580327e+00 -6.090611e-03 4.939844e-05 9.999815e-01 2.045325e+02 151 | 9.999049e-01 1.262628e-02 5.556508e-03 -3.481359e-01 -1.262551e-02 9.999203e-01 -1.752367e-04 -3.604083e+00 -5.558278e-03 1.050669e-04 9.999846e-01 2.059704e+02 152 | 9.999449e-01 9.535333e-03 4.390272e-03 -3.438313e-01 -9.534934e-03 9.999545e-01 -1.122394e-04 -3.628674e+00 -4.391142e-03 7.037296e-05 9.999904e-01 2.074183e+02 153 | 9.999698e-01 7.056709e-03 3.265357e-03 -3.421980e-01 -7.056163e-03 9.999751e-01 -1.791660e-04 -3.650886e+00 -3.266540e-03 1.561204e-04 9.999947e-01 2.088679e+02 154 | 9.999687e-01 7.421207e-03 2.746368e-03 -3.429638e-01 -7.418335e-03 9.999719e-01 -1.054972e-03 -3.674360e+00 -2.754120e-03 1.034566e-03 9.999957e-01 2.103232e+02 155 | 9.999572e-01 8.865968e-03 2.639868e-03 -3.479510e-01 -8.863573e-03 9.999603e-01 -9.180181e-04 -3.708945e+00 -2.647902e-03 8.945807e-04 9.999961e-01 2.117849e+02 156 | 9.999457e-01 1.011607e-02 2.524498e-03 -3.533161e-01 -1.011626e-02 9.999488e-01 6.078905e-05 -3.741769e+00 -2.523753e-03 -8.632351e-05 9.999968e-01 2.132453e+02 157 | 9.999520e-01 9.528196e-03 2.287637e-03 -3.575809e-01 -9.528055e-03 9.999546e-01 -7.338614e-05 -3.778065e+00 -2.288232e-03 5.158657e-05 9.999974e-01 2.147142e+02 158 | 9.999567e-01 9.109499e-03 1.882098e-03 -3.574839e-01 -9.106736e-03 9.999574e-01 -1.472665e-03 -3.809987e+00 -1.895433e-03 1.455462e-03 9.999972e-01 2.161864e+02 159 | 9.999636e-01 8.359215e-03 1.687676e-03 -3.559812e-01 -8.356573e-03 9.999638e-01 -1.567357e-03 -3.837087e+00 -1.700717e-03 1.553197e-03 9.999974e-01 2.176615e+02 160 | 9.999613e-01 8.710909e-03 1.240726e-03 -3.604882e-01 -8.709396e-03 9.999613e-01 -1.221145e-03 -3.867619e+00 -1.251315e-03 1.210292e-03 9.999985e-01 2.191419e+02 161 | 9.999614e-01 8.743635e-03 8.939218e-04 -3.669047e-01 -8.742048e-03 9.999602e-01 -1.766652e-03 -3.899694e+00 -9.093329e-04 1.758769e-03 9.999980e-01 2.206192e+02 162 | 9.999611e-01 8.794205e-03 6.937191e-04 -3.711882e-01 -8.792778e-03 9.999592e-01 -2.036552e-03 -3.933170e+00 -7.116004e-04 2.030374e-03 9.999977e-01 2.221053e+02 163 | 9.999510e-01 9.892701e-03 3.480351e-04 -3.755068e-01 -9.891997e-03 9.999491e-01 -1.976626e-03 -3.961696e+00 -3.675712e-04 1.973087e-03 9.999980e-01 2.235894e+02 164 | 9.999545e-01 
9.535271e-03 3.060761e-06 -3.771503e-01 -9.535239e-03 9.999517e-01 -2.380069e-03 -3.990503e+00 -2.575488e-05 2.379932e-03 9.999972e-01 2.250789e+02 165 | 9.999496e-01 1.003812e-02 -2.604534e-04 -3.787707e-01 -1.003868e-02 9.999470e-01 -2.264917e-03 -4.017067e+00 2.377044e-04 2.267418e-03 9.999974e-01 2.265691e+02 166 | 9.999586e-01 9.094606e-03 -3.650914e-04 -3.785266e-01 -9.095365e-03 9.999563e-01 -2.127619e-03 -4.048136e+00 3.457260e-04 2.130852e-03 9.999977e-01 2.280602e+02 167 | 9.999692e-01 7.840322e-03 -2.576861e-04 -3.786614e-01 -7.841006e-03 9.999654e-01 -2.764804e-03 -4.075078e+00 2.360006e-04 2.766740e-03 9.999962e-01 2.295511e+02 168 | 9.999775e-01 6.708944e-03 -8.900665e-05 -3.766086e-01 -6.709141e-03 9.999746e-01 -2.414917e-03 -4.099044e+00 7.280319e-05 2.415460e-03 9.999971e-01 2.310454e+02 169 | 9.999869e-01 5.120322e-03 1.128453e-04 -3.745701e-01 -5.120128e-03 9.999855e-01 -1.666319e-03 -4.124994e+00 -1.213754e-04 1.665719e-03 9.999986e-01 2.325421e+02 170 | 9.999877e-01 4.961834e-03 1.048822e-04 -3.739626e-01 -4.961702e-03 9.999869e-01 -1.231555e-03 -4.153624e+00 -1.109912e-04 1.231020e-03 9.999992e-01 2.340378e+02 171 | 9.999858e-01 5.337975e-03 2.022159e-05 -3.764692e-01 -5.337943e-03 9.999848e-01 -1.389784e-03 -4.186333e+00 -2.763958e-05 1.389656e-03 9.999990e-01 2.355390e+02 172 | 9.999818e-01 6.028187e-03 2.363445e-04 -3.784077e-01 -6.027547e-03 9.999784e-01 -2.625311e-03 -4.220226e+00 -2.521649e-04 2.623838e-03 9.999965e-01 2.370446e+02 173 | 9.999731e-01 7.300761e-03 7.680277e-04 -3.814943e-01 -7.299344e-03 9.999717e-01 -1.833168e-03 -4.253315e+00 -7.813891e-04 1.827513e-03 9.999980e-01 2.385499e+02 174 | 9.999700e-01 7.615056e-03 1.421637e-03 -3.829065e-01 -7.615902e-03 9.999708e-01 5.892762e-04 -4.285388e+00 -1.417108e-03 -6.000848e-04 9.999988e-01 2.400603e+02 175 | 9.999712e-01 7.345183e-03 1.934962e-03 -3.820924e-01 -7.347076e-03 9.999725e-01 9.725281e-04 -4.315103e+00 -1.927765e-03 -9.867155e-04 9.999977e-01 2.415712e+02 176 | 9.999683e-01 7.596371e-03 2.404236e-03 -3.818797e-01 -7.594896e-03 9.999709e-01 -6.227482e-04 -4.347138e+00 -2.408896e-03 6.044691e-04 9.999969e-01 2.430873e+02 177 | 9.999670e-01 7.564991e-03 2.950398e-03 -3.796393e-01 -7.559141e-03 9.999694e-01 -1.989576e-03 -4.377646e+00 -2.965359e-03 1.967208e-03 9.999937e-01 2.446020e+02 178 | 9.999578e-01 8.594224e-03 3.260437e-03 -3.770780e-01 -8.589250e-03 9.999619e-01 -1.537157e-03 -4.404696e+00 -3.273523e-03 1.509088e-03 9.999935e-01 2.461231e+02 179 | 9.999617e-01 8.101687e-03 3.326840e-03 -3.721572e-01 -8.099747e-03 9.999670e-01 -5.968841e-04 -4.432022e+00 -3.331566e-03 5.699152e-04 9.999943e-01 2.476477e+02 180 | 9.999620e-01 8.179632e-03 3.022564e-03 -3.683885e-01 -8.180107e-03 9.999665e-01 1.440039e-04 -4.458219e+00 -3.021285e-03 -1.687226e-04 9.999954e-01 2.491682e+02 181 | 9.999599e-01 8.505824e-03 2.798118e-03 -3.658944e-01 -8.507187e-03 9.999637e-01 4.745749e-04 -4.487063e+00 -2.793979e-03 -4.983592e-04 9.999960e-01 2.506964e+02 182 | 9.999463e-01 9.971468e-03 2.839532e-03 -3.629857e-01 -9.973122e-03 9.999501e-01 5.680798e-04 -4.511301e+00 -2.833726e-03 -5.963674e-04 9.999958e-01 2.522241e+02 183 | 9.999354e-01 1.095121e-02 3.044268e-03 -3.598899e-01 -1.095307e-02 9.999398e-01 5.963360e-04 -4.535745e+00 -3.037554e-03 -6.296408e-04 9.999952e-01 2.537570e+02 184 | 9.999290e-01 1.155157e-02 2.931934e-03 -3.565283e-01 -1.155344e-02 9.999330e-01 6.206596e-04 -4.558441e+00 -2.924568e-03 -6.544886e-04 9.999955e-01 2.552885e+02 185 | 9.999459e-01 9.972903e-03 2.944504e-03 -3.518994e-01 
-9.975714e-03 9.999498e-01 9.406382e-04 -4.584940e+00 -2.934975e-03 -9.699600e-04 9.999952e-01 2.568230e+02 186 | 9.999600e-01 8.473834e-03 2.849624e-03 -3.474488e-01 -8.479116e-03 9.999623e-01 1.846164e-03 -4.611578e+00 -2.833873e-03 -1.870251e-03 9.999942e-01 2.583612e+02 187 | 9.999798e-01 5.649029e-03 2.911618e-03 -3.407995e-01 -5.657374e-03 9.999799e-01 2.865668e-03 -4.637604e+00 -2.895371e-03 -2.882081e-03 9.999917e-01 2.599003e+02 188 | 9.999882e-01 4.111334e-03 2.589962e-03 -3.346776e-01 -4.116750e-03 9.999893e-01 2.088632e-03 -4.662237e+00 -2.581348e-03 -2.099269e-03 9.999945e-01 2.614352e+02 189 | 9.999932e-01 2.887471e-03 2.307492e-03 -3.291338e-01 -2.889040e-03 9.999956e-01 6.765815e-04 -4.686551e+00 -2.305528e-03 -6.832424e-04 9.999971e-01 2.629731e+02 190 | 9.999954e-01 9.355651e-04 2.883662e-03 -3.253738e-01 -9.427296e-04 9.999964e-01 2.484078e-03 -4.715745e+00 -2.881328e-03 -2.486784e-03 9.999928e-01 2.645113e+02 191 | 9.999945e-01 -1.234139e-04 3.329163e-03 -3.249046e-01 1.067289e-04 9.999874e-01 5.011498e-03 -4.748173e+00 -3.329740e-03 -5.011114e-03 9.999819e-01 2.660572e+02 192 | 9.999919e-01 -2.066105e-03 3.459414e-03 -3.208179e-01 2.052234e-03 9.999898e-01 4.008459e-03 -4.788372e+00 -3.467660e-03 -4.001325e-03 9.999860e-01 2.675981e+02 193 | 9.999887e-01 -2.908690e-03 3.750876e-03 -3.173943e-01 2.899428e-03 9.999927e-01 2.472455e-03 -4.824679e+00 -3.758040e-03 -2.461551e-03 9.999899e-01 2.691377e+02 194 | 9.999885e-01 -1.938031e-03 4.395994e-03 -3.166024e-01 1.927536e-03 9.999953e-01 2.390467e-03 -4.866052e+00 -4.400606e-03 -2.381965e-03 9.999875e-01 2.706771e+02 195 | 9.999849e-01 -4.661324e-04 5.479803e-03 -3.155943e-01 4.509532e-04 9.999960e-01 2.770953e-03 -4.906588e+00 -5.481073e-03 -2.768439e-03 9.999812e-01 2.722185e+02 196 | 9.999785e-01 1.232933e-03 6.440203e-03 -3.140401e-01 -1.248098e-03 9.999964e-01 2.351180e-03 -4.950219e+00 -6.437281e-03 -2.359167e-03 9.999765e-01 2.737649e+02 197 | 9.999697e-01 2.424589e-03 7.402642e-03 -3.112309e-01 -2.441393e-03 9.999944e-01 2.261700e-03 -4.987626e+00 -7.397117e-03 -2.279703e-03 9.999701e-01 2.753072e+02 198 | 9.999614e-01 3.182729e-03 8.193149e-03 -3.114206e-01 -3.198390e-03 9.999931e-01 1.899045e-03 -5.039836e+00 -8.187048e-03 -1.925175e-03 9.999646e-01 2.768535e+02 199 | 9.999530e-01 3.540242e-03 9.029623e-03 -3.062026e-01 -3.552063e-03 9.999928e-01 1.293327e-03 -5.082755e+00 -9.024980e-03 -1.325339e-03 9.999584e-01 2.783952e+02 200 | 9.999461e-01 3.002326e-03 9.935417e-03 -3.011292e-01 -3.010760e-03 9.999951e-01 8.339534e-04 -5.134136e+00 -9.932865e-03 -8.638208e-04 9.999503e-01 2.799451e+02 201 | 9.999391e-01 2.412082e-03 1.077454e-02 -2.897536e-01 -2.422969e-03 9.999965e-01 9.974198e-04 -5.172038e+00 -1.077210e-02 -1.023464e-03 9.999415e-01 2.814958e+02 202 | 9.999325e-01 1.490615e-03 1.152157e-02 -2.764899e-01 -1.517343e-03 9.999962e-01 2.311413e-03 -5.210037e+00 -1.151808e-02 -2.328738e-03 9.999310e-01 2.830498e+02 203 | 9.999296e-01 6.994984e-05 1.186441e-02 -2.599233e-01 -1.024084e-04 9.999962e-01 2.735204e-03 -5.241690e+00 -1.186417e-02 -2.736226e-03 9.999259e-01 2.846005e+02 204 | 9.999287e-01 -5.783054e-04 1.192680e-02 -2.447296e-01 5.738058e-04 9.999997e-01 3.807036e-04 -5.277598e+00 -1.192702e-02 -3.738321e-04 9.999288e-01 2.861454e+02 205 | 9.999319e-01 -1.492295e-03 1.157237e-02 -2.301583e-01 1.517665e-03 9.999964e-01 -2.183831e-03 -5.316244e+00 -1.156907e-02 2.201245e-03 9.999307e-01 2.876999e+02 206 | 9.999418e-01 -1.171813e-03 1.072948e-02 -2.170805e-01 1.198557e-03 9.999962e-01 -2.486478e-03 
-5.355148e+00 -1.072652e-02 2.499193e-03 9.999394e-01 2.892536e+02 207 | 9.999459e-01 -2.783388e-03 1.002137e-02 -2.045251e-01 2.801633e-03 9.999944e-01 -1.806982e-03 -5.390475e+00 -1.001629e-02 1.834961e-03 9.999482e-01 2.908103e+02 208 | 9.999558e-01 -2.681787e-03 9.007871e-03 -1.926927e-01 2.703689e-03 9.999934e-01 -2.419995e-03 -5.424878e+00 -9.001322e-03 2.444243e-03 9.999565e-01 2.923692e+02 209 | 9.999614e-01 -2.412034e-03 8.447233e-03 -1.828271e-01 2.434845e-03 9.999934e-01 -2.691088e-03 -5.462578e+00 -8.440686e-03 2.711552e-03 9.999607e-01 2.939316e+02 210 | 9.999673e-01 -2.373487e-03 7.737565e-03 -1.753107e-01 2.390449e-03 9.999947e-01 -2.183543e-03 -5.502144e+00 -7.732342e-03 2.201968e-03 9.999677e-01 2.955007e+02 211 | 9.999723e-01 -2.819612e-03 6.887209e-03 -1.681708e-01 2.832591e-03 9.999942e-01 -1.875372e-03 -5.542654e+00 -6.881881e-03 1.894829e-03 9.999745e-01 2.970723e+02 212 | 9.999769e-01 -3.466837e-03 5.851379e-03 -1.571664e-01 3.473314e-03 9.999933e-01 -1.096977e-03 -5.573209e+00 -5.847537e-03 1.117276e-03 9.999823e-01 2.986502e+02 213 | 9.999842e-01 -1.884748e-03 5.291489e-03 -1.498301e-01 1.882308e-03 9.999981e-01 4.661426e-04 -5.605662e+00 -5.292358e-03 -4.561742e-04 9.999859e-01 3.002367e+02 214 | 9.999815e-01 -1.358135e-03 5.923153e-03 -1.436567e-01 1.352001e-03 9.999985e-01 1.039545e-03 -5.638056e+00 -5.924556e-03 -1.031517e-03 9.999819e-01 3.018211e+02 215 | 9.999759e-01 -7.817355e-04 6.894868e-03 -1.362308e-01 7.782359e-04 9.999995e-01 5.102687e-04 -5.674045e+00 -6.895264e-03 -5.048898e-04 9.999761e-01 3.034130e+02 216 | 9.999714e-01 -2.066171e-03 7.276388e-03 -1.245796e-01 2.065775e-03 9.999978e-01 6.192163e-05 -5.704927e+00 -7.276500e-03 -4.688778e-05 9.999735e-01 3.050095e+02 217 | 9.999651e-01 -3.784307e-03 7.453066e-03 -1.113075e-01 3.790433e-03 9.999925e-01 -8.078684e-04 -5.740563e+00 -7.449952e-03 8.360910e-04 9.999719e-01 3.066086e+02 218 | 9.999552e-01 -5.428677e-03 7.758261e-03 -1.009905e-01 5.450080e-03 9.999814e-01 -2.740138e-03 -5.775938e+00 -7.743241e-03 2.782299e-03 9.999662e-01 3.082116e+02 219 | 9.999537e-01 -5.687325e-03 7.763489e-03 -9.649990e-02 5.716383e-03 9.999767e-01 -3.725647e-03 -5.821106e+00 -7.742119e-03 3.769853e-03 9.999629e-01 3.098189e+02 220 | 9.999708e-01 -5.147094e-03 5.644397e-03 -9.324941e-02 5.173322e-03 9.999758e-01 -4.641742e-03 -5.863046e+00 -5.620369e-03 4.670807e-03 9.999733e-01 3.114331e+02 221 | 9.999850e-01 -4.221316e-03 3.496670e-03 -9.658434e-02 4.236103e-03 9.999820e-01 -4.231981e-03 -5.907766e+00 -3.478743e-03 4.246730e-03 9.999849e-01 3.130445e+02 222 | 9.999922e-01 -2.484768e-03 3.084130e-03 -1.065230e-01 2.488109e-03 9.999963e-01 -1.079636e-03 -5.952029e+00 -3.081436e-03 1.087301e-03 9.999947e-01 3.146682e+02 223 | 9.999867e-01 -3.716961e-03 3.583983e-03 -1.079811e-01 3.709068e-03 9.999907e-01 2.206630e-03 -5.990688e+00 -3.592151e-03 -2.193306e-03 9.999912e-01 3.162953e+02 224 | 9.999815e-01 -4.767094e-03 3.790247e-03 -1.086696e-01 4.755540e-03 9.999840e-01 3.051853e-03 -6.032426e+00 -3.804734e-03 -3.033771e-03 9.999882e-01 3.179241e+02 225 | 9.999725e-01 -6.194818e-03 4.072383e-03 -1.058419e-01 6.187770e-03 9.999793e-01 1.741229e-03 -6.068362e+00 -4.083086e-03 -1.715981e-03 9.999902e-01 3.195525e+02 226 | 9.999660e-01 -6.964861e-03 4.412163e-03 -1.039698e-01 6.957070e-03 9.999742e-01 1.779038e-03 -6.106845e+00 -4.424440e-03 -1.748280e-03 9.999887e-01 3.211823e+02 227 | 9.999660e-01 -6.841180e-03 4.609291e-03 -1.009179e-01 6.831105e-03 9.999742e-01 2.198320e-03 -6.143404e+00 -4.624211e-03 -2.166758e-03 
9.999870e-01 3.228139e+02 228 | 9.999627e-01 -6.976154e-03 5.098733e-03 -9.883113e-02 6.966045e-03 9.999737e-01 1.998024e-03 -6.185282e+00 -5.112538e-03 -1.962430e-03 9.999850e-01 3.244397e+02 229 | 9.999691e-01 -5.531858e-03 5.582557e-03 -9.648438e-02 5.524152e-03 9.999837e-01 1.395066e-03 -6.224088e+00 -5.590184e-03 -1.364183e-03 9.999835e-01 3.260682e+02 230 | 9.999692e-01 -4.739913e-03 6.254823e-03 -9.411052e-02 4.736022e-03 9.999886e-01 6.369864e-04 -6.268819e+00 -6.257771e-03 -6.073430e-04 9.999802e-01 3.276976e+02 231 | 9.999576e-01 -5.867213e-03 7.096038e-03 -8.496956e-02 5.850059e-03 9.999799e-01 2.435870e-03 -6.305428e+00 -7.110188e-03 -2.394254e-03 9.999719e-01 3.293242e+02 232 | 9.999510e-01 -6.766778e-03 7.224057e-03 -7.442718e-02 6.741273e-03 9.999710e-01 3.549352e-03 -6.342113e+00 -7.247865e-03 -3.500477e-03 9.999676e-01 3.309508e+02 233 | 9.999441e-01 -7.988538e-03 6.922601e-03 -6.421231e-02 7.974252e-03 9.999660e-01 2.089082e-03 -6.372244e+00 -6.939054e-03 -2.033762e-03 9.999739e-01 3.325694e+02 234 | 9.999502e-01 -7.183819e-03 6.934729e-03 -5.844895e-02 7.168776e-03 9.999719e-01 2.191934e-03 -6.413497e+00 -6.950281e-03 -2.142110e-03 9.999736e-01 3.341893e+02 235 | 9.999652e-01 -4.787434e-03 6.833316e-03 -5.434961e-02 4.781851e-03 9.999882e-01 8.332573e-04 -6.450780e+00 -6.837225e-03 -8.005516e-04 9.999763e-01 3.358024e+02 236 | 9.999750e-01 -3.571959e-03 6.109299e-03 -4.949748e-02 3.593288e-03 9.999875e-01 -3.483711e-03 -6.492551e+00 -6.096779e-03 3.505576e-03 9.999753e-01 3.374097e+02 237 | 9.999843e-01 -1.994183e-03 5.234120e-03 -4.548042e-02 2.006544e-03 9.999952e-01 -2.357182e-03 -6.532188e+00 -5.229394e-03 2.367648e-03 9.999835e-01 3.390211e+02 238 | 9.999894e-01 -1.462301e-03 4.362284e-03 -4.257007e-02 1.457657e-03 9.999983e-01 1.067779e-03 -6.571397e+00 -4.363838e-03 -1.061409e-03 9.999899e-01 3.406287e+02 239 | 9.999945e-01 -2.826640e-04 3.299426e-03 -3.930296e-02 2.684306e-04 9.999906e-01 4.313590e-03 -6.601811e+00 -3.300614e-03 -4.312679e-03 9.999853e-01 3.422462e+02 240 | 9.999978e-01 2.062417e-04 2.091700e-03 -3.681003e-02 -2.126335e-04 9.999953e-01 3.056043e-03 -6.628918e+00 -2.091059e-03 -3.056480e-03 9.999932e-01 3.438528e+02 241 | 9.999999e-01 -4.441106e-04 2.716568e-04 -3.514659e-02 4.438341e-04 9.999994e-01 1.017490e-03 -6.652782e+00 -2.721082e-04 -1.017369e-03 9.999995e-01 3.454525e+02 242 | 9.999977e-01 -1.439658e-03 -1.625805e-03 -3.454790e-02 1.443853e-03 9.999956e-01 2.581640e-03 -6.677644e+00 1.622082e-03 -2.583980e-03 9.999954e-01 3.470554e+02 243 | 9.999919e-01 -3.062606e-03 -2.625489e-03 -3.707175e-02 3.071647e-03 9.999893e-01 3.446059e-03 -6.708152e+00 2.614908e-03 -3.454094e-03 9.999906e-01 3.486551e+02 244 | 9.999897e-01 -3.341987e-03 -3.057511e-03 -4.156332e-02 3.349607e-03 9.999913e-01 2.490201e-03 -6.737239e+00 3.049163e-03 -2.500416e-03 9.999922e-01 3.502532e+02 245 | 9.999866e-01 -4.006270e-03 -3.273666e-03 -5.261018e-02 4.011489e-03 9.999907e-01 1.588782e-03 -6.773059e+00 3.267271e-03 -1.601892e-03 9.999934e-01 3.518478e+02 246 | 9.999863e-01 -4.071348e-03 -3.294287e-03 -6.277075e-02 4.076787e-03 9.999903e-01 1.645910e-03 -6.805862e+00 3.287555e-03 -1.659316e-03 9.999932e-01 3.534464e+02 247 | 9.999843e-01 -4.565924e-03 -3.263315e-03 -7.506606e-02 4.570743e-03 9.999884e-01 1.470618e-03 -6.843564e+00 3.256563e-03 -1.485510e-03 9.999936e-01 3.550379e+02 248 | 9.999810e-01 -5.215829e-03 -3.293599e-03 -8.570356e-02 5.218715e-03 9.999860e-01 8.679440e-04 -6.882552e+00 3.289027e-03 -8.851150e-04 9.999942e-01 3.566308e+02 249 | 9.999796e-01 
-5.401737e-03 -3.418004e-03 -9.729989e-02 5.402475e-03 9.999854e-01 2.062371e-04 -6.921763e+00 3.416841e-03 -2.246978e-04 9.999941e-01 3.582218e+02 250 | 9.999770e-01 -5.897304e-03 -3.345777e-03 -1.091014e-01 5.899221e-03 9.999824e-01 5.628509e-04 -6.958715e+00 3.342400e-03 -5.825747e-04 9.999943e-01 3.598125e+02 251 | 9.999797e-01 -5.468744e-03 -3.269773e-03 -1.223014e-01 5.467336e-03 9.999849e-01 -4.395206e-04 -6.995935e+00 3.272128e-03 4.216354e-04 9.999946e-01 3.614037e+02 252 | 9.999705e-01 -6.990990e-03 -3.199392e-03 -1.322206e-01 6.985695e-03 9.999742e-01 -1.663812e-03 -7.034017e+00 3.210942e-03 1.641413e-03 9.999935e-01 3.629890e+02 253 | 9.999604e-01 -8.343867e-03 -3.084171e-03 -1.419952e-01 8.343344e-03 9.999652e-01 -1.830997e-04 -7.072717e+00 3.085592e-03 1.573608e-04 9.999952e-01 3.645867e+02 254 | 9.999448e-01 -1.000719e-02 -3.215683e-03 -1.552999e-01 1.001035e-02 9.999494e-01 9.681153e-04 -7.112398e+00 3.205833e-03 -1.000251e-03 9.999944e-01 3.661838e+02 255 | 9.999393e-01 -1.055761e-02 -3.163105e-03 -1.668615e-01 1.055852e-02 9.999442e-01 2.692067e-04 -7.146884e+00 3.160086e-03 -3.025873e-04 9.999950e-01 3.677811e+02 256 | 9.999515e-01 -9.444498e-03 -2.786077e-03 -1.858911e-01 9.442854e-03 9.999552e-01 -6.033254e-04 -7.188290e+00 2.791651e-03 5.769883e-04 9.999959e-01 3.693866e+02 257 | 9.999697e-01 -7.434945e-03 -2.307470e-03 -2.029185e-01 7.436476e-03 9.999721e-01 6.546484e-04 -7.225081e+00 2.302539e-03 -6.717872e-04 9.999971e-01 3.709753e+02 258 | 9.999798e-01 -6.100116e-03 -1.805530e-03 -2.229981e-01 6.101847e-03 9.999809e-01 9.540591e-04 -7.268840e+00 1.799676e-03 -9.650559e-04 9.999979e-01 3.726154e+02 259 | 9.999827e-01 -5.698435e-03 -1.435322e-03 -2.378134e-01 5.699452e-03 9.999835e-01 7.046586e-04 -7.306808e+00 1.431283e-03 -7.128262e-04 9.999987e-01 3.742253e+02 260 | 9.999820e-01 -5.879431e-03 -1.199967e-03 -2.519893e-01 5.879326e-03 9.999827e-01 -9.211550e-05 -7.344936e+00 1.200488e-03 8.505955e-05 9.999993e-01 3.757811e+02 261 | 9.999832e-01 -5.717950e-03 -9.178671e-04 -2.625878e-01 5.717644e-03 9.999836e-01 -3.368161e-04 -7.379336e+00 9.197784e-04 3.315630e-04 9.999995e-01 3.773873e+02 262 | 9.999855e-01 -5.335829e-03 -6.929259e-04 -2.704012e-01 5.335561e-03 9.999857e-01 -3.900621e-04 -7.414418e+00 6.949976e-04 3.863599e-04 9.999997e-01 3.789929e+02 263 | 9.999912e-01 -4.183305e-03 -3.813569e-04 -2.783585e-01 4.183203e-03 9.999912e-01 -2.705875e-04 -7.448411e+00 3.824859e-04 2.689904e-04 9.999999e-01 3.806071e+02 264 | 9.999958e-01 -2.891363e-03 -5.685947e-05 -2.877925e-01 2.891389e-03 9.999957e-01 4.518743e-04 -7.485057e+00 5.555304e-05 -4.520360e-04 9.999999e-01 3.822202e+02 265 | 9.999985e-01 -1.738505e-03 2.114522e-04 -2.960866e-01 1.738539e-03 9.999985e-01 -1.625976e-04 -7.522079e+00 -2.111689e-04 1.629656e-04 1.000000e+00 3.838408e+02 266 | 9.999995e-01 -8.743833e-04 4.812968e-04 -3.039652e-01 8.747432e-04 9.999993e-01 -7.476725e-04 -7.558709e+00 -4.806424e-04 7.480937e-04 9.999996e-01 3.854512e+02 267 | 9.999997e-01 -1.279523e-04 8.073792e-04 -3.110174e-01 1.284791e-04 9.999998e-01 -6.525093e-04 -7.596748e+00 -8.072952e-04 6.526134e-04 9.999995e-01 3.870678e+02 268 | 9.999988e-01 1.116543e-03 1.060919e-03 -3.164538e-01 -1.116826e-03 9.999993e-01 2.659665e-04 -7.631598e+00 -1.060621e-03 -2.671503e-04 9.999994e-01 3.886857e+02 269 | 9.999964e-01 2.241238e-03 1.452395e-03 -3.206582e-01 -2.242667e-03 9.999970e-01 9.825513e-04 -7.664429e+00 -1.450189e-03 -9.858042e-04 9.999985e-01 3.903059e+02 270 | 9.999941e-01 2.903367e-03 1.839558e-03 -3.238003e-01 
-2.903950e-03 9.999957e-01 3.144580e-04 -7.696195e+00 -1.838637e-03 -3.197974e-04 9.999983e-01 3.919359e+02 271 | 9.999935e-01 2.925452e-03 2.091742e-03 -3.237896e-01 -2.926418e-03 9.999956e-01 4.584597e-04 -7.731691e+00 -2.090391e-03 -4.645773e-04 9.999977e-01 3.935579e+02 272 | -------------------------------------------------------------------------------- /dataset/kitti.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | from PIL import Image, ImageFile 3 | import numpy as np 4 | from glob import glob 5 | import re 6 | from torch.utils.data import Dataset, DataLoader 7 | ImageFile.LOAD_TRUNCATED_IMAGES = True 8 | 9 | 10 | class KITTIDataSet(Dataset): 11 | """KITTI VO data set""" 12 | 13 | def __init__(self, dir_data, dir_label, samples=None, phase=None, seq=None): 14 | self.dir_data = dir_data 15 | self.dir_label = dir_label 16 | self.samples = samples 17 | self.phase = phase 18 | self.seq = seq 19 | if self.phase == 'Test': 20 | self.l1, self.l2, self.label = self.load_data_test() 21 | else: 22 | self.l1, self.l2, self.label = self.load_data() 23 | assert (len(self.l1) == len(self.l2)) and (len(self.l1) == len(self.label)), 'Length must be equal!' 24 | 25 | def load_data(self): 26 | """ 27 | :return: 28 | l1: image1 path list 29 | l2: image2 path list 30 | label: [image_pairs, 6], relative pose list 31 | """ 32 | samples_list = list(filter(None, re.split('[, ]', self.samples))) # 提取字符串,并去掉空格 33 | l1 = [] 34 | l2 = [] 35 | label = [] 36 | if self.phase == 'Train': 37 | for i in [0, 1, 2, 8, 9]: 38 | img_list_left = glob(self.dir_data + '/{:02d}/image_2/*.png'.format(i)) 39 | img_list_left.sort() 40 | img_list_right = glob(self.dir_data + '/{:02d}/image_3/*.png'.format(i)) 41 | img_list_right.sort() 42 | for j in range(len(samples_list)): 43 | if samples_list[j] == 'i0': # 左目 44 | l1.extend(img_list_left[:-1]) 45 | l2.extend(img_list_left[1:]) 46 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(i))) 47 | elif samples_list[j] == 'i0r': # 左目反向 48 | l1.extend(img_list_left[1:]) 49 | l2.extend(img_list_left[:-1]) 50 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-reverse-interval0/{:d}.txt'. 51 | format(i))) 52 | elif samples_list[j] == 'ri0': # 右目 53 | l1.extend(img_list_right[:-1]) 54 | l2.extend(img_list_right[1:]) 55 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(i))) 56 | elif samples_list[j] == 'i1': # 左目隔一帧 57 | l1.extend(img_list_left[:-2]) 58 | l2.extend(img_list_left[2:]) 59 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-interval1/{:d}.txt'.format(i))) 60 | elif samples_list[j] == 'i1r': # 左目隔一帧反向 61 | l1.extend(img_list_left[2:]) 62 | l2.extend(img_list_left[:-2]) 63 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-reverse-interval1/{:d}.txt'. 
64 | format(i))) 65 | elif samples_list[j] == 'ri1': # 右目隔一帧 66 | l1.extend(img_list_right[:-2]) 67 | l2.extend(img_list_right[2:]) 68 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-interval1/{:d}.txt'.format(i))) 69 | elif samples_list[j] == 'i2': # 左目隔两帧 70 | l1.extend(img_list_left[:-3]) 71 | l2.extend(img_list_left[3:]) 72 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-interval2/{:d}.txt'.format(i))) 73 | elif samples_list[j] == 'i2r': # 左目隔两帧反向 74 | l1.extend(img_list_left[3:]) 75 | l2.extend(img_list_left[:-3]) 76 | label.extend(np.loadtxt(self.dir_label + '/xyz-euler-relative-reverse-interval2/{:d}.txt'. 77 | format(i))) 78 | 79 | else: 80 | seq_val = 5 81 | img_list = glob(self.dir_data + '/{:02d}/image_2/*.png'.format(seq_val)) 82 | img_list.sort() 83 | max_index = int(len(img_list) // 64) * 32 84 | l1 = img_list[: max_index] 85 | l2 = img_list[1: max_index+1] 86 | label1 = np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(seq_val)) 87 | label = label1[: max_index] 88 | 89 | return l1, l2, label 90 | 91 | def load_data_test(self): 92 | img_list = glob(self.dir_data + '/{:02d}/image_3/*.png'.format(self.seq)) 93 | img_list.sort() 94 | l1 = img_list[:-1] 95 | l2 = img_list[1:] 96 | label = np.zeros([len(l1), 6]) 97 | # label = np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(self.seq)) 98 | 99 | return l1, l2, label 100 | 101 | def __len__(self): 102 | return len(self.l1) 103 | 104 | def __getitem__(self, idx): 105 | """ get one sample 106 | :param idx: the index of one sample, choose from range(len(self.l1)) 107 | :return: sample: {'img': size[6, H, W], 'label': size[6]} 108 | """ 109 | sample = dict() 110 | img1 = np.array(Image.open(self.l1[idx]).resize((1280, 384))) # - [88.61, 93.70, 92.11] 111 | img1 = img1.astype(np.float32) 112 | img2 = np.array(Image.open(self.l2[idx]).resize((1280, 384))) # - [88.61, 93.70, 92.11] 113 | img2 = img2.astype(np.float32) 114 | # sample['img'] = np.transpose(np.concatenate((img1, img2), 2), [2, 0, 1]) # 6xHxW 115 | sample['img1'] = np.transpose(img1, [2, 0, 1]) # 6xHxW 116 | sample['img2'] = np.transpose(img2, [2, 0, 1]) # 6xHxW 117 | label1 = self.label[idx] 118 | sample['label'] = label1.astype(np.float32) 119 | 120 | return sample 121 | 122 | 123 | def main(): 124 | 125 | dir_data = '/media/jiange/095df4a3-d72c-43d9-bfbd-e78651afba19/dataset-jiange/data_odometry_color/sequences' 126 | data_set = KITTIDataSet(dir_data=dir_data, 127 | dir_label='.', 128 | phase='Train', 129 | ) 130 | 131 | data_loader = DataLoader(data_set, batch_size=32, shuffle=False, num_workers=4) 132 | n_batch = int(len(data_set.l1)//data_loader.batch_size) 133 | for i_batch, sample_batch in enumerate(data_loader): 134 | print(i_batch, n_batch, sample_batch['img1'].size(), sample_batch['img1'].type(), sample_batch['label'].size()) 135 | 136 | 137 | if __name__ == '__main__': 138 | main() 139 | -------------------------------------------------------------------------------- /dataset/kitti_lstm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-02 13:51:59 7 | Program: 8 | Description: 9 | """ 10 | 11 | from __future__ import print_function, division 12 | import numpy as np 13 | from PIL import Image, ImageFile 14 | from glob import glob 15 | from torch.utils.data import Dataset, DataLoader 16 | 
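# [Editor's aside; not part of the original kitti_lstm.py.] read_image() below
# resizes every frame to 1280x384 and subtracts [88.61, 93.70, 92.11], which is
# presumably a per-channel RGB mean of the KITTI training images (kitti.py keeps
# the same constant commented out). A minimal, hypothetical way to recompute such
# a mean for a different split:
#
#     import numpy as np
#     from glob import glob
#     from PIL import Image
#     paths = glob('/path/to/sequences/00/image_2/*.png')   # illustrative path
#     mean = np.mean([np.asarray(Image.open(p), dtype=np.float64)
#                     .reshape(-1, 3).mean(axis=0) for p in paths], axis=0)
#     print(mean)   # approximate per-channel [R, G, B] means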
ImageFile.LOAD_TRUNCATED_IMAGES = True 17 | 18 | 19 | def read_image(im_path): 20 | im_data = np.array(Image.open(im_path).resize((1280, 384))).astype(float) - [88.61, 93.70, 92.11] 21 | return im_data.astype(np.float32) 22 | 23 | 24 | class KITTIDataSet(Dataset): 25 | """KITTI VO data set""" 26 | 27 | def __init__(self, dir_data, dir_label, img_pairs=2, start_interval=1, phase=None): 28 | self.dir_data = dir_data 29 | self.dir_label = dir_label 30 | self.img_pairs = img_pairs 31 | self.start_interval = start_interval 32 | self.phase = phase 33 | self.balance_idx = 4 34 | if self.img_pairs >= 8: 35 | self.balance_idx = 1 36 | 37 | self.l1, self.si, self.label = self.load_data() 38 | 39 | def load_data(self): 40 | """ 41 | :return: 42 | l1: image path list 43 | si: start index of one sequence 44 | label: [image_number, 10, 6], relative pose 45 | """ 46 | list1 = [] 47 | si = [] 48 | label = [] 49 | if self.phase == 'Train': 50 | count = 0 51 | for i in [0, 1, 2, 8, 9]: 52 | img_list = glob(self.dir_data + '/{:02d}/image_2/*.png'.format(i)) 53 | img_list.sort() 54 | list1.extend(img_list) 55 | label1 = np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(i)) # [4540, 3] 56 | for j in np.arange(len(img_list)): 57 | label_sample = np.zeros((self.img_pairs, 6)) # [10, 6] 58 | if (j < len(img_list)-self.img_pairs-self.balance_idx) and (j % self.start_interval == 0): 59 | si.append(count) 60 | label_sample = label1[j: j+self.img_pairs] 61 | label.append(label_sample) 62 | count += 1 63 | # 64 | # img_list.sort(reverse=True) 65 | # list1.extend(img_list) 66 | # label1 = np.loadtxt(self.dir_label + '/xyz-euler-relative-reverse-interval0/{:d}.txt'.format(i)) 67 | # label1 = list(label1) 68 | # label1.reverse() 69 | # for j in np.arange(len(img_list)): 70 | # label_sample = np.zeros((self.img_pairs, 6)) # [10, 6] 71 | # if (j < len(img_list)-self.img_pairs-1) and (j % self.start_interval == 0): 72 | # si.append(count) 73 | # label_sample = label1[j: j+self.img_pairs] 74 | # label.append(label_sample) 75 | # count += 1 76 | else: 77 | seq_val = 5 78 | img_list = glob(self.dir_data + '/{:02d}/image_2/*.png'.format(seq_val)) 79 | img_list.sort() 80 | list1 = img_list 81 | label1 = np.loadtxt(self.dir_label + '/xyz-euler-relative-interval0/{:d}.txt'.format(seq_val)) 82 | for j in np.arange(len(img_list)): 83 | label_sample = np.zeros((self.img_pairs, 6)) 84 | if (j < len(img_list)-self.img_pairs-1) and (j % self.start_interval == 0): 85 | si.append(j) 86 | label_sample = label1[j: j+self.img_pairs] 87 | label.append(label_sample) 88 | 89 | return list1, si, label 90 | 91 | def __len__(self): 92 | return len(self.si) 93 | 94 | def __getitem__(self, idx): 95 | """ get one sample 96 | :param idx: the index of one sample, choose from range(len(self.si)) 97 | :return: sample: {'img': size[T, 6, H, W], 'label': size[T, 6]} 98 | """ 99 | 100 | idx = self.si[idx] 101 | img_list = [] 102 | for img_path in self.l1[idx: idx + self.img_pairs + 1]: 103 | img = np.array((Image.open(img_path).resize((1280, 384)))) # - [88.61, 93.70, 92.11] 104 | img_list.append(img.astype(np.float32)) 105 | 106 | sample = dict() 107 | sample['img1'] = [] 108 | sample['img2'] = [] 109 | sample['label'] = [] 110 | for img_0, img_1 in zip(img_list[:-1], img_list[1:]): 111 | # sample['img'].append(np.concatenate((img_0, img_1), 2)) 112 | sample['img1'].append(img_0) 113 | sample['img2'].append(img_1) 114 | sample['label'] = np.array(self.label[idx]).astype(np.float32) 115 | 116 | sample['img1'] = 
np.stack(sample['img1'], 0) # list ==> TxHxWxC 117 | sample['img1'] = np.transpose(sample['img1'], [0, 3, 1, 2]) # TxHxWx6 ==> TxCxHxW 118 | 119 | sample['img2'] = np.stack(sample['img2'], 0) 120 | sample['img2'] = np.transpose(sample['img2'], [0, 3, 1, 2]) 121 | 122 | return sample 123 | 124 | 125 | def main(): 126 | from time import time 127 | import math 128 | dir_data = '/media/csc105/Data/dataset-jiange/data_odometry_color/sequences' # 6099 129 | # dir_data = '/media/Data/dataset_jiange/data_odometry_color/sequences' # 6199 130 | data_set = KITTIDataSet(dir_data=dir_data, 131 | dir_label='.', 132 | img_pairs=2, 133 | start_interval=1, 134 | phase='Train') 135 | 136 | data_loader = DataLoader(data_set, batch_size=16, shuffle=True, num_workers=4) 137 | 138 | print('ip {}, si {}, Total samples {}, bs {}, Batch {}'.format(data_set.img_pairs, data_set.start_interval, 139 | data_set.__len__(), data_loader.batch_size, 140 | int(math.ceil( 141 | data_set.__len__() / data_loader.batch_size)))) 142 | 143 | # tic = time() 144 | # for i_batch, sample_batch in enumerate(data_loader): 145 | # spent = (time() - tic) / (i_batch+1) 146 | # print('{:.3f}s'.format(spent), i_batch, sample_batch['img'].size(), sample_batch['img'].type(), 147 | # sample_batch['label'].size()) 148 | 149 | 150 | if __name__ == '__main__': 151 | main() 152 | -------------------------------------------------------------------------------- /dataset/main_generate_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-11-02 10:55:13 7 | Program: 8 | Description: 9 | 10 | """ 11 | import matplotlib 12 | matplotlib.use('TkAgg') 13 | import matplotlib.pyplot as plt 14 | from tqdm import tqdm 15 | import numpy as np 16 | from numpy import mat 17 | import math 18 | import os 19 | 20 | 21 | def generate_xyz_euler_absolute(): 22 | for i in range(11): 23 | t_12d = np.loadtxt('T-12d/{:d}.txt'.format(i)) 24 | ret = np.zeros((len(t_12d), 6)) 25 | for j in range(len(t_12d)): 26 | t1 = t_12d[j, :] 27 | ret[j, 0: 3] = [t1[3], t1[7], t1[11]] 28 | 29 | # rotation matrix to ypr 30 | pitch = math.atan2(-t1[8], np.sqrt(t1[0]*t1[0]+t1[4]*t1[4])) / 3.14 * 180 31 | if pitch == -90: 32 | yaw = math.atan2(-t1[9], -t1[2]) / 3.14 * 180 33 | roll = 0 34 | elif pitch == 90: 35 | yaw = math.atan2(t1[6], t1[2]) / 3.14 * 180 36 | roll = 0 37 | else: 38 | yaw = math.atan2(t1[4], t1[0]) / 3.14 * 180 39 | roll = math.atan2(t1[9], t1[10]) / 3.14 * 180 40 | ret[j, 3] = yaw 41 | ret[j, 4] = pitch 42 | ret[j, 5] = roll 43 | 44 | np.savetxt('xyz-euler-absolute/{:d}.txt'.format(i), ret) 45 | 46 | 47 | def generate_xyz_euler_relative(it): 48 | dir_data = 'xyz-euler-relative-interval{:d}/'.format(it) 49 | if not os.path.exists(dir_data): 50 | os.mkdir(dir_data) 51 | 52 | for i in tqdm(range(11)): 53 | t_12d = np.loadtxt('ground-truth/{:02d}.txt'.format(i)) 54 | ret = np.zeros((len(t_12d)-it-1, 6)) 55 | for j in range(len(t_12d)-it-1): 56 | t1 = mat(np.row_stack((np.reshape(t_12d[j, :], (3, 4)), [0, 0, 0, 1]))) 57 | t2 = mat(np.row_stack((np.reshape(t_12d[j+it+1, :], (3, 4)), [0, 0, 0, 1]))) 58 | t12 = t1.I * t2 59 | ret[j, 0: 3] = [t12[0, 3], t12[1, 3], t12[2, 3]] 60 | 61 | # rotation matrix to ypr 62 | theta_y = math.atan2(-t12[2, 0], np.sqrt(t12[0, 0]*t12[0, 0]+t12[1, 0]*t12[1, 0])) / 3.14 * 180 63 | if theta_y == -90: 64 | theta_x = 0 65 | theta_z = math.atan2(-t12[2, 0], -t12[0, 2]) / 3.14 * 
180 66 | 67 | elif theta_y == 90: 68 | theta_x = 0 69 | theta_z = math.atan2(t12[1, 2], t12[0, 2]) / 3.14 * 180 70 | else: 71 | theta_x = math.atan2(t12[2, 1], t12[2, 2]) / 3.14 * 180 72 | theta_z = math.atan2(t12[1, 0], t12[0, 0]) / 3.14 * 180 73 | ret[j, 3] = theta_x 74 | ret[j, 4] = theta_y 75 | ret[j, 5] = theta_z 76 | 77 | np.savetxt(dir_data + '{:d}.txt'.format(i), ret) 78 | 79 | 80 | def validate_xyz_euler_relative(seq): 81 | pose_gt = np.loadtxt('ground-truth/{:02d}.txt'.format(seq)) 82 | xyz_euler = np.loadtxt('xyz-euler-relative-interval0/{:d}.txt'.format(seq)) 83 | t1 = mat(np.eye(4)) 84 | pose_absolute = [] # 12-d 85 | pose_absolute.extend([np.array(t1[0: 3, :]).reshape([-1])]) 86 | for i in tqdm(range(len(xyz_euler))): 87 | x12 = xyz_euler[i, 0] 88 | y12 = xyz_euler[i, 1] 89 | z12 = xyz_euler[i, 2] 90 | theta1 = xyz_euler[i, 3] / 180 * np.pi 91 | theta2 = xyz_euler[i, 4] / 180 * np.pi 92 | theta3 = xyz_euler[i, 5] / 180 * np.pi 93 | tx = mat([[1, 0, 0], [0, math.cos(theta1), -math.sin(theta1)], [0, math.sin(theta1), math.cos(theta1)]]) 94 | ty = mat([[math.cos(theta2), 0, math.sin(theta2)], [0, 1, 0], [-math.sin(theta2), 0, math.cos(theta2)]]) 95 | tz = mat([[math.cos(theta3), -math.sin(theta3), 0], [math.sin(theta3), math.cos(theta3), 0], [0, 0, 1]]) 96 | tr = tz * ty * tx 97 | t12 = np.row_stack((np.column_stack((tr, [[x12], [y12], [z12]])), [0, 0, 0, 1])) 98 | t2 = t1 * t12 99 | pose_absolute.extend([np.array(t2[0: 3, :]).reshape([-1])]) 100 | t1 = t2 101 | 102 | pose_absolute = np.array(pose_absolute) 103 | plt.plot(pose_gt[:, 3], pose_gt[:, 11], '--', c='k', lw=1.5, label='Ground truth') 104 | plt.plot(pose_absolute[:, 3], pose_absolute[:, 11], '-', c='r', lw=1.5, label='Test') 105 | plt.title('Sequence {:02d}'.format(seq)) 106 | plt.axis('equal') 107 | plt.grid(True) 108 | plt.legend() 109 | plt.show() 110 | 111 | 112 | def generate_xyz_euler_relative_reverse(it): 113 | dir_data = 'xyz-euler-relative-reverse-interval{:d}/'.format(it) 114 | if not os.path.exists(dir_data): 115 | os.mkdir(dir_data) 116 | for i in tqdm(range(11)): 117 | gt = np.loadtxt('ground-truth/{:02d}.txt'.format(i)) 118 | ret = np.zeros((len(gt)-it-1, 6)) 119 | for j in range(len(gt)-it-1): 120 | t1 = mat(np.row_stack((np.reshape(gt[j, :], (3, 4)), [0, 0, 0, 1]))) 121 | t2 = mat(np.row_stack((np.reshape(gt[j+it+1, :], (3, 4)), [0, 0, 0, 1]))) 122 | t12 = t2.I * t1 123 | ret[j, 0: 3] = [t12[0, 3], t12[1, 3], t12[2, 3]] 124 | 125 | # rotation matrix to ypr 126 | theta_y = math.atan2(-t12[2, 0], np.sqrt(t12[0, 0]*t12[0, 0]+t12[1, 0]*t12[1, 0])) / 3.14 * 180 127 | if theta_y == -90: 128 | theta_x = 0 129 | theta_z = math.atan2(-t12[2, 0], -t12[0, 2]) / 3.14 * 180 130 | 131 | elif theta_y == 90: 132 | theta_x = 0 133 | theta_z = math.atan2(t12[1, 2], t12[0, 2]) / 3.14 * 180 134 | else: 135 | theta_x = math.atan2(t12[2, 1], t12[2, 2]) / 3.14 * 180 136 | theta_z = math.atan2(t12[1, 0], t12[0, 0]) / 3.14 * 180 137 | ret[j, 3] = theta_x 138 | ret[j, 4] = theta_y 139 | ret[j, 5] = theta_z 140 | 141 | np.savetxt(dir_data + '{:d}.txt'.format(i), ret) 142 | 143 | 144 | def generate_xzp_absolute(): 145 | for i in range(11): 146 | t_12d = np.loadtxt('T-12d/{:d}.txt'.format(i)) 147 | xzp_absolute = np.zeros((len(t_12d), 3)) 148 | for j in range(len(t_12d)): 149 | t1 = t_12d[j, :] 150 | xzp_absolute[j, 0: 2] = [t1[3], t1[11]] 151 | 152 | # rotation matrix to ypr 153 | pitch = math.atan2(-t1[8], np.sqrt(t1[0]*t1[0]+t1[4]*t1[4])) / 3.14 * 180 154 | xzp_absolute[j, 2] = pitch 155 | 156 | 
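# --- Editor's sketch (not part of the original file; hedged) ---------------
# The conversions above decompose a rotation matrix into yaw/pitch/roll by
# repeating the same atan2 expressions inline, approximate pi with 3.14
# (~0.05% error on every angle), and test gimbal lock with an exact float
# comparison (pitch == 90 / -90) that essentially never fires for real data.
# A minimal helper with the same angle convention, using math.pi and a
# tolerance instead; the function name and the roll := 0 choice at gimbal
# lock are mine, not the repository's:
import math

def rotmat_to_euler_deg(r):
    """r: 3x3 rotation matrix (indexable as r[i, j]) -> (yaw, pitch, roll) in degrees."""
    pitch = math.degrees(math.atan2(-r[2, 0], math.sqrt(r[0, 0] ** 2 + r[1, 0] ** 2)))
    if abs(abs(pitch) - 90.0) < 1e-6:
        roll = 0.0                                   # gimbal lock: roll is unobservable
        yaw = math.degrees(math.atan2(-r[0, 1], r[1, 1]))
    else:
        yaw = math.degrees(math.atan2(r[1, 0], r[0, 0]))
        roll = math.degrees(math.atan2(r[2, 1], r[2, 2]))
    return yaw, pitch, roll
# ---------------------------------------------------------------------------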
np.savetxt('xzp-absolute/{:d}.txt'.format(i), xzp_absolute) 157 | 158 | 159 | def generate_xzp_relative(): 160 | for i in range(11): 161 | xzp_absolute = np.loadtxt('xzp-absolute/{:d}.txt'.format(i)) 162 | xzp_relative = np.zeros((len(xzp_absolute)-1, 3)) 163 | x1 = xzp_absolute[0, 0] 164 | z1 = xzp_absolute[0, 1] 165 | p1 = xzp_absolute[0, 2] / 180 * 3.14 166 | t1 = mat([[math.cos(p1), -math.sin(p1), x1], [math.sin(p1), math.cos(p1), z1], [0, 0, 1]]) 167 | for j in range(len(xzp_absolute)-1): 168 | x2 = xzp_absolute[j+1, 0] 169 | z2 = xzp_absolute[j+1, 1] 170 | p2 = xzp_absolute[j+1, 2] / 180 * 3.14 171 | t2 = mat([[math.cos(p2), -math.sin(p2), x2], [math.sin(p2), math.cos(p2), z2], [0, 0, 1]]) 172 | t12 = t1.I * t2 173 | xzp_relative[j, 0: 2] = [t12[0, 2], t12[1, 2]] 174 | pitch = math.atan2(t12[1, 0], t12[0, 0]) / 3.14 * 180 175 | xzp_relative[j, 2] = pitch 176 | t1 = t2 177 | np.savetxt('xzp-relative/{:d}.txt'.format(i), xzp_relative) 178 | 179 | 180 | def validate_xzp_relative(seq): 181 | xzp = np.loadtxt('xzp-relative/{:d}.txt'.format(seq)) 182 | t1 = mat([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) 183 | for i in tqdm(range(len(xzp))): 184 | x12 = xzp[i, 0] 185 | z12 = xzp[i, 1] 186 | p12 = xzp[i, 2] / 180 * 3.14 187 | t12 = mat([[math.cos(p12), -math.sin(p12), x12], [math.sin(p12), math.cos(p12), z12], [0, 0, 1]]) 188 | t2 = t1 * t12 189 | plt.plot((t1[0, 2], t2[0, 2]), (t1[1, 2], t2[1, 2]), '-b', label='Validation') 190 | t1 = t2 191 | 192 | plt.axis('equal') 193 | plt.grid('on') 194 | plt.title('Sequence {:d}'.format(seq)) 195 | plt.show() 196 | 197 | 198 | def generate_xz_new_absolute(): 199 | for i in range(11): 200 | t_12d = np.loadtxt('T-12d/{:d}.txt'.format(i)) 201 | xzp_absolute = np.zeros((len(t_12d), 2)) 202 | for j in range(len(t_12d)): 203 | xzp_absolute[j, :] = [t_12d[j, 3], t_12d[j, 11]] 204 | np.savetxt('xz-new-absolute/{:d}.txt'.format(i), xzp_absolute) 205 | 206 | 207 | def validate_xz_new_absolute(seq): 208 | vec = np.loadtxt('xz-new-absolute/{:d}.txt'.format(seq)) 209 | for i in range(1, len(vec), 3): 210 | plt.plot((vec[i-1, 0], vec[i, 0]), (vec[i-1, 1], vec[i, 1]), '-b', label='Ground truth') 211 | 212 | plt.axis('equal') 213 | plt.grid('on') 214 | plt.title('Sequence {:d}'.format(seq)) 215 | plt.show() 216 | 217 | 218 | def generate_xz_new_relative(): 219 | for i in range(11): 220 | t_12d = np.loadtxt('T-12d/{:d}.txt'.format(i)) 221 | xz_new = np.zeros((len(t_12d)-1, 2)) 222 | for j in range(len(t_12d)-1): 223 | xz_new[j, :] = [t_12d[j+1, 3]-t_12d[j, 3], t_12d[j+1, 11]-t_12d[j, 11]] 224 | np.savetxt('xz-new-relative/{:d}'.format(i), xz_new) 225 | 226 | 227 | def validate_xz_new_relative(seq): 228 | xz = np.loadtxt('xz-new-relative/{:d}'.format(seq)) 229 | t1 = [0, 0] 230 | for i in range(200): # range(len(xz)): 231 | t2 = t1 + xz[i] 232 | plt.plot((t1[0], t2[0]), (t1[1], t2[1]), '-b', label='Ground truth') 233 | t1 = t2 234 | plt.axis('equal') 235 | plt.grid('on') 236 | plt.title('Sequence {:d}'.format(seq)) 237 | plt.show() 238 | 239 | 240 | if __name__ == '__main__': 241 | # generate_xyz_euler_relative(0) 242 | # generate_xyz_euler_relative(1) 243 | # generate_xyz_euler_relative(2) 244 | # generate_xyz_euler_relative(3) 245 | # generate_xyz_euler_relative_reverse(0) 246 | # generate_xyz_euler_relative_reverse(1) 247 | # generate_xyz_euler_relative_reverse(2) 248 | validate_xyz_euler_relative(0) 249 | -------------------------------------------------------------------------------- /evaluation.sh: 
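# --- Editor's sketch (not part of the repository; hedged) ------------------
# dataset/main_generate_data.py above converts absolute KITTI poses (each
# ground-truth row is a flattened 3x4 [R|t]) into relative motions
# T12 = T1^-1 * T2 and then re-chains them to validate the conversion. The
# same round trip in plain numpy with 4x4 homogeneous matrices; the helper
# names below are illustrative, not names used by the repository:
import numpy as np

def to_homogeneous(row12):
    """Flattened 3x4 KITTI pose row -> 4x4 homogeneous transform."""
    return np.vstack([np.asarray(row12).reshape(3, 4), [0.0, 0.0, 0.0, 1.0]])

def relative_motions(poses):
    """Nx12 absolute poses -> list of N-1 relative 4x4 motions T12 = T1^-1 * T2."""
    rel = []
    for a, b in zip(poses[:-1], poses[1:]):
        rel.append(np.linalg.inv(to_homogeneous(a)).dot(to_homogeneous(b)))
    return rel

def accumulate(rel_motions):
    """Chain relative motions back into absolute flattened 3x4 poses (inverse op)."""
    t = np.eye(4)
    absolute = [t[:3, :].reshape(-1)]
    for t12 in rel_motions:
        t = t.dot(t12)
        absolute.append(t[:3, :].reshape(-1))
    return np.array(absolute)

# Round-trip check on one sequence (path relative to dataset/, as in the scripts above):
# gt = np.loadtxt('ground-truth/00.txt')
# assert np.allclose(accumulate(relative_motions(gt)), gt, atol=1e-4)
# ---------------------------------------------------------------------------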
-------------------------------------------------------------------------------- 1 | ######################################################################### 2 | # File Name: evaluation.sh 3 | # Author: Linjian Zhang 4 | # Mail: linjian93@foxmail.com 5 | # Created Time: 2018年01月 2日 15:50:19 6 | ######################################################################### 7 | #!/bin/bash 8 | 9 | for i in 70 10 | do 11 | # cnn-vo-cons 12 | # 40 80 90 / 120 140 160 180 13 | # /home/jiange/dl/project/tf-cnn-vo/evaluation/cpp/test /home/jiange/dl/project/pytorch-deepvo/test/cnn-vo-cons/20180104_model-$i 14 | 15 | # 30 40 50 60 / 70 16 | /home/jiange/dl/project/tf-cnn-vo/evaluation/cpp/test /home/jiange/dl/project/pytorch-deepvo/test/cnn-vo-cons/20180106_model-$i 17 | 18 | # cnn-lstm-vo 19 | # 50 60 80 100 120 140 160 180 200 20 | # /home/jiange/dl/project/tf-cnn-vo/evaluation/cpp/test /home/jiange/dl/project/pytorch-deepvo/test/cnn-lstm-vo/20180104_model-$i 21 | done 22 | 23 | # nohup sh evaluation.sh > nohup/evaluation.log 2>&1 & -------------------------------------------------------------------------------- /evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linjianz/pytorch-deepvo/7c623fbd07771c0c0ee597e378635271b5edbaf1/evaluation/__init__.py -------------------------------------------------------------------------------- /evaluation/cpp/.directory: -------------------------------------------------------------------------------- 1 | [Dolphin] 2 | Timestamp=2017,11,18,11,7,22 3 | Version=3 4 | ViewMode=1 5 | -------------------------------------------------------------------------------- /evaluation/cpp/evaluate_odometry.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "matrix.h" 8 | 9 | using namespace std; 10 | 11 | // static parameter 12 | // float lengths[] = {5,10,50,100,150,200,250,300,350,400}; 13 | float lengths[] = {100,200,300,400,500,600,700,800}; 14 | int32_t num_lengths = 8; 15 | 16 | struct errors { 17 | int32_t first_frame; 18 | float r_err; 19 | float t_err; 20 | float len; 21 | float speed; 22 | errors (int32_t first_frame,float r_err,float t_err,float len,float speed) : 23 | first_frame(first_frame),r_err(r_err),t_err(t_err),len(len),speed(speed) {} 24 | }; 25 | 26 | vector loadPoses(string file_name) { 27 | vector poses; 28 | FILE *fp = fopen(file_name.c_str(),"r"); 29 | if (!fp) 30 | return poses; 31 | while (!feof(fp)) { 32 | Matrix P = Matrix::eye(4); 33 | if (fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", 34 | &P.val[0][0], &P.val[0][1], &P.val[0][2], &P.val[0][3], 35 | &P.val[1][0], &P.val[1][1], &P.val[1][2], &P.val[1][3], 36 | &P.val[2][0], &P.val[2][1], &P.val[2][2], &P.val[2][3] )==12) { 37 | poses.push_back(P); 38 | } 39 | } 40 | fclose(fp); 41 | return poses; 42 | } 43 | 44 | vector trajectoryDistances (vector &poses) { 45 | vector dist; 46 | dist.push_back(0); 47 | for (int32_t i=1; i &dist,int32_t first_frame,float len) { 59 | for (int32_t i=first_frame; idist[first_frame]+len) 61 | return i; 62 | return -1; 63 | } 64 | 65 | inline float rotationError(Matrix &pose_error) { 66 | float a = pose_error.val[0][0]; 67 | float b = pose_error.val[1][1]; 68 | float c = pose_error.val[2][2]; 69 | float d = 0.5*(a+b+c-1.0); 70 | return acos(max(min(d,1.0f),-1.0f)); 71 | } 72 | 73 | inline float translationError(Matrix &pose_error) { 74 | float dx = 
pose_error.val[0][3]; 75 | float dy = pose_error.val[1][3]; 76 | float dz = pose_error.val[2][3]; 77 | return sqrt(dx*dx+dy*dy+dz*dz); 78 | } 79 | 80 | vector calcSequenceErrors (vector &poses_gt,vector &poses_result) { 81 | 82 | // error vector 83 | vector err; 84 | 85 | // parameters 86 | int32_t step_size = 10; // every second 87 | 88 | // pre-compute distances (from ground truth as reference) 89 | vector dist = trajectoryDistances(poses_gt); 90 | 91 | // for all start positions do 92 | for (int32_t first_frame=0; first_frame &err,string file_name) { 128 | 129 | // open file 130 | FILE *fp; 131 | fp = fopen(file_name.c_str(),"w"); 132 | 133 | // write to file 134 | for (vector::iterator it=err.begin(); it!=err.end(); it++) 135 | fprintf(fp,"%d %f %f %f %f\n",it->first_frame,it->r_err,it->t_err,it->len,it->speed); 136 | 137 | // close file 138 | fclose(fp); 139 | } 140 | 141 | void savePathPlot (vector &poses_gt,vector &poses_result,string file_name) { 142 | 143 | // parameters 144 | int32_t step_size = 3; 145 | 146 | // open file 147 | FILE *fp = fopen(file_name.c_str(),"w"); 148 | 149 | // save x/z coordinates of all frames to file 150 | for (int32_t i=0; i computeRoi (vector &poses_gt,vector &poses_result) { 159 | 160 | float x_min = numeric_limits::max(); 161 | float x_max = numeric_limits::min(); 162 | float z_min = numeric_limits::max(); 163 | float z_max = numeric_limits::min(); 164 | 165 | for (vector::iterator it=poses_gt.begin(); it!=poses_gt.end(); it++) { 166 | float x = it->val[0][3]; 167 | float z = it->val[2][3]; 168 | if (xx_max) x_max = x; 169 | if (zz_max) z_max = z; 170 | } 171 | 172 | for (vector::iterator it=poses_result.begin(); it!=poses_result.end(); it++) { 173 | float x = it->val[0][3]; 174 | float z = it->val[2][3]; 175 | if (xx_max) x_max = x; 176 | if (zz_max) z_max = z; 177 | } 178 | 179 | float dx = 1.1*(x_max-x_min); 180 | float dz = 1.1*(z_max-z_min); 181 | float mx = 0.5*(x_max+x_min); 182 | float mz = 0.5*(z_max+z_min); 183 | float r = 0.5*max(dx,dz); 184 | 185 | vector roi; 186 | roi.push_back((int32_t)(mx-r)); 187 | roi.push_back((int32_t)(mx+r)); 188 | roi.push_back((int32_t)(mz-r)); 189 | roi.push_back((int32_t)(mz+r)); 190 | return roi; 191 | } 192 | 193 | void plotPathPlot (string dir,vector &roi,int32_t idx) { 194 | 195 | // gnuplot file name 196 | char command[1024]; 197 | char file_name[256]; 198 | sprintf(file_name,"%02d.gp",idx); 199 | string full_name = dir + "/" + file_name; 200 | 201 | // create png + eps 202 | for (int32_t i=0; i<2; i++) { 203 | 204 | // open file 205 | FILE *fp = fopen(full_name.c_str(),"w"); 206 | 207 | // save gnuplot instructions 208 | if (i==0) { 209 | fprintf(fp,"set term png size 900,900\n"); 210 | fprintf(fp,"set output \"%02d.png\"\n",idx); 211 | } else { 212 | fprintf(fp,"set term postscript eps enhanced color\n"); 213 | fprintf(fp,"set output \"%02d.eps\"\n",idx); 214 | } 215 | 216 | fprintf(fp,"set size ratio -1\n"); 217 | fprintf(fp,"set xrange [%d:%d]\n",roi[0],roi[1]); 218 | fprintf(fp,"set yrange [%d:%d]\n",roi[2],roi[3]); 219 | fprintf(fp,"set xlabel \"x [m]\"\n"); 220 | fprintf(fp,"set ylabel \"z [m]\"\n"); 221 | fprintf(fp,"plot \"%02d.txt\" using 1:2 lc rgb \"#FF0000\" title 'Ground Truth' w lines,",idx); 222 | fprintf(fp,"\"%02d.txt\" using 3:4 lc rgb \"#0000FF\" title 'Visual Odometry' w lines,",idx); 223 | fprintf(fp,"\"< head -1 %02d.txt\" using 1:2 lc rgb \"#000000\" pt 4 ps 1 lw 2 title 'Sequence Start' w points\n",idx); 224 | 225 | // close file 226 | fclose(fp); 227 | 228 | // run gnuplot => 
create png + eps 229 | sprintf(command,"cd %s; gnuplot %s",dir.c_str(),file_name); 230 | if(system(command)); 231 | } 232 | 233 | // create pdf and crop 234 | sprintf(command,"cd %s; ps2pdf %02d.eps %02d_large.pdf",dir.c_str(),idx,idx); 235 | if(system(command)); 236 | sprintf(command,"cd %s; pdfcrop %02d_large.pdf %02d.pdf",dir.c_str(),idx,idx); 237 | if(system(command)); 238 | sprintf(command,"cd %s; rm %02d_large.pdf",dir.c_str(),idx); 239 | if(system(command)); 240 | } 241 | 242 | void saveErrorPlots(vector &seq_err,string plot_error_dir,char* prefix) { 243 | 244 | // file names 245 | char file_name_tl[1024]; sprintf(file_name_tl,"%s/%s_tl.txt",plot_error_dir.c_str(),prefix); 246 | char file_name_rl[1024]; sprintf(file_name_rl,"%s/%s_rl.txt",plot_error_dir.c_str(),prefix); 247 | char file_name_ts[1024]; sprintf(file_name_ts,"%s/%s_ts.txt",plot_error_dir.c_str(),prefix); 248 | char file_name_rs[1024]; sprintf(file_name_rs,"%s/%s_rs.txt",plot_error_dir.c_str(),prefix); 249 | 250 | // open files 251 | FILE *fp_tl = fopen(file_name_tl,"w"); 252 | FILE *fp_rl = fopen(file_name_rl,"w"); 253 | FILE *fp_ts = fopen(file_name_ts,"w"); 254 | FILE *fp_rs = fopen(file_name_rs,"w"); 255 | 256 | // for each segment length do 257 | for (int32_t i=0; i::iterator it=seq_err.begin(); it!=seq_err.end(); it++) { 265 | if (fabs(it->len-lengths[i])<1.0) { 266 | t_err += it->t_err; 267 | r_err += it->r_err; 268 | num++; 269 | } 270 | } 271 | 272 | // we require at least 3 values 273 | if (num>2.5) { 274 | fprintf(fp_tl,"%f %f\n",lengths[i],t_err/num); 275 | fprintf(fp_rl,"%f %f\n",lengths[i],r_err/num); 276 | } 277 | } 278 | 279 | // for each driving speed do (in m/s) 280 | for (float speed=2; speed<25; speed+=2) { 281 | 282 | float t_err = 0; 283 | float r_err = 0; 284 | float num = 0; 285 | 286 | // for all errors do 287 | for (vector::iterator it=seq_err.begin(); it!=seq_err.end(); it++) { 288 | if (fabs(it->speed-speed)<2.0) { 289 | t_err += it->t_err; 290 | r_err += it->r_err; 291 | num++; 292 | } 293 | } 294 | 295 | // we require at least 3 values 296 | if (num>2.5) { 297 | fprintf(fp_ts,"%f %f\n",speed,t_err/num); 298 | fprintf(fp_rs,"%f %f\n",speed,r_err/num); 299 | } 300 | } 301 | 302 | // close files 303 | fclose(fp_tl); 304 | fclose(fp_rl); 305 | fclose(fp_ts); 306 | fclose(fp_rs); 307 | } 308 | 309 | void plotErrorPlots (string dir,char* prefix) { 310 | 311 | char command[1024]; 312 | 313 | // for all four error plots do 314 | for (int32_t i=0; i<4; i++) { 315 | 316 | // create suffix 317 | char suffix[16]; 318 | switch (i) { 319 | case 0: sprintf(suffix,"tl"); break; 320 | case 1: sprintf(suffix,"rl"); break; 321 | case 2: sprintf(suffix,"ts"); break; 322 | case 3: sprintf(suffix,"rs"); break; 323 | } 324 | 325 | // gnuplot file name 326 | char file_name[1024]; char full_name[1024]; 327 | sprintf(file_name,"%s_%s.gp",prefix,suffix); 328 | sprintf(full_name,"%s/%s",dir.c_str(),file_name); 329 | 330 | // create png + eps 331 | for (int32_t j=0; j<2; j++) { 332 | 333 | // open file 334 | FILE *fp = fopen(full_name,"w"); 335 | 336 | // save gnuplot instructions 337 | if (j==0) { 338 | fprintf(fp,"set term png size 500,250 font \"Helvetica\" 11\n"); 339 | fprintf(fp,"set output \"%s_%s.png\"\n",prefix,suffix); 340 | } else { 341 | fprintf(fp,"set term postscript eps enhanced color\n"); 342 | fprintf(fp,"set output \"%s_%s.eps\"\n",prefix,suffix); 343 | } 344 | 345 | // start plot at 0 346 | fprintf(fp,"set size ratio 0.5\n"); 347 | fprintf(fp,"set yrange [0:*]\n"); 348 | 349 | // x label 350 | if 
(i<=1) fprintf(fp,"set xlabel \"Path Length [m]\"\n"); 351 | else fprintf(fp,"set xlabel \"Speed [km/h]\"\n"); 352 | 353 | // y label 354 | if (i==0 || i==2) fprintf(fp,"set ylabel \"Translation Error [%%]\"\n"); 355 | else fprintf(fp,"set ylabel \"Rotation Error [deg/m]\"\n"); 356 | 357 | // plot error curve 358 | fprintf(fp,"plot \"%s_%s.txt\" using ",prefix,suffix); 359 | switch (i) { 360 | case 0: fprintf(fp,"1:($2*100) title 'Translation Error'"); break; 361 | case 1: fprintf(fp,"1:($2*57.3) title 'Rotation Error'"); break; 362 | case 2: fprintf(fp,"($1*3.6):($2*100) title 'Translation Error'"); break; 363 | case 3: fprintf(fp,"($1*3.6):($2*57.3) title 'Rotation Error'"); break; 364 | } 365 | fprintf(fp," lc rgb \"#0000FF\" pt 4 w linespoints\n"); 366 | 367 | // close file 368 | fclose(fp); 369 | 370 | // run gnuplot => create png + eps 371 | sprintf(command,"cd %s; gnuplot %s",dir.c_str(),file_name); 372 | if(system(command)); 373 | } 374 | 375 | // create pdf and crop 376 | sprintf(command,"cd %s; ps2pdf %s_%s.eps %s_%s_large.pdf",dir.c_str(),prefix,suffix,prefix,suffix); 377 | if(system(command)); 378 | sprintf(command,"cd %s; pdfcrop %s_%s_large.pdf %s_%s.pdf",dir.c_str(),prefix,suffix,prefix,suffix); 379 | if(system(command)); 380 | sprintf(command,"cd %s; rm %s_%s_large.pdf",dir.c_str(),prefix,suffix); 381 | if(system(command)); 382 | } 383 | } 384 | 385 | void saveStats (vector err,string dir) { 386 | 387 | float t_err = 0; 388 | float r_err = 0; 389 | 390 | // for all errors do => compute sum of t_err, r_err 391 | for (vector::iterator it=err.begin(); it!=err.end(); it++) { 392 | t_err += it->t_err; 393 | r_err += it->r_err; 394 | } 395 | 396 | // open file 397 | FILE *fp = fopen((dir + "/stats.txt").c_str(),"w"); 398 | 399 | // save errors 400 | float num = err.size(); 401 | fprintf(fp,"%f %f\n",t_err/num,r_err/num); 402 | 403 | // close file 404 | fclose(fp); 405 | } 406 | 407 | bool eval (string prediction_dir) { 408 | // ground truth and result directories 409 | string gt_dir = "/home/jiange/mydocument/mycode/pytorch-deepvo/dataset/ground-truth"; 410 | string result_dir = prediction_dir; 411 | string error_dir = result_dir + "/errors"; 412 | string plot_path_dir = result_dir + "/plot_path"; 413 | string plot_error_dir = result_dir + "/plot_error"; 414 | 415 | // create output directories 416 | if(system(("mkdir " + error_dir).c_str())); 417 | if(system(("mkdir " + plot_path_dir).c_str())); 418 | if(system(("mkdir " + plot_error_dir).c_str())); 419 | 420 | // total errors 421 | vector total_err; 422 | 423 | // as for eval_train 424 | // int seq[]={0, 1, 2, 8, 9}; 425 | 426 | // as for eval_test 427 | int seq[]={3, 4, 5, 6, 7, 10}; 428 | 429 | // for all sequences do 430 | for (int32_t j=0; j poses_gt = loadPoses(gt_dir + "/" + file_name); 439 | vector poses_result = loadPoses(result_dir + "/" + file_name); 440 | 441 | // plot status 442 | printf("Processing: %s, poses: %zu/%zu\n",file_name,poses_result.size(),poses_gt.size()); 443 | 444 | // check for errors 445 | if (poses_gt.size()==0 || poses_result.size()!=poses_gt.size()) { 446 | printf("ERROR: Couldn't read (all) poses of: %s", file_name); 447 | return false; 448 | } 449 | 450 | // compute sequence errors 451 | vector seq_err = calcSequenceErrors(poses_gt,poses_result); 452 | saveSequenceErrors(seq_err,error_dir + "/" + file_name); 453 | 454 | // add to total errors 455 | total_err.insert(total_err.end(),seq_err.begin(),seq_err.end()); 456 | 457 | // save + plot bird's eye view trajectories 458 | 
savePathPlot(poses_gt,poses_result,plot_path_dir + "/" + file_name); 459 | vector roi = computeRoi(poses_gt,poses_result); 460 | plotPathPlot(plot_path_dir,roi,i); 461 | 462 | // save + plot individual errors 463 | char prefix[16]; 464 | sprintf(prefix,"%02d",i); 465 | saveErrorPlots(seq_err,plot_error_dir,prefix); 466 | plotErrorPlots(plot_error_dir,prefix); 467 | } 468 | 469 | // save + plot total errors + summary statistics 470 | if (total_err.size()>0) { 471 | char prefix[16]; 472 | sprintf(prefix,"avg"); 473 | saveErrorPlots(total_err,plot_error_dir,prefix); 474 | plotErrorPlots(plot_error_dir,prefix); 475 | saveStats(total_err,result_dir); 476 | } 477 | 478 | // success 479 | return true; 480 | } 481 | 482 | int32_t main (int32_t argc, char *argv[]) { 483 | // need only 2 arguments 484 | if (argc != 2) { 485 | cout << "Usage: ./eval_odometry result_dir" << endl; 486 | return 1; 487 | } 488 | 489 | string result_dir = argv[1]; 490 | bool success = eval(result_dir); 491 | 492 | return 0; 493 | } 494 | -------------------------------------------------------------------------------- /evaluation/cpp/mail.h: -------------------------------------------------------------------------------- 1 | #ifndef MAIL_H 2 | #define MAIL_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | class Mail { 9 | 10 | public: 11 | 12 | Mail (std::string email = "") { 13 | if (email.compare("")) { 14 | mail = popen("/usr/lib/sendmail -t -f noreply@cvlibs.net","w"); 15 | fprintf(mail,"To: %s\n", email.c_str()); 16 | fprintf(mail,"From: noreply@cvlibs.net\n"); 17 | fprintf(mail,"Subject: KITTI Evaluation Benchmark\n"); 18 | fprintf(mail,"\n\n"); 19 | } else { 20 | mail = 0; 21 | } 22 | } 23 | 24 | ~Mail() { 25 | if (mail) { 26 | pclose(mail); 27 | } 28 | } 29 | 30 | void msg (const char *format, ...) { 31 | va_list args; 32 | va_start(args,format); 33 | if (mail) { 34 | vfprintf(mail,format,args); 35 | fprintf(mail,"\n"); 36 | } 37 | vprintf(format,args); 38 | printf("\n"); 39 | va_end(args); 40 | } 41 | 42 | private: 43 | 44 | FILE *mail; 45 | 46 | }; 47 | 48 | #endif 49 | -------------------------------------------------------------------------------- /evaluation/cpp/matrix.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2011. All rights reserved. 3 | Institute of Measurement and Control Systems 4 | Karlsruhe Institute of Technology, Germany 5 | 6 | This file is part of libviso2. 7 | Authors: Andreas Geiger 8 | 9 | libviso2 is free software; you can redistribute it and/or modify it under the 10 | terms of the GNU General Public License as published by the Free Software 11 | Foundation; either version 2 of the License, or any later version. 12 | 13 | libviso2 is distributed in the hope that it will be useful, but WITHOUT ANY 14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 15 | PARTICULAR PURPOSE. See the GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License along with 18 | libviso2; if not, write to the Free Software Foundation, Inc., 51 Franklin 19 | Street, Fifth Floor, Boston, MA 02110-1301, USA 20 | */ 21 | 22 | #include "matrix.h" 23 | #include 24 | 25 | #define SWAP(a,b) {temp=a;a=b;b=temp;} 26 | #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) 27 | static FLOAT sqrarg; 28 | #define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg) 29 | static FLOAT maxarg1,maxarg2; 30 | #define FMAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1) > (maxarg2) ? 
(maxarg1) : (maxarg2)) 31 | static int32_t iminarg1,iminarg2; 32 | #define IMIN(a,b) (iminarg1=(a),iminarg2=(b),(iminarg1) < (iminarg2) ? (iminarg1) : (iminarg2)) 33 | 34 | 35 | using namespace std; 36 | 37 | Matrix::Matrix () { 38 | m = 0; 39 | n = 0; 40 | val = 0; 41 | } 42 | 43 | Matrix::Matrix (const int32_t m_,const int32_t n_) { 44 | allocateMemory(m_,n_); 45 | } 46 | 47 | Matrix::Matrix (const int32_t m_,const int32_t n_,const FLOAT* val_) { 48 | allocateMemory(m_,n_); 49 | int32_t k=0; 50 | for (int32_t i=0; i0) 72 | for (int32_t i=0; i=m || j1<0 || j2>=n || i2m || j1+M.n>n) { 105 | cerr << "ERROR: Cannot set submatrix [" << i1 << ".." << i1+M.m-1 << 106 | "] x [" << j1 << ".." << j1+M.n-1 << "]" << 107 | " of a (" << m << "x" << n << ") matrix." << endl; 108 | exit(0); 109 | } 110 | for (int32_t i=0; i idx) { 138 | Matrix M(m,idx.size()); 139 | for (int32_t j=0; j1 && M.n==1) { 163 | Matrix D(M.m,M.m); 164 | for (int32_t i=0; i1) { 168 | Matrix D(M.n,M.n); 169 | for (int32_t i=0; i=big) { 448 | big=fabs(A.val[j][k]); 449 | irow=j; 450 | icol=k; 451 | } 452 | ++(ipiv[icol]); 453 | 454 | // We now have the pivot element, so we interchange rows, if needed, to put the pivot 455 | // element on the diagonal. The columns are not physically interchanged, only relabeled. 456 | if (irow != icol) { 457 | for (l=0;l=0;l--) { 491 | if (indxr[l]!=indxc[l]) 492 | for (k=0;kbig) 525 | big = temp; 526 | if (big == 0.0) { // No nonzero largest element. 527 | free(vv); 528 | return false; 529 | } 530 | vv[i] = 1.0/big; // Save the scaling. 531 | } 532 | for (j=0; j=big) { 546 | big = dum; 547 | imax = i; 548 | } 549 | } 550 | if (j!=imax) { // Do we need to interchange rows? 551 | for (k=0; k=0;i--) { // Accumulation of right-hand transformations. 635 | if (i=0;i--) { // Accumulation of left-hand transformations. 651 | l = i+1; 652 | g = w[i]; 653 | for (j=l;j=0;k--) { // Diagonalization of the bidiagonal form: Loop over singular values, 666 | for (its=0;its<30;its++) { // and over allowed iterations. 667 | flag = 1; 668 | for (l=k;l>=0;l--) { // Test for splitting. 669 | nm = l-1; 670 | if ((FLOAT)(fabs(rv1[l])+anorm) == anorm) { flag = 0; break; } 671 | if ((FLOAT)(fabs( w[nm])+anorm) == anorm) { break; } 672 | } 673 | if (flag) { 674 | c = 0.0; // Cancellation of rv1[l], if l > 1. 675 | s = 1.0; 676 | for (i=l;i<=k;i++) { 677 | f = s*rv1[i]; 678 | rv1[i] = c*rv1[i]; 679 | if ((FLOAT)(fabs(f)+anorm) == anorm) break; 680 | g = w[i]; 681 | h = pythag(f,g); 682 | w[i] = h; 683 | h = 1.0/h; 684 | c = g*h; 685 | s = -f*h; 686 | for (j=0;j 1); 782 | for (k=0;k (m+n)/2) { 787 | for (i=0;i absb) 846 | return absa*sqrt(1.0+SQR(absb/absa)); 847 | else 848 | return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb))); 849 | } 850 | 851 | -------------------------------------------------------------------------------- /evaluation/cpp/matrix.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2011. All rights reserved. 3 | Institute of Measurement and Control Systems 4 | Karlsruhe Institute of Technology, Germany 5 | 6 | This file is part of libviso2. 7 | Authors: Andreas Geiger 8 | 9 | libviso2 is free software; you can redistribute it and/or modify it under the 10 | terms of the GNU General Public License as published by the Free Software 11 | Foundation; either version 2 of the License, or any later version. 
12 | 13 | libviso2 is distributed in the hope that it will be useful, but WITHOUT ANY 14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 15 | PARTICULAR PURPOSE. See the GNU General Public License for more details. 16 | 17 | You should have received a copy of the GNU General Public License along with 18 | libviso2; if not, write to the Free Software Foundation, Inc., 51 Franklin 19 | Street, Fifth Floor, Boston, MA 02110-1301, USA 20 | */ 21 | 22 | #ifndef MATRIX_H 23 | #define MATRIX_H 24 | 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #ifndef _MSC_VER 32 | #include 33 | #else 34 | typedef __int8 int8_t; 35 | typedef __int16 int16_t; 36 | typedef __int32 int32_t; 37 | typedef __int64 int64_t; 38 | typedef unsigned __int8 uint8_t; 39 | typedef unsigned __int16 uint16_t; 40 | typedef unsigned __int32 uint32_t; 41 | typedef unsigned __int64 uint64_t; 42 | #endif 43 | 44 | #define endll endl << endl // double end line definition 45 | 46 | typedef double FLOAT; // double precision 47 | //typedef float FLOAT; // single precision 48 | 49 | class Matrix { 50 | 51 | public: 52 | 53 | // constructor / deconstructor 54 | Matrix (); // init empty 0x0 matrix 55 | Matrix (const int32_t m,const int32_t n); // init empty mxn matrix 56 | Matrix (const int32_t m,const int32_t n,const FLOAT* val_); // init mxn matrix with values from array 'val' 57 | Matrix (const Matrix &M); // creates deepcopy of M 58 | ~Matrix (); 59 | 60 | // assignment operator, copies contents of M 61 | Matrix& operator= (const Matrix &M); 62 | 63 | // copies submatrix of M into array 'val', default values copy whole row/column/matrix 64 | void getData(FLOAT* val_,int32_t i1=0,int32_t j1=0,int32_t i2=-1,int32_t j2=-1); 65 | 66 | // set or get submatrices of current matrix 67 | Matrix getMat(int32_t i1,int32_t j1,int32_t i2=-1,int32_t j2=-1); 68 | void setMat(const Matrix &M,const int32_t i,const int32_t j); 69 | 70 | // set sub-matrix to scalar (default 0), -1 as end replaces whole row/column/matrix 71 | void setVal(FLOAT s,int32_t i1=0,int32_t j1=0,int32_t i2=-1,int32_t j2=-1); 72 | 73 | // set (part of) diagonal to scalar, -1 as end replaces whole diagonal 74 | void setDiag(FLOAT s,int32_t i1=0,int32_t i2=-1); 75 | 76 | // clear matrix 77 | void zero(); 78 | 79 | // extract columns with given index 80 | Matrix extractCols (std::vector idx); 81 | 82 | // create identity matrix 83 | static Matrix eye (const int32_t m); 84 | void eye (); 85 | 86 | // create diagonal matrix with nx1 or 1xn matrix M as elements 87 | static Matrix diag(const Matrix &M); 88 | 89 | // returns the m-by-n matrix whose elements are taken column-wise from M 90 | static Matrix reshape(const Matrix &M,int32_t m,int32_t n); 91 | 92 | // create 3x3 rotation matrices (convention: http://en.wikipedia.org/wiki/Rotation_matrix) 93 | static Matrix rotMatX(const FLOAT &angle); 94 | static Matrix rotMatY(const FLOAT &angle); 95 | static Matrix rotMatZ(const FLOAT &angle); 96 | 97 | // simple arithmetic operations 98 | Matrix operator+ (const Matrix &M); // add matrix 99 | Matrix operator- (const Matrix &M); // subtract matrix 100 | Matrix operator* (const Matrix &M); // multiply with matrix 101 | Matrix operator* (const FLOAT &s); // multiply with scalar 102 | Matrix operator/ (const Matrix &M); // divide elementwise by matrix (or vector) 103 | Matrix operator/ (const FLOAT &s); // divide by scalar 104 | Matrix operator- (); // negative matrix 105 | Matrix operator~ (); // transpose 106 | FLOAT l2norm (); 
// euclidean norm (vectors) / frobenius norm (matrices) 107 | FLOAT mean (); // mean of all elements in matrix 108 | 109 | // complex arithmetic operations 110 | static Matrix cross (const Matrix &a, const Matrix &b); // cross product of two vectors 111 | static Matrix inv (const Matrix &M); // invert matrix M 112 | bool inv (); // invert this matrix 113 | FLOAT det (); // returns determinant of matrix 114 | bool solve (const Matrix &M,FLOAT eps=1e-20); // solve linear system M*x=B, replaces *this and M 115 | bool lu(int32_t *idx, FLOAT &d, FLOAT eps=1e-20); // replace *this by lower upper decomposition 116 | void svd(Matrix &U,Matrix &W,Matrix &V); // singular value decomposition *this = U*diag(W)*V^T 117 | 118 | // print matrix to stream 119 | friend std::ostream& operator<< (std::ostream& out,const Matrix& M); 120 | 121 | // direct data access 122 | FLOAT **val; 123 | int32_t m,n; 124 | 125 | private: 126 | 127 | void allocateMemory (const int32_t m_,const int32_t n_); 128 | void releaseMemory (); 129 | inline FLOAT pythag(FLOAT a,FLOAT b); 130 | 131 | }; 132 | 133 | #endif // MATRIX_H 134 | -------------------------------------------------------------------------------- /evaluation/plot_loss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-12-17 17:26:34 7 | Program: 8 | Description: 9 | """ 10 | 11 | import matplotlib 12 | matplotlib.use('Agg') 13 | import matplotlib.pyplot as plt 14 | import json 15 | import numpy as np 16 | import os 17 | 18 | # experiment1-cnn 19 | loss_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_t-tag-.-train-val.json' 20 | loss_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_v-tag-.-train-val.json' 21 | loss1_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss1_t-tag-.-train-val.json' 22 | loss1_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss1_v-tag-.-train-val.json' 23 | loss2_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss2_t-tag-.-train-val.json' 24 | loss2_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss2_v-tag-.-train-val.json' 25 | loss_tx_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_tx_t-tag-.-train-val.json' 26 | loss_tx_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_tx_v-tag-.-train-val.json' 27 | loss_ty_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_ty_t-tag-.-train-val.json' 28 | loss_ty_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_ty_v-tag-.-train-val.json' 29 | loss_tz_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_tz_t-tag-.-train-val.json' 30 | loss_tz_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_tz_v-tag-.-train-val.json' 31 | loss_x_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_x_t-tag-.-train-val.json' 32 | loss_x_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_x_v-tag-.-train-val.json' 33 | loss_y_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_y_t-tag-.-train-val.json' 34 | loss_y_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_y_v-tag-.-train-val.json' 35 | loss_z_t = '/home/jiange/Downloads/run-20171214_10-train-val-loss_z_t-tag-.-train-val.json' 36 | loss_z_v = '/home/jiange/Downloads/run-20171214_10-train-val-loss_z_v-tag-.-train-val.json' 37 | 38 | # dir_t = [loss_t, loss1_t, loss2_t, loss_x_t, loss_y_t, loss_z_t, loss_tx_t, loss_ty_t, loss_tz_t] 39 | # dir_v = [loss_v, 
loss1_v, loss2_v, loss_x_v, loss_y_v, loss_z_v, loss_tx_v, loss_ty_v, loss_tz_v] 40 | # name_title = ['loss', 'translation', 'rotation', 'x', 'y', 'z', '$\psi$', '$\chi$', '$\phi$'] 41 | # dir_save = 'loss/cnn-vo-cons' 42 | 43 | # experiment1-cnn-lstm 44 | loss_t_1 = 'loss-json/run_20171224_1_train-tag-loss.json' 45 | loss_v_1 = 'loss-json/run_20171224_1_val-tag-loss.json' 46 | loss1_t_1 = 'loss-json/run_20171224_1_train-tag-loss1.json' 47 | loss1_v_1 = 'loss-json/run_20171224_1_val-tag-loss1.json' 48 | loss2_t_1 = 'loss-json/run_20171224_1_train-tag-loss2.json' 49 | loss2_v_1 = 'loss-json/run_20171224_1_val-tag-loss2.json' 50 | # dir_t = [loss_t_1, loss1_t_1, loss2_t_1] 51 | # dir_v = [loss_v_1, loss1_v_1, loss2_v_1] 52 | # name_title = ['loss', 'translation', 'rotation'] 53 | # dir_save = 'loss/experiment1-cnn-lstm' 54 | 55 | # experiment1-cnn-lstm-cons 56 | loss_t_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss_t-tag-._train-val.json' 57 | loss_v_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss_v-tag-._train-val.json' 58 | loss1_t_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss1_t-tag-._train-val.json' 59 | loss1_v_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss1_v-tag-._train-val.json' 60 | loss2_t_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss2_t-tag-._train-val.json' 61 | loss2_v_2 = 'loss-json/cnn-lstm-vo-cons/run_train-val_loss2_v-tag-._train-val.json' 62 | # dir_t = [loss_t_2, loss1_t_2, loss2_t_2] 63 | # dir_v = [loss_v_2, loss1_v_2, loss2_v_2] 64 | # name_title = ['loss', 'translation', 'rotation'] 65 | # dir_save = 'loss/experiment1-cnn-lstm-cons' 66 | 67 | # experiment2-cnn-lstm 68 | loss1_t_3 = 'loss-json/experiment2-cnn-lstm/run-train-loss1_t-tag-.-train.json' 69 | loss2_t_3 = 'loss-json/experiment2-cnn-lstm/run-train-loss2_t-tag-.-train.json' 70 | dir_t = [loss1_t_3] 71 | dir_v = [loss2_t_3] 72 | name_title = ['loss'] 73 | dir_save = 'loss/experiment2-cnn-lstm' 74 | 75 | 76 | if not os.path.exists(dir_save): 77 | os.makedirs(dir_save) 78 | 79 | 80 | def plot_train_val(): 81 | for dir1, dir2, name in zip(dir_t, dir_v, name_title): 82 | plt.close('all') 83 | data_t = np.array(json.load(open(dir1, 'r'))) 84 | data_v = np.array(json.load(open(dir2, 'r'))) 85 | x_t = data_t[5::2, 1] 86 | y_t = data_t[5::2, 2] 87 | x_v = data_v[5::2, 1] 88 | y_v = data_v[5::2, 2] 89 | plt.plot(x_t, y_t, '-b', label='Train loss1') 90 | plt.plot(x_v, y_v, '-r', label='Train loss2') 91 | 92 | if name != 'loss': 93 | plt.title('Loss of {:s}'.format(name)) 94 | else: 95 | plt.title('Train loss on sequence 00-10') 96 | plt.xlabel('Iterations') 97 | plt.ylabel('Loss') 98 | plt.legend() 99 | plt.grid('on') 100 | plt.savefig(dir_save + '/{:s}.png'.format(name)) 101 | # plt.show() 102 | 103 | 104 | if __name__ == '__main__': 105 | plot_train_val() 106 | -------------------------------------------------------------------------------- /evaluation/plot_main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-11-21 15:42:03 7 | Program: 8 | Description: 9 | """ 10 | import os 11 | import matplotlib 12 | from matplotlib import style 13 | matplotlib.use('Agg') 14 | import numpy as np 15 | from utils.my_color import color_line as col 16 | from utils.plot_misc import plot_evaluation, plot_trajectory 17 | co = col() 18 | 19 | 20 | def ll(): 21 | l1 = ['ll/105', '-', co[1], 'll-105'] 22 | l2 = ['ll/nb', '-', co[1], 'll-nb'] 23 | 
l3 = ['ll/130', '-', co[0], 'll-130'] 24 | return l1, l2, l3 25 | 26 | 27 | def generate_list(dir_net, dir_name, epoch_list): 28 | l_dict = dict() 29 | for i, epoch in enumerate(epoch_list): 30 | l_dict[i] = [dir_net+'/{:s}_model-{:d}'.format(dir_name, epoch), '-', 'epoch-{:d}'.format(epoch)] 31 | return l_dict 32 | 33 | 34 | def regroup_for_plot(items): 35 | files = [] 36 | fmts = [] 37 | legends = [] 38 | for item in items: 39 | files.append(item[0]) 40 | fmts.append(item[1]) 41 | legends.append(item[2]) 42 | return files, fmts, legends 43 | 44 | 45 | class MyExperiments(object): 46 | def __init__(self): 47 | self.dir0 = '/home/jiange/dl/project' 48 | self.dir1_tf = self.dir0 + '/tf-cnn-vo/test/cnn-vo' # cnn-vo-tf 49 | self.dir2_tf = self.dir0 + '/tf-cnn-vo/test/cnn-vo-cons' # cnn-vo-cons-tf 50 | self.dir3_tf = self.dir0 + '/tf-cnn-lstm-vo/test/cnn-lstm-vo' # cnn-lstm-vo-tf 51 | self.dir4_tf = self.dir0 + '/tf-cnn-lstm-vo/test/cnn-lstm-vo-cons' # cnn-lstm-vo-cons-tf 52 | self.dir1_pt = self.dir0 + '/pytorch-deepvo/test/cnn-vo' 53 | self.dir2_pt = self.dir0 + '/pytorch-deepvo/test/cnn-vo-cons' # cnn-vo-cons-pt 54 | self.dir3_pt = self.dir0 + '/pytorch-deepvo/test/cnn-lstm-vo' # cnn-lstm-vo-pt 55 | self.dir4_pt = self.dir0 + '/pytorch-cnn-lstm-vo/test/cnn-lstm-vo-cons' # cnn-lstm-vo-cons-pt 56 | 57 | # baseline 58 | self.gt = [self.dir0 + '/tf-cnn-vo/dataset/ground-truth', '--', 'Ground truth'] 59 | self.b1 = ['viso2-m', '-', 'VISO2-M'] 60 | self.b2 = ['viso2-s', '-', 'VISO2-S'] 61 | 62 | # experiment1 63 | self.e1_1 = [self.dir1_tf, '20171124_50_restore', [80]] # ==2nd== 64 | self.e1_2 = [self.dir1_pt, '20171230', [90]] 65 | self.e1_3 = [self.dir1_pt, '20180101', [120]] # it0 ==1st== 66 | self.e1_4 = [self.dir1_pt, '20180101_tb', [90]] 67 | self.e1_5 = [self.dir1_pt, '20180102_iks', [120]] 68 | 69 | # experiment2 70 | self.e2_1 = [self.dir2_tf, '1130', [75]] 71 | self.e2_2 = [self.dir2_tf, '20171222', [25]] # ==1st-old== 72 | self.e2_3 = [self.dir2_pt, '20171209_10', [150]] 73 | self.e2_4 = [self.dir2_pt, '20171211_10', [150]] 74 | self.e2_5 = [self.dir2_pt, '20171214_10', [90]] 75 | self.e2_6 = [self.dir2_pt, '20171217', [80]] 76 | self.e2_7 = [self.dir2_pt, '20171218', [120]] 77 | self.e2_8 = [self.dir2_pt, '20180103', [70]] # fine-tuning it0+it0r 78 | self.e2_9 = [self.dir2_pt, '20180103_triple', [110]] # fine-tuning it0+it0r+it1 ==1st of all== 79 | self.e2_10 = [self.dir2_pt, '20180104', [160]] # fine-tuning it0+it1 160 80 | 81 | self.e2_11 = [self.dir2_pt, '20180106_triple', [30, 40, 50, 60, 70]] # scratch it0+it0r+it1 30 or 50 tested 82 | self.e2_12 = [self.dir2_pt, '20180106', [70]] # scratch it0+it1 70 tested 83 | 84 | # experiment3 85 | self.e3_1 = [self.dir3_tf, '20171224_1', [75]] # ==1st== 86 | self.e3_2 = [self.dir3_tf, '6499_20171224', [45]] 87 | self.e3_3 = [self.dir3_pt, '20171219', [110]] 88 | self.e3_4 = [self.dir3_pt, '20180104', [60]] # bad 89 | 90 | self.e3_5 = [self.dir3_pt, '20180107', []] # to be tested 91 | 92 | # experiment4 93 | self.e4_1 = [self.dir4_tf, '6499_20171225', [5]] # [5, 20, 25, 30, 35, 40] (best of M4) 94 | self.e4_2 = [self.dir4_pt, '20171226', [110]] # [80 ... 
150]] 95 | self.e4_3 = [self.dir4_pt, '6499_20171226', [80, 120, 150]] # [35, 45, 55, 60, 70, 80, 90, 100, 110] 80 96 | 97 | def list_now(self): 98 | _dir_save = 'evaluation/now' 99 | l1 = generate_list(*self.e2_9) 100 | l2 = generate_list(*self.e2_12) 101 | _methods = [self.gt, l1[0], l2[0]] 102 | _colors = ['k', co[1], co[0]] 103 | return _methods, _colors, _dir_save 104 | 105 | def list_cnn_cons_compare(self): 106 | _dir_save = 'evaluation/cnn-cons-20180104' 107 | l1 = generate_list(*self.e1_3) 108 | l2 = generate_list(*self.e2_8) 109 | l3 = generate_list(*self.e2_10) 110 | l4 = generate_list(*self.e2_9) 111 | _methods = [self.gt, l1[0], l2[0], l3[0], l4[0]] 112 | _colors = ['k', co[3], co[2], co[1], co[0]] 113 | _methods[1][2] = 'it0-epoch-120' 114 | _methods[2][2] = 'it0-it0r-epoch-70' 115 | _methods[3][2] = 'it0-it1-epoch-40' 116 | _methods[4][2] = 'it0-it0r-it1-epoch-110' 117 | return _methods, _colors, _dir_save 118 | 119 | def list_compare(self): 120 | _dir_save = 'evaluation/cnn-compare' 121 | l1 = generate_list(*self.e2_1) 122 | l2 = generate_list(*self.e2_2) 123 | _methods = [self.gt, l1[0], l1[1], l1[2], l2[0]] 124 | _colors = ['k', co[3], co[2], co[1], co[0]] 125 | _methods[4][2] = 'epoch-100' 126 | return _methods, _colors, _dir_save 127 | 128 | def list_cnn(self): 129 | _dir_save = 'evaluation/cnn-vo' 130 | l1 = generate_list(*self.e1_1) 131 | l2 = generate_list(*self.e2_2) 132 | _methods = [self.gt, self.b1, self.b2, l1[0], l2[0]] 133 | _colors = ['k', co[3], co[2], co[1], co[0]] 134 | _methods[3][2] = 'CNN-VO' 135 | _methods[4][2] = 'CNN-VO-cons' 136 | return _methods, _colors, _dir_save 137 | 138 | def list_lstm(self): 139 | _dir_save = 'evaluation/lstm-vo' 140 | l1 = generate_list(*self.e1_1) 141 | l2 = generate_list(*self.e2_1) 142 | l3 = generate_list(*self.e3_1) 143 | l4 = generate_list(*self.e4_1) 144 | _methods = [self.gt, self.b1, self.b2, l1[0], l2[2], l3[0], l4[0]] 145 | _colors = ['k', co[3], co[2], co[1], co[0], co[4], co[5]] 146 | _methods[3][2] = 'CNN-VO' 147 | _methods[4][2] = 'CNN-VO-cons' 148 | _methods[5][2] = 'CNN-LSTM-VO' 149 | _methods[6][2] = 'CNN-LSTM-VO-cons' 150 | return _methods, _colors, _dir_save 151 | 152 | 153 | if __name__ == '__main__': 154 | my_plot = MyExperiments() 155 | methods, colors, dir_save = my_plot.list_now() 156 | 157 | if not os.path.exists(dir_save): 158 | os.makedirs(dir_save) 159 | file_list, fmt_list, legend_list = regroup_for_plot(methods) 160 | print('Plot evaluation...') 161 | plot_evaluation(file_list, fmt_list, colors, legend_list, dir_save) 162 | for sequence in np.arange(11): # [3, 4, 5, 6, 7, 10]: 163 | style.use("ggplot") 164 | plot_trajectory(sequence, file_list, fmt_list, colors, legend_list, dir_save) 165 | 166 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-12-08 10:42:02 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import torch.nn as nn 12 | from torch.utils.data import DataLoader 13 | import re 14 | import os 15 | import math 16 | import argparse 17 | import numpy as np 18 | from time import time 19 | from tqdm import tqdm 20 | from glob import glob 21 | from tensorboardX import SummaryWriter 22 | from utils.post_process import cal_absolute_from_relative, plot_from_pose 23 | from utils.misc import to_var, adjust_learning_rate, 
pre_create_file_train, pre_create_file_test, \ 24 | display_loss_tb, display_loss_tb_val 25 | 26 | 27 | parser = argparse.ArgumentParser() 28 | parser.add_argument('--server', default=None, type=int, help='[6099 / 6199 / 6499]') 29 | parser.add_argument('--net_architecture', default=None, help='[cnn / cnn-tb / cnn-iks / cnn-lstm]') 30 | parser.add_argument("--samples", default='i0', help='samples for train') 31 | parser.add_argument('--phase', default=None, help='[Train / Test]') 32 | parser.add_argument('--resume', default=None, help='[Yes / No] for cnn, [cnn / lstm / No] for cnn-lstm') 33 | 34 | # 模型载入的参数 35 | parser.add_argument('--net_restore', default='cnn-vo', help='Restore net name') 36 | parser.add_argument('--dir_restore', default='20180101', help='Restore file name') 37 | parser.add_argument('--model_restore', default='model-200', help='Restore model-id') 38 | 39 | parser.add_argument('--net_name', default=None, help='[cnn-vo / cnn-vo-cons / cnn-lstm-vo / cnn-lstm-vo-cons]') 40 | parser.add_argument('--dir0', default=None, help='Name it with date, such as 20180102') 41 | parser.add_argument('--batch_size', default=32, type=int, help='Batch size') 42 | parser.add_argument('--epoch_max', default=100, type=int, help='Max epoch') 43 | parser.add_argument('--epoch_test', default=10, type=int, help='Test epoch during train process') 44 | parser.add_argument('--epoch_save', default=10, type=int, help='Max epoch number') 45 | parser.add_argument('--lr_base', default=1e-4, type=float, help='Base learning rate') 46 | parser.add_argument('--lr_decay_rate', default=0.316, type=float, help='Decay rate of lr') 47 | parser.add_argument('--epoch_lr_decay', default=30, type=int, help='Every # epoch, lr decay lr_decay_rate') 48 | parser.add_argument('--beta', default=10, type=int, help='loss = loss_t + beta * loss_r') 49 | 50 | # lstm 参数 51 | parser.add_argument('--img_pairs', default=10, type=int, help='Image pairs') 52 | parser.add_argument('--si', default=3, type=int, help='Start interval') 53 | parser.add_argument('--num_layer', default=2, type=int, help='Lstm layer number') 54 | parser.add_argument('--hidden_size', default=1024, type=int, help='Lstm hidden units') 55 | 56 | parser.add_argument("--gpu", default='0', help='GPU id list') 57 | parser.add_argument("--workers", default=4, type=int, help='Workers number') 58 | args = parser.parse_args() 59 | 60 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu # 设置可见的gpu的列表,例如:'2,3,4' 61 | gpu_list = re.split('[, ]', args.gpu) # 提取出列表中gpu的id 62 | args.gpu = range(len(list(filter(None, gpu_list)))) # 传给PyTorch中多gpu并行的列表 63 | 64 | if args.server == 6099: 65 | dir_data = '/media/csc105/Data/dataset-jiange/data_odometry_color/sequences' 66 | dir_label = '/home/jiange/dl/project/pytorch-deepvo/dataset' 67 | model_dir = 'model' 68 | log_dir = '/home/jiange/dl/project/pytorch-deepvo/log' 69 | elif args.server == 6199: 70 | dir_data = '/media/Data/dataset_jiange/data_odometry_color/sequences' 71 | dir_label = '/home/jiange/dl/project/pytorch-deepvo/dataset' 72 | model_dir = 'model' 73 | log_dir = '/home/jiange/dl/project/pytorch-deepvo/log' 74 | elif args.server == 6499: 75 | dir_data = '/media/jiange/095df4a3-d72c-43d9-bfbd-e78651afba19/dataset-jiange/data_odometry_color/sequences' 76 | dir_label = '/home/jiange/mydocument/mycode/pytorch-deepvo/dataset' 77 | model_dir = '/media/jiange/095df4a3-d72c-43d9-bfbd-e78651afba19/model-jiange/pytorch-deepvo' 78 | log_dir = '/home/jiange/mydocument/mycode/pytorch-deepvo/log' 79 | else: 80 | raise Exception('Must give 
the right server id!') 81 | 82 | dir_restore = model_dir + '/' + args.net_restore + '/' + args.dir_restore + '/' + args.model_restore + '.pkl' 83 | 84 | if args.net_architecture == 'cnn': 85 | from net.cnn import Net 86 | from dataset.kitti import KITTIDataSet 87 | elif args.net_architecture == 'cnn-sc': 88 | from net.cnn_seperate_conv import Net 89 | from dataset.kitti import KITTIDataSet 90 | elif args.net_architecture == 'cnn-sc1': 91 | from net.cnn_seperate_conv_1 import Net 92 | from dataset.kitti import KITTIDataSet 93 | elif args.net_architecture == 'cnn-tb': 94 | from net.cnn_tb import Net 95 | from dataset.kitti import KITTIDataSet 96 | elif args.net_architecture == 'cnn-iks': 97 | from net.cnn_increase_kernal_size import Net 98 | from dataset.kitti import KITTIDataSet 99 | elif args.net_architecture == 'cnn-lstm': 100 | from net.cnn_lstm import Net 101 | from dataset.kitti_lstm import KITTIDataSet, read_image 102 | else: 103 | raise Exception('Must give the right cnn architecture') 104 | 105 | 106 | def run_batch(sample, model, loss_func=None, optimizer=None, phase=None): 107 | """ 108 | 训练、验证: 109 | run_batch(sample, model, loss_func, optimizer, phase='Train') 110 | run_batch(sample, model, loss_func, phase='Valid') 111 | 返回估计位姿以及loss 112 | 测试: 113 | run_batch(sample, model, phase='Test') 114 | 返回估计位姿 115 | """ 116 | if phase == 'Train': 117 | model.train() 118 | else: 119 | model.eval() # 启用测试模式,关闭dropout 120 | 121 | img1 = to_var(sample['img1']) # as for cnn: [bs, 6, H, W], as for cnn-lstm: [N, T, 6, H, W] 122 | img2 = to_var(sample['img2']) 123 | label_pre = model(img1, img2) # [32, 6] 124 | # conv_out = x_conv.data.cpu().numpy() 125 | # lstm_out = x_lstm.data.cpu().numpy() 126 | # print('Conv >>> min: {:.5f}, max: {:.5f}'.format(np.min(conv_out), np.max(conv_out))) 127 | # print('LSTM >>> min: {:.5f}, max: {:.5f}'.format(np.min(lstm_out), np.max(lstm_out))) 128 | 129 | if phase == 'Train' or phase == 'Valid': 130 | label = to_var(sample['label']) # [bs, 6] 131 | label = label.view(-1, 6) 132 | loss1 = loss_func(label_pre[:, :3], label[:, :3]) 133 | loss2 = loss_func(label_pre[:, 3:], label[:, 3:]) 134 | loss = loss1 + args.beta * loss2 135 | 136 | # loss_x = loss_func(label_pre[:, 0], label[:, 0]) 137 | # loss_y = loss_func(label_pre[:, 1], label[:, 1]) 138 | # loss_z = loss_func(label_pre[:, 2], label[:, 2]) 139 | # loss_tx = loss_func(label_pre[:, 3], label[:, 3]) 140 | # loss_ty = loss_func(label_pre[:, 4], label[:, 4]) 141 | # loss_tz = loss_func(label_pre[:, 5], label[:, 5]) 142 | 143 | if phase == 'Train': 144 | optimizer.zero_grad() # clear gradients for this training step 145 | loss.backward() # bp, compute gradients 146 | optimizer.step() # apply gradients 147 | 148 | return loss.data[0], loss1.data[0], loss2.data[0], label_pre.data 149 | # return loss.data[0], loss1.data[0], loss2.data[0], label_pre.data, \ 150 | # loss_x.data[0], loss_y.data[0], loss_z.data[0], loss_tx.data[0], loss_ty.data[0], loss_tz.data[0] 151 | else: 152 | return label_pre.data 153 | 154 | 155 | def run_batch_2(sample, model, loss_func=None, optimizer=None): 156 | """ 157 | cnn-lstm 不同time_step一起训练 158 | """ 159 | model.train() 160 | 161 | loss_mean = [] 162 | loss1_mean = [] 163 | loss2_mean = [] 164 | for sample_batch in sample: 165 | img1 = to_var(sample_batch['img1']) # as for cnn: [bs, 6, H, W], as for cnn-lstm: [N, T, 6, H, W] 166 | img2 = to_var(sample_batch['img2']) 167 | label_pre = model(img1, img2) # [32, 6] 168 | 169 | label = to_var(sample_batch['label']) # [bs, 6] 170 | label = 
label.view(-1, 6) 171 | loss1 = loss_func(label_pre[:, :3], label[:, :3]) 172 | loss2 = loss_func(label_pre[:, 3:], label[:, 3:]) 173 | loss = loss1 + args.beta * loss2 174 | 175 | loss1_mean.append(loss1.data[0]) 176 | loss2_mean.append(loss2.data[0]) 177 | loss_mean.append(loss.data[0]) 178 | 179 | optimizer.zero_grad() # clear gradients for this training step 180 | loss.backward() # bp, compute gradients 181 | optimizer.step() # apply gradients 182 | 183 | loss1_mean = np.mean(loss1_mean) 184 | loss2_mean = np.mean(loss2_mean) 185 | loss_mean = np.mean(loss_mean) 186 | return loss1_mean.data[0], loss2_mean.data[0], loss_mean.data[0] 187 | 188 | 189 | def run_val(model, loss_func, loader): 190 | """ 191 | 验证多个batch,并返回平均误差 192 | """ 193 | loss_ret = [] 194 | loss1_ret = [] 195 | loss2_ret = [] 196 | 197 | for _, sample_v in enumerate(loader): 198 | loss_v, loss1_v, loss2_v, _ = run_batch(sample=sample_v, model=model, loss_func=loss_func, phase='Valid') 199 | loss_ret.append(loss_v) 200 | loss1_ret.append(loss1_v) 201 | loss2_ret.append(loss2_v) 202 | 203 | loss_mean = np.mean(loss_ret) 204 | loss1_mean = np.mean(loss1_ret) 205 | loss2_mean = np.mean(loss2_ret) 206 | 207 | return loss_mean, loss1_mean, loss2_mean 208 | 209 | 210 | def run_test(model, seq, dir_model=None, epoch=None, dir_time=None): 211 | """ 212 | 训练阶段对一段完整的轨迹进行测试,或者测试阶段直接用于测试 213 | 214 | 训练过程中测试: 215 | 1. 计算一段完整场景中所有相对姿态的预测值 216 | cnn-lstm: 217 | 手动写读图的代码,从而可以处理场景末尾图片序列长度不足一个batch的情况 218 | cnn: 219 | 采用DataLoader读取,较为方便 220 | 221 | 2. 计算绝对姿态,并画出轨迹 222 | 训练阶段保存轨迹图 223 | 测试阶保存轨迹图、相对位姿、绝对位姿 224 | """ 225 | print('\nTest sequence {:02d} >>>'.format(seq)) 226 | if args.net_architecture == 'cnn-lstm': 227 | model.eval() 228 | img_list = glob(dir_data + '/{:02d}/image_2/*.png'.format(seq)) 229 | img_list.sort() 230 | ip = args.img_pairs 231 | iter_1 = int(math.floor((len(img_list) - 1) / ip)) 232 | iter_2 = int(math.ceil((len(img_list) - 1) / ip)) 233 | pose_ret = [] 234 | for i in tqdm(np.arange(iter_1)): 235 | img_seq = [] 236 | for img_path in img_list[i * ip: (i + 1) * ip + 1]: 237 | img = read_image(img_path) 238 | img_seq.append(img) 239 | x1 = np.stack(img_seq[:-1], 0) 240 | x1 = np.transpose(x1, [0, 3, 1, 2]) # [10, C, H, W] 241 | x1 = x1[np.newaxis, :, :, :, :] # [1, 10, C, H, W] 242 | x1 = to_var(torch.from_numpy(x1)) 243 | 244 | x2 = np.stack(img_seq[1:], 0) 245 | x2 = np.transpose(x2, [0, 3, 1, 2]) # [10, C, H, W] 246 | x2 = x2[np.newaxis, :, :, :, :] # [1, 10, C, H, W] 247 | x2 = to_var(torch.from_numpy(x2)) 248 | pose_out = model(x1, x2) 249 | pose_ret.extend(pose_out.data.cpu().numpy()) 250 | 251 | ns = iter_1 * ip 252 | if iter_1 != iter_2: 253 | print('Process for the last {:d} images...'.format(len(img_list) - ns)) 254 | img_seq = [] 255 | for img_path in img_list[ns:]: 256 | img = read_image(img_path) 257 | img_seq.append(img) 258 | x1 = np.stack(img_seq[:-1], 0) 259 | x1 = np.transpose(x1, [0, 3, 1, 2]) # [10, C, H, W] 260 | x1 = x1[np.newaxis, :, :, :, :] # [1, 10, C, H, W] 261 | x1 = to_var(torch.from_numpy(x1)) 262 | 263 | x2 = np.stack(img_seq[1:], 0) 264 | x2 = np.transpose(x2, [0, 3, 1, 2]) # [10, C, H, W] 265 | x2 = x2[np.newaxis, :, :, :, :] # [1, 10, C, H, W] 266 | x2 = to_var(torch.from_numpy(x2)) 267 | pose_out = model(x1, x2) 268 | pose_ret.extend(pose_out.data.cpu().numpy()) 269 | else: 270 | data_set = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, phase='Test', seq=seq) 271 | loader = DataLoader(data_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) 272 | pose_ret 
= [] 273 | for _, sample_batch in enumerate(tqdm(loader)): 274 | pose_pre = run_batch(sample=sample_batch, model=model, phase='Test') 275 | pose_ret.extend(pose_pre.cpu().numpy()) 276 | 277 | pose_abs = cal_absolute_from_relative(pose_ret) 278 | 279 | if args.phase == 'Test': 280 | np.savetxt(dir_time+'/pose_{:d}.txt'.format(seq), pose_ret) 281 | np.savetxt((dir_time + '/{:02d}.txt'.format(seq)), pose_abs) 282 | plot_from_pose(seq=seq, dir_save=dir_time, pose_abs=pose_abs, args=args) 283 | print('Save pose and trajectory in {:s}'.format(dir_time)) 284 | else: 285 | plot_from_pose(seq=seq, dir_save=dir_model, pose_abs=pose_abs, epoch=epoch, args=args) 286 | print('Save trajectory in {:s}'.format(dir_model)) 287 | 288 | 289 | def main(): 290 | torch.set_default_tensor_type('torch.FloatTensor') 291 | model = Net() 292 | if torch.cuda.is_available(): 293 | model = nn.DataParallel(model.cuda(), device_ids=args.gpu) 294 | 295 | # Set weights 296 | print('\n========================================') 297 | print('Phase: {:s}\nNet architecture: {:s}'.format(args.phase, args.net_architecture)) 298 | if args.net_architecture == 'cnn-lstm': 299 | if args.resume == 'cnn': 300 | print('Restore from CNN: {:s}'.format(dir_restore)) 301 | pre_trained_dict = torch.load(dir_restore) 302 | model_dict = model.state_dict() 303 | pre_trained_dict = {k: v for k, v in pre_trained_dict.items() if k in model_dict} # tick the useless dict 304 | model_dict.update(pre_trained_dict) # update the dict 305 | model.load_state_dict(model_dict) # load updated dict into the model 306 | elif args.resume == 'lstm' or args.phase == 'Test': 307 | print('Restore from CNN-LSTM: {:s}'.format(dir_restore)) 308 | model.load_state_dict(torch.load(dir_restore)) 309 | else: 310 | print('Initialize from scratch') 311 | else: 312 | if args.resume == 'Yes' or args.phase == 'Test': 313 | print('Restore from CNN: {:s}'.format(dir_restore)) 314 | model.load_state_dict(torch.load(dir_restore)) 315 | else: 316 | print('Initialize from scratch') 317 | print('========================================') 318 | 319 | # Start training 320 | if args.phase == 'Train': 321 | dir_model, dir_log = pre_create_file_train(model_dir, log_dir, args) 322 | writer = SummaryWriter(dir_log) 323 | loss_func = nn.MSELoss() 324 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_base) 325 | 326 | if args.net_architecture == 'cnn-lstm': 327 | data_set_t_1 = KITTIDataSet(dir_data, dir_label, img_pairs=2, start_interval=1, phase='Train') 328 | data_set_t_2 = KITTIDataSet(dir_data, dir_label, img_pairs=4, start_interval=2, phase='Train') 329 | data_set_t_3 = KITTIDataSet(dir_data, dir_label, img_pairs=8, start_interval=4, phase='Train') 330 | data_set_v = KITTIDataSet(dir_data, dir_label, img_pairs=4, start_interval=40, phase='Valid') 331 | loader_t_1 = DataLoader(data_set_t_1, batch_size=16, shuffle=True, num_workers=args.workers) 332 | loader_t_2 = DataLoader(data_set_t_2, batch_size=8, shuffle=True, num_workers=args.workers) 333 | loader_t_3 = DataLoader(data_set_t_3, batch_size=4, shuffle=True, num_workers=args.workers) 334 | loader_v = DataLoader(data_set_v, batch_size=4, shuffle=False, num_workers=args.workers) 335 | 336 | step_per_epoch = int(math.ceil(len(data_set_t_1) / loader_t_1.batch_size)) 337 | step_val = int(math.floor(step_per_epoch / 3)) # 每个epoch验证3次 338 | 339 | for epoch in np.arange(args.epoch_max): 340 | adjust_learning_rate(optimizer, epoch, args.lr_base, args.lr_decay_rate, args.epoch_lr_decay) 341 | 342 | # test a complete sequence and 
plot trajectory 343 | if epoch != 0 and epoch % args.epoch_test == 0: 344 | run_test(model, seq=9, dir_model=dir_model, epoch=epoch) 345 | run_test(model, seq=5, dir_model=dir_model, epoch=epoch) 346 | 347 | loss_list = [] # 记录每个epoch的loss 348 | loss1_list = [] 349 | loss2_list = [] 350 | for step, (sample_t_1, sample_t_2, sample_t_3) in enumerate(zip(loader_t_1, loader_t_2, loader_t_3)): 351 | tic = time() 352 | step_global = epoch * step_per_epoch + step 353 | loss1, loss2, loss = run_batch_2(sample=[sample_t_1, sample_t_2, sample_t_3], model=model, 354 | loss_func=loss_func, optimizer=optimizer) 355 | hour_per_epoch = step_per_epoch * ((time() - tic) / 3600) 356 | loss_list.append(loss) 357 | loss1_list.append(loss1) 358 | loss2_list.append(loss2) 359 | 360 | # display and add to tensor board 361 | if (step + 1) % 5 == 0: 362 | display_loss_tb(hour_per_epoch, epoch, args, step, step_per_epoch, optimizer, loss, loss1, 363 | loss2, loss_list, loss1_list, loss2_list, writer, step_global) 364 | 365 | if (step + 1) % step_val == 0: 366 | batch_v = int(math.ceil(len(data_set_v) / loader_v.batch_size)) 367 | loss_v, loss1_v, loss2_v = run_val(model, loss_func, loader_v) 368 | display_loss_tb_val(batch_v, loss_v, loss1_v, loss2_v, args, writer, step_global) 369 | 370 | # save 371 | if (epoch + 1) % args.epoch_save == 0: 372 | print('\nSaving model: {:s}/model-{:d}.pkl'.format(dir_model, epoch + 1)) 373 | torch.save(model.state_dict(), (dir_model + '/model-{:d}.pkl'.format(epoch + 1))) 374 | else: 375 | data_set_t = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, samples=args.samples, phase='Train') 376 | data_set_v = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, phase='Valid') 377 | loader_t = DataLoader(data_set_t, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) 378 | loader_v = DataLoader(data_set_v, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) 379 | 380 | step_per_epoch = int(math.floor(len(data_set_t) / loader_t.batch_size)) 381 | step_val = int(math.floor(step_per_epoch / 3)) # 每个epoch验证3次 382 | 383 | for epoch in np.arange(args.epoch_max): 384 | adjust_learning_rate(optimizer, epoch, args.lr_base, args.lr_decay_rate, args.epoch_lr_decay) 385 | 386 | # test a complete sequence and plot trajectory 387 | if epoch != 0 and epoch % args.epoch_test == 0: 388 | run_test(model, seq=9, dir_model=dir_model, epoch=epoch) 389 | run_test(model, seq=5, dir_model=dir_model, epoch=epoch) 390 | 391 | loss_list = [] # 记录每个epoch的loss 392 | loss1_list = [] 393 | loss2_list = [] 394 | for step, sample_t in enumerate(loader_t): 395 | step_global = epoch * step_per_epoch + step 396 | tic = time() 397 | loss, loss1, loss2, _ = \ 398 | run_batch(sample=sample_t, model=model, loss_func=loss_func, optimizer=optimizer, phase='Train') 399 | hour_per_epoch = step_per_epoch * ((time() - tic) / 3600) 400 | loss_list.append(loss) 401 | loss1_list.append(loss1) 402 | loss2_list.append(loss2) 403 | 404 | # display and add to tensor board 405 | if (step+1) % 10 == 0: 406 | display_loss_tb(hour_per_epoch, epoch, args, step, step_per_epoch, optimizer, loss, loss1, 407 | loss2, loss_list, loss1_list, loss2_list, writer, step_global) 408 | 409 | if (step+1) % step_val == 0: 410 | batch_v = int(math.ceil(len(data_set_v)/loader_v.batch_size)) 411 | loss_v, loss1_v, loss2_v = run_val(model, loss_func, loader_v) 412 | display_loss_tb_val(batch_v, loss_v, loss1_v, loss2_v, args, writer, step_global) 413 | 414 | # save 415 | if (epoch+1) % args.epoch_save == 0: 416 | 
print('\nSaving model: {:s}/model-{:d}.pkl'.format(dir_model, epoch+1)) 417 | torch.save(model.state_dict(), (dir_model + '/model-{:d}.pkl'.format(epoch+1))) 418 | 419 | else: 420 | dir_time = pre_create_file_test(args) 421 | for seq in range(11): 422 | run_test(model, seq=seq, dir_time=dir_time) 423 | 424 | 425 | if __name__ == '__main__': 426 | main() 427 | -------------------------------------------------------------------------------- /main_experiment2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Creat Time: 2017-12-27 16:20:53 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | from torch.utils.data import DataLoader 12 | from time import time 13 | from tqdm import tqdm 14 | from net.cnn import * 15 | from dataset.kitti import KITTIDataSet 16 | from utils.post_process import cal_absolute_from_relative, plot_from_pose 17 | from utils.misc import to_var, adjust_learning_rate, pre_create_file_train, pre_create_file_test 18 | import numpy as np 19 | import math 20 | import argparse 21 | from tensorboardX import SummaryWriter 22 | import re 23 | import os 24 | 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('--server', default=None, type=int, help='server') 27 | parser.add_argument('--phase', default=None, help='Train or Test') 28 | parser.add_argument('--resume', default=None, help='Resume or scratch') 29 | 30 | # resume training or test 31 | parser.add_argument('--net_restore', default=None, help='Restore net name') 32 | parser.add_argument('--dir_restore', default=None, help='Restore file name') 33 | parser.add_argument('--model_restore', default=None, help='Restore model-id') 34 | 35 | parser.add_argument('--net_name', default=None, help='different net with different name') 36 | parser.add_argument('--dir0', default=None, help='change it every time when you run the code') 37 | parser.add_argument('--batch_size', default=32, type=int, help='batch size') 38 | parser.add_argument('--epoch_max', default=200, type=int, help='max epoch numbers') 39 | parser.add_argument('--epoch_test', default=5, type=int, help='test a complete sequence') 40 | parser.add_argument('--epoch_save', default=5, type=int, help='max epoch numbers') 41 | parser.add_argument('--lr_base', default=1e-4, type=float, help='base learning rate') 42 | parser.add_argument('--lr_decay_rate', default=0.5, type=float, help='decay rate') 43 | parser.add_argument('--epoch_lr_decay', default=40, type=int, help='every # epoch, lr decay 0.5') 44 | parser.add_argument('--beta', default=50, type=int, help='loss = loss_t + beta * loss_r') 45 | 46 | parser.add_argument("--gpu", default='0', help='GPU id list') 47 | parser.add_argument("--workers", default=4, type=int, help='workers numbers') 48 | args = parser.parse_args() 49 | 50 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu 51 | gpu_list = re.split('[, ]', args.gpu) 52 | args.gpu = range(len(list(filter(None, gpu_list)))) 53 | args.workers = int(args.workers) 54 | args.batch_size = int(args.batch_size) 55 | 56 | 57 | if args.server == 6099: 58 | dir_data = '/media/csc105/Data/dataset-jiange/data_odometry_color/sequences' 59 | dir_label = 'dataset' 60 | elif args.server == 6199: 61 | dir_data = '/media/Data/dataset_jiange/data_odometry_color/sequences' 62 | dir_label = 'dataset' 63 | elif args.server == 6499: 64 | dir_data = 
'/media/jiange/095df4a3-d72c-43d9-bfbd-e78651afba19/dataset-jiange/data_odometry_color/sequences' 65 | dir_label = 'dataset' 66 | else: 67 | raise Exception('Must give the right server id!') 68 | 69 | 70 | def run_batch(sample, model, loss_func, optimizer=None, phase=None): 71 | if phase == 'Train': 72 | model.train() 73 | else: 74 | model.eval() 75 | 76 | img = to_var(sample['img']) # [bs, 6, H, W] 77 | label_pre = model(img) 78 | 79 | if phase == 'Train' or phase == 'Valid': 80 | label = to_var(sample['label']) # [bs, 6] 81 | loss1 = loss_func(label_pre[:, :3], label[:, :3]) 82 | loss2 = loss_func(label_pre[:, 3:], label[:, 3:]) 83 | loss = loss1 + args.beta * loss2 84 | 85 | loss_x = loss_func(label_pre[:, 0], label[:, 0]) 86 | loss_y = loss_func(label_pre[:, 1], label[:, 1]) 87 | loss_z = loss_func(label_pre[:, 2], label[:, 2]) 88 | loss_tx = loss_func(label_pre[:, 3], label[:, 3]) 89 | loss_ty = loss_func(label_pre[:, 4], label[:, 4]) 90 | loss_tz = loss_func(label_pre[:, 5], label[:, 5]) 91 | 92 | if phase == 'Train': 93 | optimizer.zero_grad() # clear gradients for this training step 94 | loss.backward() # bp, compute gradients 95 | optimizer.step() # apply gradients 96 | 97 | return loss.data[0], loss1.data[0], loss2.data[0], label_pre.data, \ 98 | loss_x.data[0], loss_y.data[0], loss_z.data[0], loss_tx.data[0], loss_ty.data[0], loss_tz.data[0] 99 | else: 100 | return label_pre.data 101 | 102 | 103 | def run_val(model, loss_func, loader): 104 | """ 105 | evaluate multi-batches 106 | :param model: model 107 | :param loss_func: MSELoss 108 | :param loader: loader_v 109 | :return: mean loss 110 | """ 111 | loss_ret = [] 112 | loss1_ret = [] 113 | loss2_ret = [] 114 | loss_x_ret = [] 115 | loss_y_ret = [] 116 | loss_z_ret = [] 117 | loss_tx_ret = [] 118 | loss_ty_ret = [] 119 | loss_tz_ret = [] 120 | for _, sample_v in enumerate(loader): 121 | loss_v, loss1_v, loss2_v, _, loss_x_v, loss_y_v, loss_z_v, loss_tx_v, loss_ty_v, loss_tz_v = \ 122 | run_batch(sample=sample_v, model=model, loss_func=loss_func, phase='Valid') 123 | loss_ret.append(loss_v) 124 | loss1_ret.append(loss1_v) 125 | loss2_ret.append(loss2_v) 126 | loss_x_ret.append(loss_x_v) 127 | loss_y_ret.append(loss_y_v) 128 | loss_z_ret.append(loss_z_v) 129 | loss_tx_ret.append(loss_tx_v) 130 | loss_ty_ret.append(loss_ty_v) 131 | loss_tz_ret.append(loss_tz_v) 132 | loss_mean = np.mean(loss_ret) 133 | loss1_mean = np.mean(loss1_ret) 134 | loss2_mean = np.mean(loss2_ret) 135 | loss_x_mean = np.mean(loss_x_ret) 136 | loss_y_mean = np.mean(loss_y_ret) 137 | loss_z_mean = np.mean(loss_z_ret) 138 | loss_tx_mean = np.mean(loss_tx_ret) 139 | loss_ty_mean = np.mean(loss_ty_ret) 140 | loss_tz_mean = np.mean(loss_tz_ret) 141 | 142 | return loss_mean, loss1_mean, loss2_mean, loss_x_mean, loss_y_mean, loss_z_mean, \ 143 | loss_tx_mean, loss_ty_mean, loss_tz_mean 144 | 145 | 146 | def run_test(model, loss_func, seq, dir_model=None, epoch=None, dir_time=None, is_testing=True): 147 | print('\nTest sequence {:02d} >>>'.format(seq)) 148 | data_set = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, phase='Test', seq=seq) 149 | loader = DataLoader(data_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) 150 | pose_ret = [] 151 | for _, sample_batch in enumerate(tqdm(loader)): 152 | pose_pre = run_batch(sample=sample_batch, model=model, loss_func=loss_func, phase='Test') 153 | pose_ret.extend(pose_pre.cpu().numpy()) 154 | 155 | if is_testing: 156 | print('Save pose in {:s}'.format(dir_time)) 157 | 
np.savetxt(dir_time+'/pose_{:d}.txt'.format(seq), pose_ret) 158 | cal_absolute_from_relative(seq, dir_test=dir_time, is_testing=is_testing) 159 | else: 160 | print('Calculate absolute pose') 161 | pose_abs = cal_absolute_from_relative(seq, xyz_euler=pose_ret, is_testing=is_testing) 162 | print('Plot trajectory') 163 | plot_from_pose(seq=seq, dir_save=dir_model, pose_abs=pose_abs, epoch=epoch) 164 | 165 | del data_set 166 | 167 | 168 | def main(): 169 | torch.set_default_tensor_type('torch.FloatTensor') 170 | model = CNN() 171 | if torch.cuda.is_available(): 172 | model = nn.DataParallel(model.cuda(), device_ids=args.gpu) 173 | 174 | if (args.phase == 'Train' and args.resume == 'Yes') or args.phase == 'Test': 175 | dir_restore = 'model/' + args.net_restore + '/' + args.dir_restore + '/' + args.model_restore + '.pkl' 176 | print('\nRestore from {:s}'.format(dir_restore)) 177 | model.load_state_dict(torch.load(dir_restore)) 178 | 179 | if args.phase == 'Train': 180 | if args.resume == 'No': 181 | print('\nInitialize from scratch') 182 | dir_model, dir_log = pre_create_file_train(args) 183 | data_set_t = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, phase='Train') 184 | loader_t = DataLoader(data_set_t, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) 185 | # data_set_v = KITTIDataSet(dir_data=dir_data, dir_label=dir_label, phase='Val') 186 | # loader_v = DataLoader(data_set_v, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) 187 | 188 | loss_func = nn.MSELoss() 189 | optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_base) 190 | step_per_epoch = int(math.floor(len(data_set_t)/loader_t.batch_size)) 191 | # step_val = int(math.floor(step_per_epoch / 3)) 192 | 193 | writer = SummaryWriter(dir_log) 194 | for epoch in np.arange(args.epoch_max): 195 | adjust_learning_rate(optimizer, epoch, args.lr_base, 196 | gamma=args.lr_decay_rate, 197 | epoch_lr_decay=args.epoch_lr_decay) 198 | 199 | # plot trajectory 200 | # if epoch % args.epoch_test == 0: 201 | # run_test(model, loss_func, seq=9, dir_model=dir_model, epoch=epoch, is_testing=False) 202 | # run_test(model, loss_func, seq=5, dir_model=dir_model, epoch=epoch, is_testing=False) 203 | 204 | for step, sample_t in enumerate(loader_t): 205 | step_global = epoch * step_per_epoch + step 206 | tic = time() 207 | loss, loss1, loss2, _, loss_x, loss_y, loss_z, loss_tx, loss_ty, loss_tz = \ 208 | run_batch(sample=sample_t, model=model, loss_func=loss_func, optimizer=optimizer, phase='Train') 209 | hour_per_epoch = step_per_epoch * ((time() - tic) / 3600) 210 | 211 | # display and add to tensor board 212 | if (step+1) % 10 == 0: 213 | print('\n{:.3f} [{:03d}/{:03d}] [{:03d}/{:03d}] lr {:.6f} L {:.4f}={:.4f}+{:d}*{:.4f} ' 214 | '[{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}]'. 
215 | format(hour_per_epoch, epoch+1, args.epoch_max, step+1, step_per_epoch, 216 | optimizer.param_groups[0]['lr'], loss, loss1, args.beta, loss2, 217 | loss_x, loss_y, loss_z, loss_tx, loss_ty, loss_tz)) 218 | writer.add_scalars('./train', 219 | {'loss_t': loss, 'loss1_t': loss1, 'loss2_t': loss2, 'loss_x_t': loss_x, 220 | 'loss_y_t': loss_y, 'loss_z_t': loss_z, 'loss_tx_t': loss_tx, 221 | 'loss_ty_t': loss_ty, 'loss_tz_t': loss_tz}, 222 | step_global) 223 | 224 | # if (step+1) % step_val == 0: 225 | # batch_v = int(math.ceil(len(data_set_v)/loader_v.batch_size)) 226 | # loss_v, loss1_v, loss2_v, loss_x_v, loss_y_v, loss_z_v, loss_tx_v, loss_ty_v, loss_tz_v = \ 227 | # run_val(model, loss_func, loader_v) 228 | # print('\n{:d} batches: L {:.4f}={:.4f}+{:d}*{:.4f} [{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}]'. 229 | # format(batch_v, loss_v, loss1_v, args.beta, loss2_v, loss_x_v, loss_y_v, loss_z_v, loss_tx_v, 230 | # loss_ty_v, loss_tz_v)) 231 | # writer.add_scalars('./train-val', 232 | # {'loss_v': loss_v, 'loss1_v': loss1_v, 'loss2_v': loss2_v, 'loss_x_v': loss_x_v, 233 | # 'loss_y_v': loss_y_v, 'loss_z_v': loss_z_v, 'loss_tx_v': loss_tx_v, 234 | # 'loss_ty_v': loss_ty_v, 'loss_tz_v': loss_tz_v}, 235 | # step_global) 236 | 237 | # save 238 | if (epoch+1) % args.epoch_save == 0: 239 | print('\nSaving model: {:s}/model-{:d}.pkl'.format(dir_model, epoch+1)) 240 | torch.save(model.state_dict(), (dir_model + '/model-{:d}.pkl'.format(epoch+1))) 241 | 242 | if args.phase == 'Test': 243 | dir_time = pre_create_file_test(args) 244 | loss_func = nn.MSELoss() 245 | for seq in range(11): 246 | run_test(model, loss_func, seq=seq, dir_time=dir_time, is_testing=True) 247 | 248 | 249 | if __name__ == '__main__': 250 | main() 251 | -------------------------------------------------------------------------------- /net/cnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn.init import xavier_normal 4 | 5 | 6 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 7 | if batch_norm: 8 | return nn.Sequential( 9 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 10 | nn.BatchNorm2d(c_out), 11 | nn.ReLU(), 12 | ) 13 | else: 14 | return nn.Sequential( 15 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 16 | nn.ReLU(), 17 | ) 18 | 19 | 20 | def fc(c_in, c_out, activation=False): 21 | if activation: 22 | return nn.Sequential( 23 | nn.Linear(c_in, c_out), 24 | nn.ReLU(), 25 | ) 26 | else: 27 | return nn.Linear(c_in, c_out) 28 | 29 | 30 | class Net(nn.Module): 31 | def __init__(self, batch_norm=False): 32 | super(Net, self).__init__() 33 | self.batch_norm = batch_norm 34 | self.conv1 = conv(self.batch_norm, 6, 64, ks=7, sd=2) 35 | self.conv2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 36 | self.conv3 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 37 | self.conv3_1 = conv(self.batch_norm, 256, 256) 38 | self.conv4 = conv(self.batch_norm, 256, 512, sd=2) 39 | self.conv4_1 = conv(self.batch_norm, 512, 512) 40 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 41 | self.conv5_1 = conv(self.batch_norm, 512, 512) 42 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 43 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 44 | self.pool_1 = nn.MaxPool2d(2, stride=2) 45 | self.dropout1 = nn.Dropout(0.5) 46 | self.fc_1 = fc(1024*3*10, 4096, activation=True) 47 | self.dropout2 = nn.Dropout(0.5) 48 | self.fc_2 = fc(4096, 1024, activation=True) 49 | self.fc_3 = fc(1024, 
128, activation=True) 50 | self.fc_4 = fc(128, 6) 51 | 52 | for m in self.modules(): 53 | if isinstance(m, nn.Conv2d): 54 | xavier_normal(m.weight.data) 55 | if m.bias is not None: 56 | m.bias.data.zero_() 57 | elif isinstance(m, nn.BatchNorm2d): 58 | m.weight.data.fill_(1) 59 | m.bias.data.zero_() 60 | 61 | def forward(self, x1, x2): 62 | # x1: NxCxHxW 63 | x = torch.cat([x1, x2], dim=1) 64 | x = self.conv2(self.conv1(x)) 65 | x = self.conv3_1(self.conv3(x)) 66 | x = self.conv4_1(self.conv4(x)) 67 | x = self.conv5_1(self.conv5(x)) 68 | x = self.conv6_1(self.conv6(x)) 69 | x = self.pool_1(x) 70 | x = x.view(x.size(0), -1) 71 | x = self.dropout1(x) 72 | x = self.fc_1(x) 73 | x = self.dropout2(x) 74 | x = self.fc_2(x) 75 | x = self.fc_3(x) 76 | x = self.fc_4(x) 77 | 78 | return x 79 | 80 | def weight_parameters(self): 81 | return [param for name, param in self.named_parameters() if 'weight' in name] 82 | 83 | def bias_parameters(self): 84 | return [param for name, param in self.named_parameters() if 'bias' in name] 85 | 86 | 87 | def main(): 88 | net = Net() 89 | print(net) 90 | 91 | 92 | if __name__ == '__main__': 93 | main() 94 | -------------------------------------------------------------------------------- /net/cnn_increase_kernal_size.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-02 11:04:08 7 | Program: 8 | Description: 9 | 10 | 增大卷积大小 11 | 12 | """ 13 | import torch.nn as nn 14 | from torch.nn.init import xavier_normal 15 | 16 | 17 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 18 | if batch_norm: 19 | return nn.Sequential( 20 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 21 | nn.BatchNorm2d(c_out), 22 | nn.ReLU(), 23 | ) 24 | else: 25 | return nn.Sequential( 26 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 27 | nn.ReLU(), 28 | ) 29 | 30 | 31 | def fc(c_in, c_out, activation=False): 32 | if activation: 33 | return nn.Sequential( 34 | nn.Linear(c_in, c_out), 35 | nn.ReLU(), 36 | ) 37 | else: 38 | return nn.Linear(c_in, c_out) 39 | 40 | 41 | class Net(nn.Module): 42 | def __init__(self, batch_norm=False): 43 | super(Net, self).__init__() 44 | self.batch_norm = batch_norm 45 | self.conv1 = conv(self.batch_norm, 6, 64, ks=11, sd=2) 46 | self.conv2 = conv(self.batch_norm, 64, 128, ks=7, sd=2) 47 | self.conv3 = conv(self.batch_norm, 128, 256, ks=7, sd=2) 48 | self.conv3_1 = conv(self.batch_norm, 256, 256, ks=5) 49 | self.conv4 = conv(self.batch_norm, 256, 512, ks=5, sd=2) 50 | self.conv4_1 = conv(self.batch_norm, 512, 512) 51 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 52 | self.conv5_1 = conv(self.batch_norm, 512, 512) 53 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 54 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 55 | self.pool_1 = nn.MaxPool2d(2, stride=2) 56 | self.dropout1 = nn.Dropout(0.5) 57 | self.fc_1 = fc(1024*3*10, 4096, activation=True) 58 | self.dropout2 = nn.Dropout(0.5) 59 | self.fc_2 = fc(4096, 1024, activation=True) 60 | self.fc_3 = fc(1024, 128, activation=True) 61 | self.fc_4 = fc(128, 6) 62 | 63 | for m in self.modules(): 64 | if isinstance(m, nn.Conv2d): 65 | xavier_normal(m.weight.data) 66 | if m.bias is not None: 67 | m.bias.data.zero_() 68 | elif isinstance(m, nn.BatchNorm2d): 69 | m.weight.data.fill_(1) 70 | m.bias.data.zero_() 71 | 72 | def forward(self, x): 73 | x = 
self.conv2(self.conv1(x)) 74 | x = self.conv3_1(self.conv3(x)) 75 | x = self.conv4_1(self.conv4(x)) 76 | x = self.conv5_1(self.conv5(x)) 77 | x = self.conv6_1(self.conv6(x)) 78 | x = self.pool_1(x) 79 | x = x.view(x.size(0), -1) 80 | x = self.dropout1(x) 81 | x = self.fc_1(x) 82 | x = self.dropout2(x) 83 | x = self.fc_2(x) 84 | x = self.fc_3(x) 85 | x = self.fc_4(x) 86 | 87 | return x 88 | 89 | def weight_parameters(self): 90 | return [param for name, param in self.named_parameters() if 'weight' in name] 91 | 92 | def bias_parameters(self): 93 | return [param for name, param in self.named_parameters() if 'bias' in name] 94 | 95 | 96 | def main(): 97 | import numpy as np 98 | import torch 99 | from torch.autograd import Variable 100 | torch.set_default_tensor_type('torch.FloatTensor') 101 | model = Net() 102 | a = np.random.randn(32, 6, 384, 1280).astype(np.float32) 103 | x = torch.from_numpy(a) 104 | 105 | img = Variable(x) # [bs, 6, H, W] 106 | output = model(img) 107 | print(output.size()) 108 | 109 | 110 | if __name__ == '__main__': 111 | main() 112 | -------------------------------------------------------------------------------- /net/cnn_lstm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-02 13:49:52 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import torch.nn as nn 12 | # import torch.nn.functional as F 13 | from torch.nn.init import xavier_normal 14 | 15 | 16 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 17 | if batch_norm: 18 | return nn.Sequential( 19 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 20 | nn.BatchNorm2d(c_out), 21 | nn.ReLU(), 22 | ) 23 | else: 24 | return nn.Sequential( 25 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 26 | nn.ReLU(), 27 | ) 28 | 29 | 30 | def fc(c_in, c_out, activation=False): 31 | if activation: 32 | return nn.Sequential( 33 | nn.Linear(c_in, c_out), 34 | nn.ReLU(), 35 | ) 36 | else: 37 | return nn.Linear(c_in, c_out) 38 | 39 | 40 | class Net(nn.Module): 41 | def __init__(self, batch_norm=False): 42 | super(Net, self).__init__() 43 | self.batch_norm = batch_norm 44 | self.conv1 = conv(self.batch_norm, 6, 64, ks=7, sd=2) 45 | self.conv2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 46 | self.conv3 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 47 | self.conv3_1 = conv(self.batch_norm, 256, 256) 48 | self.conv4 = conv(self.batch_norm, 256, 512, sd=2) 49 | self.conv4_1 = conv(self.batch_norm, 512, 512) 50 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 51 | self.conv5_1 = conv(self.batch_norm, 512, 512) 52 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 53 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 54 | self.pool_1 = nn.MaxPool2d(2, stride=2) 55 | self.dropout1 = nn.Dropout(0.5) 56 | self.lstm = nn.LSTM(input_size=1024*3*10, hidden_size=1024, num_layers=2, batch_first=True, dropout=0.5) 57 | self.dropout2 = nn.Dropout(0.5) 58 | self.fc_lstm_1 = fc(1024, 128, activation=True) 59 | self.fc_lstm_2 = fc(128, 6) 60 | 61 | for m in self.modules(): 62 | if isinstance(m, nn.Conv2d): 63 | xavier_normal(m.weight.data) 64 | if m.bias is not None: 65 | m.bias.data.zero_() 66 | elif isinstance(m, nn.BatchNorm2d): 67 | m.weight.data.fill_(1) 68 | m.bias.data.zero_() 69 | 70 | def forward(self, x1, x2): 71 | N, T, C, H, W = tuple(x1.size()) 72 | x1 = x1.view(-1, C, H, W) 73 | 
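# Note: both [N, T, C, H, W] clips are flattened to [N*T, C, H, W] here so that the
# convolutional stack below, which is shared across time, processes every image pair
# in one pass; the pooled features are reshaped back to [N, T, 1024*3*10] right before
# the LSTM, which then yields one 6-DoF pose per time step. The 1024*3*10 feature size
# assumes inputs of roughly 384x1280, as used elsewhere in this repo: six stride-2
# convolutions plus the final max-pool divide H and W by 128 (384/128 = 3, 1280/128 = 10).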
x2 = x2.view(-1, C, H, W) 74 | x = torch.cat([x1, x2], dim=1) 75 | x = self.conv2(self.conv1(x)) 76 | x = self.conv3_1(self.conv3(x)) 77 | x = self.conv4_1(self.conv4(x)) 78 | x = self.conv5_1(self.conv5(x)) 79 | x = self.conv6_1(self.conv6(x)) 80 | x = self.pool_1(x) # [N, T, 1024, 3, 10] 81 | x = x.view(N, -1, 1024*3*10) # N x T x 30720 82 | x = self.dropout1(x) 83 | self.lstm.flatten_parameters() # cuda加速 84 | x, _ = self.lstm(x) # N x T x 1024 85 | x = self.dropout2(x) 86 | x = x.contiguous().view(-1, 1024) # (NxT)x1024 87 | x = self.fc_lstm_1(x) 88 | x = self.fc_lstm_2(x) 89 | 90 | return x 91 | 92 | def weight_parameters(self): 93 | return [param for name, param in self.named_parameters() if 'weight' in name] 94 | 95 | def bias_parameters(self): 96 | return [param for name, param in self.named_parameters() if 'bias' in name] 97 | 98 | 99 | def main(): 100 | net = Net() 101 | print(net) 102 | 103 | 104 | if __name__ == '__main__': 105 | main() 106 | -------------------------------------------------------------------------------- /net/cnn_seperate_conv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-08 18:07:50 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import torch.nn as nn 12 | from torch.nn.init import xavier_normal 13 | 14 | 15 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 16 | if batch_norm: 17 | return nn.Sequential( 18 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 19 | nn.BatchNorm2d(c_out), 20 | nn.ReLU(), 21 | ) 22 | else: 23 | return nn.Sequential( 24 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 25 | nn.ReLU(), 26 | ) 27 | 28 | 29 | def fc(c_in, c_out, activation=False): 30 | if activation: 31 | return nn.Sequential( 32 | nn.Linear(c_in, c_out), 33 | nn.ReLU(), 34 | ) 35 | else: 36 | return nn.Linear(c_in, c_out) 37 | 38 | 39 | class Net(nn.Module): 40 | def __init__(self, batch_norm=False): 41 | super(Net, self).__init__() 42 | self.batch_norm = batch_norm 43 | self.conv1 = conv(self.batch_norm, 3, 64, ks=7, sd=2) 44 | self.conv2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 45 | self.conv3 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 46 | self.conv3_1 = conv(self.batch_norm, 256, 256) 47 | self.conv4 = conv(self.batch_norm, 256, 512, sd=2) 48 | self.conv4_1 = conv(self.batch_norm, 512, 512) 49 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 50 | self.conv5_1 = conv(self.batch_norm, 512, 512) 51 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 52 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 53 | self.pool_1 = nn.MaxPool2d(2, stride=2) 54 | self.dropout1 = nn.Dropout(0.5) 55 | self.fc_1 = fc(1024 * 3 * 10, 2048, activation=True) 56 | 57 | self.conv1_2 = conv(self.batch_norm, 3, 64, ks=7, sd=2) 58 | self.conv2_2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 59 | self.conv3_2 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 60 | self.conv3_1_2 = conv(self.batch_norm, 256, 256) 61 | self.conv4_2 = conv(self.batch_norm, 256, 512, sd=2) 62 | self.conv4_1_2 = conv(self.batch_norm, 512, 512) 63 | self.conv5_2 = conv(self.batch_norm, 512, 512, sd=2) 64 | self.conv5_1_2 = conv(self.batch_norm, 512, 512) 65 | self.conv6_2 = conv(self.batch_norm, 512, 1024, sd=2) 66 | self.conv6_1_2 = conv(self.batch_norm, 1024, 1024) 67 | self.pool_1_2 = nn.MaxPool2d(2, stride=2) 68 | self.dropout1_2 = 
nn.Dropout(0.5) 69 | self.fc_1_2 = fc(1024*3*10, 2048, activation=True) 70 | 71 | self.dropout2 = nn.Dropout(0.5) 72 | self.fc_2 = fc(4096, 1024, activation=True) 73 | self.fc_3 = fc(1024, 128, activation=True) 74 | self.fc_4 = fc(128, 6) 75 | 76 | for m in self.modules(): 77 | if isinstance(m, nn.Conv2d): 78 | xavier_normal(m.weight.data) 79 | if m.bias is not None: 80 | m.bias.data.zero_() 81 | elif isinstance(m, nn.BatchNorm2d): 82 | m.weight.data.fill_(1) 83 | m.bias.data.zero_() 84 | 85 | def forward(self, x1, x2): 86 | x1 = self.conv2(self.conv1(x1)) 87 | x1 = self.conv3_1(self.conv3(x1)) 88 | x1 = self.conv4_1(self.conv4(x1)) 89 | x1 = self.conv5_1(self.conv5(x1)) 90 | x1 = self.conv6_1(self.conv6(x1)) 91 | x1 = self.pool_1(x1) 92 | x1 = x1.view(x1.size(0), -1) 93 | x1 = self.dropout1(x1) 94 | x1 = self.fc_1(x1) # NxD 95 | 96 | x2 = self.conv2_2(self.conv1_2(x2)) 97 | x2 = self.conv3_1_2(self.conv3_2(x2)) 98 | x2 = self.conv4_1_2(self.conv4_2(x2)) 99 | x2 = self.conv5_1_2(self.conv5_2(x2)) 100 | x2 = self.conv6_1_2(self.conv6_2(x2)) 101 | x2 = self.pool_1_2(x2) 102 | x2 = x2.view(x2.size(0), -1) 103 | x2 = self.dropout1_2(x2) 104 | x2 = self.fc_1_2(x2) 105 | 106 | x = torch.cat((x1, x2), dim=1) 107 | x = self.dropout2(x) 108 | x = self.fc_2(x) 109 | x = self.fc_3(x) 110 | x = self.fc_4(x) 111 | 112 | return x 113 | 114 | def weight_parameters(self): 115 | return [param for name, param in self.named_parameters() if 'weight' in name] 116 | 117 | def bias_parameters(self): 118 | return [param for name, param in self.named_parameters() if 'bias' in name] 119 | 120 | 121 | def main(): 122 | net = Net() 123 | print(net) 124 | 125 | 126 | if __name__ == '__main__': 127 | main() 128 | -------------------------------------------------------------------------------- /net/cnn_seperate_conv_1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-08 21:24:01 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import torch.nn as nn 12 | from torch.nn.init import xavier_normal 13 | 14 | 15 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 16 | if batch_norm: 17 | return nn.Sequential( 18 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 19 | nn.BatchNorm2d(c_out), 20 | nn.ReLU(), 21 | ) 22 | else: 23 | return nn.Sequential( 24 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 25 | nn.ReLU(), 26 | ) 27 | 28 | 29 | def fc(c_in, c_out, activation=False): 30 | if activation: 31 | return nn.Sequential( 32 | nn.Linear(c_in, c_out), 33 | nn.ReLU(), 34 | ) 35 | else: 36 | return nn.Linear(c_in, c_out) 37 | 38 | 39 | class Net(nn.Module): 40 | def __init__(self, batch_norm=False): 41 | super(Net, self).__init__() 42 | self.batch_norm = batch_norm 43 | self.conv1 = conv(self.batch_norm, 3, 64, ks=7, sd=2) 44 | self.conv2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 45 | self.conv3 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 46 | self.conv3_1 = conv(self.batch_norm, 256, 256) 47 | self.conv4 = conv(self.batch_norm, 256, 512, sd=2) 48 | self.conv4_1 = conv(self.batch_norm, 512, 512) 49 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 50 | self.conv5_1 = conv(self.batch_norm, 512, 512) 51 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 52 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 53 | self.pool_1 = nn.MaxPool2d(2, stride=2) 54 | self.dropout1 
= nn.Dropout(0.5) 55 | self.fc_1 = fc(1024 * 3 * 10, 4096, activation=True) 56 | 57 | self.dropout2 = nn.Dropout(0.5) 58 | self.fc_2 = fc(4096*2, 1024, activation=True) 59 | self.fc_3 = fc(1024, 128, activation=True) 60 | self.fc_4 = fc(128, 6) 61 | 62 | for m in self.modules(): 63 | if isinstance(m, nn.Conv2d): 64 | xavier_normal(m.weight.data) 65 | if m.bias is not None: 66 | m.bias.data.zero_() 67 | elif isinstance(m, nn.BatchNorm2d): 68 | m.weight.data.fill_(1) 69 | m.bias.data.zero_() 70 | 71 | def block(self, x): 72 | x = self.conv2(self.conv1(x)) 73 | x = self.conv3_1(self.conv3(x)) 74 | x = self.conv4_1(self.conv4(x)) 75 | x = self.conv5_1(self.conv5(x)) 76 | x = self.conv6_1(self.conv6(x)) 77 | x = self.pool_1(x) 78 | x = x.view(x.size(0), -1) 79 | x = self.dropout1(x) 80 | x = self.fc_1(x) # NxD 81 | 82 | return x 83 | 84 | def forward(self, x1, x2): 85 | x1 = self.block(x1) 86 | x2 = self.block(x2) 87 | x = torch.cat((x1, x2), dim=1) 88 | x = self.dropout2(x) 89 | x = self.fc_2(x) 90 | x = self.fc_3(x) 91 | x = self.fc_4(x) 92 | 93 | return x 94 | 95 | def weight_parameters(self): 96 | return [param for name, param in self.named_parameters() if 'weight' in name] 97 | 98 | def bias_parameters(self): 99 | return [param for name, param in self.named_parameters() if 'bias' in name] 100 | 101 | 102 | def main(): 103 | net = Net() 104 | print(net) 105 | 106 | 107 | if __name__ == '__main__': 108 | main() 109 | -------------------------------------------------------------------------------- /net/cnn_tb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-01 11:37:50 7 | Program: 8 | Description: 9 | 10 | 用两个分支分别回归xyz和euler 11 | 12 | """ 13 | import torch 14 | import torch.nn as nn 15 | from torch.nn.init import xavier_normal 16 | 17 | 18 | def conv(batch_norm, c_in, c_out, ks=3, sd=1): 19 | if batch_norm: 20 | return nn.Sequential( 21 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=False), 22 | nn.BatchNorm2d(c_out), 23 | nn.ReLU(), 24 | ) 25 | else: 26 | return nn.Sequential( 27 | nn.Conv2d(c_in, c_out, kernel_size=ks, stride=sd, padding=(ks-1)//2, bias=True), 28 | nn.ReLU(), 29 | ) 30 | 31 | 32 | def fc(c_in, c_out, activation=False): 33 | if activation: 34 | return nn.Sequential( 35 | nn.Linear(c_in, c_out), 36 | nn.ReLU(), 37 | ) 38 | else: 39 | return nn.Linear(c_in, c_out) 40 | 41 | 42 | class Net(nn.Module): 43 | def __init__(self, batch_norm=False): 44 | super(Net, self).__init__() 45 | self.batch_norm = batch_norm 46 | self.conv1 = conv(self.batch_norm, 6, 64, ks=7, sd=2) 47 | self.conv2 = conv(self.batch_norm, 64, 128, ks=5, sd=2) 48 | self.conv3 = conv(self.batch_norm, 128, 256, ks=5, sd=2) 49 | self.conv3_1 = conv(self.batch_norm, 256, 256) 50 | self.conv4 = conv(self.batch_norm, 256, 512, sd=2) 51 | self.conv4_1 = conv(self.batch_norm, 512, 512) 52 | self.conv5 = conv(self.batch_norm, 512, 512, sd=2) 53 | self.conv5_1 = conv(self.batch_norm, 512, 512) 54 | self.conv6 = conv(self.batch_norm, 512, 1024, sd=2) 55 | self.conv6_1 = conv(self.batch_norm, 1024, 1024) 56 | self.pool_1 = nn.MaxPool2d(2, stride=2) 57 | self.dropout1 = nn.Dropout(0.5) 58 | self.fc1 = fc(1024*3*10, 4096, activation=True) 59 | self.dropout2 = nn.Dropout(0.5) 60 | self.fc2 = fc(4096, 1024, activation=True) 61 | self.fc3 = fc(1024, 128, activation=True) 62 | self.fc4 = fc(128, 3) 63 | self.dropout2_1 = 
nn.Dropout(0.5) 64 | self.fc2_1 = fc(4096, 1024, activation=True) 65 | self.fc3_1 = fc(1024, 128, activation=True) 66 | self.fc4_1 = fc(128, 3) 67 | 68 | for m in self.modules(): 69 | if isinstance(m, nn.Conv2d): 70 | xavier_normal(m.weight.data) 71 | if m.bias is not None: 72 | m.bias.data.zero_() 73 | elif isinstance(m, nn.BatchNorm2d): 74 | m.weight.data.fill_(1) 75 | m.bias.data.zero_() 76 | 77 | def forward(self, x): 78 | x = self.conv2(self.conv1(x)) 79 | x = self.conv3_1(self.conv3(x)) 80 | x = self.conv4_1(self.conv4(x)) 81 | x = self.conv5_1(self.conv5(x)) 82 | x = self.conv6_1(self.conv6(x)) 83 | x = self.pool_1(x) 84 | x = x.view(x.size(0), -1) 85 | x = self.dropout1(x) 86 | x = self.fc1(x) 87 | x_1 = self.dropout2(x) 88 | x_1 = self.fc2(x_1) 89 | x_1 = self.fc3(x_1) 90 | x_1 = self.fc4(x_1) # Nx3 91 | 92 | x_2 = self.dropout2_1(x) 93 | x_2 = self.fc2_1(x_2) 94 | x_2 = self.fc3_1(x_2) 95 | x_2 = self.fc4_1(x_2) 96 | 97 | x = torch.cat((x_1, x_2), dim=1) 98 | 99 | return x 100 | 101 | def weight_parameters(self): 102 | return [param for name, param in self.named_parameters() if 'weight' in name] 103 | 104 | def bias_parameters(self): 105 | return [param for name, param in self.named_parameters() if 'bias' in name] 106 | 107 | 108 | def main(): 109 | net = Net() 110 | print(net) 111 | 112 | 113 | if __name__ == '__main__': 114 | main() 115 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Pytorch-DeepVO 2 | 3 | This is an Implementation of DeepVO with CNN / CNN-LSTM. 4 | 5 | As for the experiment results, you can read my [Master's thesis](http://kns.cnki.net/KCMS/detail/detail.aspx?dbcode=CMFD&dbname=CMFD201802&filename=1018186763.nh&v=MTcyNDNMdXhZUzdEaDFUM3FUcldNMUZyQ1VSTE9mWnVkc0ZDbmdWYnJJVkYyNkZyS3dHTmJLckpFYlBJUjhlWDE=), or go to [Zhihu](https://www.zhihu.com/question/65068625/answer/256306051) for detailed discussion. 
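Both network variants regress a 6-DoF relative pose per image pair: the first three outputs are the translation (x, y, z) and the last three the Euler rotation angles (see the `--beta` option, `loss = loss_t + beta * loss_r`, and `net/cnn_tb.py`). At test time these frame-to-frame predictions are chained into an absolute trajectory in the KITTI 3x4 `[R|t]` row format by `utils/post_process.cal_absolute_from_relative`. The snippet below is only an illustrative sketch of that chaining, not the repo's implementation: it assumes a Z-Y-X Euler convention (the convention actually used lives in `utils/preprocess.py`), and the helper names are made up for the example.

```python
import numpy as np


def euler_to_rot(rx, ry, rz):
    """Rotation matrix from Euler angles, assuming a Z-Y-X (yaw-pitch-roll) convention."""
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    rot_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return rot_z.dot(rot_y).dot(rot_x)


def chain_relative_poses(rel_poses):
    """Accumulate [x, y, z, rx, ry, rz] frame-to-frame poses into absolute 3x4 [R|t] rows."""
    t_abs = np.eye(4)
    rows = [t_abs[:3, :].reshape(-1)]          # first pose is the identity, as in the ground-truth files
    for x, y, z, rx, ry, rz in rel_poses:
        t_rel = np.eye(4)
        t_rel[:3, :3] = euler_to_rot(rx, ry, rz)
        t_rel[:3, 3] = [x, y, z]
        t_abs = t_abs.dot(t_rel)               # compose with the motion to the next frame
        rows.append(t_abs[:3, :].reshape(-1))  # 12 values per row, KITTI layout
    return np.stack(rows)


# one metre forward per frame and no rotation -> a straight line along z
print(chain_relative_poses([[0, 0, 1, 0, 0, 0]] * 3)[:, 11])   # [0. 1. 2. 3.]
```

Because every prediction is frame-to-frame, errors accumulate over a sequence, which is why the KITTI evaluation code reports translation and rotation error as a function of path length.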
6 | 7 | ## Code structure 8 | 9 | - dataset 10 | 11 | Dataset loading and preprocessing code 12 | 13 | - net 14 | 15 | Network architecture definitions 16 | 17 | - utils 18 | 19 | Miscellaneous helper functions 20 | 21 | - evaluation 22 | 23 | Post-processing of test results, including error curves and trajectory plots 24 | 25 | - main.py 26 | 27 | Main entry point 28 | 29 | - evaluation.sh 30 | 31 | Evaluates the test results 32 | 33 | ## Other data kept on the server 34 | 35 | - the dataset 36 | 37 | - log 38 | 39 | Losses and other scalars saved during training, for TensorBoard visualization 40 | 41 | - model 42 | 43 | Saved network weights, used to resume training or to run tests 44 | 45 | - test 46 | 47 | Test results 48 | 49 | ## Usage examples 50 | 51 | Train from scratch 52 | ```bash 53 | $ python main.py \ 54 | --server=6499 \ 55 | --net_architecture=cnn \ 56 | --phase=Train \ 57 | --resume=No \ 58 | --net_name=cnn-vo \ 59 | --dir0=20180109 \ 60 | --gpu=0 \ 61 | ``` 62 | 63 | Resume training 64 | ```bash 65 | $ python main.py \ 66 | --server=6499 \ 67 | --net_architecture=cnn \ 68 | --phase=Train \ 69 | --resume=Yes \ 70 | --net_restore=cnn-vo \ 71 | --dir_restore=20180101 \ 72 | --model_restore=model-120 \ 73 | --net_name=cnn-vo-cons \ 74 | --dir0=20180103 \ 75 | --epoch_test=10 \ 76 | --gpu=2,3 \ 77 | ``` 78 | 79 | Test 80 | ```bash 81 | $ python main.py --server=6499 --net_architecture=cnn-lstm --phase=Test --img_pairs=2 --net_restore=cnn-lstm-vo --dir_restore=20180114 --model_restore=model-100 --gpu=2 82 | ``` 83 | -------------------------------------------------------------------------------- /rnn/modules.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-14 14:31:15 7 | Program: 8 | Description: 9 | """ 10 | import math 11 | import torch 12 | import warnings 13 | from torch.nn import Module, Parameter 14 | import torch.nn.functional as F 15 | from torch.autograd import Variable 16 | 17 | 18 | def clip_grad(v, min, max): 19 | v.register_hook(lambda g: g.clamp(min, max)) 20 | return v 21 | 22 | 23 | class RNNCellBase(Module): 24 | def __repr__(self): 25 | s = '{name}({input_size}, {hidden_size}' 26 | if 'bias' in self.__dict__ and self.bias is not True: 27 | s += ', bias={bias}' 28 | if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": 29 | s += ', nonlinearity={nonlinearity}' 30 | s += ')' 31 | return s.format(name=self.__class__.__name__, **self.__dict__) 32 | 33 | 34 | class RNNCell(RNNCellBase): 35 | def __init__(self, input_size, hidden_size, bias=True, grad_clip=None): 36 | super(RNNCell, self).__init__() 37 | self.input_size = input_size 38 | self.hidden_size = hidden_size 39 | self.grad_clip = grad_clip 40 | 41 | self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size)) 42 | self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size)) 43 | if bias: 44 | self.bias = Parameter(torch.Tensor(hidden_size)) 45 | else: 46 | self.register_parameter('bias', None) 47 | 48 | self.reset_parameters() 49 | 50 | def reset_parameters(self): 51 | stdv = 1.0 / math.sqrt(self.hidden_size) 52 | for weight in self.parameters(): 53 | weight.data.uniform_(-stdv, stdv) 54 | 55 | def forward(self, input, h): 56 | output = F.linear(input, self.weight_ih, self.bias) + F.linear(h, self.weight_hh) 57 | if self.grad_clip: 58 | output = clip_grad(output, -self.grad_clip, self.grad_clip) # avoid explosive gradient 59 | output = F.relu(output) 60 | 61 | return output 62 | 63 | 64 | class GRUCell(RNNCellBase): 65 | def __init__(self, input_size, hidden_size, bias=True, grad_clip=None): 66 | super(GRUCell, self).__init__() 67 | self.input_size = input_size 68 | self.hidden_size = hidden_size 69 | self.grad_clip =
grad_clip 70 | 71 | self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size)) 72 | self.weight_hh_rz = Parameter(torch.Tensor(2 * hidden_size, hidden_size)) 73 | self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size)) 74 | if bias: 75 | self.bias = Parameter(torch.Tensor(3 * hidden_size)) 76 | else: 77 | self.register_parameter('bias', None) 78 | 79 | self.reset_parameters() 80 | 81 | def reset_parameters(self): 82 | stdv = 1.0 / math.sqrt(self.hidden_size) 83 | for weight in self.parameters(): 84 | weight.data.uniform_(-stdv, stdv) 85 | 86 | def forward(self, input, h): 87 | ih = F.linear(input, self.weight_ih, self.bias) 88 | hh_rz = F.linear(h, self.weight_hh_rz) 89 | 90 | if self.grad_clip: 91 | ih = clip_grad(ih, -self.grad_clip, self.grad_clip) 92 | hh_rz = clip_grad(hh_rz, -self.grad_clip, self.grad_clip) 93 | 94 | r = F.sigmoid(ih[:, :self.hidden_size] + hh_rz[:, :self.hidden_size]) 95 | i = F.sigmoid(ih[:, self.hidden_size: self.hidden_size * 2] + hh_rz[:, self.hidden_size:]) 96 | 97 | hhr = F.linear(h * r, self.weight_hh) 98 | if self.grad_clip: 99 | hhr = clip_grad(hhr, -self.grad_clip, self.grad_clip) 100 | 101 | n = F.relu(ih[:, self.hidden_size * 2:] + hhr) 102 | h = (1 - i) * n + i * h 103 | 104 | return h 105 | 106 | 107 | class LSTMCell(RNNCellBase): 108 | def __init__(self, input_size, hidden_size, bias=True, grad_clip=None): 109 | super(LSTMCell, self).__init__() 110 | self.input_size = input_size 111 | self.hidden_size = hidden_size 112 | self.grad_clip = grad_clip 113 | 114 | self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size)) 115 | self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size)) 116 | if bias: 117 | self.bias = Parameter(torch.Tensor(4 * hidden_size)) 118 | else: 119 | self.register_parameter('bias', None) 120 | 121 | self.reset_parameters() 122 | 123 | def reset_parameters(self): 124 | stdv = 1.0 / math.sqrt(self.hidden_size) 125 | for weight in self.parameters(): 126 | weight.data.uniform_(-stdv, stdv) 127 | 128 | def forward(self, input, hx): 129 | h, c = hx 130 | 131 | pre = F.linear(input, self.weight_ih, self.bias) + F.linear(h, self.weight_hh) 132 | 133 | if self.grad_clip: 134 | pre = clip_grad(pre, -self.grad_clip, self.grad_clip) 135 | 136 | i = F.sigmoid(pre[:, :self.hidden_size]) 137 | f = F.sigmoid(pre[:, self.hidden_size: self.hidden_size * 2]) 138 | # g = F.tanh(pre[:, self.hidden_size * 2: self.hidden_size * 3]) # change to relu 139 | g = F.relu(pre[:, self.hidden_size * 2: self.hidden_size * 3]) 140 | o = F.sigmoid(pre[:, self.hidden_size * 3:]) 141 | 142 | c = f * c + i * g 143 | # h = o * F.tanh(c) # change to relu 144 | h = o * F.relu(c) 145 | return h, c 146 | 147 | 148 | class LSTMPCell(RNNCellBase): 149 | def __init__(self, input_size, hidden_size, recurrent_size, bias=True, grad_clip=None): 150 | super(LSTMPCell, self).__init__() 151 | self.input_size = input_size 152 | self.hidden_size = hidden_size 153 | self.recurrent_size = recurrent_size 154 | self.grad_clip = grad_clip 155 | 156 | self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size)) 157 | self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, recurrent_size)) 158 | self.weight_rec = Parameter(torch.Tensor(recurrent_size, hidden_size)) 159 | if bias: 160 | self.bias = Parameter(torch.Tensor(4 * hidden_size)) 161 | else: 162 | self.register_parameter('bias', None) 163 | 164 | self.reset_parameters() 165 | 166 | def reset_parameters(self): 167 | stdv = 1.0 / math.sqrt(self.hidden_size) 168 | for weight in 
self.parameters(): 169 | weight.data.uniform_(-stdv, stdv) 170 | 171 | def forward(self, input, hx): 172 | h, c = hx 173 | 174 | pre = F.linear(input, self.weight_ih, self.bias) \ 175 | + F.linear(h, self.weight_hh) 176 | 177 | if self.grad_clip: 178 | pre = clip_grad(pre, -self.grad_clip, self.grad_clip) 179 | 180 | i = F.sigmoid(pre[:, :self.hidden_size]) 181 | f = F.sigmoid(pre[:, self.hidden_size: self.hidden_size * 2]) 182 | g = F.tanh(pre[:, self.hidden_size * 2: self.hidden_size * 3]) 183 | o = F.sigmoid(pre[:, self.hidden_size * 3:]) 184 | 185 | c = f * c + i * g 186 | h = o * F.tanh(c) 187 | h = F.linear(h, self.weight_rec) 188 | return h, c 189 | 190 | 191 | class MGRUCell(RNNCellBase): 192 | '''Minimal GRU 193 | Reference: 194 | Ravanelli et al. [Improving speech recognition by revising gated recurrent units](https://arxiv.org/abs/1710.00641). 195 | ''' 196 | 197 | def __init__(self, input_size, hidden_size, bias=True, grad_clip=None): 198 | super(MGRUCell, self).__init__() 199 | self.input_size = input_size 200 | self.hidden_size = hidden_size 201 | self.grad_clip = grad_clip 202 | 203 | self.weight_ih = Parameter(torch.Tensor(2 * hidden_size, input_size)) 204 | self.weight_hh = Parameter(torch.Tensor(2 * hidden_size, hidden_size)) 205 | if bias: 206 | self.bias = Parameter(torch.Tensor(2 * hidden_size)) 207 | else: 208 | self.register_parameter('bias', None) 209 | 210 | self.reset_parameters() 211 | 212 | def reset_parameters(self): 213 | stdv = 1.0 / math.sqrt(self.hidden_size) 214 | for weight in self.parameters(): 215 | weight.data.uniform_(-stdv, stdv) 216 | 217 | def forward(self, input, h): 218 | ih = F.linear(input, self.weight_ih, self.bias) 219 | hh = F.linear(h, self.weight_hh) 220 | 221 | if self.grad_clip: 222 | ih = clip_grad(ih, -self.grad_clip, self.grad_clip) 223 | hh = clip_grad(hh, -self.grad_clip, self.grad_clip) 224 | 225 | z = F.sigmoid(ih[:, :self.hidden_size] + hh[:, :self.hidden_size]) 226 | n = F.relu(ih[:, self.hidden_size:] + hh[:, self.hidden_size:]) 227 | h = (1 - z) * n + z * h 228 | 229 | return h 230 | 231 | 232 | class RNNBase(Module): 233 | def __init__(self, mode, input_size, hidden_size, recurrent_size=None, num_layers=1, bias=True, 234 | return_sequences=True, grad_clip=None): 235 | super(RNNBase, self).__init__() 236 | self.mode = mode 237 | self.input_size = input_size 238 | self.hidden_size = hidden_size 239 | self.recurrent_size = recurrent_size 240 | self.num_layers = num_layers 241 | self.bias = bias 242 | self.return_sequences = return_sequences 243 | self.grad_clip = grad_clip 244 | self.flatten_parameters() 245 | 246 | mode2cell = {'RNN': RNNCell, 247 | 'GRU': GRUCell, 248 | 'MGRU': GRUCell, 249 | 'LSTM': LSTMCell, 250 | 'LSTMP': LSTMPCell} 251 | Cell = mode2cell[mode] 252 | 253 | kwargs = {'input_size': input_size, 254 | 'hidden_size': hidden_size, 255 | 'bias': bias, 256 | 'grad_clip': grad_clip} 257 | if self.mode == 'LSTMP': 258 | kwargs['recurrent_size'] = recurrent_size 259 | 260 | self.cell0 = Cell(**kwargs) 261 | for i in range(1, num_layers): 262 | kwargs['input_size'] = recurrent_size if self.mode == 'LSTMP' else hidden_size 263 | cell = Cell(**kwargs) 264 | setattr(self, 'cell{}'.format(i), cell) 265 | 266 | def flatten_parameters(self): 267 | """Resets parameter data pointer so that they can use faster code paths. 268 | 269 | Right now, this works only if the module is on the GPU and cuDNN is enabled. 270 | Otherwise, it's a no-op. 
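Note: this method appears to be copied from torch.nn.RNNBase in PyTorch 0.3.x and relies on its private cuDNN bindings. In this class it is also invoked from __init__ before any cell (and hence any Parameter) has been registered, so the next(self.parameters()) call below raises StopIteration, and attributes it expects (batch_first, dropout, bidirectional, dropout_state, all_weights) are never defined here.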
271 | """ 272 | any_param = next(self.parameters()).data 273 | if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param): 274 | self._data_ptrs = [] 275 | return 276 | 277 | # If any parameters alias, we fall back to the slower, copying code path. This is 278 | # a sufficient check, because overlapping parameter buffers that don't completely 279 | # alias would break the assumptions of the uniqueness check in 280 | # Module.named_parameters(). 281 | unique_data_ptrs = set(p.data_ptr() for l in self.all_weights for p in l) 282 | if len(unique_data_ptrs) != sum(len(l) for l in self.all_weights): 283 | self._data_ptrs = [] 284 | return 285 | 286 | with torch.cuda.device_of(any_param): 287 | # This is quite ugly, but it allows us to reuse the cuDNN code without larger 288 | # modifications. It's really a low-level API that doesn't belong in here, but 289 | # let's make this exception. 290 | from torch.backends.cudnn import rnn 291 | from torch.backends import cudnn 292 | from torch.nn._functions.rnn import CudnnRNN 293 | handle = cudnn.get_handle() 294 | with warnings.catch_warnings(record=True): 295 | fn = CudnnRNN( 296 | self.mode, 297 | self.input_size, 298 | self.hidden_size, 299 | num_layers=self.num_layers, 300 | batch_first=self.batch_first, 301 | dropout=self.dropout, 302 | train=self.training, 303 | bidirectional=self.bidirectional, 304 | dropout_state=self.dropout_state, 305 | ) 306 | 307 | # Initialize descriptors 308 | fn.datatype = cudnn._typemap[any_param.type()] 309 | fn.x_descs = cudnn.descriptor(any_param.new(1, self.input_size), 1) 310 | fn.rnn_desc = rnn.init_rnn_descriptor(fn, handle) 311 | 312 | # Allocate buffer to hold the weights 313 | self._param_buf_size = rnn.get_num_weights(handle, fn.rnn_desc, fn.x_descs[0], fn.datatype) 314 | fn.weight_buf = any_param.new(self._param_buf_size).zero_() 315 | fn.w_desc = rnn.init_weight_descriptor(fn, fn.weight_buf) 316 | 317 | # Slice off views into weight_buf 318 | all_weights = [[p.data for p in l] for l in self.all_weights] 319 | params = rnn.get_parameters(fn, handle, fn.weight_buf) 320 | 321 | # Copy weights and update their storage 322 | rnn._copyParams(all_weights, params) 323 | for orig_layer_param, new_layer_param in zip(all_weights, params): 324 | for orig_param, new_param in zip(orig_layer_param, new_layer_param): 325 | orig_param.set_(new_param.view_as(orig_param)) 326 | 327 | self._data_ptrs = list(p.data.data_ptr() for p in self.parameters()) 328 | 329 | def forward(self, input, initial_states=None): 330 | if initial_states is None: 331 | zeros = Variable(torch.zeros(input.size(0), self.hidden_size)) 332 | if self.mode == 'LSTM': 333 | initial_states = [(zeros, zeros), ] * self.num_layers 334 | elif self.mode == 'LSTMP': 335 | zeros_h = Variable(torch.zeros(input.size(0), self.recurrent_size)) 336 | initial_states = [(zeros_h, zeros), ] * self.num_layers 337 | else: 338 | initial_states = [zeros] * self.num_layers 339 | assert len(initial_states) == self.num_layers 340 | 341 | states = initial_states 342 | outputs = [] 343 | 344 | time_steps = input.size(1) 345 | for t in range(time_steps): 346 | x = input[:, t, :] 347 | for l in range(self.num_layers): 348 | hx = getattr(self, 'cell{}'.format(l))(x, states[l]) 349 | states[l] = hx 350 | if self.mode.startswith('LSTM'): 351 | x = hx[0] 352 | else: 353 | x = hx 354 | outputs.append(hx) 355 | 356 | if self.return_sequences: 357 | if self.mode.startswith('LSTM'): 358 | hs, cs = zip(*outputs) 359 | h = torch.stack(hs).transpose(0, 1) 360 | c = 
torch.stack(cs).transpose(0, 1) 361 | output = (h, c) 362 | else: 363 | output = torch.stack(outputs).transpose(0, 1) 364 | else: 365 | output = outputs[-1] 366 | return output 367 | 368 | 369 | class RNN(RNNBase): 370 | def __init__(self, *args, **kwargs): 371 | super(RNN, self).__init__('RNN', *args, **kwargs) 372 | 373 | 374 | class GRU(RNNBase): 375 | def __init__(self, *args, **kwargs): 376 | super(GRU, self).__init__('GRU', *args, **kwargs) 377 | 378 | 379 | class MGRU(RNNBase): 380 | def __init__(self, *args, **kwargs): 381 | super(MGRU, self).__init__('MGRU', *args, **kwargs) 382 | 383 | 384 | class LSTM(RNNBase): 385 | def __init__(self, *args, **kwargs): 386 | super(LSTM, self).__init__('LSTM', *args, **kwargs) 387 | 388 | 389 | class LSTMP(RNNBase): 390 | def __init__(self, *args, **kwargs): 391 | super(LSTMP, self).__init__('LSTMP', *args, **kwargs) 392 | -------------------------------------------------------------------------------- /rnn/rnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2018-01-14 15:29:07 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import math 12 | import warnings 13 | from torch.nn.modules import Module 14 | from torch.nn.parameter import Parameter 15 | from torch.nn.utils.rnn import PackedSequence 16 | import torch.nn.functional as F 17 | 18 | 19 | class RNNBase(Module): 20 | 21 | def __init__(self, mode, input_size, hidden_size, 22 | num_layers=1, bias=True, batch_first=False, 23 | dropout=0, bidirectional=False): 24 | super(RNNBase, self).__init__() 25 | self.mode = mode 26 | self.input_size = input_size 27 | self.hidden_size = hidden_size 28 | self.num_layers = num_layers 29 | self.bias = bias 30 | self.batch_first = batch_first 31 | self.dropout = dropout 32 | self.dropout_state = {} 33 | self.bidirectional = bidirectional 34 | num_directions = 2 if bidirectional else 1 35 | 36 | if mode == 'LSTM': 37 | gate_size = 4 * hidden_size 38 | elif mode == 'GRU': 39 | gate_size = 3 * hidden_size 40 | else: 41 | gate_size = hidden_size 42 | 43 | self._all_weights = [] 44 | for layer in range(num_layers): 45 | for direction in range(num_directions): 46 | layer_input_size = input_size if layer == 0 else hidden_size * num_directions 47 | 48 | w_ih = Parameter(torch.Tensor(gate_size, layer_input_size)) 49 | w_hh = Parameter(torch.Tensor(gate_size, hidden_size)) 50 | b_ih = Parameter(torch.Tensor(gate_size)) 51 | b_hh = Parameter(torch.Tensor(gate_size)) 52 | layer_params = (w_ih, w_hh, b_ih, b_hh) 53 | 54 | suffix = '_reverse' if direction == 1 else '' 55 | param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}'] 56 | if bias: 57 | param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}'] 58 | param_names = [x.format(layer, suffix) for x in param_names] 59 | 60 | for name, param in zip(param_names, layer_params): 61 | setattr(self, name, param) 62 | self._all_weights.append(param_names) 63 | 64 | self.flatten_parameters() 65 | self.reset_parameters() 66 | 67 | def flatten_parameters(self): 68 | """Resets parameter data pointer so that they can use faster code paths. 69 | 70 | Right now, this works only if the module is on the GPU and cuDNN is enabled. 71 | Otherwise, it's a no-op. 
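Note: this mirrors torch.nn.RNNBase.flatten_parameters from PyTorch 0.3.x and depends on private cuDNN bindings (torch.backends.cudnn.rnn, torch.nn._functions.rnn.CudnnRNN) that later PyTorch releases removed, so the fast path is only available on that era of PyTorch.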
72 | """ 73 | any_param = next(self.parameters()).data 74 | if not any_param.is_cuda or not torch.backends.cudnn.is_acceptable(any_param): 75 | self._data_ptrs = [] 76 | return 77 | 78 | # If any parameters alias, we fall back to the slower, copying code path. This is 79 | # a sufficient check, because overlapping parameter buffers that don't completely 80 | # alias would break the assumptions of the uniqueness check in 81 | # Module.named_parameters(). 82 | unique_data_ptrs = set(p.data_ptr() for l in self.all_weights for p in l) 83 | if len(unique_data_ptrs) != sum(len(l) for l in self.all_weights): 84 | self._data_ptrs = [] 85 | return 86 | 87 | with torch.cuda.device_of(any_param): 88 | # This is quite ugly, but it allows us to reuse the cuDNN code without larger 89 | # modifications. It's really a low-level API that doesn't belong in here, but 90 | # let's make this exception. 91 | from torch.backends.cudnn import rnn 92 | from torch.backends import cudnn 93 | from torch.nn._functions.rnn import CudnnRNN 94 | handle = cudnn.get_handle() 95 | with warnings.catch_warnings(record=True): 96 | fn = CudnnRNN( 97 | self.mode, 98 | self.input_size, 99 | self.hidden_size, 100 | num_layers=self.num_layers, 101 | batch_first=self.batch_first, 102 | dropout=self.dropout, 103 | train=self.training, 104 | bidirectional=self.bidirectional, 105 | dropout_state=self.dropout_state, 106 | ) 107 | 108 | # Initialize descriptors 109 | fn.datatype = cudnn._typemap[any_param.type()] 110 | fn.x_descs = cudnn.descriptor(any_param.new(1, self.input_size), 1) 111 | fn.rnn_desc = rnn.init_rnn_descriptor(fn, handle) 112 | 113 | # Allocate buffer to hold the weights 114 | self._param_buf_size = rnn.get_num_weights(handle, fn.rnn_desc, fn.x_descs[0], fn.datatype) 115 | fn.weight_buf = any_param.new(self._param_buf_size).zero_() 116 | fn.w_desc = rnn.init_weight_descriptor(fn, fn.weight_buf) 117 | 118 | # Slice off views into weight_buf 119 | all_weights = [[p.data for p in l] for l in self.all_weights] 120 | params = rnn.get_parameters(fn, handle, fn.weight_buf) 121 | 122 | # Copy weights and update their storage 123 | rnn._copyParams(all_weights, params) 124 | for orig_layer_param, new_layer_param in zip(all_weights, params): 125 | for orig_param, new_param in zip(orig_layer_param, new_layer_param): 126 | orig_param.set_(new_param.view_as(orig_param)) 127 | 128 | self._data_ptrs = list(p.data.data_ptr() for p in self.parameters()) 129 | 130 | def _apply(self, fn): 131 | ret = super(RNNBase, self)._apply(fn) 132 | self.flatten_parameters() 133 | return ret 134 | 135 | def reset_parameters(self): 136 | stdv = 1.0 / math.sqrt(self.hidden_size) 137 | for weight in self.parameters(): 138 | weight.data.uniform_(-stdv, stdv) 139 | 140 | def forward(self, input, hx=None): 141 | is_packed = isinstance(input, PackedSequence) 142 | if is_packed: 143 | input, batch_sizes = input 144 | max_batch_size = batch_sizes[0] 145 | else: 146 | batch_sizes = None 147 | max_batch_size = input.size(0) if self.batch_first else input.size(1) 148 | 149 | if hx is None: 150 | num_directions = 2 if self.bidirectional else 1 151 | hx = torch.autograd.Variable(input.data.new(self.num_layers * 152 | num_directions, 153 | max_batch_size, 154 | self.hidden_size).zero_(), requires_grad=False) 155 | if self.mode == 'LSTM': 156 | hx = (hx, hx) 157 | 158 | has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs 159 | if has_flat_weights: 160 | first_data = next(self.parameters()).data 161 | assert 
first_data.storage().size() == self._param_buf_size 162 | flat_weight = first_data.new().set_(first_data.storage(), 0, torch.Size([self._param_buf_size])) 163 | else: 164 | flat_weight = None 165 | 166 | # func = self._backend.RNN( 167 | # self.mode, 168 | # self.input_size, 169 | # self.hidden_size, 170 | # num_layers=self.num_layers, 171 | # batch_first=self.batch_first, 172 | # dropout=self.dropout, 173 | # train=self.training, 174 | # bidirectional=self.bidirectional, 175 | # batch_sizes=batch_sizes, 176 | # dropout_state=self.dropout_state, 177 | # flat_weight=flat_weight 178 | # ) 179 | # output, hidden = func(input, self.all_weights, hx) 180 | # if is_packed: 181 | # output = PackedSequence(output, batch_sizes) 182 | # return output, hidden 183 | 184 | h, c = hx 185 | # single ReLU-activated LSTM cell using the layer-0 parameters; assumes a unidirectional layer with bias=True, input of shape [batch, input_size], and hx given as a pair of [batch, hidden_size] tensors 186 | pre = F.linear(input, self.weight_ih_l0, self.bias_ih_l0) + F.linear(h, self.weight_hh_l0, self.bias_hh_l0) 187 | # if self.grad_clip: 188 | # pre = clip_grad(pre, -self.grad_clip, self.grad_clip) 189 | 190 | i = F.sigmoid(pre[:, :self.hidden_size]) 191 | f = F.sigmoid(pre[:, self.hidden_size: self.hidden_size * 2]) 192 | g = F.relu(pre[:, self.hidden_size * 2: self.hidden_size * 3]) 193 | o = F.sigmoid(pre[:, self.hidden_size * 3:]) 194 | 195 | c = f * c + i * g 196 | h = o * F.relu(c) 197 | return h, c 198 | 199 | def __repr__(self): 200 | s = '{name}({input_size}, {hidden_size}' 201 | if self.num_layers != 1: 202 | s += ', num_layers={num_layers}' 203 | if self.bias is not True: 204 | s += ', bias={bias}' 205 | if self.batch_first is not False: 206 | s += ', batch_first={batch_first}' 207 | if self.dropout != 0: 208 | s += ', dropout={dropout}' 209 | if self.bidirectional is not False: 210 | s += ', bidirectional={bidirectional}' 211 | s += ')' 212 | return s.format(name=self.__class__.__name__, **self.__dict__) 213 | 214 | def __setstate__(self, d): 215 | super(RNNBase, self).__setstate__(d) 216 | self.__dict__.setdefault('_data_ptrs', []) 217 | if 'all_weights' in d: 218 | self._all_weights = d['all_weights'] 219 | if isinstance(self._all_weights[0][0], str): 220 | return 221 | num_layers = self.num_layers 222 | num_directions = 2 if self.bidirectional else 1 223 | self._all_weights = [] 224 | for layer in range(num_layers): 225 | for direction in range(num_directions): 226 | suffix = '_reverse' if direction == 1 else '' 227 | weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}'] 228 | weights = [x.format(layer, suffix) for x in weights] 229 | if self.bias: 230 | self._all_weights += [weights] 231 | else: 232 | self._all_weights += [weights[:2]] 233 | 234 | @property 235 | def all_weights(self): 236 | return [[getattr(self, weight) for weight in weights] for weights in self._all_weights] 237 | 238 | 239 | class LSTM(RNNBase): 240 | r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input 241 | sequence.
242 | """ 243 | 244 | def __init__(self, *args, **kwargs): 245 | super(LSTM, self).__init__('LSTM', *args, **kwargs) -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | ######################################################################### 2 | # File Name: main.sh 3 | # Author: Linjian Zhang 4 | # Mail: linjian93@foxmail.com 5 | # Created Time: 2018年01月 1日 10:38:54 6 | ######################################################################### 7 | #!/bin/bash 8 | 9 | for i in 170 180 190 200 10 | do 11 | # cnn-vo-cons 12 | 13 | # 80 90 100 / 110 130 150 14 | # python -u main.py --server=6099 --net_architecture=cnn --phase=Test --net_restore=cnn-vo --dir_restore=20180110 --batch_size=32 --model_restore=model-$i --gpu=0 15 | # /home/jiange/dl/project/tf-cnn-vo/evaluation/cpp/test /home/jiange/dl/project/pytorch-deepvo/test/cnn-vo/20180110_model-$i 16 | 17 | # 40 65 80 90 100 110 / 120 130 / 140 18 | # python -u main.py --server=6499 --net_architecture=cnn --phase=Test --net_restore=cnn-vo-cons --dir_restore=20180109 --batch_size=16 --model_restore=model-$i --gpu=3 19 | # /home/jiange/mydocument/mycode/pytorch-deepvo/evaluation/cpp/test /home/jiange/mydocument/mycode/pytorch-deepvo/test/cnn-vo-cons/20180109_model-$i 20 | 21 | # cnn-lstm-vo 22 | # 50 55 80 90 /100 110 120 130 / 140 150 160 / 170 180 190 200 23 | python -u main.py --server=6499 --net_architecture=cnn-lstm --phase=Test --img_pairs=2 --net_restore=cnn-lstm-vo --dir_restore=20180114 --model_restore=model-$i --gpu=2 24 | /home/jiange/mydocument/mycode/pytorch-deepvo/evaluation/cpp/test /home/jiange/mydocument/mycode/pytorch-deepvo/test/cnn-lstm-vo/20180114_model-$i 25 | done 26 | 27 | # nohup sh test.sh > nohup/test.log 2>&1 & -------------------------------------------------------------------------------- /utils/misc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-12-08 10:06:43 7 | Program: 8 | Description: 9 | """ 10 | import torch 11 | import shutil 12 | import os 13 | import math 14 | import numpy as np 15 | from torch.autograd import Variable 16 | from torch.nn import init 17 | 18 | 19 | def pre_create_file_train(model_dir, log_dir, args): 20 | dir_models = model_dir + '/' + args.net_name 21 | dir_logs = log_dir + '/' + args.net_name 22 | dir_model = dir_models + '/' + args.dir0 23 | dir_log = dir_logs + '/' + args.dir0 24 | if not os.path.exists(dir_models): 25 | os.mkdir(dir_models) 26 | if not os.path.exists(dir_logs): 27 | os.mkdir(dir_logs) 28 | if not os.path.exists(dir_model): 29 | os.mkdir(dir_model) 30 | if os.path.exists(dir_log): 31 | shutil.rmtree(dir_log) 32 | os.mkdir(dir_log) 33 | return dir_model, dir_log 34 | 35 | 36 | def pre_create_file_test(args): 37 | dir_net = 'test/' + args.net_restore 38 | dir_time = dir_net + '/' + args.dir_restore + '_' + args.model_restore 39 | if not os.path.exists(dir_net): 40 | os.mkdir(dir_net) 41 | if not os.path.exists(dir_time): 42 | os.mkdir(dir_time) 43 | return dir_time 44 | 45 | 46 | def to_var(x): 47 | if torch.cuda.is_available(): 48 | return Variable(x).cuda() 49 | else: 50 | return Variable(x) 51 | 52 | 53 | def init_xavier(m): 54 | if isinstance(m, torch.nn.Conv2d): 55 | init.xavier_normal(m.weight.data) 56 | init.constant(m.bias.data, 0.0) 57 | if isinstance(m, 
torch.nn.Linear): 58 | init.xavier_normal(m.weight.data) 59 | init.constant(m.bias.data, 0.0) 60 | if isinstance(m, torch.nn.BatchNorm2d): 61 | init.constant(m.weight.data, 1.0)  # BatchNorm weights are 1-D, so Xavier init is not applicable 62 | init.constant(m.bias.data, 0.0) 63 | 64 | 65 | def adjust_learning_rate(optimizer, epoch, lr_base, gamma=0.316, epoch_lr_decay=25): 66 | """ 67 | epoch lr (for lr_base = 1e-4; decayed values rounded) 68 | 000-025 1e-4 69 | 025-050 3e-5 70 | 050-075 1e-5 71 | 075-100 3e-6 72 | 100-125 1e-6 73 | 125-150 3e-7 74 | """ 75 | 76 | exp = int(math.floor(epoch / epoch_lr_decay)) 77 | lr_decay = gamma ** exp 78 | for param_group in optimizer.param_groups: 79 | param_group['lr'] = lr_decay * lr_base 80 | 81 | 82 | def display_loss_tb(hour_per_epoch, epoch, args, step, step_per_epoch, optimizer, loss, loss1, loss2, 83 | loss_list, loss1_list, loss2_list, writer, step_global): 84 | print('\n{:.3f} [{:03d}/{:03d}] [{:03d}/{:03d}] lr {:.7f}: {:.4f}({:.4f})={:.4f}({:.4f})+{:d}' 85 | '*{:.4f}({:.4f})'.format(hour_per_epoch, epoch + 1, args.epoch_max, step + 1, 86 | step_per_epoch, 87 | optimizer.param_groups[0]['lr'], loss, np.mean(loss_list), loss1, 88 | np.mean(loss1_list), args.beta, loss2, np.mean(loss2_list))) 89 | writer.add_scalars('./train-val', 90 | {'loss_t': loss, 'loss1_t': loss1, 'loss2_t': loss2}, 91 | step_global) 92 | 93 | 94 | def display_loss_tb_val(batch_v, loss_v, loss1_v, loss2_v, args, writer, step_global): 95 | print('\n{:d} batches: L {:.4f}={:.4f}+{:d}*{:.4f}'.format(batch_v, loss_v, loss1_v, args.beta, loss2_v)) 96 | writer.add_scalars('./train-val', {'loss_v': loss_v, 'loss1_v': loss1_v, 'loss2_v': loss2_v}, step_global) 97 | -------------------------------------------------------------------------------- /utils/post_process.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Author: Linjian Zhang 5 | Email: linjian93@foxmail.com 6 | Create Time: 2017-12-07 23:01:32 7 | Program: 8 | Description: 9 | """ 10 | import io 11 | import matplotlib 12 | matplotlib.use('Agg') 13 | import matplotlib.pyplot as plt 14 | from matplotlib import style 15 | import numpy as np 16 | from numpy import mat 17 | from tqdm import tqdm 18 | import math 19 | 20 | 21 | def cal_absolute_from_relative(xyz_euler): 22 | """ 23 | Compute absolute poses by chaining relative poses (see the usage sketch appended at the end of this document). 24 | :param xyz_euler: N x 6 array of relative motions [x y z theta_x theta_y theta_z], angles in degrees 25 | :return: list of N + 1 flattened 3x4 absolute poses (12-d vectors, KITTI format) 26 | """ 27 | xyz_euler = np.array(xyz_euler) 28 | pose_absolute = [] # 12-d 29 | t1 = mat(np.eye(4)) 30 | pose_absolute.extend([np.array(t1[0: 3, :]).reshape([-1])]) 31 | for i in tqdm(range(len(xyz_euler))): 32 | x12 = xyz_euler[i, 0] 33 | y12 = xyz_euler[i, 1] 34 | z12 = xyz_euler[i, 2] 35 | theta1 = xyz_euler[i, 3] / 180 * np.pi 36 | theta2 = xyz_euler[i, 4] / 180 * np.pi 37 | theta3 = xyz_euler[i, 5] / 180 * np.pi 38 | tx = mat([[1, 0, 0], [0, math.cos(theta1), -math.sin(theta1)], [0, math.sin(theta1), math.cos(theta1)]]) 39 | ty = mat([[math.cos(theta2), 0, math.sin(theta2)], [0, 1, 0], [-math.sin(theta2), 0, math.cos(theta2)]]) 40 | tz = mat([[math.cos(theta3), -math.sin(theta3), 0], [math.sin(theta3), math.cos(theta3), 0], [0, 0, 1]]) 41 | tr = tz * ty * tx 42 | t12 = np.row_stack((np.column_stack((tr, [[x12], [y12], [z12]])), [0, 0, 0, 1])) 43 | t2 = t1 * t12 44 | pose_absolute.extend([np.array(t2[0: 3, :]).reshape([-1])]) 45 | t1 = t2 46 | 47 | return pose_absolute 48 | 49 | 50 | def plot_from_pose(seq, dir_save, pose_abs, epoch=None, args=None): 51 | """ 52 | The plot legend, file naming, and save location differ between training and testing. 53 | """ 54 | plt.close('all')
style.use("ggplot") 56 | pose_gt = np.loadtxt('dataset/ground-truth/{:02d}.txt'.format(seq)) 57 | pose_pre = np.array(pose_abs) # [image_numbers, 6] 58 | plt.plot(pose_gt[:, 3], pose_gt[:, 11], '--', c='k', lw=1.5, label='Ground truth') 59 | if args.phase == 'Train': 60 | plt.plot(pose_pre[:, 3], pose_pre[:, 11], '-', c='r', lw=1.5, label='model-{:d}'.format(epoch)) 61 | else: 62 | plt.plot(pose_pre[:, 3], pose_pre[:, 11], '-', c='r', lw=1.5, label=args.model_restore) 63 | 64 | plt.title('Sequence {:02d}'.format(seq)) 65 | plt.xlabel('X', fontsize=12) 66 | plt.ylabel('Z', fontsize=12) 67 | plt.xticks(fontsize=10) 68 | plt.yticks(fontsize=10) 69 | plt.axis('equal') 70 | plt.grid(True) 71 | plt.legend() 72 | if args.phase == 'Train': 73 | plt.savefig(dir_save+'/{:d}-epoch-{:d}.png'.format(seq, epoch)) 74 | else: 75 | plt.savefig(dir_save + '/{:d}.png'.format(seq)) 76 | 77 | 78 | def main(): 79 | from tensorboardX import SummaryWriter 80 | writer = SummaryWriter('./img') 81 | plt.figure() 82 | plt.plot([1, 2]) 83 | plt.title("test") 84 | buf = io.BytesIO() 85 | plt.savefig(buf, format='png') 86 | buf.seek(0) 87 | writer.add_image('Image', buf) 88 | 89 | 90 | if '__name__' == '__main__': 91 | main() 92 | -------------------------------------------------------------------------------- /utils/preprocess.py: -------------------------------------------------------------------------------- 1 | from torchvision import transforms, utils 2 | from skimage import io, transform 3 | 4 | 5 | 6 | def main(): 7 | pass 8 | # scale = Rescale(256) 9 | # scale(sample) # 直接调用就好了 10 | 11 | if __name__ == '__main__': 12 | main() --------------------------------------------------------------------------------