├── 2-_I-80_Metadata_Documentation.pdf ├── ASPeachtree.jpg ├── README.md ├── animate_multiple.py ├── data vis.ipynb ├── data-analysis-report-0400-0415.pdf ├── data_vis.py ├── emeryville.dwg ├── feature_extract.py ├── generate_trainData.ipynb ├── i80_data_vis.py ├── mlpnetwork.py ├── moving_circles.py ├── moving_rect.py ├── newex.py ├── peachtree.jpg ├── peachtree.png ├── predTraj_lstmMDN.ipynb ├── rain.py └── sort_data.py /2-_I-80_Metadata_Documentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/2-_I-80_Metadata_Documentation.pdf -------------------------------------------------------------------------------- /ASPeachtree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/ASPeachtree.jpg -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NGSIM Data Visualization 2 | 3 | 4 | -------------------------------------------------------------------------------- /animate_multiple.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib 3 | from matplotlib.patches import Circle, Wedge, Polygon 4 | from matplotlib.collections import PatchCollection 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | fig, ax = plt.subplots() 9 | 10 | resolution = 50 # the number of vertices 11 | N = 3 12 | x = np.random.rand(N) 13 | y = np.random.rand(N) 14 | radii = 0.1*np.random.rand(N) 15 | patches = [] 16 | for x1, y1, r in zip(x, y, radii): 17 | circle = Circle((x1, y1), r) 18 | patches.append(circle) 19 | 20 | x = np.random.rand(N) 21 | y = np.random.rand(N) 22 | radii = 0.1*np.random.rand(N) 23 | theta1 = 360.0*np.random.rand(N) 24 | theta2 = 360.0*np.random.rand(N) 25 | for x1, y1, r, t1, t2 in zip(x, y, radii, theta1, theta2): 26 | wedge = Wedge((x1, y1), r, t1, t2) 27 | patches.append(wedge) 28 | 29 | # Some limiting conditions on Wedge 30 | patches += [ 31 | Wedge((.3, .7), .1, 0, 360), # Full circle 32 | Wedge((.7, .8), .2, 0, 360, width=0.05), # Full ring 33 | Wedge((.8, .3), .2, 0, 45), # Full sector 34 | Wedge((.8, .3), .2, 45, 90, width=0.10), # Ring sector 35 | ] 36 | 37 | for i in range(N): 38 | polygon = Polygon(np.random.rand(N, 2), True) 39 | patches.append(polygon) 40 | 41 | colors = 100*np.random.rand(len(patches)) 42 | p = PatchCollection(patches, alpha=0.4) 43 | p.set_array(np.array(colors)) 44 | ax.add_collection(p) 45 | fig.colorbar(p, ax=ax) 46 | 47 | plt.show() 48 | -------------------------------------------------------------------------------- /data vis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import pandas as pd\n", 11 | "import matplotlib.pyplot as plt\n", 12 | "from matplotlib import animation\n", 13 | "import matplotlib.patches as patches\n", 14 | "import math\n", 15 | "import time\n", 16 | "%matplotlib inline" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "filepath = 'trajectories-0400-0415.csv'\n", 26 | "data = pd.read_csv(filepath)" 27 | 
] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 9, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "data_cut = data[['Vehicle_ID', 'Frame_ID', 'Local_X', 'Local_Y','Lane_ID', 'v_Length', 'v_Width', 'Angle']]\n", 36 | "sorted_frame = data_cut.sort_values(by=['Frame_ID'])\n", 37 | "sorted_np = sorted_frame.values\n", 38 | "sorted_np = sorted_np[20000:50000,:] # Omit data upto 100*1000ms = 100s\n", 39 | "sorted_id = data_cut.values # sort by vehicle id\n", 40 | "\n", 41 | "# init array of sliced values, by frame number\n", 42 | "sliced = []\n", 43 | "\n", 44 | "# slice data by frame number\n", 45 | "for i in range(int(min(sorted_np[:,1])),int(max(sorted_np[:,1]))):\n", 46 | " sliced.append(sorted_np[sorted_np[:,1]==i])\n" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 7, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "def updateVel(x,y,theta,vel):\n", 56 | " if (y < 40):\n", 57 | " theta = 5\n", 58 | " else:\n", 59 | " theta = 0\n", 60 | " vel = vel\n", 61 | " theta = theta\n", 62 | " return vel, theta" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 10, 68 | "metadata": {}, 69 | "outputs": [ 70 | { 71 | "data": { 72 | "text/plain": [ 73 | "array([[1.00000000e+00, 1.20000000e+01, 1.68840000e+01, ...,\n", 74 | " 1.43000000e+01, 6.40000000e+00, 2.47363964e+00],\n", 75 | " [1.00000000e+00, 1.30000000e+01, 1.69380000e+01, ...,\n", 76 | " 1.43000000e+01, 6.40000000e+00, 2.42982837e+00],\n", 77 | " [1.00000000e+00, 1.40000000e+01, 1.69910000e+01, ...,\n", 78 | " 1.43000000e+01, 6.40000000e+00, 2.47166476e+00],\n", 79 | " ...,\n", 80 | " [2.91100000e+03, 8.59000000e+03, 5.37460000e+01, ...,\n", 81 | " 1.49000000e+01, 5.90000000e+00, 4.76689887e-01],\n", 82 | " [2.91100000e+03, 8.59100000e+03, 5.37720000e+01, ...,\n", 83 | " 1.49000000e+01, 5.90000000e+00, 4.92501734e-01],\n", 84 | " [2.91100000e+03, 8.59200000e+03, 5.37990000e+01, ...,\n", 85 | " 1.49000000e+01, 5.90000000e+00, nan]])" 86 | ] 87 | }, 88 | "execution_count": 10, 89 | "metadata": {}, 90 | "output_type": "execute_result" 91 | } 92 | ], 93 | "source": [] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 8, 98 | "metadata": {}, 99 | "outputs": [ 100 | { 101 | "name": "stderr", 102 | "output_type": "stream", 103 | "text": [ 104 | "C:\\Users\\dongwan123\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\matplotlib\\patches.py:91: UserWarning: Setting the 'color' property will overridethe edgecolor or facecolor properties. 
\n", 105 | " warnings.warn(\"Setting the 'color' property will override\"\n" 106 | ] 107 | }, 108 | { 109 | "name": "stdout", 110 | "output_type": "stream", 111 | "text": [ 112 | "334.0\n" 113 | ] 114 | }, 115 | { 116 | "data": { 117 | "image/png": "iVBORw0KGgoAAAANSUhEUgAABBEAAADGCAYAAACEjxjvAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAH69JREFUeJzt3X9snXd96PH3Z3W4TTrcru7Ij5rdHDTM7TVKAliovtxZgLcldBbFUhKx5PaaqVIitLs7uB2jVNWdN7GqaJcVru5UNYINDyWA02E1slAy5FH5Xsnr5kIyZroZqDs4a5JupmBYIpawz/3DT0qcE5onyTn+cfx+SdY5z/N5Hp+vk895/PhzPs/3icxEkiRJkiTpSn5qqQcgSZIkSZJWBosIkiRJkiSpFIsIkiRJkiSpFIsIkiRJkiSpFIsIkiRJkiSpFIsIkiRJkiSplFJFhIh4f0RMRcTfRsRnIuLGiKhExFMR8fWI+FxEvKLRg5UkSZIkSUvnikWEiLgd+O9AV2a+HrgBeDfwEeCRzHwt8CJwbyMHKkmSJEmSllbZyxlagLUR0QKsA04CbwceL+JDwLvqPzxJkiRJkrRcXLGIkJn/CPwv4FvMFw++BzwNfDczzxebVYHbGzVISZIkSZK09FqutEFE/AxwN1ABvgscBt5xmU3zJ+y/D9gHsGbNmjfddttt1zxYSZIkSc3t5MmTDX+NjRs3Nvw1pJXm5MmT/5yZP3ul7a5YRAB+EZjJzH8CiIjPA/8JuCUiWopuhHbg+cvtnJkHgAMAmzZtyv3795f8ESRJkiStNoODgw1/Df8mkWoNDg7+Q5ntysyJ8C3gzohYFxEB9AJfA74E7Cy2GQCeuJaBSpIkSZKklaHMnAhPMT+B4peBrxb7HAA+CPyPiPgG0AZ8soHjlCRJkiRJS6zM5Qxk5u8Av3PJ6meBN9d9RJIkSZIkaVkqe4tHSZIkSZK0ypXqRJAkSZKkxbAYEytKunZ2IkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIiMxftxbZs2ZJHjhxZsG5qaorJyUlaWlrYu3dvzT7Hjx/nxIkTrF27lt27d9fEJycnmZqaorW1lf7+/pr4xMQE09PTtLW10dfXVxMfHx9nZmaG9evXs2PHjpr42NgY1WqV9vZ2ent7a+JHjx7l9OnTVCoVenp6auKjo6PMzs7S0dFBd3d3TXxkZIS5uTk6Ozvp6uqqiQ8PD3P27Fm2bt3Ktm3bauIHDx7k/PnzdHV10dnZWRMfGhoCoLu7m46OjgWxc+fOcejQIQB6enqoVCoL4mfOnOHw4cMA9Pb20t7eviA+NzfHyMgIANu3b2fDhg0L4rOzs4yOjgLQ19dHW1vbgvipU6c4duwYAP39/bS2ti6IV6tVxsbGANi1axfr1q1bEJ+ZmWF8fByAPXv2sGbNmgXx6elpJiYmABgYGOBS5p65B+aeuWfuXczcM/fA3DP3zL2LmXvmHqye3KtUKk9nZm2iXMJOBEmSJEmSVMoVOxEi4nXA5y5a9RrgfwJ/WqzfDDwH7M7MF1/ue23atCn3799/HcOVJEmSJEn1Njg4WKoToeVKG2Tm3wPbACLiBuAfgRHgfmAsMx+OiPuL5Q9e16glSdKKNjg4uKy/nyRJuj5XLCJcohf4Zmb+Q0TcDby1WD8EPIlFBEmSJOmqLFaxzKKcpHq42iLCu4HPFM/XZ+ZJgMw8GRGvutwOEbEP2Adw8803X+s4pWWh0b98/eUuSZIkaTkrXUSIiFcA7wQ+dDUvkJkHgAMwPyfCVY1OkppQI4tFFqIkSavF9f7O83fmtVvsfzv/r5aXq+lEeAfw5cw8XSyfjoiNRRfCRuCF+g9PkiSpOdndJklaia7mFo+/yo8vZQA4Aly4IeYA8ES9BiVJkiRJkpafUkWEiFgH/BLw+YtWPwz8UkR8vYg9XP/hSZIkSZKk5aLU5QyZeQZou2TdLPN3a5AkSZIkSZfRbJevXe3dGSRJaojF/AXoteKSJEnX5mrmRJAkSZIkSauYRQRJkiRJklSKlzNIkqS68VIRSZKam50IkiRJkiSpFIsIkiRJkiSpFC9nkKRFZru3JOli/l64Nv67SUvDTgRJkiRJklSKnQjSVbDiLUmSJGk1s4ggSZIkSSrND9ZWN4sIkiRJS8CTcEnSSlRqToSIuCUiHo+Iv4uIZyKiOyJujYgvRsTXi8efafRgJUmSJEnS0inbifBx4Ghm7oyIVwDrgAeAscx8OCLuB+4HPtigcUqSJEmStOI0W+dZZObLbxDRCpwAXpMXbRwRfw+8NTNPRsRG4MnMfN3Lfa8tW7bkkSNHFqybmppicnKSlpYW9u7dW7PP8ePHOXHiBGvXrmX37t018cnJSaampmhtbaW/v78mPjExwfT0NG1tbfT19dXEx8fHmZmZYf369ezYsaMmPjY2RrVapb29nd7e3pr40aNHOX36NJVKhZ6enpr46Ogos7OzdHR00N3dXRMfGRlhbm6Ozs5Ourq6auLDw8OcPXuWrVu3sm3btpr4wYMHOX/+PF1dXXR2dtbEh4aGAOju7qajo2NB7Ny5cxw6dAiAnp4eKpXKgviZM2c4fPgwAL29vbS3ty+Iz83NMTIyAsD27dvZsGHDgvjs7Cyjo6MA9PX10dbWtiB+6tQpjh07BkB/fz+tra0L4tVqlbGxMQB27drFunXrFsRnZmYYHx8HYM+ePaxZs2ZBfHp6momJCQAGBga4lLln7oG5Z+6Zexcz98w9MPfMPXPvYuaeuQerJ/cqlcrTmVmbKJcocznDa4B/Av4kIr4SEZ+IiJuA9Zl5EqB4fNXldo6IfRExGRGTP/rRj0q8nCRJkiRJWo7KdCJ0AX8JvCUzn4qIjwNzwG9k5i0XbfdiZr7svAibNm3K/fv312HYkiRJkiSpXgYHB+vWiVAFqpn5VLH8OPBG4HRxGQPF4wvXOlhJkiRJkrT8XbGIkJmngG9HxIX5DnqBrwFHgAsXfwwATzRkhJIkSZIkaVkoe3eG3wAOFndmeBb4NeYLEMMRcS/wLWBXY4YoSZIkSZKWg1JFhMw8Dlzu2ojaKUQlSZIkSVJTKtuJoGWskfcdbbZ7mqr5LWbO+v6QJEnSamMRQZIkSVJT80M3qX7K3J1BkiRJkiTJTgRJkrS4/ERQkqSVyyKCtELV+0TZE28thXrmnTks1V8j3le+VyVpZfNyBkmSJEmSVIpFBEmSJEmSV
IpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVEqpWzxGxHPA94EfAeczsysibgU+B2wGngN2Z+aLjRmmJEmSJF0bby0q1U+pIkLhbZn5zxct3w+MZebDEXF/sfzBuo5OpXhQlCRJkiQthqspIlzqbuCtxfMh4EksIkhaYhbVJEmSpMYpW0RI4M8jIoHHMvMAsD4zTwJk5smIeFWjBilJak4WfVYn/98lSVq5yhYR3pKZzxeFgi9GxN+VfYGI2AfsA7j55puvYYiSLseTcEmSJEmLrdTdGTLz+eLxBWAEeDNwOiI2AhSPL/yEfQ9kZldmdq1bt64+o5YkSZIkSYvuip0IEXET8FOZ+f3i+S8DvwccAQaAh4vHJxo5UEmSJC0uu94kSZeKzHz5DSJew3z3AcwXHQ5l5u9HRBswDPwc8C1gV2Z+5+W+15YtW/LIkSML1k1NTTE5OUlLSwt79+6t2ef48eOcOHGCtWvXsnv37pr45OQkU1NTtLa20t/fXxOfmJhgenqatrY2+vr6auLj4+PMzMywfv16duzYURMfGxujWq3S3t5Ob29vTfzo0aOcPn2aSqVCT09PTXx0dJTZ2Vk6Ojro7u6uiY+MjDA3N0dnZyddXV018eHhYc6ePcvWrVvZtm1bTfzgwYOcP3+erq4uOjs7a+JDQ0MAdHd309HRsSB27tw5Dh06BEBPTw+VSmVB/MyZMxw+fBiA3t5e2tvbF8Tn5uYYGZlPje3bt7Nhw4YF8dnZWUZHRwHo6+ujra1tQfzUqVMcO3YMgP7+flpbWxfEq9UqY2NjAOzatYtLO1lmZmYYHx8HYM+ePaxZs2ZBfHp6momJCQAGBga4lLln7oG5Z+6Zexcz98w9MPfMPXPvYuaeuQerJ/cqlcrTmVmbKJe4YidCZj4LbL3M+lmgNtMlSZIkSVJTumInQj1t2rQp9+/fv2ivJ0mSJEmSrmxwcLA+nQirVSOvAfT6QkmSJEnSSlTq7gySJEmSJEl2Ikiqm/v+4A/46X/5l2va9wc33cRHP/CBOo9IkqTF0+huU7tZJS0HFhEk1c21FhCud19JkqRGalQBx8KQViIvZ5AkSZIkSaUsaifCyZMnL1ttswKnZmRLoyRJkqRm4+UMkiRp0Xj3o2tncVqStBx4OYMkSZIkSSrFTgRJkiRJUtNqRKfVau7esoig67bYb6DV/IaV1Fi2i0uSJL08L2eQVDc/uOmmJdlXkiRJ0uIo3YkQETcAk8A/ZmZfRFSAzwK3Al8G7snMf23MMCWtBB/9wAeWegiSJEmSGuhqOhF+E3jmouWPAI9k5muBF4F76zkwSZIkSZK0vJQqIkREO/ArwCeK5QDeDjxebDIEvKsRA5QkSZIkSctD2csZPgb8NvDKYrkN+G5mni+Wq8Dtl9sxIvYB+65nkJIkSdJy5+SpklaDKxYRIqIPeCEzn46It15YfZlN83L7Z+YB4EDxvS67zXLkLwFJkiRJ4N8G0sXKdCK8BXhnRNwF3Ai0Mt+ZcEtEtBTdCO3A840bpiRJkiRJWmpXLCJk5oeADwEUnQi/lZl7I+IwsJP5OzQMAE80cJySJKkJ+GmeJEkrW+lbPF7GB4HPRsSHga8An7zSDhs3bmT//v3X8ZKSJEmSJGmpXFURITOfBJ4snj8LvLn+Q5Kag5+2SZIkSWo2pW7xKEmSJEmSdD2XM1y12267jYGBgQXrpqammJycpKWlhb1799bsc/z4cU6cOMHatWvZvXt3TXxycpKpqSlaW1vp7++viU9MTDA9PU1bWxt9fX018fHxcWZmZli/fj07duyoiY+NjVGtVmlvb6e3t7cmfvToUU6fPk2lUqGnp6cmPjo6yuzsLB0dHXR3d9fER0ZGmJubo7Ozk66urpr48PAwZ8+eZevWrWzbtq0mfvDgQc6fP09XVxednZ018aGhIQC6u7vp6OhYEDt37hyHDh0CoKenh0qlsiB+5swZDh8+DEBvby/t7e0L4nNzc4yMjADwyCOP1Ixvenr6pctXHnvssZrXP378OO9///sB+PSnP13z/ScmJnjggQcAePzxx2lrawNg8+bNAMzMzDA+Pg7Anj17WLNmTc3rT0xMANTkHZh7zZJ727dvZ8OGDQvis7OzjI6OAtDX1/dS7lxw6tQpjh07BkB/fz+tra0L4tVqlbGxMQB27drFunXrFsTNvebNvde//vXs3LkTgIceeqhm/NVqlXvuuQe4tuPejTfeaO6Ze9d83LvA4565dyl/55p75t7L596JEyfMvUtcLvfKdlIvahFBzWlwcJA777yz5o39yle+8qVEfNOb3lTzxr7xxhtfim/ZsqXmjd3S0vJS/I477qh5Y0tSvd1xxx0vHXd+4Rd+oeaE5tZbb30pfi3HvVOnTjVk3JIkSYslMnPRXmzTpk3pxIqSJEmSJC0vg4ODT2dmbcvKJZwTQZIkSZIklWIRQZIkSZIklWIRQZIkSZIkleLEipIkSZLUAGVnu19OVuKYtbgsIkiSSmnESYUnKpIkSSuLlzNIkiRJkqRS7ETQsuWnnpIkSZK0vFyxEyEiboyIv4qIExExFRG/W6yvRMRTEfH1iPhcRLyi8cOVJEmSJElLpczlDD8E3p6ZW4FtwI6IuBP4CPBIZr4WeBG4t3HDlCRJkiRJS+2KlzNkZgI/KBbXFF8JvB3YU6wfAgaBR+s/REmSdL28nGtp+e8vSWoWpeZEiIgbgKeBnwf+CPgm8N3MPF9sUgVub8gIJV2X1Xbiutp+XkmSJGkxlbo7Q2b+KDO3Ae3Am4E7LrfZ5faNiH0RMRkRk2fOnLn2kUqSJEmSpCV1Vbd4zMzvAk8CdwK3RMSFToZ24PmfsM+BzOzKzK5169Zdz1glSZIkSdISKnN3hp+NiFuK52uBXwSeAb4E7Cw2GwCeaNQgJUmSJEnS0iszJ8JGYKiYF+GngOHMHI2IrwGfjYgPA18BPtnAcUqSJEmSpCVW5u4MfwO84TLrn2V+fgRJ0irgpJWSJEm6qjkRJEmSJEnS6lXqFo/SUvBTT0mSJElaXiwiSJIkSVID+KGYmpGXM0iSJEmSpFIsIkiSJEmSpFIsIkiSJEmSpFIiMxftxbZs2ZJHjhxZsG5qaorJyUlaWlrYu3dvzT7Hjx/nxIkTrF27lt27d9fEJycnmZqaorW1lf7+/pr4xMQE09PTtLW10dfXVxMfHx9nZmaG9evXs2PHjpr42NgY1WqV9vZ2ent7a+JHjx7l9OnTVCoVenp6auKjo6PMzs7S0dFBd3d3TXxkZIS5uTk6Ozvp6uqqiQ8PD3P27Fm2bt3Ktm3bauIHDx7k/PnzdHV10dnZWRMfGhoCoLu7m46OjgWxc+fOcejQIQB6enqoVCoL4mfOnOHw4cMA9Pb20t7eviA+NzfHyMgIANu3b2fDhg0L4rOzs4yOjgLQ19dHW1vbgvipU6c4duwYAP39/bS2ti6IV6tVxsbGANi1axfr1q1bEJ+ZmWF8fByAPXv2sGbNmgXx6elpJiYmABgYGOBSqyX3Jicnue+++2ri99xzD9Vqld27
d/Pe9763Jr5z505mZ2cZGBjgPe95T038rrvu4uzZs7z3ve+97L/P2972NgDuu+++mp//7Nmz3HXXXQA8+OCDNT/f7OwsO3fuBOChhx6qee9Uq1XuueceAB555JEF743Nmzebe8sk9zzuLa/ce+655xgeHubRRx9l7dq1fOELX+BSn/rUpxgaGqKtrY3HH3+8Jv7oo48yPDxMe3s7n/70p2viH/3oRxkdHaWjo4PHHnusJv7hD3+YsbExtm7dysc+9rGa+AMPPMDExATd3d089NBDNfH3ve99nDhxgt7eXh588MGa+P79+5menqavr2/ZHfduv/32VZt74HHP4565Z+6Ze5dajrlXqVSezszaRLmEnQiSJEmSJKmURe1E2LRpU+7fv3/RXk/S6psVeLX9vJIkSVI9DA4OlupE8BaPUpPzj2pJkiRJ9eLlDJIkSZIkqRSLCJIkSZIkqZQrXs4QEa8G/hTYAPwbcCAzPx4RtwKfAzYDzwG7M/PFxg11ZVjs1nFb1SVJy8li/l7yd6AkSYuvzJwI54H7MvPLEfFK4OmI+CLwHmAsMx+OiPuB+4EPNm6okpZKI07UPfmXJEmN0ujzjOV8HlPvsS3nn1VL44pFhMw8CZwsnn8/Ip4BbgfuBt5abDYEPIlFBNXRaj74a3Uwx8vz021JenkW/CUtlqu6O0NEbAbeADwFrC8KDGTmyYh41U/YZx+wD+Dmm2++nrFKkiRJklTaci6GLeexvZzSRYSI+Gngz4D3ZeZcRJTaLzMPAAcANm3alNcySEmSJDUP260laeUqdXeGiFjDfAHhYGZ+vlh9OiI2FvGNwAuNGaIkSZIkSVoOrlhEiPmWg08Cz2TmH14UOgIMFM8HgCfqPzxJkiRJkrRclLmc4S3APcBXI+J4se4B4GFgOCLuBb4F7GrMECVJkiRJ0nJQ5u4M/w/4SRMg9NZ3OJIkSZIkabkqNSeCJEmSJEmSRQRJkiRJklRK6Vs8Slq9vHWWJElaSTx3kRrHIkKdecCSJEnSYvMcVBeYC2o0iwhatjwASrrA44EkSdLyYBFBkpaIfxirGZnXkiQ1NydWlCRJkiRJpdiJIEmSpEVlx4okrVx2IkiSJEmSpFIiMxftxbZs2ZJHjhxZsG5qaorJyUlaWlrYu3dvzT7Hjx/nxIkTrF27lt27d9fEJycnmZqaorW1lf7+/pr4xMQE09PTtLW10dfXVxMfHx9nZmaG9evXs2PHjpr42NgY1WqV9vZ2ent7a+JHjx7l9OnTVCoVenp6auKjo6PMzs7S0dFBd3d3TXxkZIS5uTk6Ozvp6uqqiQ8PD3P27Fm2bt3Ktm3bauIHDx7k/PnzdHV10dnZWRMfGhoCoLu7m46OjgWxc+fOcejQIQB6enqoVCoL4mfOnOHw4cMA9Pb20t7eviA+NzfHyMgIANu3b2fDhg0L4rOzs4yOjgLQ19dHW1vbgvipU6c4duwYAP39/bS2ti6IV6tVxsbGANi1axfr1q1bEJ+ZmWF8fByAPXv2sGbNmgXx6elpJiYmABgYGOBS5p65B+aeuWfuXczcM/fA3FuJuXchn+67776an//s2bPcddddADz44IMv/XybN28GzD1zz+MemHsXcq9SqTydmbWJcgk7ESRJkiRJUilX7ESIiD8G+oAXMvP1xbpbgc8Bm4HngN2Z+eKVXmzTpk25f//+6xyyJEmSpAuuZY4J56WQdKnBwcG6dSJ8Cri0/+F+YCwzXwuMFcuSJEmSJKmJXbGIkJnjwHcuWX03MFQ8HwLeVedxSZIkSZKkZeZab/G4PjNPAmTmyYh41U/aMCL2AfsAbr755mt8uZWnkS1itp9JkiRJkpZCwydWzMwDmdmVmV2XzngpSZIkSZJWjmvtRDgdERuLLoSNwAv1HJQk1YtdQZKWUqOPEx6HJEmL7VqLCEeAAeDh4vGJuo1IkqRlxD8CJUmSfuyKRYSI+AzwVuC2iKgCv8N88WA4Iu4FvgXsauQgJUmSJDUXi7TSynTFIkJm/upPCPXWeSySJEmSJGkZu9bLGSRJkiQtA37iLmkxNfzuDJIkSZIkqTlYRJAkSZIkSaVYRJAkSZIkSaVYRJAkSZIkSaU4sWKDOMGNJEmSJGmxNfpvUTsRJEmSJElSKXYiSGpqdgVJkiRJ9WMngiRJkiRJKsVOBEmSXobdLJIkST9mJ4IkSZIkSSrlujoRImIH8HHgBuATmflwXUYlSZLUBOxkkSQ1m2suIkTEDcAfAb8EVIG/jogjmfm1eg1OkiRJUnOyyCatTNdzOcObgW9k5rOZ+a/AZ4G76zMsSZIkSZK03FxPEeF24NsXLVeLdZIkSZIkqQldz5wIcZl1WbNRxD5gX7H4w8HBwb+9jteUVoLbgH9e6kFIDWaeazUwz7UamOdaDczzcv59mY2up4hQBV590XI78PylG2XmAeAAQERMZmbXdbymtOyZ51oNzHOtBua5VgPzXKuBeV5f13M5w18Dr42ISkS8Ang3cKQ+w5IkSZIkScvNNXciZOb5iPhvwDHmb/H4x5k5VbeRSZIkSZKkZeV6LmcgM78AfOEqdjlwPa8nrRDmuVYD81yrgXmu1cA812pgntdRZNbMhShJkiRJklTjeuZEkCRJkiRJq0jdiggR8eqI+FJEPBMRUxHxm8X6WyPiixHx9eLxZ4r1ERH/OyK+ERF/ExFvrNdYpEaJiBsj4q8i4kSR579brK9ExFNFnn+umGyUiPh3xfI3ivjmpRy/dDUi4oaI+EpEjBbL5rmaSkQ8FxFfjYjjETFZrPO8RU0lIm6JiMcj4u+K8/Ru81zNJCJeVxzHL3zNRcT7zPPGqWcnwnngvsy8A7gT+PWI+I/A/cBYZr4WGCuWAd4BvLb42gc8WsexSI3yQ+DtmbkV2AbsiIg7gY8AjxR5/iJwb7H9vcCLmfnzwCPFdtJK8ZvAMxctm+dqRm/LzG0X3frL8xY1m48DRzPzPwBbmT+um+dqGpn598VxfBvwJuAMMIJ53jB1KyJk5snM/HLx/PvMH6BuB+4GhorNhoB3Fc/vBv405/0lcEtEbKzXeKRGKPL1B8XimuIrgbcDjxfrL83zC/n/ONAbEbFIw5WuWUS0A78CfKJYDsxzrQ6et6hpREQr0AN8EiAz/zUzv4t5rubVC3wzM/8B87xhGjInQtHK+gbgKWB9Zp6E+UID8Kpis9uBb1+0W7VYJy1rRYv3ceAF4IvAN4HvZub5YpOLc/mlPC/i3wPaFnfE0jX5GPDbwL8Vy22Y52o+Cfx5RDwdEfuKdZ63qJm8Bvgn4E+Ky9M+ERE3YZ6reb0b+Ezx3DxvkLoXESLip4E/A96XmXMvt+ll1nmrCC17mfmjol2qHXgzcMflNisezXOtOBHRB7yQmU9fvPoym5rnWunekplvZL619dcjoudltjXPtRK1AG8EHs3MNwD/wo9bui/HPNeKVczV9E7g8JU2vcw68/wq1LWIEBFrmC8gHMzMzxerT19oDykeXyjWV4FXX7R7O/B8PccjNVLRDvgk83OA3BIRLUXo4lx+Kc+L+M3AdxZ3pNJ
Vewvwzoh4Dvgs85cxfAzzXE0mM58vHl9g/vrZN+N5i5pLFahm5lPF8uPMFxXMczWjdwBfzszTxbJ53iD1vDtDMH+91TOZ+YcXhY4AA8XzAeCJi9b/12J2zDuB711oN5GWq4j42Yi4pXi+FvhF5uf/+BKws9js0jy/kP87gb/ITCudWtYy80OZ2Z6Zm5lvC/yLzNyLea4mEhE3RcQrLzwHfhn4WzxvURPJzFPAtyPidcWqXuBrmOdqTr/Kjy9lAPO8YaJe53kR8Z+B/wt8lR9fQ/sA8/MiDAM/B3wL2JWZ3ymKDv8H2MH8DJq/lpmTdRmM1CARsYX5iVluYL4IN5yZvxcRr2H+E9tbga8A/yUzfxgRNwKfZn6OkO8A787MZ5dm9NLVi4i3Ar+VmX3muZpJkc8jxWILcCgzfz8i2vC8RU0kIrYxP0nuK4BngV+jOIfBPFeTiIh1zM9z8JrM/F6xzuN5g9StiCBJkiRJkppbQ+7OIEmSJEmSmo9FBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVIpFBEmSJEmSVMr/B4A8zrWvfmpGAAAAAElFTkSuQmCC\n", 118 | "text/plain": [ 119 | "" 120 | ] 121 | }, 122 | "metadata": {}, 123 | "output_type": "display_data" 124 | } 125 | ], 126 | "source": [ 127 | "# set figure size\n", 128 | "fig = plt.figure(figsize=(18,3))\n", 129 | "#ax = fig.add_axes([0,0,1,1],frameon=False)\n", 130 | "# ax = fig.add_subplot(2,1,2)\n", 131 | "\n", 132 | "#add subplot\n", 133 | "ax1 = fig.add_subplot(1,1,1)\n", 134 | "count = 0\n", 135 | "myvehicle_x_pos = 330\n", 136 | "myvehicle_y_pos = 42\n", 137 | "myvehicle_vel = 40\n", 138 | "myvehicle_theta = 0\n", 139 | "# Animation function\n", 140 | "\n", 141 | "def animate(i):\n", 142 | " global count; global myvehicle_x_pos; global myvehicle_vel; global myvehicle_y_pos; global myvehicle_theta\n", 143 | "\n", 144 | " timestep = 0.1\n", 145 | "\n", 146 | " # update vehicle velocity and theta\n", 147 | " myvehicle_vel, myvehicle_theta = updateVel(myvehicle_x_pos, myvehicle_y_pos, myvehicle_theta, myvehicle_vel)\n", 148 | " myvehicle_x_pos = timestep * math.cos(math.radians(myvehicle_theta))*myvehicle_vel + myvehicle_x_pos\n", 149 | " myvehicle_y_pos = timestep * math.sin(math.radians(myvehicle_theta)) * myvehicle_vel + myvehicle_y_pos\n", 150 | " # Slice relevant information by frame number\n", 151 | " x = sliced[i][:,2]\n", 152 | " y = sliced[i][:,3]\n", 153 | " names = sliced[i][:,0]\n", 154 | " lane_label = sliced[i][:,4]\n", 155 | " vehicle_length = sliced[i][:,5]\n", 156 | " vehicle_width = sliced[i][:,6]\n", 157 | "\n", 158 | " # ax.clear()\n", 159 | " ax1.clear()\n", 160 | " plt.axhline(y=12, color='white', linestyle = '--')\n", 161 | " plt.axhline(y=24, color='white', linestyle = '--')\n", 162 | " plt.axhline(y=36, color='white', linestyle = '--')\n", 163 | " plt.axhline(y=48, color='white', linestyle = '--')\n", 164 | " plt.axhline(y=60, color='white', linestyle = '--')\n", 165 | " plt.axhline(y=72, color='white', linestyle = '--')\n", 166 | " #ax.imshow(img, extent = [-300,300,0,1500])\n", 167 | " # ax.set_autoscaley_on(False)\n", 168 | " # ax.set_autoscalex_on(False)\n", 169 | " # ax.set_xlim([200,330])\n", 170 | " # ax.set_ylim([0,100])\n", 171 | " # ax.scatter(y,x, s = 10)\n", 172 | "\n", 173 | " # set autoscale off, set x,y axis\n", 174 | " ax1.set_autoscaley_on(False)\n", 175 | " ax1.set_autoscalex_on(False)\n", 176 | " ax1.set_xlim([200,740])\n", 177 | " ax1.set_ylim([0,80])\n", 178 | " ax1.set_facecolor('gray')\n", 179 | " # ax1.scatter(y,x,s=10)\n", 180 | " patches = []\n", 181 | " patches1 = []\n", 182 | " lane_color = [\"white\", \"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"black\", \"pink\"]\n", 183 | " # ax1.scatter(y,x, s = 50, marker = \"s\")\n", 184 | "\n", 185 | " # unzip by category, create rectangle for each car by frame\n", 186 | " for x_cent, y_cent, lane, vlength, vwidth in zip(x,y,lane_label,vehicle_length, vehicle_width):\n", 187 | " # print(x_cent, y_cent)\n", 188 | " vlen = vlength*0.75\n", 189 | " vwid = vwidth*0.75\n", 
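"    # note: Local_Y is plotted on the horizontal axis and Local_X on the vertical axis,\n", "    # so each rectangle below is anchored at (y_cent, x_cent); the 0.75 factor shrinks the\n", "    # drawn footprint relative to the recorded v_Length / v_Width (positions are in feet)\n",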
190 | " # colored vehicles\n", 191 | " # patches.append(ax1.add_patch(plt.Rectangle((y_cent-vlen/2, x_cent-vwid/2), vlen, vwid,\n", 192 | " # fill=True, angle=0, linewidth = 2, edgecolor = lane_color[int(lane)], color = lane_color[int(lane)])))\n", 193 | "\n", 194 | " patches.append(ax1.add_patch(plt.Rectangle((y_cent-vlen/2, x_cent-vwid/2), vlen, vwid,\n", 195 | " fill=True, angle=0, linewidth = 2, edgecolor = lane_color[int(lane)], color = 'k')))\n", 196 | "\n", 197 | " #patches1.append(ax.add_patch(plt.Rectangle((y_cent, x_cent), 3, 2, fill=False, edgecolor=\"blue\",label=lane_label)))\n", 198 | " # for i, txt in enumerate(names):\n", 199 | " # ax1.annotate(int(txt), (int(y[i]),int(x[i])), fontsize=10)\n", 200 | " # if i%2==0:\n", 201 | " # return patches\n", 202 | " # else:\n", 203 | " # return patches1\n", 204 | " patches.append(ax1.add_patch(plt.Rectangle((myvehicle_x_pos,myvehicle_y_pos), 8, 4, fill=True,\n", 205 | " angle = myvehicle_theta, color = 'red' )))\n", 206 | " count = count +1\n", 207 | " print(myvehicle_x_pos)\n", 208 | " time.sleep(0.5)\n", 209 | " return patches\n", 210 | "\n", 211 | "\n", 212 | "# Animate at interval of 100ms\n", 213 | "ani = animation.FuncAnimation(fig, animate, frames = range(2,30000), interval=100, blit=True)\n", 214 | "#ani.save('video.mp4')\n", 215 | "\n", 216 | "\n", 217 | "plt.show()\n" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": null, 223 | "metadata": {}, 224 | "outputs": [], 225 | "source": [] 226 | } 227 | ], 228 | "metadata": { 229 | "kernelspec": { 230 | "display_name": "Python 3", 231 | "language": "python", 232 | "name": "python3" 233 | }, 234 | "language_info": { 235 | "codemirror_mode": { 236 | "name": "ipython", 237 | "version": 3 238 | }, 239 | "file_extension": ".py", 240 | "mimetype": "text/x-python", 241 | "name": "python", 242 | "nbconvert_exporter": "python", 243 | "pygments_lexer": "ipython3", 244 | "version": "3.6.4" 245 | } 246 | }, 247 | "nbformat": 4, 248 | "nbformat_minor": 2 249 | } 250 | -------------------------------------------------------------------------------- /data-analysis-report-0400-0415.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/data-analysis-report-0400-0415.pdf -------------------------------------------------------------------------------- /data_vis.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import matplotlib.pyplot as plt 6 | from matplotlib import animation 7 | 8 | filepath = 'vehicle-trajectory-data/0400pm-0415pm/trajectories-0400pm-0415pm.csv' 9 | data = pd.read_csv(filepath) 10 | data_cut = data[['Vehicle_ID', 'Frame_ID', 'Local_X', 'Local_Y']] 11 | sorted_frame = data_cut.sort_values(by=['Frame_ID']) 12 | sorted_np = sorted_frame.values 13 | sorted_np = sorted_np[0:30000,:] 14 | 15 | # init array of sliced values, by frame number 16 | sliced = [] 17 | 18 | # slice data by frame number 19 | for i in range(int(min(sorted_np[:,1])),int(max(sorted_np[:,1]))): 20 | sliced.append(sorted_np[sorted_np[:,1]==i]) 21 | 22 | #fig, ax = plt.subplots() 23 | img = plt.imread("ASPeachtree.jpg") 24 | fig = plt.figure(figsize=(7,7)) 25 | #ax = fig.add_axes([0,0,1,1],frameon=False) 26 | ax = fig.add_subplot(1,2,1) 27 | ax1 = fig.add_subplot(1,2,2) 28 | #fig, ax = plt.subplots() 29 | 30 | 31 | def animate(i): 32 | x = sliced[i][:,2] 33 
| y = sliced[i][:,3] 34 | names = sliced[i][:,0] 35 | ax.clear() 36 | ax1.clear() 37 | #ax.imshow(img, extent = [-300,300,0,1500]) 38 | ax.set_autoscaley_on(False) 39 | ax.set_autoscalex_on(False) 40 | ax.set_xlim([-300,300]) 41 | ax.set_ylim([0,1500]) 42 | ax.scatter(x,y, s = 10) 43 | 44 | ax1.set_autoscaley_on(False) 45 | ax1.set_autoscalex_on(False) 46 | ax1.set_xlim([-100,100]) 47 | ax1.set_ylim([0,600]) 48 | ax1.scatter(x,y, s = 50, marker = "s") 49 | 50 | for i, txt in enumerate(names): 51 | ax.annotate(int(txt), (int(x[i]),int(y[i])), fontsize=8) 52 | ax1.annotate(int(txt), (int(x[i]),int(y[i])), fontsize=10) 53 | 54 | 55 | ani = animation.FuncAnimation(fig,animate,frames = range(2,30000), interval=50) 56 | plt.show() 57 | -------------------------------------------------------------------------------- /emeryville.dwg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/emeryville.dwg -------------------------------------------------------------------------------- /feature_extract.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import math 4 | 5 | # Read CSV through Pandas 6 | filepath = 'trajectories-0400-0415.csv' 7 | data = pd.read_csv(filepath) 8 | 9 | # Leave data with the following headings only 10 | data = data[['Vehicle_ID', 'Frame_ID', 'Local_X', 11 | 'Local_Y','v_Vel', 'Lane_ID']] 12 | data_np = data.values.astype(np.float32) # convert to numpy 13 | 14 | # NGSIM has 1 million data points. Change num_data to another value to reduce 15 | # # of data points obtained 16 | num_data = len(data_np) 17 | data_np = data_np[0:num_data] 18 | 19 | # Create empty list to organize data by Vehicle ID 20 | sliced = [] 21 | #slice data by vehicle ID 22 | for i in range(int(min(data_np[:,0])),int(max(data_np[:,0]))+1): 23 | sliced.append(data_np[data_np[:,0]==i]) 24 | 25 | # Create empty list for each variable 26 | dx, dy, theta, x_p, y_p, v, lane_num, d0 = ([] for i in range(8)) 27 | 28 | ''' 29 | Iterate for each vehicle ID. Must iterate for each vehicle because of vehicle orientation 30 | which is found by atan2(dx,dy) over 5 time steps. 31 | ''' 32 | for z in range(0,len(sliced)): 33 | 34 | # z contains vehicle number. Thus, sliced[1][:,2] contains x_pos 35 | # data for 1st vehicle, and so on. 36 | x_pos = sliced[z][:,2] 37 | y_pos = sliced[z][:,3] 38 | vel = sliced[z][:,4] 39 | lane = sliced[z][:,5] 40 | 41 | # Finding orientation in radians 42 | for count in range(len(x_pos)): 43 | if lane[count] == 7: 44 | d0.append(abs( (18/721) * y_pos[count] + x_pos[count] + 18)/ (np.sqrt((18/721)**2 + (1)))) 45 | else: 46 | d0.append(x_pos[count]%12) #calculate distance between from lane 47 | 48 | # Due to noise in data, 5 time steps are used. 49 | # The delta_x is found by x(t) - x(t-4), same for delta_y 50 | # Assume orientation of 0 (parallel to lane) for first 4 timesteps of each vehicle 51 | if count <4: 52 | dx.append(0) 53 | dy.append(0) 54 | theta.append(0) 55 | else: 56 | delta_x = x_pos[count]-x_pos[count-4] 57 | delta_y = y_pos[count]-y_pos[count-4] 58 | dx.append(delta_x) 59 | dy.append(delta_y) 60 | theta.append(math.atan2(delta_x,delta_y)) # OUTPUT: RADIANS! 61 | 62 | # Append back to list. 
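# (For scale: with the five-step differences used above, a car that drifts
#  delta_x = 0.5 ft laterally while moving delta_y = 8 ft downstream gets
#  theta = atan2(0.5, 8), roughly 0.062 rad or about 3.6 degrees off lane-parallel.
#  d0 is the lateral offset within the current 12-ft lane (x_pos % 12), except in
#  lane 7, where it is the point-to-line distance to that lane's sloped boundary.)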
63 | x_p = np.append(x_p, x_pos) 64 | y_p = np.append(y_p, y_pos) 65 | v = np.append(v, vel) 66 | lane_num = np.append(lane_num, lane) 67 | 68 | 69 | # Now create new dataframe called features, which is fed into feature extractor below for 70 | # d1~d6, v1~ v6 calculation 71 | 72 | features = pd.DataFrame({'x_position': [], 'y_position': [], 'theta': [], 'lane': []}) 73 | features = features.assign(vehicle_id = data['Vehicle_ID'], frame = data_np[:,1], 74 | x_position = x_p, y_position = y_p, velocity = v, lane = lane_num, 75 | theta = theta, d0=d0) 76 | 77 | '''Now we must iterate through the whole list to find d0~d6 and v0~v6. Could be more efficient to 78 | sort by frame first, but then it becomes harder to add back into data ''' 79 | 80 | # create empty lists for variables 81 | d1, d2, d3, d4, d5, d6, v1,v2,v3,v4,v5,v6 = ([] for i in range(12))# iterate through whole list 82 | 83 | 84 | for i in range(0,num_data): 85 | # init values behind as zero, infront as 1000 86 | ve2, ve4, ve6 = (0 for f in range(3)) 87 | di1, di2, di3, di4, di5, di6, ve1, ve3, ve5 = (1000 for n in range(9)) 88 | 89 | #obtain relevant data 90 | vehicle_x = x_p[i] 91 | vehicle_y = y_p[i] 92 | frame = data_np[i,1] 93 | lane = lane_num[i] 94 | velocity = v[i] 95 | 96 | # The next two lines collect data for surrounding cars at that specific time step. 97 | # Procedure: 1) List all vehicles in that frame 2) Leave vehicles with greater y value 98 | # in variable 'infront', and lower y value in variable 'behind' 99 | infront = features.loc[(features['frame']==frame) & (features['y_position']>vehicle_y), 100 | ['x_position','y_position', 'velocity', 'lane']] 101 | behind = features.loc[(features['frame']==frame) & (features['y_position'] (Hmin_prev + Hmin_post)):\n", 117 | " for nidx_t in range(Hmin_prev,(len_idx_found-Hmin_post)):\n", 118 | " info_cur_init = data_np_found[nidx_t,indexes_info]\n", 119 | " p_cur_init = data_np_found[nidx_t,indexes_p]\n", 120 | " f_cur_init = data_np_found[nidx_t,indexes_f]\n", 121 | " \n", 122 | " # Set prev-trajectory\n", 123 | " prevTraj_tmp = np.zeros((dim_p,H_prev))\n", 124 | " H_prev_tmp = np.min([H_prev,nidx_t])\n", 125 | " cnt_prev_tmp = 0\n", 126 | " p_cur = p_cur_init\n", 127 | " for nidx_tt in range(1,H_prev_tmp+1):\n", 128 | " p_next = data_np_found[nidx_t-nidx_tt,indexes_p]\n", 129 | " cnt_prev_tmp = cnt_prev_tmp + 1\n", 130 | " prevTraj_tmp[:,cnt_prev_tmp-1] = p_next - p_cur\n", 131 | " p_cur = p_next\n", 132 | " \n", 133 | " if(cnt_prev_tmp < H_prev):\n", 134 | " remain_tmp = H_prev - cnt_prev_tmp\n", 135 | " indexes_remain = range(cnt_prev_tmp,H_prev)\n", 136 | " prev_Traj_tmp_last = np.reshape(prevTraj_tmp[:,cnt_prev_tmp-1],(dim_p,1))\n", 137 | " prevTraj_tmp[:,indexes_remain] = np.tile(prev_Traj_tmp_last,(1,remain_tmp))\n", 138 | " \n", 139 | " prevTraj_tmp_vector = np.reshape(prevTraj_tmp,H_prev*dim_p)\n", 140 | " \n", 141 | " # Set post-trajectory\n", 142 | " postTraj_tmp = np.zeros((dim_p,H_post))\n", 143 | " H_post_tmp = np.min([H_post,(len_idx_found-nidx_t-1)])\n", 144 | " cnt_post_tmp = 0\n", 145 | " p_cur = p_cur_init\n", 146 | " for nidx_tt in range(1,H_post_tmp+1):\n", 147 | " p_next = data_np_found[nidx_t+nidx_tt,indexes_p]\n", 148 | " cnt_post_tmp = cnt_post_tmp + 1\n", 149 | " postTraj_tmp[:,cnt_post_tmp-1] = p_next - p_cur\n", 150 | " p_cur = p_next\n", 151 | " \n", 152 | " if(cnt_post_tmp < H_post):\n", 153 | " remain_tmp = H_post - cnt_post_tmp\n", 154 | " indexes_remain = range(cnt_post_tmp,H_post)\n", 155 | " post_Traj_tmp_last = 
np.reshape(postTraj_tmp[:,cnt_post_tmp-1],(dim_p,1))\n", 156 | " postTraj_tmp[:,indexes_remain] = np.tile(post_Traj_tmp_last,(1,remain_tmp))\n", 157 | " \n", 158 | " postTraj_tmp_vector = np.reshape(postTraj_tmp,H_post*dim_p)\n", 159 | " \n", 160 | " # Update 'data'\n", 161 | " cnt_data = cnt_data + 1\n", 162 | " info_data[cnt_data-1,:] = info_cur_init\n", 163 | " p_data[cnt_data-1,:] = p_cur_init\n", 164 | " f_data[cnt_data-1,:] = f_cur_init\n", 165 | " prevTraj_data[cnt_data-1,:] = prevTraj_tmp_vector\n", 166 | " postTraj_data[cnt_data-1,:] = postTraj_tmp_vector\n", 167 | "\n", 168 | "info_data = info_data[range(0,cnt_data),:]\n", 169 | "p_data = p_data[range(0,cnt_data),:]\n", 170 | "f_data = f_data[range(0,cnt_data),:]\n", 171 | "prevTraj_data = prevTraj_data[range(0,cnt_data),:]\n", 172 | "postTraj_data = postTraj_data[range(0,cnt_data),:]" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 4, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "# Save data\n", 182 | "import scipy\n", 183 | "import scipy.io as sio\n", 184 | "\n", 185 | "filename2save = \"trainData_i80.mat\"\n", 186 | "sio.savemat(filename2save, {'info_data':info_data,'p_data':p_data,'f_data':f_data,'prevTraj_data':prevTraj_data,'postTraj_data':postTraj_data,})" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 10, 192 | "metadata": {}, 193 | "outputs": [ 194 | { 195 | "data": { 196 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4xLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvAOZPmwAAIABJREFUeJzt3Xt83HWd7/HXZ3KDYGjT+yVNSrV2\noWXFJtCiLiIIUuRsK1Vayq7gol326O7qw3O0sCvuou6WddfVPbqUCl1kD6UglMtxi4IXqC6kkhS0\nNyslNGl6TdtQCoUkk/mcP+Y3YZKZSSa3uSTv5+PRR2a+v99kPmmb72e+d3N3RERk9AllOwAREckO\nJQARkVFKCUBEZJRSAhARGaWUAERERiklABGRUUoJQERklFICEBEZpZQARERGqcJsB9CbCRMm+MyZ\nM7MdhohIXqmvrz/q7hP7ui+nE8DMmTOpq6vLdhgiInnFzBrTuU9dQCIio5QSgIjIKKUEICIySikB\niIiMUkoAIiKjlBKAiMgopQQgIqNefWMr3/vFHuobW7MdSkb1mQDMbJ2ZHTGz7T3K/9LMfmdmO8zs\nn+LKbzazPWa228w+Eld+RVC2x8xWDe2PISIyMOu3NPGJNc/yzZ/sZvna50ZVEkinBXAPcEV8gZl9\nCFgMvMfd5wL/HJSfAywH5gav+XczKzCzAuB7wCLgHODa4F4RkaxZvWkXtzyyjUhwNHpHp3PnMy9n\nN6gM6nMlsLtvNrOZPYr/Aljt7m3BPUeC8sXAhqD8FTPbA1wQXNvj7g0AZrYhuHfnoH8CEZF+qm9s\n5W8f2cauQycTrh1+7a0sRJQdAx0DeDfwR2a2xcyeMbPzg/LpwL64+5qDslTlIiIZtX5LE0vveDZp\n5Q+w7PzKDEeUPQPdC6gQGAcsBM4HHjSzWUMRkJmtBFYCVFaOnn8IERl+qzftYs3mhpTXb7poFisW\njJ56Z6AJoBnY6O4O/NrMIsAEYD8wI+6+iqCMXsq7cfe1wFqAmpoaH2B8IjLK1Te2UttwjIWzxgOw\n5pmXeWrn4aT3njO1jK8tOZfqqvJMhph1A00AjwIfAn5hZu8GioGjwOPAejP7FjANmA38GjBgtpmd\nRbTiXw6sGGTsIjKKxSr48tJiWk+1d32NVfjX3VVLezhCYcjAjPZwJOn3WXLeNL69/L2ZDD1n9JkA\nzOx+4GJggpk1A18F1gHrgqmh7cD1QWtgh5k9SHRwNwx81t07g+/zOeAnQAGwzt13DMPPIyKjwOpN\nu7jzlw140EdggAMhg+LCEEvnV9AejhDx6Mye6NVEN100i1VXnp2psHNOOrOArk1x6U9S3P8N4BtJ\nyjcBm/oVnYhID+u3NCX048eq94hDRziCE00EHeEIBUELIByOEGsDhAy+vuTcUdXfn0xOHwgjItLT\nP/3kdymvhQyKghbA0vkV3cYA4ruLFs4aP+r6+5NRAhCRvLF60y5ePdWRUH7dgkrmThuTULnHV/Kq\n8BMpAYhI3nj0xaSTB7l6foUq+AHQZnAikjdmjCtNWv6VR7dlOJKRQQlARPLGqkVnE7LE8p0HT46q\nTdyGihKAiOSN6qpyfnjT+ygtLki4dvsTu7IQUX5TAhCRvFJdVc7ffjRxM+GtTWoB9JcSgIjknRUL\nKikp6N4XFI5EZwlJ+pQARCQvfer9ZyWUrfvvV7IQSf5SAhCRvLTqyrMp6lGDtXe6WgH9oAQgInnr\nxg8k7kKvVkD6lABEJG+pFTA4SgAikteStQLueW5vxuPIR0oAIpLXVl15Nj0mBPFWR4T1W5qyE1Ae\nUQIQkbw3P8k+QP+4aWcWIskvSgAikvdWLUo81OVkWyefvHtLFqLJH30mADNbZ2ZHgtO/el77opm5\nmU0InpuZ/ZuZ7TGz35rZ/Lh7rzezl4I/1w/tjyEio1l1VTlLzpuWUL75paMaEO5FOi2Ae4Arehaa\n2QzgciC+o20R0XOAZwMrgTuCe8cRPUpyAXAB8FUz096tIjJkvr38vUwpK0koX7O5QRv
FpdBnAnD3\nzcDxJJf+FfgS3Q/bXAzc61G1wFgzmwp8BHjK3Y+7eyvwFEmSiojIYHzvT6qTln/uvvoMR5IfBjQG\nYGaLgf3u/psel6YD++KeNwdlqcpFRIZMdVU5N12UOC304GttGg9Iot8JwMxKgVuAW4c+HDCzlWZW\nZ2Z1LS0tw/EWIjKCrbrybC6aPSGhfPNLRzU1tIeBtADeCZwF/MbM9gIVwFYzmwLsB2bE3VsRlKUq\nT+Dua929xt1rJk6cOIDwRGS0u/fGBUnHA/7u8e0aD4jT7wTg7tvcfZK7z3T3mUS7c+a7+yHgceCT\nwWyghcAJdz8I/AS43MzKg8Hfy4MyEZFhkWw8oL3TWXrHs0oCgXSmgd4PPAfMMbNmM7uxl9s3AQ3A\nHuD7wP8EcPfjwNeA54M/twVlIiLDItXUUIAvPvhihqPJTYV93eDu1/ZxfWbcYwc+m+K+dcC6fsYn\nIjJg317+XvYefYMXm090K9977BT1ja1UJ1lBPJpoJbCIjGiPfu4DnFaYWNV95dFtWYgmtygBiMiI\nd+v/mJtQ9ruDJ7MQSW5RAhCREW/FgsqEHUMj6AxhJQARGRWS7Rg62s8NUAIQkVEh2Y6ho/3cACUA\nERkVqqvKOXtKWUL5aD43QAlAREaNr3/s3ISyk22do7YV0Oc6ABGRkaK6qpyqcaU0Hj/VrfwfN+1k\nx4ETOLB0fsWoWR9g0bVbuammpsbr6uqyHYaIjCD1ja0svePZlNcLC4xL5kwCYEJZSV4mBDOrd/ea\nvu5TC0BERpXqqnIumj2BzS8dTXo93Ok8ufNw1/MH6/ZxyZxJTCgrYd60MSOqpaAWgIiMSp+8e0vK\nJJCOkMGlZ0/mQ3Mm0XqqnYWzxudMQki3BaAEICKjVn1jK7UNxygvLWbHgRO0nGzj57uPEO7sX71o\nQChkXPIHk/jQnElZbyUoAYiIDEB9YysbtzbTcrINYEAJISZk8MOb3pfxJKAxABGRAaiuKu9WYccS\nggPzpo3h6d1H+Omuw0TSyAkRh9uf2MWDN71v+AIeBCUAEZFe9EwIKxZUdksKZ5YUsvaXDSkTQlOP\nKae5RAlARKSf4pNCfWMrd/0qdQJYct70bmMNuTRgrAQgIjIItQ3H6Iwkv2ZA5fgzuO6uWtrDka4k\nETL48NmT+fMPvjOriSCdIyHXmdkRM9seV/ZNM/udmf3WzB4xs7Fx1242sz1mttvMPhJXfkVQtsfM\nVg39jyIiknkLZ42nqOde04GQwRPbD3ar/CE6NvDkzsNc+/3arJ5PnM5eQPcAV/QoewqY5+5/CPwe\nuBnAzM4BlgNzg9f8u5kVmFkB8D1gEXAOcG1wr4hIXquuKuf+lReyYkElF8ws512T3kFhCAoMigpD\nLJo3lVTjxR3hCLUNxzIab7x0zgTebGYze5Q9Gfe0Fvh48HgxsMHd24BXzGwPcEFwbY+7NwCY2Ybg\n3tG7DZ+IjBjJZg7F+vz/7ae/J9Vs+6LCEAtnjc9QlImGYjfQPwOeCB5PB/bFXWsOylKVJzCzlWZW\nZ2Z1LS0tQxCeiEhmVVeVc/LNDm55ZBuHgvUEPV1+zmTu/8zCrI4BDGoQ2Mz+BggD9w1NOODua4G1\nEF0INlTfV0QkU9ZvaWLN5oaU1//hY+eyYkFlBiNKbsAJwMxuAK4CLvW3lxPvB2bE3VYRlNFLuYjI\niPLvv3gpafnUM0v47nXVOTEFFAaYAMzsCuBLwAfdPX6Vw+PAejP7FjANmA38muhsqNlmdhbRin85\nsGIwgYuI5JL4fv8DJ95KuL7kvGl8e/l7sxBZan0mADO7H7gYmGBmzcBXic76KQGeMjOAWne/yd13\nmNmDRAd3w8Bn3b0z+D6fA34CFADr3H3HMPw8IiIZVd/YyppnXubnvztCJOJJZ/yUFoVyrvKH9GYB\nXZuk+O5e7v8G8I0k5ZuATf2KTkQkx9Q3tvLw1mYMKCsp5Pu/bKCvveI+eeHMTITWb1oJLCKSps9v\neIFHXzzQr9csOW8aq648e5giGhwlABGRNKzetKvPyr8gZHzmA2fxWlsYA67O8VPDlABERNLwYH1z\n0nLj7dPBsr23T38pAYiIpKG9o7PbcwO+8bFzc2p3z/5SAhARSUNxYQja304CDvx4+0HuvXFB9oIa\npKHYCkJEZMS7pmZGQtnml47y+Q0vZCGaoaEEICLSi/rGVr73iz1cNncKsyeekXD90RcPsH5LUxYi\nGzx1AYmI8PZK3tjunLFVvbf9aAft4QiFIQNLvu//E9sP5sTePv2lBCAiI16yyr2vij7cGSFkRsSd\niENHp0OKnf0XzZuaqR9lSCkBiMiIEV/RV1eVd63afai+mXBn98q9r4reAdwJhQzDKQju7+yMUBAy\nzpsxlrZwhGXnV+blp39QAhCRPNRXd01xYYhbr5rLbT/aQVtHpOtze3zlnk5FXxR8n9hUz9h75eu0\nz56UAEQkL6zf0sQDzzdRUhjixeYTvX+KD0e6zuKNVf4G0bN74z7FD6SiHwkVf4wSgIjkvNWbdiU9\nYCXVp/jYWbzP7z1ORzha2X+iZgZXz68AEscARnpFn4oSgIjktFSna/X8RN/zU3x1VTlzppQlreBT\nPR5tlABEJGet39LELY9sSyg3YMWCyoRP9Mk+xY/mCr4vSgAikpNSVf4Q3YMnfuaNKvmB6XMlsJmt\nM7MjZrY9rmycmT1lZi8FX8uDcjOzfzOzPWb2WzObH/ea64P7XzKz64fnxxGRkaC+sZWvPLY96bWb\nLprVrfKPrdStb2zNVHgjRjotgHuA7wL3xpWtAn7m7qvNbFXw/MvAIqLnAM8GFgB3AAvMbBzRoyRr\niK6kqDezx91d/2IikqC24RidkeSLru55bi+XzZ3SNc//urtqu6Z+3vfphV2vjw3ybtzajANLc3xv\n/mxI50jIzWY2s0fxYqLnBAP8AHiaaAJYDNzr7g7UmtlYM5sa3PuUux8HMLOngCuA+wf9E4jIiLNw\n1niKC0O0hyMJ1zrCEWobjlFdVU5twzHaw5GuqZ8btzbz8Nbm6IreghARd8LBeY0P/LqJz/zRLMpO\nLxox8/gHa6BjAJPd/WDw+BAwOXg8HdgXd19zUJaqXEQkQXVVOfd/ZmHXp/d508Zw24920BGOzvaJ\nfbqPJYpYuUO3hBDfhuh0WLO5AQNKipLP+y8vLc7r/f37a9CDwO7uZtbHkcjpM7OVwEqAysr8XF4t\nIoPXcwZPsimd1VXl3PfphQldPh3hCAU9WgAxsSRx62PbibhTWBACd8KR6CIyA4oKQ3y8uoKlfcwy\nyncDTQCHzWyqux8MuniOBOX7gfhNsyuCsv283WUUK3862Td297XAWoCampohSywikt9STensWd4z\nIdz5zMv8bNdhIh6t/ENAyIzOSLAlRNDNFKtsYgni/i1NPFTf3JUcCguiSWHetDEjZmsIi3bX93FT\ndAzgR+4+L3j+TeBY3CDwOHf/kpl9FPgccC
XRQeB/c/cLgkHgeiA2K2grUB0bE0ilpqbG6+rqBvaT\niYgEYnsHxbp4YvsGxVoK8S2AeLHNn+O3k3DebiX0lhyymRDMrN7da/q6r88WgJndT/TT+wQzayY6\nm2c18KCZ3Qg0AtcEt28iWvnvAU4BnwJw9+Nm9jXg+eC+2/qq/EVEhkqy1kN8lxK8PQaw48AJfli3\nj86IdyWHjs5oayG+lRDfcoi1GGLXiwqMDSsvzPlWQVotgGxRC0BEsqHnbqMbtzbzw7p9Xa2EEFBY\n2D059HT5OZNZ+8k+P4QPiyFrAYiIjDY9WwzVVeVcPb8iYaYQvJ0c2nsMNr/QlPvLnJQARETS0Nsg\n9NXzK1i+9rlgd9KoN9o7MxnegOhQeBGRQaquKqe0pKBbWVFh8vODc4kSgIjIECgqCPX6PBflfoQi\nInkg3GPbip7Pc5ESgIjIEHizI9Lr81ykBCAikqZkW0/XN7ZyyyPbaOvxib8tHMn5Lao1C0hEpIee\n6wBi0z9v+9GOhK2nr7urlrYkn/YteF0uLwZTAhARiVPf2No1pdOAwoLovkEhMyLuXTuN1jYcA6Kr\ngJMtBCspenvX0lylBCAiEuf2J3Z1zed3eHtuvzuhkGF4ty2pY9tRF4SMT9TMYG6O7AeUDiUAEZFA\nfWMrv96b2G9vQHGPMwRilXv87qO5XuH3pAQgIhLYuLU5ocyAFQsquTrFkZKpVgjnAyUAERn1YoO+\nLx0+mXDtD6aU8Y2PnZuFqIafEoCIjDr1ja08vLUZA8pKCrnrV690HRDTU3HhyJ0trwQgIqNGfWMr\na+JOCEvHsvNH7tG0SgAiMqKt39LEA883UVIYYuu+VxPOCO4pZNE/c6eNYdn5laxYoASQlJl9Afg0\n0dlS24ieADYV2ACMJ3oM5J+6e7uZlQD3AtXAMWCZu+8dzPuLiPRm/ZYmbnlkW5/3GVAQMj79gbMo\nO70oL2f0DMSAE4CZTQf+CjjH3d80sweB5USPhPxXd99gZmuAG4E7gq+t7v4uM1sO3A4sG/RPICKS\nwrr/fiXltZDBh8+ezMVzJuXNvP2hNtguoELgdDPrAEqBg8AlwIrg+g+AvyOaABYHjwEeAr5rZua5\nfCaliOS1wyfeTCi7/JzJTCwrSTmtczQZcAJw9/1m9s9AE/Am8CTRLp9X3T0c3NYMTA8eTwf2Ba8N\nm9kJot1ERwcag4hIKvWNrZxs634qV2lRKGvn9OaiAc9vMrNyop/qzwKmAWcAVww2IDNbaWZ1ZlbX\n0tIy2G8nIqPUnc+8nFBWNf6MLESSuwYzwfXDwCvu3uLuHcBG4P3AWDOLtSwqgP3B4/3ADIDg+hii\ng8HduPtad69x95qJEycOIjwRGc12HHwtoWzGuNIsRJK7BpMAmoCFZlZqZgZcCuwEfgF8PLjneuCx\n4PHjwXOC6z9X/7+IDJfTkyzg2nf8VBYiyV2DGQPYYmYPAVuBMPACsBb4L2CDmX09KLs7eMndwH+a\n2R7gONEZQyIiQyq2yjfZmbyNSgDdDGoWkLt/Ffhqj+IG4IIk974FfGIw7ycikkys0t9z+CR1ja0p\nV/kWWGbjynVaCSwieSm2rcPOAyc48OpbSffx6em6BVXDHlc+UQIQkbxT39jK0jue7ddrlpw3jVVX\nnj1MEeUnJQARyTtfebT37R1CBjVV5YwtLdair14oAYhI3nnl6BtJy8eeXsi7J5cxtrSYCWUlLFXF\n3yslABHJOzPHn8GuQ4mHt7z6ZrjbkY4P1u3jkjmTAJhQVsK8PDqvNxOUAEQk73z9Y+emNQYQ7nSe\n3Hm4W1nPnT/LS4tpPdXe9XU0JQfL5bVYNTU1XldXl+0wRCQH1Te2cvsTu9h16CQn3wr3/YIkjOhe\n9vFfiwpDXPzu6C4E+dpqMLN6d+9z0yMlABHJe/WNrWzc2owD86aNYceBE7ScbOPnu4/0eQBMOuJb\nDSfbwl3vk6uJId0EoC4gEcl71VXlSSvhWGJoOdnGhLISzuxx/m/PFkAqDoQjzprNDd3KeyaG2Pvk\ny+CzWgAiMqrUN7ZS23Asoe+/vLSYHQdO8EDdvkG3GgoLjEvmTOrqQtpx4AQOGUsM6gISERmAvloN\ngxEymFRWwpLzpg/rojR1AYmIDECy7qTL5k7pajXEPs0PJDFEHA691tbVlZTtlclKACIifUg1xhBL\nDCff7OCnvzvCmx2dnFlSmHSNQk8/3nFICUBEJF/FksKytc91jRvs7+0Fca6YO2WYokqfEoCIyCBs\n3Nrc70Hji2ZP4LK5U/jeL/ZkdQGaEoCIyCAMZGC49pXj1K59jnDEiXh0OmlJUYhbr5rL07uP0NDy\nOrMmvoM//+A7hzUpDOZISBGRUW/p/AoKk5w0EzIoDEUr9546whE6Or3r4BoH2sMRbnlkG0/uPMye\nljd4cudhlq19jvrG1iTfYWgMqgVgZmOBu4B5RH+GPwN2Aw8AM4G9wDXu3hqcG/wd4ErgFHCDu28d\nzPuLiGRbdVU5D6y8sNtK5FiXDkS7iH5Yt6/r034IKCwMgTsdnXEL0pI0JcKdTm3DsWFrBQy2C+g7\nwI/d/eNmVgyUArcAP3P31Wa2ClgFfBlYBMwO/iwA7gi+iojktd5WIrecbKNyXCnjzihmTGkxk4Lz\nCXYfOsmtj20nHHFCBsmGEQoLrCuRDIcBJwAzGwNcBNwA4O7tQLuZLQYuDm77AfA00QSwGLjXoyvP\nas1srJlNdfeDA45eRCRH1Te2dpsdREv0DIPiwhBXz6+g9VQ7keBjf7LKv6jA2LDywpwdAzgLaAH+\nw8xeMLO7zOwMYHJcpX4ImBw8ng7si3t9c1DWjZmtNLM6M6traWkZRHgiItlT23As6eyg9nCEjVub\nKS8tTnl4PcDf//G8YZ8VNJgEUAjMB+5w9/cCbxDt7ukSfNrv1yC5u6919xp3r5k4ceIgwhMRyZ6F\ns8YnHRyGaKXYeqo96QAxQNW4UlYsqBy22GIGkwCagWZ33xI8f4hoQjhsZlMBgq9Hguv7gRlxr68g\n/TUTIiJ5JTY4fPk5k3nXpHdQYNHB3uICY+n8ChbOGk9JUfIquC3cmZEYB5wA3P0QsM/M5gRFlwI7\ngceB64Oy64HHgsePA5+0qIXACfX/i8hIVl1VztpP1nD70j9k2QWVrFhQyf0rLwSiXUQ3XDgzaStg\nyXkJvePDYrCzgP4SuC+YAdQAfIpoUnnQzG4EGoFrgns3EZ0CuofoNNBPDfK9RURy3upNu1j7ywbc\no4u9yvrYRC5E5jaJG1QCcPcXgWRbjl6a5F4HPjuY9xMRySfrtzR1O0SmrSPC2l829Dr4Wz0zc9tB\naCWwiMgweWJ7Yi93b5V/yGDVosztEKoEICIyTBbNm9rt+eLzpnFaUShlxbv8gsqMbginzeBERIZJ\nbCrnE9sPsmjeVFYsqOw6kvJfntyd0BpYOr8io/EpAYiIDKMVCyq7zemPbRvxrad2d1slFTIyvh20\nu
oBERLIgZN0ngBaEUi0LG8YYMv6OIiKC99j+s6PTufibv2D1pl0Zi0EJQEQkR+w9doo1mxsylgQ0\nBiAiMszqG1t5eGszBsydNoZf7D5COJL6/kwdGK8EICIyjFZv2tVtMVg6MnVgvBKAiMgw6bkSuDcG\nTD6zhCXnTc+PrSBERCS1ZCuB44UMaqrKmT25jKvnV2R8GqgSgIjIEKpvbGXNMy/zSsvrvP5WOOk9\nKxZUYpCVSj+eEoCIyCDEBniPnmzj1VPtPL+3tddTsEoKjH/42LkZi683SgAiImmIbeFQXlrM9gMn\nuir8usbWXjd46+lT7z9r+ILsJyUAERl1YpX5wlnju7pg4suAhMr+6d+30BGO9O+M2zjjzijmmuqK\njA3wpkMJQERGhfrGVu585mUaWl6n8fgpOiNOcWGIW6+ay/YDJ3iovplwZ4TCkOFEV+YOlAHTyk9n\n+pjTsjbAm45BJwAzKwDqgP3ufpWZnQVsAMYD9cCfunu7mZUA9wLVwDFgmbvvHez7i4j0pb6xlWVr\nnyPco1JvD0e49bHt3U7n6uhMflJXb2KzeQDawhGWnf/2BnCxlgVEN3uL70pqPdXerRWSaUPRAvhr\nYBdwZvD8duBf3X2Dma0BbgTuCL62uvu7zGx5cN+yIXh/EZFe1TYcS6j8IbohW3zlb0Qr81Qf/gsM\nPvNHs2g4+gY/3XmYSFD2tSXnMmdKGdfdVUt7OMLuwzuYM6UMoKusuDDEDRfO5PvBcZBvxwAfPnsy\nf/7Bd+bXbqBmVgF8FLgreG7AJcBDwS0/AJYEjxcHzwmuXxrcLyIyrBbOGk9hQffq5vJzJnPb4nmU\nFIUoMCguMFYsqORrS86luMAwoDAE75p4RrfXlZ1exHtmjCX+NPfWU+3UNhyjPRwh4tGjH7/88G/5\n8sO/5a2OaNlbHRHWbG7oVvlD9ISwJ3ce5trv11Lf2DpcfwVJDbYF8G3gS0BZ8Hw88Kq7xya/NgOx\n4+2nA/sA3D1sZieC+48OMgYRkV5VV5XzwMoLufOZlzn82lvdumjmTClLGBCOL4Pop/iOcISiwhAL\nZ43nP5/b+/bMHzNOvtnBy0ff6Ho/B/Yceb1fMXaEI9Q2HMuPE8HM7CrgiLvXm9nFQxWQma0EVgJU\nVlb2cbeISHqqq8pZ+8mapOU9K92eZfd9emFXQnhqxyEeffFA17XOiPd7r59kYsklkwbTAng/8Mdm\ndiVwGtExgO8AY82sMGgFVAD7g/v3AzOAZjMrBMYQHQzuxt3XAmsBampqBj4MLyIyROITwhcffHFQ\n3ys2YDy2tLirbGJZSX5tBeHuNwM3AwQtgP/l7teZ2Q+BjxOdCXQ98FjwkseD588F13/uPU9EEBHJ\nYZf9y9PsPXZqQK89vSjE1fMrcmpK6HCsA/gysMHMvg68ANwdlN8N/KeZ7QGOA8uH4b1FRIbFZf/y\nNC+1vNH3jSlcf+HMnFoEBkOUANz9aeDp4HEDcEGSe94CPjEU7ycikmmDqfyXnDct5yp/0EpgEZFh\nM/EdxXzhsjldM45yjc4EFhFJw+we6wHS0fJ6O3/76LaMz+9PlxKAiEgaXh5gF1DE6doKItcoAYiI\n9OGdN/8XvZzh3quQkfH5/enSGICISB8GujHolDNL+MOKsdz5zMsJ1yaUlTBv2hh2HDhBy8k2JpSV\nsDTDU0SVAEREhsmh19o4tPNw2vev39LERbMncO+NC4YxqrepC0hEpA/nVYzJ2HttfukoS777q4y8\nl1oAIiJ9ePRzH2DJd3/Ftv0nGHN6ETUzxyV04QADOiIymRebT7B+S9OwTx+1XN6Noaamxuvq6rId\nhohI2uobW9m4tRmHhAQRL5ZA/s/Pfs/B1xKvhwwa/vGjA4rBzOrdPXHnu573KQGIiGTXvFt/zOvt\nnQnl51WM4dHPfaDf3y/dBKAxABGRLNt+2xWcXphYHf+m+cSwvq8SgIhIlq3etIu2cOJKg+Hunxmx\ng8Cf3/ACT/++hYvfPZFvL39vtsMREUmwetMu/uO/X6EtxUKDi2ZPGNb3H5EJ4PMbXug6sSf2VUlA\nRHJBfWMrq5/YxYtNrXT0srx47OmFw74eYEQmgCe2H0p4/u0sxSIiAtGK/28f2cauQyf7vLdi7Gn8\natWlwx7TiEwA4Uj3tNoWjnSfnIgZAAAMo0lEQVTtxtfz8GcRkeG0fksT33pqN0dfb+/z3tOKQtyQ\nwYNjBnMo/AzgXmAy0bGKte7+HTMbBzwAzAT2Ate4e6uZGdEzg68ETgE3uPvWwYWfXGlRASfbuk+p\nWrb2OUJAOOIUF4a479MLgcSEUN/YqiQhIoO2fksTt/94FyfeDPd579jSIr70kT/I+LkBg2kBhIEv\nuvtWMysD6s3sKeAG4GfuvtrMVgGriB4TuQiYHfxZANwRfB1y1y2oYs3mhu7BdjpGNFN1hCNs3NrM\nw1ubaQ9HuiWE6+6q7VZWXVWekBSSJQklDhGBvgd2400qK+bzH87egTGDORT+IHAweHzSzHYB04HF\nwMXBbT8gelTkl4Pye4OD4GvNbKyZTQ2+z5BadeXZHHrtra4BYIDCAiMEdEacosIQDrSHI0Q8mhBi\n+3UnK4tPCrdeNZfbfrSjz8QBybublChERqb+VPznTC3ja0vOzXodMCRjAGY2E3gvsAWYHFepHyLa\nRQTR5LAv7mXNQdmQJwCIzvr50wtndi3JXjq/Ani7UgbYuLWZjnCEosJQV1lxYahbWW3DsW5J4Ynt\nB/tMHMlaF7GWQ18tjFiM5aXFtJ5qV6IQyWH1ja2seeZlnt1zlDeSrOSNV1xgnDdjLF9edHbO/E4P\nOgGY2TuAh4HPu/tr0a7+KHd3M+vXWgYzWwmsBKisHFyzqLqqPOEvOv75fZ9emPBpPFlZfFJYNG8q\nz+893mviSNa6qK4qT0gmseRx7fdr6QhHCIUM3In42wtAigtD3P+ZhTnzH0ZEouobW/nEmmf73Pit\nuMD4s/efNfIOhTezIqKV/33uvjEoPhzr2jGzqcCRoHw/MCPu5RVBWTfuvhZYC9G9gAYTX19SJYj4\nsuqq8oSkMGdKWa+JAxJbF/WNrex/9U0KQoZ3OgUFIcpLi7nt/0W7kyDaPdVTe9CiUAIQyS13PvNy\nWrt+djpsbWpl5b3d9zXLxgEwPQ1mFpABdwO73P1bcZceB64HVgdfH4sr/5yZbSA6+HtiOPr/h0Oy\npJAqcdQ3tvLw1mYumj0RiP4j7z50ktt+tIO2jggOGBCJRPi7uMq/N7m7XZ/I6HX4tbfSuq8z4vx6\nb/JD4R+s28clcyZ1K3v1VDvH32hn1sR38OcffOewJojBtADeD/wpsM3MXgzKbiFa8T9oZjcCjcA1\nwbVNRKeA7iE6DfRTg3jvnFTf2Mqytc8R7jEIVBAy3L2rInegMwKdkcTK34huA4tBJAJFBdY1fiEi\nuWPZ+ZX8pnnboL5HuNN5MsWJYXta3uDnu4/wwMoLhy0JDG
YW0K+I1lfJJCxhC2b/fHag75cPNm5t\nTqj8IfoJoOdfVEGBETKjszNCQcj4RM0M5k4b0zXwC1q0JpLLViyopOnYGzxY30woBBPPKOG1tjDt\nHZ0UFxVQaEbT8VODasGHO71rDHE4jMiVwNnS2z90z2vhTufsKe9gflU5V6foB1TFL5K7eu7hf/Rk\n3yt9+6uwwLo+EA4HbQc9hJbOr6CwIFWjKNGuQyd5oG5f3zeKSE5JdYDLULpgZvmwdv+AWgBDZv2W\nJtb9qoGxpUWYQ0sa+37A8DfxRGRo1Te2DqryP70wxLkVYxhbWszEshLmThvD9gMnOBocGzmxrCRl\nr8BQUwIYAuu3NHHLIwMbDBruJp6IDK3Y+p1U3jXxDCA6iJvMm+EIv97bSlEwDvjYC/vpiDgRd4pC\nxtzpY4Y85lSUAIbAP27amfa9IYM5k8voiDizJpwx7NO8RGRopfrAFgKKi0Lc/vH3ANHtYWJTv5Pp\n6HTAaetR9vzeVpbe8Sw3XTRr2BePaQxgCPTcebSn0wtDXDCznJnjSykMGfuOnwLg4jmTVPmL5Jnq\nqnIe/ov3Ma60CAPGlRbxDx87ly9+ZE7X9i6xBaSD2eRtzeYG1m9pGrrAk1ALYAicUVzQ6z4gsSZf\nTHtnJ3uOvN7VbZStnQBFZGCqq8rZeuvlfd4DcORkG0+lmOvflye2HxzW+kEJYAj8zUfPGfAYwHD/\nA4tI5qzf0sQDzzdRUhji5FvhtE7/6s2ieVOHKLLklACGQNOx5IM96Zg79cwhjEREMiW27cuewyc5\n/kY7HZ1OY9C9OxAGTC4r4fipdsacXsQXLhv+cwKUAIbAj3cc6vumFO55bi+XzZ2isQCRPFLf2Mo1\ndz5LZ99beSVlwLTy05k+5rSu6aCZmvoZTwlgCFwxd0rCCWTpit8uWkTyw53PvNyvyn/KmSW8o6SQ\nooIQxYUhlp1fyYoFlVk/IEoJYAjEpmrdt6WRNzs6MTPGlxZz5PW2XreLDUG3MwVEJD+kuxNozKtv\ndvBXl7676zTB3Yd3AHQ9NzOqK8fyrsllGd0i2qJ7tOWmmpoar6ur6/vGHFXf2MrGrc20nGzrVj6h\nrIR5cRu/6dO/SH7p7+LPAoP3vWsC/73nKBFPfB6vuMC4f5BbQJhZvbvX9HWfWgDDKNm5ASKS/2KD\nsw8838SkM0/jQ3Mm8fTuI/x01+GECj3W0u95muCieVN57uVjRHp8CO/I4PYwSgAiIgOwYkFlt1k6\nsT79WKs/WUs/2WmCX3l0G/G7yBdlcHsYdQGJiGRRbDrp0SBpDMUYQM52AZnZFcB3gALgLndfnekY\nRERyRTa7ijO6F5CZFQDfAxYB5wDXmtk5mYxBRESiMr0Z3AXAHndvcPd2YAOwOMMxiIgImU8A04H4\nI7CagzIREcmwnNsO2sxWmlmdmdW1tLRkOxwRkREr0wlgPzAj7nlFUNbF3de6e42710ycODGjwYmI\njCaZTgDPA7PN7CwzKwaWA49nOAYRESEL6wDM7Erg20Snga5z92/0cm8L0Jjmt54AHB18hBmnuDMr\nH+POx5hBcWdafNxV7t5nF0pOLwTrDzOrS2fhQ65R3JmVj3HnY8yguDNtIHHn3CCwiIhkhhKAiMgo\nNZISwNpsBzBAijuz8jHufIwZFHem9TvuETMGICIi/TOSWgAiItIPIyIBmFmBmb1gZj/KdizpMrOx\nZvaQmf3OzHaZ2YXZjikdZvYFM9thZtvN7H4zOy3bMSVjZuvM7IiZbY8rG2dmT5nZS8HXnDutJ0Xc\n3wz+n/zWzB4xs7HZjDGZZHHHXfuimbmZTchGbL1JFbeZ/WXwd77DzP4pW/GlkuL/yXlmVmtmLwa7\nKVzQ1/cZEQkA+GtgV7aD6KfvAD929z8A3kMexG9m04G/AmrcfR7RtRzLsxtVSvcAV/QoWwX8zN1n\nAz8Lnueae0iM+ylgnrv/IfB74OZMB5WGe0iMGzObAVwONGU6oDTdQ4+4zexDRDepfI+7zwX+OQtx\n9eUeEv++/wn4e3c/D7g1eN6rvE8AZlYBfBS4K9uxpMvMxgAXAXcDuHu7u7+a3ajSVgicbmaFQClw\nIMvxJOXum4HjPYoXAz8IHv8AWJLRoNKQLG53f9Ldw8HTWqJbqOSUFH/fAP8KfAnIycHGFHH/BbDa\n3duCe45kPLA+pIjbgTODx2NI43cz7xMA0VXFXwIi2Q6kH84CWoD/CLqu7jKzM7IdVF/cfT/RT0NN\nwEHghLs/md2o+mWyux8MHh8CJmczmAH6M+CJbAeRDjNbDOx3999kO5Z+ejfwR2a2xcyeMbPzsx1Q\nmj4PfNPM9hH9Pe2zpZjXCcDMrgKOuHt9tmPpp0JgPnCHu78XeIPc7I7oJugzX0w0gU0DzjCzP8lu\nVAPj0elvOfmpNBUz+xsgDNyX7Vj6YmalwC1EuyLyTSEwDlgI/G/gQTOz7IaUlr8AvuDuM4AvEPQw\n9CavEwDwfuCPzWwv0cNlLjGz/5vdkNLSDDS7+5bg+UNEE0Ku+zDwiru3uHsHsBF4X5Zj6o/DZjYV\nIPiac037VMzsBuAq4DrPj7nb7yT6QeE3we9nBbDVzKZkNar0NAMbPerXRHsXcm4AO4nrif5OAvyQ\n6AFcvcrrBODuN7t7hbvPJDoY+XN3z/lPpO5+CNhnZnOCokuBnVkMKV1NwEIzKw0+EV1KHgxex3mc\n6C8JwdfHshhL2oJztL8E/LG7n8p2POlw923uPsndZwa/n83A/OD/fq57FPgQgJm9GygmPzaHOwB8\nMHh8CfBSXy/I+KHw0uUvgfuCbbEbgE9lOZ4+ufsWM3sI2Eq0K+IFcnTVpJndD1wMTDCzZuCrwGqi\nzfkbie4ye032IkwuRdw3AyXAU0FPRK2735S1IJNIFre799kFkW0p/r7XAeuCKZbtwPW51upKEfdn\ngO8EEzTeAlb2+X1y7OcSEZEMyesuIBERGTglABGRUUoJQERklFICEBEZpZQARERGKSUAEZFRSglA\nRGSUUgIQERml/j9WgkrlLK5pIwAAAABJRU5ErkJggg==\n", 197 | "text/plain": [ 198 | "" 199 | ] 200 | }, 201 | "metadata": {}, 202 | "output_type": "display_data" 203 | } 204 | ], 205 | "source": [ 206 | "plt.plot(p_data[range(0,1000),0],p_data[range(0,1000),1],'.')\n", 207 | "#plt.axis('equal')\n", 208 | "plt.show()" 209 | ] 210 | } 211 | ], 212 | "metadata": { 213 | "kernelspec": { 214 | "display_name": "Python 3", 215 | "language": "python", 216 | "name": "python3" 217 | }, 218 | 
"language_info": { 219 | "codemirror_mode": { 220 | "name": "ipython", 221 | "version": 3 222 | }, 223 | "file_extension": ".py", 224 | "mimetype": "text/x-python", 225 | "name": "python", 226 | "nbconvert_exporter": "python", 227 | "pygments_lexer": "ipython3", 228 | "version": "3.6.4" 229 | } 230 | }, 231 | "nbformat": 4, 232 | "nbformat_minor": 2 233 | } 234 | -------------------------------------------------------------------------------- /i80_data_vis.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | from matplotlib import animation 5 | import matplotlib.patches as patches 6 | import math 7 | import time 8 | import os, sys 9 | from matplotlib.transforms import Affine2D 10 | import matplotlib.lines as mlines 11 | 12 | #COMMENT IF NOT SAVING VIDEO 13 | #ff_path = os.path.join('C:/Program Files/', 'ImageMagick', 'ffmpeg.exe') 14 | #plt.rcParams['animation.ffmpeg_path'] = ff_path 15 | #if ff_path not in sys.path: sys.path.append(ff_path) 16 | # 17 | ## This second one will ensure the ".gif" creation works. 18 | #imgk_path = os.path.join('C:/Program Files/', 'ImageMagick', 'convert.exe') 19 | #plt.rcParams['animation.convert_path'] = imgk_path 20 | #if ff_path not in sys.path: sys.path.append(imgk_path) 21 | 22 | # Units are in FEET! 23 | 24 | print("initializing...") 25 | 26 | #Obtain File in CSV, using Pandas 27 | filepath = 'trajectories-0400-0415.csv' 28 | data = pd.read_csv(filepath) 29 | 30 | #Keep useful data, sort by Frame ID. Graphing is done by frame 31 | data_cut = data[['Vehicle_ID', 'Frame_ID', 'Local_X', 'Local_Y','Lane_ID', 'v_Length', 'v_Width']] 32 | 33 | #reduce size 34 | data_cut = data_cut.loc[data_cut['Vehicle_ID']%3!=0] 35 | sorted_frame = data_cut.sort_values(by=['Frame_ID']) 36 | sorted_np = sorted_frame.values 37 | sorted_np = sorted_np[40000:90000,:] # Omit data upto 100*1000ms = 100s 38 | sorted_id = data_cut.values 39 | 40 | # init array of sliced values, by frame number 41 | sliced = [] 42 | 43 | # slice data by frame number 44 | for i in range(int(min(sorted_np[:,1])),int(max(sorted_np[:,1]))): 45 | sliced.append(sorted_np[sorted_np[:,1]==i]) 46 | 47 | def currentLane(y): 48 | return y//12 # lane has height of 12. int division of 12. 49 | 50 | # return lane boundaries of upper, current, and lower lanes w.r.t my car 51 | def laneBoundaries(lane): 52 | if (lane == 6): 53 | above_upper = (lane+1)*12 54 | above_lower = (lane+1)*12 55 | current_lower = (lane)*12 56 | below_lower = (lane-1)*12 57 | elif (lane == 1): 58 | above_upper = (lane+2)*12 59 | above_lower = (lane+1)*12 60 | current_lower = (lane)*12 61 | below_lower = (lane)*12 62 | else: 63 | above_upper = (lane+2)*12 64 | above_lower = (lane+1)*12 65 | current_lower = (lane)*12 66 | below_lower = (lane-1)*12 67 | return above_upper, above_lower, current_lower, below_lower 68 | 69 | #LOGIC TO FIND DISTANCES 70 | def findDistances(mycar_x, mycar_y, other_x, other_y, mycar_lane): 71 | # mycar_x, mycar_y are x y position of my car 72 | # other_x, other_y is the dataset, which contains x y position of other cars at the same time 73 | 74 | # Find lane boundary values w.r.t the lane of my car. 
Ex, lane =3, then find y boundaries of lane 4, 3 and 2 75 | above_upper, above_lower, current_lower, below_lower = laneBoundaries(mycar_lane) 76 | 77 | # init all distances to 0 78 | dist_above_infront = dist_current_infront = dist_below_infront = \ 79 | dist_above_behind = dist_current_behind = dist_below_behind = 0 80 | 81 | #find indexes of cars in specific lanes from the dataset. np.where returns index values of the datset 82 | index_above = np.where((other_y=above_lower))[0] 83 | index_current = np.where((other_y=current_lower))[0] 84 | index_below = np.where((other_y=below_lower))[0] 85 | 86 | # Using the indexes, create an array of x_values of above, current and below lane 87 | x_above = other_x[index_above] 88 | x_current = other_x[index_current] 89 | x_below = other_x[index_below] 90 | 91 | #subract by current x values, then find minimum distance 92 | 93 | # NUMPY MASKING. ONLY LEAVES VALUES THAT MATCH THE CONDITION 94 | x_above_infront = x_above[x_above>mycar_x] # Numpy masking 95 | if (x_above_infront.size != 0): 96 | dist_above_infront = np.min(x_above_infront) - mycar_x # find minimum, then subtract by mycar_x 97 | x_above_behind = x_above[x_abovemycar_x] 103 | if (x_current_infront.size != 0): 104 | dist_current_infront = np.min(x_current_infront) - mycar_x 105 | x_current_behind = x_current[x_currentmycar_x] 110 | if (x_below_infront.size != 0): 111 | dist_below_infront = np.min(x_below_infront) - mycar_x 112 | x_below_behind = x_below[x_below clearance_front) and (dist_above_behind >clearance_back)): 143 | above_available = True 144 | if ((dist_below_infront > clearance_front) and (dist_below_behind > clearance_back)): 145 | below_available = True 146 | 147 | if (above_available and below_available): # if both OK, change to larger clearance lane 148 | if(dist_above_infront > dist_below_infront): 149 | target_x, target_y = createWayPoint(x_pos, y_pos, lane+1, lookahead) 150 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 151 | else: 152 | target_x, target_y = createWayPoint(x_pos, y_pos, lane-1, lookahead) 153 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 154 | elif (above_available and not below_available): # if only one is ok 155 | target_x, target_y = createWayPoint(x_pos, y_pos, lane+1, lookahead) 156 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 157 | elif (not above_available and below_available): 158 | target_x, target_y = createWayPoint(x_pos, y_pos, lane-1, lookahead) 159 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 160 | 161 | #if neither, reduce speed but follow same lane 162 | else: 163 | current_vel = current_vel - 10 164 | target_x, target_y = createWayPoint(x_pos, y_pos, lane, lookahead) 165 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 166 | # when no lane change is needed, follow center lane 167 | else: 168 | current_vel = 50 169 | target_x, target_y = createWayPoint(x_pos, y_pos, lane, lookahead) 170 | theta = followWayPoint(x_pos, y_pos, target_x, target_y) 171 | 172 | #print(target_x, target_y) 173 | return current_vel, theta 174 | 175 | # def changeLane() 176 | 177 | def prediction(): 178 | ''' 179 | INPUT TENSORFLOW PREDICTION CODE HERE 180 | ''' 181 | vel =0 182 | theta = 0 183 | return vel, theta 184 | 185 | 186 | # set figure size 187 | fig = plt.figure(figsize=(27,20)) 188 | #ax = fig.add_axes([0,0,1,1],frameon=False) 189 | # ax = fig.add_subplot(2,1,2) 190 | 191 | #add subplot 192 | ax1 = fig.add_subplot(1,1,1) 193 | count = 0 194 | myvehicle_x_pos = 220 195 | myvehicle_y_pos = 32 196 
| myvehicle_vel = 40 197 | myvehicle_theta = 0 198 | myvehicle_lane = currentLane(myvehicle_y_pos) 199 | # Animation function 200 | 201 | print("Initialization Finished!") 202 | 203 | def animate(i): 204 | global count; global myvehicle_x_pos; global myvehicle_vel; global myvehicle_y_pos; global myvehicle_theta 205 | timestep = 0.1 206 | global myvehicle_lane 207 | 208 | # Slice relevant information by frame number 209 | x = sliced[i][:,2] 210 | 211 | 212 | y = sliced[i][:,3] 213 | x = np.array(x) 214 | y = np.array(y) 215 | theta = np.radians(30) 216 | rot = np.array([[np.cos(theta), np.sin(theta), 0], [-np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) 217 | pos = np.vstack((y,x)) 218 | pos = np.vstack((pos, np.zeros(len(x)))) 219 | rotate = np.matmul(rot, pos) 220 | names = sliced[i][:,0] 221 | lane_label = sliced[i][:,4] 222 | vehicle_length = sliced[i][:,5] 223 | vehicle_width = sliced[i][:,6] 224 | vehicle_id = sliced[i][:,0] 225 | current_frame = sliced[i][0,1] 226 | 227 | x_future = [] 228 | y_future = [] 229 | # look 10 steps ahead 230 | for count, j in enumerate(vehicle_id): 231 | idx = np.where((sliced[i+10][:,0]==j))[0] 232 | print(idx) 233 | if len(sliced[i+10][idx,3])!=0: 234 | x_future.append(sliced[i+10][idx,3][0]) 235 | y_future.append(sliced[i+10][idx,2][0]) 236 | else: 237 | x_future.append(y[count]) 238 | y_future.append(x[count]) 239 | 240 | print(x_future) 241 | print(y) 242 | print("length of future: {}, length of current: {}".format(len(x_future), len(y))) 243 | 244 | myvehicle_lane = currentLane(myvehicle_y_pos) 245 | dist_above_infront, dist_current_infront, dist_below_infront, dist_above_behind, dist_current_behind, dist_below_behind \ 246 | = findDistances(myvehicle_x_pos, myvehicle_y_pos, y, x, myvehicle_lane) 247 | 248 | # UPDATE MYVEHICLE_VEL and MYVEHICLE_THETA using changelane function 249 | # or when prediction, using prediction() function 250 | myvehicle_vel, myvehicle_theta = changeLane(myvehicle_vel, myvehicle_y_pos, myvehicle_x_pos, 251 | myvehicle_lane, dist_above_infront,dist_current_infront, 252 | dist_below_infront, dist_above_behind, dist_current_behind, 253 | dist_below_behind) 254 | 255 | 256 | # Update position based on velocity 257 | myvehicle_x_pos = timestep * math.cos(math.radians(myvehicle_theta))*myvehicle_vel + myvehicle_x_pos 258 | myvehicle_y_pos = timestep * math.sin(math.radians(myvehicle_theta)) * myvehicle_vel + myvehicle_y_pos 259 | 260 | # ADD ROTATION 261 | myvehicle_pos = np.matmul(rot, np.vstack((myvehicle_x_pos, myvehicle_y_pos, 0))) 262 | 263 | # ax.clear() 264 | ax1.clear() 265 | x_lines = [0,421] 266 | y_lines = [73,73] #lane 6 267 | x_lines1 = [0,421] 268 | y_lines1 = [73+21, 73+6.6] #incoming lane 7 269 | x_lines2 = [421, 421+146] 270 | y_lines2= [73,73] #dotted line 1 271 | x_lines3=[421,421+146] 272 | y_lines3=[73+6.6,73] # dotted line 2 273 | x_lines4 = [0, 421, 421+146, 986,2000] 274 | y_lines4= [73+40, 73+6.6+12,72+12,73, 73] #outside line 275 | lines = np.matmul(rot, np.vstack((x_lines, y_lines, np.zeros(2)))) 276 | lines1 = np.matmul(rot, np.vstack((x_lines1, y_lines1, np.zeros(2)))) 277 | lines2 = np.matmul(rot, np.vstack((x_lines2, y_lines2, np.zeros(2)))) 278 | lines3 = np.matmul(rot, np.vstack((x_lines3, y_lines3, np.zeros(2)))) 279 | lines4 = np.matmul(rot, np.vstack((x_lines4, y_lines4, np.zeros(5)))) 280 | 281 | 282 | plt.plot(lines[0], lines[1], '-w', LineWidth=1.5) 283 | plt.plot(lines1[0], lines1[1], '-w', linestyle ='-', LineWidth = 1.5) 284 | plt.plot(lines2[0],lines2[1], '-w', linestyle='--', LineWidth = 
1.5) 285 | plt.plot(lines3[0],lines3[1], '-w', linestyle='--', LineWidth = 1.5) 286 | 287 | plt.plot(lines4[0], lines4[1], '-w', LineWidth = 1.5) 288 | 289 | x_lines5 = [986, 1650] 290 | 291 | lane1 = np.array([[-30, 2000], [0,0], [0,0]]) 292 | lane2 = np.array([[-30, 2000], [12,12], [0,0]]) 293 | lane3 = np.array([[-30, 2000], [24,24], [0,0]]) 294 | lane4 = np.array([[-30, 2000], [36,36], [0,0]]) 295 | lane5 = np.array([[-30, 2000], [48,48], [0,0]]) 296 | lane6 = np.array([[-30, 2000], [60,60], [0,0]]) 297 | 298 | rot_lane1 = np.matmul(rot, lane1) 299 | rot_lane2 = np.matmul(rot, lane2) 300 | rot_lane3 = np.matmul(rot, lane3) 301 | rot_lane4 = np.matmul(rot, lane4) 302 | rot_lane5 = np.matmul(rot, lane5) 303 | rot_lane6 = np.matmul(rot, lane6) 304 | 305 | plt.plot(rot_lane1[0],rot_lane1[1], '-w', linestyle='-', LineWidth = 1.5) 306 | plt.plot(rot_lane2[0],rot_lane2[1], '-w', linestyle='--', LineWidth = 1.5) 307 | plt.plot(rot_lane3[0],rot_lane3[1], '-w', linestyle='--', LineWidth = 1.5) 308 | plt.plot(rot_lane4[0],rot_lane4[1], '-w', linestyle='--', LineWidth = 1.5) 309 | plt.plot(rot_lane5[0],rot_lane5[1], '-w', linestyle='--', LineWidth = 1.5) 310 | plt.plot(rot_lane6[0],rot_lane6[1], '-w', linestyle='--', LineWidth = 1.5) 311 | 312 | # set autoscale off, set x,y axis 313 | ax1.set_autoscaley_on(True) 314 | ax1.set_autoscalex_on(True) 315 | ax1.set_xlim([0,1650]) 316 | ax1.set_ylim([-0,-1000]) 317 | ax1.set_facecolor('#708090') 318 | 319 | 320 | ax1.fill_between(lines[0], lines[1], lines1[1], color='black') 321 | ax1.fill_between(lines2[0], lines2[1],lines3[1], color='black') 322 | ax1.fill_between(lines4[0], lines4[1], 100, color='black') 323 | # ax1.fill_between([0,2000], -10, 0, color='black') 324 | ax1.fill_between(rot_lane1[0], rot_lane1[1], -2000, color = 'black') 325 | 326 | 327 | # ax1.scatter(y,x,s=10) 328 | patches = [] 329 | lane_color = ["white", "red", "orange", "yellow", "green", "blue", "black", "pink"] 330 | # ax1.scatter(y,x, s = 50, marker = "s") 331 | 332 | # unzip by category, create rectangle for each car by frame 333 | for y_cent, x_cent, lane, vlength, vwidth, v_id in zip(rotate[0],rotate[1],lane_label,vehicle_length, vehicle_width, vehicle_id): 334 | # print(x_cent, y_cent) 335 | vlen = vlength*0.75 336 | vwid = vwidth*0.75 337 | # colored vehicles 338 | # patches.append(ax1.add_patch(plt.Rectangle((y_cent-vlen/2, x_cent-vwid/2), vlen, vwid, 339 | # fill=True, angle=0, linewidth = 2, edgecolor = lane_color[int(lane)], color = lane_color[int(lane)]))) 340 | if lane != 7: 341 | patches.append(ax1.add_patch(plt.Rectangle((y_cent-10/2, x_cent), 10, 4, 342 | fill=True, angle=-30, linewidth = 2, edgecolor = lane_color[int(lane)], color = '#ff007f', joinstyle = 'round', 343 | capstyle = 'butt'))) 344 | else: 345 | patches.append(ax1.add_patch(plt.Rectangle((y_cent-10/2, x_cent), 10, 4, 346 | fill=True, angle=-30, linewidth = 2, edgecolor = lane_color[int(lane)], color = 'black', joinstyle = 'round', 347 | capstyle = 'butt'))) 348 | idx = np.where((sliced[i+10][:,0]==v_id))[0] 349 | x_future = sliced[i+10][idx,3] 350 | y_future = sliced[i+10][idx,2] 351 | 352 | if (len(x_future)!=0): 353 | future_pos = np.vstack((x_future[0], y_future[0], 0)) 354 | future_pos = np.matmul(rot, future_pos) 355 | # print("x current: {}, x_future, {}".format(y_cent, future_pos[0][0])) 356 | # print("y current: {}, y_future: {} ".format(x_cent, future_pos[1][0])) 357 | l = mlines.Line2D([y_cent, future_pos[0][0]], [x_cent, future_pos[1][0]]) 358 | patches.append(ax1.add_line(l)) 359 | 360 | 
patches.append(ax1.add_patch(plt.Rectangle((myvehicle_pos[0]-4,myvehicle_pos[1]-2), 8, 4, fill=True, 361 | angle = myvehicle_theta-30, color = 'blue', joinstyle = 'round' ))) 362 | count = count + 1 363 | #print("lane {} below distance {}".format(myvehicle_lane, dist_below_infront)) 364 | 365 | return patches 366 | 367 | 368 | # Animate at an interval of 100 ms 369 | ani = animation.FuncAnimation(fig, animate, frames = range(2,30000), interval=100, blit=True) 370 | #FFwriter = animation.FFMpegWriter() 371 | 372 | plt.show() 373 | #ani.save('video.mp4', writer =FFwriter) 374 | 375 | 376 | 377 | 378 | 379 | -------------------------------------------------------------------------------- /mlpnetwork.py: -------------------------------------------------------------------------------- 1 | # Import all packages 2 | import tensorflow as tf 3 | import numpy as np 4 | import pandas as pd 5 | from sort_data import remove_unnecessary_data 6 | 7 | # Import the extracted feature dataset 8 | filepath = "features_redone.csv" 9 | data = pd.read_csv(filepath) 10 | 11 | # Data is already sorted by vehicle id. For batch processing, sort by timestamp? 12 | data = data[['vehicle_id', 'frame', 'velocity', 'theta', 'd0', 'd1', 13 | 'd2', 'd3', 'd4', 'd5', 'd6', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']] 14 | data_np = data.values.astype(np.float32) 15 | len_data = len(data_np) 16 | 17 | processed_input, processed_output = remove_unnecessary_data(data_np, len_data) 18 | 19 | # Network Parameters 20 | n_hidden1 = 128 21 | n_hidden2 = 128 22 | n_input = 13 # 13 input features: 7 distances (d0-d6) + 6 neighbor velocities (v1-v6) 23 | n_output = 2 # velocity and theta 24 | batch_size = 128 25 | lr = 0.005 26 | 27 | X = tf.placeholder(tf.float32, [None, n_input]) 28 | Y = tf.placeholder(tf.float32, [None, n_output]) 29 | 30 | weights = { 31 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden1])), 32 | 'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2])), 33 | 'output': tf.Variable(tf.random_normal([n_hidden2, n_output])) 34 | } 35 | 36 | biases = { 37 | 'b1': tf.Variable(tf.random_normal([n_hidden1])), 38 | 'b2': tf.Variable(tf.random_normal([n_hidden2])), 39 | 'out': tf.Variable(tf.random_normal([n_output])) 40 | } 41 | 42 | def mlp(x): 43 | # Two hidden layers with ReLU activation (use leaky ReLU?)
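# (Aside prompted by the comment above, as an assumption rather than project code: TensorFlow 1.4+ also ships
#  tf.nn.leaky_relu, so a leaky variant of the first layer would look like
#  tf.nn.leaky_relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']), alpha=0.2) — this script sticks with plain ReLU.)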
44 | layer1 = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1'])) 45 | layer2 = tf.nn.relu(tf.add(tf.matmul(layer1, weights['h2']), biases['b2'])) 46 | 47 | out_layer = tf.matmul(layer2, weights['output'])+biases['out'] 48 | return out_layer 49 | 50 | # Construct model 51 | 52 | outputs = mlp(X) 53 | 54 | # Define loss and optimizer 55 | loss_out = tf.reduce_sum(tf.square(Y - outputs)) 56 | optimizer = tf.train.AdamOptimizer(learning_rate = lr) 57 | train_mlp = optimizer.minimize(loss_out) 58 | 59 | # Init variables 60 | init = tf.global_variables_initializer() 61 | 62 | with tf.Session() as sess: 63 | sess.run(init) 64 | 65 | iterations = 3000 66 | epoch = 100 67 | for i in range(epoch): 68 | for n in range(iterations): 69 | rand_int = np.random.randint(len(processed_input)-1, size = batch_size) 70 | 71 | feed = {X: processed_input[rand_int, :], Y: processed_output[rand_int]} 72 | _, cost = sess.run([train_mlp, loss_out], feed_dict = feed) 73 | 74 | cost = cost/batch_size 75 | 76 | if n % 100 == 0: 77 | print("Epoch: {} \t Iteration: {} \t Loss: {}".format(i, n, cost)) 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /moving_circles.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.patches as patches 4 | import matplotlib.animation as animation 5 | 6 | fig = plt.figure() 7 | ax = fig.add_subplot(111) 8 | 9 | plt.xlim(-100, 100) 10 | plt.ylim(-100, 100) 11 | 12 | width = 5 13 | bars = 25 14 | 15 | RB = [] # Establish RB as a Python list 16 | for a in range(bars): 17 | RB.append(patches.Rectangle((a*15-140,-100), width, 200, 18 | color="blue", alpha=0.50)) 19 | 20 | def init(): 21 | for a in range(bars): 22 | ax.add_patch(RB[a]) 23 | return RB 24 | 25 | def animate(i): 26 | for a in range(bars): 27 | temp = np.array(RB[a].get_xy()) # index with the loop variable a, not the frame number i 28 | temp[0] = temp[0] + 3 29 | RB[a].set_xy(temp) 30 | return RB 31 | 32 | anim = animation.FuncAnimation(fig, animate, 33 | init_func=init, 34 | frames=15, 35 | interval=20, 36 | blit=True) 37 | 38 | plt.show() 39 | -------------------------------------------------------------------------------- /moving_rect.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import matplotlib.patches as patches 4 | from matplotlib import animation 5 | 6 | x = [0, 1, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16] 7 | y = [0, 0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] 8 | yaw = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] # one yaw value per frame (len(yaw) == len(x)) 9 | fig = plt.figure() 10 | plt.axis('equal') 11 | plt.grid() 12 | ax = fig.add_subplot(111) 13 | ax.set_xlim(-0, 40) 14 | ax.set_ylim(-10, 10) 15 | 16 | patch = patches.Rectangle((0, 0), 0, 0, fc='y', angle=30) 17 | 18 | def init(): 19 | ax.add_patch(patch) 20 | return patch, 21 | 22 | def animate(i): 23 | patch.set_width(1.2) 24 | patch.set_height(1.0) 25 | patch.set_xy([x[i], y[i]]) 26 | patch.angle = -np.rad2deg(yaw[i]) 27 | return patch, 28 | 29 | anim = animation.FuncAnimation(fig, animate, 30 | init_func=init, 31 | frames=len(x), 32 | interval=100, 33 | blit=True) 34 | plt.show() 35 | -------------------------------------------------------------------------------- /newex.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import matplotlib.animation as animation 3 | import numpy as np 4 | 5 | class 
AnimatedScatter(object): 6 | """An animated scatter plot using matplotlib.animations.FuncAnimation.""" 7 | def __init__(self, numpoints=50): 8 | self.numpoints = numpoints 9 | self.stream = self.data_stream() 10 | 11 | # Setup the figure and axes... 12 | self.fig, self.ax = plt.subplots() 13 | # Then setup FuncAnimation. 14 | self.ani = animation.FuncAnimation(self.fig, self.update, interval=5, 15 | init_func=self.setup_plot, blit=True) 16 | 17 | def setup_plot(self): 18 | """Initial drawing of the scatter plot.""" 19 | x, y, s, c = next(self.stream) 20 | self.scat = self.ax.scatter(x, y, c=c, s=s, animated=True) 21 | self.ax.axis([-10, 10, -10, 10]) 22 | 23 | # For FuncAnimation's sake, we need to return the artist we'll be using 24 | # Note that it expects a sequence of artists, thus the trailing comma. 25 | return self.scat, 26 | 27 | def data_stream(self): 28 | """Generate a random walk (brownian motion). Data is scaled to produce 29 | a soft "flickering" effect.""" 30 | data = np.random.random((4, self.numpoints)) 31 | xy = data[:2, :] 32 | s, c = data[2:, :] 33 | xy -= 0.5 34 | xy *= 10 35 | while True: 36 | xy += 0.03 * (np.random.random((2, self.numpoints)) - 0.5) 37 | s += 0.05 * (np.random.random(self.numpoints) - 0.5) 38 | c += 0.02 * (np.random.random(self.numpoints) - 0.5) 39 | yield data 40 | 41 | def update(self, i): 42 | """Update the scatter plot.""" 43 | data = next(self.stream) 44 | 45 | # Set x and y data... 46 | self.scat.set_offsets(data[:2, :]) 47 | # Set sizes... 48 | self.scat._sizes = 300 * abs(data[2])**1.5 + 100 49 | # Set colors.. 50 | self.scat.set_array(data[3]) 51 | 52 | # We need to return the updated artist for FuncAnimation to draw.. 53 | # Note that it expects a sequence of artists, thus the trailing comma. 54 | return self.scat, 55 | 56 | def show(self): 57 | plt.show() 58 | 59 | if __name__ == '__main__': 60 | a = AnimatedScatter() 61 | a.show() 62 | -------------------------------------------------------------------------------- /peachtree.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/peachtree.jpg -------------------------------------------------------------------------------- /peachtree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/numpee/ngsim/45c22c89e11411f8ab8d325879482c077f0370b4/peachtree.png -------------------------------------------------------------------------------- /predTraj_lstmMDN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "C:\\Users\\dongwan123\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 13 | " from ._conv import register_converters as _register_converters\n" 14 | ] 15 | } 16 | ], 17 | "source": [ 18 | "from __future__ import print_function\n", 19 | "\n", 20 | "import tensorflow as tf\n", 21 | "from tensorflow.contrib import rnn\n", 22 | "import numpy as np\n", 23 | "import matplotlib.pyplot as plt\n", 24 | "import scipy\n", 25 | "import scipy.io as sio\n", 26 | "%matplotlib inline\n", 27 | "\n", 28 | "# GPU settings\n", 29 | "config = tf.ConfigProto()\n", 30 | "config.allow_soft_placement = True\n", 31 | "config.gpu_options.per_process_gpu_memory_fraction = 0.4\n", 32 | "config.gpu_options.allow_growth = True" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 2, 38 | "metadata": {}, 39 | "outputs": [ 40 | { 41 | "ename": "OSError", 42 | "evalue": "could not read bytes", 43 | "output_type": "error", 44 | "traceback": [ 45 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", 46 | "\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)", 47 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Load data\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mmat\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mloadmat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'./train_data/trainData_i80_0400-0415.mat'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[0mf_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmat\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'f_data'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mprevTraj_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmat\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'prevTraj_data'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mpostTraj_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmat\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'postTraj_data'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 48 | "\u001b[1;32m~\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\io\\matlab\\mio.py\u001b[0m in \u001b[0;36mloadmat\u001b[1;34m(file_name, mdict, appendmat, **kwargs)\u001b[0m\n\u001b[0;32m 140\u001b[0m \u001b[0mvariable_names\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'variable_names'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 141\u001b[0m \u001b[0mMR\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfile_opened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmat_reader_factory\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfile_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mappendmat\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 142\u001b[1;33m \u001b[0mmatfile_dict\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mMR\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_variables\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvariable_names\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 143\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mmdict\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 144\u001b[0m \u001b[0mmdict\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmatfile_dict\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 49 | "\u001b[1;32m~\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\io\\matlab\\mio5.py\u001b[0m in \u001b[0;36mget_variables\u001b[1;34m(self, variable_names)\u001b[0m\n\u001b[0;32m 290\u001b[0m \u001b[1;32mcontinue\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 291\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 292\u001b[1;33m \u001b[0mres\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread_var_array\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mhdr\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mprocess\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 293\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mMatReadError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 294\u001b[0m warnings.warn(\n", 50 | "\u001b[1;32m~\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\scipy\\io\\matlab\\mio5.py\u001b[0m in \u001b[0;36mread_var_array\u001b[1;34m(self, header, process)\u001b[0m\n\u001b[0;32m 250\u001b[0m \u001b[0;31m`\u001b[0m\u001b[0mprocess\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 251\u001b[0m '''\n\u001b[1;32m--> 252\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_matrix_reader\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray_from_header\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mheader\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mprocess\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 253\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 254\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mget_variables\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvariable_names\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 51 | "\u001b[1;32mmio5_utils.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.mio5_utils.VarReader5.array_from_header\u001b[1;34m()\u001b[0m\n", 52 | "\u001b[1;32mmio5_utils.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.mio5_utils.VarReader5.array_from_header\u001b[1;34m()\u001b[0m\n", 53 | "\u001b[1;32mmio5_utils.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.mio5_utils.VarReader5.read_real_complex\u001b[1;34m()\u001b[0m\n", 54 | "\u001b[1;32mmio5_utils.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.mio5_utils.VarReader5.read_numeric\u001b[1;34m()\u001b[0m\n", 55 | "\u001b[1;32mmio5_utils.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.mio5_utils.VarReader5.read_element\u001b[1;34m()\u001b[0m\n", 56 | "\u001b[1;32mstreams.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.streams.GenericStream.read_string\u001b[1;34m()\u001b[0m\n", 57 | "\u001b[1;32mstreams.pyx\u001b[0m in \u001b[0;36mscipy.io.matlab.streams.GenericStream.read_into\u001b[1;34m()\u001b[0m\n", 58 | 
"\u001b[1;31mOSError\u001b[0m: could not read bytes" 59 | ] 60 | } 61 | ], 62 | "source": [ 63 | "# Load data\n", 64 | "mat = sio.loadmat('./train_data/trainData_i80_0400-0415.mat')\n", 65 | "f_data = np.array(mat['f_data'])\n", 66 | "prevTraj_data = np.array(mat['prevTraj_data'])\n", 67 | "postTraj_data = np.array(mat['postTraj_data'])\n", 68 | "\n", 69 | "# Set train-data\n", 70 | "f_trainData = f_data\n", 71 | "prevTraj_trainData = prevTraj_data\n", 72 | "postTraj_trainData = postTraj_data\n", 73 | "\n", 74 | "num_traindata = f_trainData.shape[0] # number of train-data\n", 75 | "\n", 76 | "f_mean_trainData = np.mean(f_trainData,axis=0)\n", 77 | "prevTraj_mean_trainData = np.mean(prevTraj_trainData,axis=0)\n", 78 | "postTraj_mean_trainData = np.mean(postTraj_trainData,axis=0)\n", 79 | "\n", 80 | "f_trainData = f_trainData - np.tile(f_mean_trainData,(num_traindata,1))\n", 81 | "prevTraj_trainData = prevTraj_trainData - np.tile(prevTraj_mean_trainData,(num_traindata,1))\n", 82 | "postTraj_trainData = postTraj_trainData - np.tile(postTraj_mean_trainData,(num_traindata,1))\n", 83 | "\n", 84 | "f_absmax = np.max(np.abs(f_trainData),axis=0)\n", 85 | "f_trainData = f_trainData/np.tile(f_absmax,(num_traindata,1))\n", 86 | "prevTraj_absmax = np.max(np.abs(prevTraj_trainData),axis=0)\n", 87 | "prevTraj_trainData = prevTraj_trainData/np.tile(prevTraj_absmax,(num_traindata,1))\n", 88 | "postTraj_absmax = np.max(np.abs(postTraj_trainData),axis=0)\n", 89 | "postTraj_trainData = postTraj_trainData/np.tile(postTraj_absmax,(num_traindata,1))\n", 90 | "\n", 91 | "dim_x = 3 # dimension of state\n", 92 | "Hx_prev = 15 # timesteps of input (horizon) : less than 6\n", 93 | "Hx_post = 15 # timesteps of post (horizon) : less than 10\n", 94 | "dim_t = dim_x*Hx_post # dimension of trajectory\n", 95 | "dim_f = f_trainData.shape[1]" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 3, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "# Network Parameters\n", 105 | "n_hidden_lstm = dim_x*3\n", 106 | "n_components_mdn = 12\n", 107 | "n_outputs_mdn = (2*dim_t + 1)*n_components_mdn\n", 108 | "lam_mdn = 0.01\n", 109 | "\n", 110 | "#hidden_size_mdn = n_outputs_mdn*2\n", 111 | "hidden_size_mdn = [(n_outputs_mdn)*4, (n_outputs_mdn)*4]\n", 112 | "\n", 113 | "sigma_max = 10 # max value of variance\n", 114 | "\n", 115 | "# Set epsilon value\n", 116 | "epsilon_init = 1e-5\n", 117 | "decayRate_epsilon =0.95\n", 118 | "decaySteps_epsilon = 200\n", 119 | "learning_epsilon = epsilon_init\n", 120 | "\n", 121 | "# tf Graph input\n", 122 | "X = tf.placeholder(\"float\", [None, Hx_prev, dim_x])\n", 123 | "Y = tf.placeholder(\"float\", [None, dim_t])\n", 124 | "F = tf.placeholder(\"float\", [None, dim_f])\n", 125 | "EPS = tf.placeholder(\"float\", None)\n", 126 | "\n", 127 | "# Save params\n", 128 | "filename2save0 = \"./tf_saved_model/params_predTraj_lstmMDN_ti%dto%d\" % (Hx_prev,Hx_post)\n", 129 | "sio.savemat(filename2save0, {'f_mean_trainData':f_mean_trainData,'prevTraj_mean_trainData':prevTraj_mean_trainData,\n", 130 | " 'postTraj_mean_trainData':postTraj_mean_trainData,'f_absmax':f_absmax,'prevTraj_absmax':prevTraj_absmax,\n", 131 | " 'postTraj_absmax':postTraj_absmax,'n_hidden_lstm':n_hidden_lstm,'n_components_mdn':n_components_mdn,\n", 132 | " 'sigma_max':sigma_max})" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 4, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "def LSTMwMDN(x_, f_):\n", 142 | " # Prepare data shape to match `rnn` 
function requirements\n", 143 | " # Current data input shape: (batch_size, timesteps, n_input)\n", 144 | " # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)\n", 145 | "\n", 146 | " # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)\n", 147 | " \n", 148 | " x_ = tf.unstack(x_, Hx_prev, 1)\n", 149 | "\n", 150 | " # Define a lstm cell with tensorflow\n", 151 | " lstm_cell = rnn.BasicLSTMCell(n_hidden_lstm)\n", 152 | "\n", 153 | " # Get lstm cell output\n", 154 | " rnn_outputs, rnn_states = rnn.static_rnn(lstm_cell, x_, dtype=tf.float32)\n", 155 | " \n", 156 | " rnn_encoded = rnn_outputs[-1]\n", 157 | " \n", 158 | " x_concat = tf.concat([rnn_encoded, f_], 1)\n", 159 | "\n", 160 | " output_mdn_1 = tf.layers.dense(x_concat, \n", 161 | " hidden_size_mdn[0], \n", 162 | " activation = tf.nn.relu, \n", 163 | " kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 164 | " bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 165 | " kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lam_mdn)\n", 166 | " )\n", 167 | " \n", 168 | " output_mdn_2 = tf.layers.dense(output_mdn_1, \n", 169 | " hidden_size_mdn[1], \n", 170 | " activation = tf.nn.relu,\n", 171 | " kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 172 | " bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 173 | " kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lam_mdn)\n", 174 | " )\n", 175 | " output_mdn_out = tf.layers.dense(output_mdn_2, \n", 176 | " n_outputs_mdn,\n", 177 | " kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 178 | " bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False),\n", 179 | " kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lam_mdn)\n", 180 | " )\n", 181 | " \n", 182 | " indexes_split = [dim_t*n_components_mdn, dim_t*n_components_mdn, n_components_mdn]\n", 183 | " means_mdn_out, sigma_mdn_act, fracs_mdn_act = tf.split(output_mdn_out, indexes_split, axis=1)\n", 184 | "\n", 185 | " sigma_mdn_out = sigma_max*tf.nn.sigmoid(sigma_mdn_act)\n", 186 | "\n", 187 | " fracs_mdn_out_max = tf.reduce_max(fracs_mdn_act,axis=1)\n", 188 | " fracs_mdn_out_max_ext_ = tf.expand_dims(fracs_mdn_out_max, 1)\n", 189 | " fracs_mdn_out_max_ext = tf.tile(fracs_mdn_out_max_ext_,[1, n_components_mdn])\n", 190 | " fracs_mdn_out = fracs_mdn_act - fracs_mdn_out_max_ext\n", 191 | " fracs_mdn_out = tf.nn.softmax(fracs_mdn_out)\n", 192 | " \n", 193 | " return means_mdn_out, sigma_mdn_out, fracs_mdn_out" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 5, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "def logsumexp_trick(x_):\n", 203 | " xmax_ = tf.reduce_max(x_,axis = 1)\n", 204 | " xmax_ext__ = tf.expand_dims(xmax_, 1)\n", 205 | " xmax_ext_ = tf.tile(xmax_ext__,[1, n_components_mdn])\n", 206 | " diffx_ = x_ - xmax_ext_\n", 207 | " x_logsumexp_ = tf.reduce_logsumexp(diffx_,1)\n", 208 | " x_logsumexp = xmax_ + x_logsumexp_\n", 209 | " \n", 210 | " return x_logsumexp" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": 6, 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [ 219 | "# RNN graph + MDN\n", 220 | "means_mdn_, sigma_mdn_, fracs_mdn_ = LSTMwMDN(X, F)\n", 221 | "means_mdn = tf.reshape(means_mdn_,[-1,dim_t,n_components_mdn]) # (num of data, dim_out*horizon_out, num of components)\n", 222 | "sigma_mdn = tf.reshape(sigma_mdn_,[-1,dim_t,n_components_mdn]) # (num 
of data, dim_out*horizon_out, num of components)\n", 223 | "fracs_mdn = tf.reshape(fracs_mdn_,[-1,n_components_mdn]) # (num of data, num of components)\n", 224 | "\n", 225 | "# Define loss and optimizer\n", 226 | "Y_ext_ = tf.expand_dims(Y, 2)\n", 227 | "Y_ext = tf.tile(Y_ext_,[1,1,n_components_mdn]) # (num of data, dim_out*horizon_out, num of components)\n", 228 | "diff = Y_ext - means_mdn\n", 229 | "\n", 230 | "squared_diff = tf.square(diff)\n", 231 | "scaled_squared_diff = tf.div(squared_diff,(sigma_mdn+EPS))\n", 232 | "scaled_dist = tf.reduce_sum(scaled_squared_diff,1) # (num of data, num of components)\n", 233 | "\n", 234 | "logsum_sigma_mdn = tf.reduce_sum(tf.log(sigma_mdn+EPS),1) # (num of data, num of components)\n", 235 | "\n", 236 | "loss_exponent = tf.log(fracs_mdn+EPS) - 0.5*dim_t*tf.log(np.pi*2) - 0.5*logsum_sigma_mdn - 0.5*scaled_dist\n", 237 | "# loss_op = tf.reduce_mean(-tf.reduce_logsumexp(loss_exponent,axis=1))\n", 238 | "loss_op = -tf.reduce_mean(logsumexp_trick(loss_exponent))\n", 239 | "\n", 240 | "learning_rate = 0.001\n", 241 | "# train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_op)\n", 242 | "train_op = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.8).minimize(loss_op)\n", 243 | "\n", 244 | "# Initialize the variables (i.e. assign their default value)\n", 245 | "init = tf.global_variables_initializer()\n", 246 | "\n", 247 | "# Create a saver object which will save all the variables\n", 248 | "saver = tf.train.Saver()\n", 249 | "\n", 250 | "# Export meta graph\n", 251 | "tf.add_to_collection('X', X)\n", 252 | "tf.add_to_collection('Y', Y)\n", 253 | "tf.add_to_collection('F', F)\n", 254 | "tf.add_to_collection('means_mdn', means_mdn)\n", 255 | "tf.add_to_collection('sigma_mdn', sigma_mdn)\n", 256 | "tf.add_to_collection('fracs_mdn', fracs_mdn)" 257 | ] 258 | }, 259 | { 260 | "cell_type": "code", 261 | "execution_count": 7, 262 | "metadata": {}, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | "epoch 1, epsilon 0.000010, Minibatch Loss= -42.3842751, sigma= 0.000717/0.005304/0.029754\n", 269 | "epoch 50, epsilon 0.000010, Minibatch Loss= -155.5826875, sigma= 0.000000/0.000411/0.008790\n", 270 | "epoch 100, epsilon 0.000010, Minibatch Loss= -157.0464780, sigma= 0.000000/0.000300/0.009226\n", 271 | "epoch 150, epsilon 0.000010, Minibatch Loss= -159.8415329, sigma= 0.000000/0.000622/0.023783\n", 272 | "epoch 200, epsilon 0.000010, Minibatch Loss= -159.7974338, sigma= 0.000000/0.000713/0.035433\n", 273 | "epoch 250, epsilon 0.000010, Minibatch Loss= -158.5976043, sigma= 0.000000/0.000444/0.013901\n", 274 | "epoch 300, epsilon 0.000010, Minibatch Loss= -158.8930314, sigma= 0.000000/0.000419/0.053860\n", 275 | "epoch 350, epsilon 0.000010, Minibatch Loss= -158.5232968, sigma= 0.000000/0.001003/0.085903\n", 276 | "epoch 400, epsilon 0.000009, Minibatch Loss= -161.5180862, sigma= 0.000000/0.000878/0.037936\n", 277 | "epoch 450, epsilon 0.000009, Minibatch Loss= -159.7439754, sigma= 0.000000/0.000537/0.062314\n", 278 | "epoch 500, epsilon 0.000009, Minibatch Loss= -157.9385229, sigma= 0.000000/0.001261/0.135410\n", 279 | "epoch 550, epsilon 0.000009, Minibatch Loss= -163.0460245, sigma= 0.000000/0.000428/0.023615\n", 280 | "epoch 600, epsilon 0.000009, Minibatch Loss= -148.4081847, sigma= 0.000000/0.002069/0.088368\n", 281 | "epoch 650, epsilon 0.000009, Minibatch Loss= -160.0496247, sigma= 0.000000/0.001846/0.152572\n", 282 | "epoch 700, epsilon 0.000009, Minibatch Loss= -153.1792538, sigma= 
0.000000/0.001533/0.067298\n", 283 | "epoch 750, epsilon 0.000009, Minibatch Loss= -149.4600848, sigma= 0.000000/0.000867/0.050586\n", 284 | "epoch 800, epsilon 0.000008, Minibatch Loss= -158.0661188, sigma= 0.000000/0.000798/0.044887\n", 285 | "epoch 850, epsilon 0.000008, Minibatch Loss= -160.1343628, sigma= 0.000000/0.001504/0.134759\n", 286 | "epoch 900, epsilon 0.000008, Minibatch Loss= -158.4707912, sigma= 0.000000/0.000398/0.088992\n", 287 | "epoch 950, epsilon 0.000008, Minibatch Loss= -156.2202232, sigma= 0.000000/0.001004/0.306435\n", 288 | "epoch 1000, epsilon 0.000008, Minibatch Loss= -157.5734465, sigma= 0.000000/0.004374/2.176343\n", 289 | "Optimization Finished!\n" 290 | ] 291 | }, 292 | { 293 | "ename": "ValueError", 294 | "evalue": "Cannot feed value of shape (20, 1, 7) for Tensor 'Placeholder_2:0', which has shape '(?, 7)'", 295 | "output_type": "error", 296 | "traceback": [ 297 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 298 | "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", 299 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0mf_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf_trainData\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0midx_test\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 89\u001b[0;31m \u001b[0mmeans_mdn_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmeans_mdn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mx_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mf_test\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 90\u001b[0m \u001b[0msigma_mdn_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msigma_mdn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mx_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mf_test\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0mfracs_mdn_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfracs_mdn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mx_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mf_test\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 300 | "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 903\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 904\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 905\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 906\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 907\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 301 | "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1114\u001b[0m \u001b[0;34m'which has shape %r'\u001b[0m \u001b[0;34m%\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1115\u001b[0m (np_val.shape, subfeed_t.name,\n\u001b[0;32m-> 1116\u001b[0;31m str(subfeed_t.get_shape())))\n\u001b[0m\u001b[1;32m 1117\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_feedable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msubfeed_t\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1118\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Tensor %s may not be fed.'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0msubfeed_t\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 302 | "\u001b[0;31mValueError\u001b[0m: Cannot feed value of shape (20, 1, 7) for Tensor 'Placeholder_2:0', which has shape '(?, 7)'" 303 | ] 304 | } 305 | ], 306 | "source": [ 307 | "# Training parameters\n", 308 | "training_epochs = 1000\n", 309 | "batch_size = 256\n", 310 | "num_batches = np.ceil(num_traindata/batch_size)\n", 311 | "num_batches = num_batches.astype(np.int32)\n", 312 | "display_epochs = 50\n", 313 | "save_epochs = 200\n", 314 | "\n", 315 | "h_x_train = np.array(range(0,dim_x*Hx_prev), dtype = np.int32)\n", 316 | "h_y_train = np.array(range(0,dim_x*Hx_post), dtype = np.int32)\n", 317 | "\n", 318 | "# Suffle indexes\n", 319 | "idx_array = np.arange(num_traindata)\n", 320 | "np.random.shuffle(idx_array)\n", 321 | "\n", 322 | "with tf.Session(config = config) as sess:\n", 323 | " # Run the initializer\n", 324 | " sess.run(init)\n", 325 | " \n", 326 | " loss_epochs = np.zeros(training_epochs)\n", 327 | " # Train phase\n", 328 | " for epoch in range(1, training_epochs+1):\n", 329 | " \n", 330 | " # Suffle indexes\n", 331 | " np.random.shuffle(idx_array)\n", 332 | " \n", 333 | " # Set epsilon value\n", 334 | " if (epoch % decaySteps_epsilon) == 0 and epoch > 1:\n", 335 | " learning_epsilon = learning_epsilon*decayRate_epsilon\n", 336 | " \n", 337 | " cnt_data = 0\n", 338 | " loss_array = np.zeros(num_batches)\n", 339 | " for nidx_batch in range(0,num_batches):\n", 340 | " # Get batch data\n", 341 | " if((cnt_data + batch_size) > num_traindata):\n", 342 | " idx_batch = idx_array[range((cnt_data),(num_traindata))]\n", 343 | " else:\n", 344 | " idx_batch = idx_array[range((cnt_data),(cnt_data + batch_size))]\n", 345 | " \n", 346 | " x_batch = prevTraj_trainData[idx_batch,:]\n", 347 | " x_batch = x_batch[:,h_x_train]\n", 348 | " x_batch = x_batch.reshape(len(idx_batch),Hx_prev,dim_x) # convert to (batch_size, timesteps, dim_x)\n", 349 | " \n", 350 | " y_batch = postTraj_trainData[idx_batch,:]\n", 351 | " y_batch = y_batch[:,h_y_train]\n", 352 | " \n", 353 | " f_batch = f_trainData[idx_batch,:]\n", 354 | " \n", 355 | " cnt_data = cnt_data + len(idx_batch)\n", 356 | " \n", 357 | " # Run optimization op (backprop)\n", 358 
| " sess.run(train_op, feed_dict={X: x_batch, Y: y_batch, F: f_batch, EPS: learning_epsilon})\n", 359 | " \n", 360 | " # Compute batch loss\n", 361 | " loss_val = sess.run(loss_op, feed_dict={X: x_batch, Y: y_batch, F: f_batch, EPS: learning_epsilon})\n", 362 | " loss_array[nidx_batch] = loss_val\n", 363 | " \n", 364 | " loss_epochs[epoch-1] = np.mean(loss_array)\n", 365 | " \n", 366 | " if epoch % display_epochs == 0 or epoch == 1:\n", 367 | " # Check sigma\n", 368 | " val_sigma_mdn = sess.run(sigma_mdn, feed_dict={X: x_batch, F: f_batch})\n", 369 | " minval_sigma_mdn = np.min(val_sigma_mdn)\n", 370 | " meanval_sigma_mdn = np.mean(val_sigma_mdn)\n", 371 | " maxval_sigma_mdn = np.max(val_sigma_mdn)\n", 372 | " \n", 373 | " print(\"epoch \" + str(epoch) + \", epsilon {:.6f}\".format(learning_epsilon) +\n", 374 | " \", Minibatch Loss= \" + \"{:.7f}\".format(np.mean(loss_array)) + \n", 375 | " \", sigma= \" + \"{:.6f}/{:.6f}/{:.6f}\".format(minval_sigma_mdn,meanval_sigma_mdn,maxval_sigma_mdn))\n", 376 | " \n", 377 | " # Save\n", 378 | " if epoch % save_epochs == 0 or epoch == 1:\n", 379 | " # Now, save the graph\n", 380 | " filename2save1 = \"./tf_saved_model/predTraj_lstmMDN_ti%dto%d\" % (Hx_prev,Hx_post)\n", 381 | " saver.save(sess,filename2save1,global_step=100)\n", 382 | " \n", 383 | " \n", 384 | " print(\"Optimization Finished!\")\n", 385 | " \n", 386 | " # Test phase\n", 387 | " idx_test = np.random.randint(num_traindata,size=(20,1))\n", 388 | " x_test = prevTraj_trainData[idx_test,h_x_train]\n", 389 | " x_test = np.reshape(x_test,(idx_test.shape[0],Hx_prev,dim_x))\n", 390 | " \n", 391 | " y_test = postTraj_trainData[idx_test,:]\n", 392 | " y_test = postTraj_trainData[:,h_y_train]\n", 393 | " \n", 394 | " f_test = f_trainData[idx_test,:]\n", 395 | " \n", 396 | " means_mdn_test = sess.run(means_mdn, feed_dict={X: x_test, F: f_test})\n", 397 | " sigma_mdn_test = sess.run(sigma_mdn, feed_dict={X: x_test, F: f_test})\n", 398 | " fracs_mdn_test = sess.run(fracs_mdn, feed_dict={X: x_test, F: f_test})\n", 399 | " \n", 400 | " # Now, save test results\n", 401 | " filename2save2 = \"./tf_saved_model/results_predTraj_lstmMDN_ti%dto%d\" % (Hx_prev,Hx_post)\n", 402 | " sio.savemat(filename2save2, {'means_mdn_test':means_mdn_test,'sigma_mdn_test':sigma_mdn_test,'fracs_mdn_test':fracs_mdn_test})" 403 | ] 404 | }, 405 | { 406 | "cell_type": "code", 407 | "execution_count": 1, 408 | "metadata": {}, 409 | "outputs": [ 410 | { 411 | "ename": "NameError", 412 | "evalue": "name 'x_test' is not defined", 413 | "output_type": "error", 414 | "traceback": [ 415 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 416 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", 417 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mx_test\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 418 | "\u001b[0;31mNameError\u001b[0m: name 'x_test' is not defined" 419 | ] 420 | } 421 | ], 422 | "source": [ 423 | "x_test" 424 | ] 425 | } 426 | ], 427 | "metadata": { 428 | "kernelspec": { 429 | "display_name": "Python 3", 430 | "language": "python", 431 | "name": "python3" 432 | }, 433 | "language_info": { 434 | "codemirror_mode": { 435 | "name": "ipython", 436 | "version": 3 437 | }, 438 | "file_extension": ".py", 439 | "mimetype": "text/x-python", 440 | "name": "python", 441 | "nbconvert_exporter": "python", 442 | "pygments_lexer": "ipython3", 443 | "version": "3.6.4" 444 | } 445 | }, 446 | "nbformat": 4, 
447 | "nbformat_minor": 2 448 | } 449 | -------------------------------------------------------------------------------- /rain.py: -------------------------------------------------------------------------------- 1 | """ 2 | =============== 3 | Rain simulation 4 | =============== 5 | 6 | Simulates rain drops on a surface by animating the scale and opacity 7 | of 50 scatter points. 8 | 9 | Author: Nicolas P. Rougier 10 | """ 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | from matplotlib.animation import FuncAnimation 14 | 15 | 16 | # Create new Figure and an Axes which fills it. 17 | fig = plt.figure(figsize=(7, 7)) 18 | ax = fig.add_axes([0, 0, 1, 1], frameon=False) 19 | ax.set_xlim(0, 1), ax.set_xticks([]) 20 | ax.set_ylim(0, 1), ax.set_yticks([]) 21 | 22 | # Create rain data 23 | n_drops = 50 24 | rain_drops = np.zeros(n_drops, dtype=[('position', float, 2), 25 | ('size', float, 1), 26 | ('growth', float, 1), 27 | ('color', float, 4)]) 28 | 29 | # Initialize the raindrops in random positions and with 30 | # random growth rates. 31 | rain_drops['position'] = np.random.uniform(0, 1, (n_drops, 2)) 32 | rain_drops['growth'] = np.random.uniform(50, 200, n_drops) 33 | 34 | # Construct the scatter which we will update during animation 35 | # as the raindrops develop. 36 | scat = ax.scatter(rain_drops['position'][:, 0], rain_drops['position'][:, 1], 37 | s=rain_drops['size'], lw=0.5, edgecolors=rain_drops['color'], 38 | facecolors='none') 39 | 40 | 41 | def update(frame_number): 42 | # Get an index which we can use to re-spawn the oldest raindrop. 43 | current_index = frame_number % n_drops 44 | 45 | # Make all colors more transparent as time progresses. 46 | rain_drops['color'][:, 3] -= 1.0/len(rain_drops) 47 | rain_drops['color'][:, 3] = np.clip(rain_drops['color'][:, 3], 0, 1) 48 | 49 | # Make all circles bigger. 50 | rain_drops['size'] += rain_drops['growth'] 51 | 52 | # Pick a new position for oldest rain drop, resetting its size, 53 | # color and growth factor. 54 | rain_drops['position'][current_index] = np.random.uniform(0, 1, 2) 55 | rain_drops['size'][current_index] = 5 56 | rain_drops['color'][current_index] = (0, 0, 0, 1) 57 | rain_drops['growth'][current_index] = np.random.uniform(50, 200) 58 | 59 | # Update the scatter collection, with the new colors, sizes and positions. 60 | scat.set_edgecolors(rain_drops['color']) 61 | scat.set_sizes(rain_drops['size']) 62 | scat.set_offsets(rain_drops['position']) 63 | 64 | 65 | # Construct the animation, using the update function as the animation 66 | # director. 67 | animation = FuncAnimation(fig, update, interval=10) 68 | plt.show() 69 | 70 | -------------------------------------------------------------------------------- /sort_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import tensorflow as tf 4 | 5 | filepath = "features_redone.csv" 6 | data = pd.read_csv(filepath) 7 | 8 | #data already sorted by vehicle id. For batch processing, sort by timestamp? 
9 | data = data[['vehicle_id', 'frame', 'velocity', 'theta', 'd0', 'd1', 10 | 'd2', 'd3', 'd4', 'd5', 'd6', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']] 11 | data_np = data.values.astype(np.float32) 12 | data_length = len(data_np) 13 | 14 | 15 | def remove_unnecessary_data(data, data_length): 16 | 17 | # First split the velocity and theta columns into a separate target array (use the data argument, not the module-level data_np) 18 | output_data = data[:, (2, 3)] 19 | 20 | # Shift the target data up by one index so each input row is paired with the next frame's (velocity, theta) 21 | output_data = np.delete(output_data, (0), axis = 0) 22 | 23 | # Mark the rows at which the vehicle ID switches (the last frame of each vehicle has no valid successor) 24 | temp_id = data[0, 0] 25 | delete_rows = [] 26 | for i in range(0, data_length): 27 | if(data[i, 0] != temp_id): 28 | temp_id = data[i, 0] 29 | delete_rows.append(i-1) 30 | 31 | # Delete the marked rows 32 | data_np_processed = np.delete(data, delete_rows, axis=0) 33 | 34 | # Drop the id/frame/velocity/theta columns so only the 13 input features remain 35 | data_np_processed = np.delete(data_np_processed, (0, 1, 2, 3), axis=1) 36 | output_data_processed = np.delete(output_data, delete_rows, axis=0) 37 | return data_np_processed, output_data_processed 38 | 39 | 40 | 41 | --------------------------------------------------------------------------------
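For reference, here is a minimal usage sketch of remove_unnecessary_data as mlpnetwork.py consumes it. The CSV name and column list come from the scripts above; the shape comments are illustrative assumptions, not verified output.

import numpy as np
import pandas as pd
from sort_data import remove_unnecessary_data

# Load the extracted feature file and keep the column order used throughout the repo.
data = pd.read_csv("features_redone.csv")
data = data[['vehicle_id', 'frame', 'velocity', 'theta', 'd0', 'd1',
             'd2', 'd3', 'd4', 'd5', 'd6', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']]
data_np = data.values.astype(np.float32)

# Inputs are the 13 surrounding-vehicle features at frame t; targets are that vehicle's
# (velocity, theta) at frame t+1. Rows where the vehicle ID changes are dropped so that
# no sample pairs the end of one trajectory with the start of the next.
processed_input, processed_output = remove_unnecessary_data(data_np, len(data_np))

# processed_input keeps one extra trailing row (the final frame has no successor), which
# mlpnetwork.py sidesteps by sampling batch indices below len(processed_input) - 1.
print(processed_input.shape, processed_output.shape)  # roughly (N, 13) and (N, 2)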