\n",
574 | " "
575 | ]
576 | },
577 | "metadata": {},
578 | "execution_count": 5
579 | }
580 | ]
581 | },
582 | {
583 | "cell_type": "code",
584 | "source": [
585 | "rat.dendrogram(model)"
586 | ],
587 | "metadata": {
588 | "colab": {
589 | "base_uri": "https://localhost:8080/",
590 | "height": 288
591 | },
592 | "id": "l5N4nntVmiFz",
593 | "outputId": "380e8f2f-f8ba-46d1-efce-f7117815ce12"
594 | },
595 | "execution_count": null,
596 | "outputs": [
597 | {
598 | "output_type": "display_data",
599 | "data": {
600 | "text/plain": [
601 | ""
602 | ],
603 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEPCAYAAAC5sYRSAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAbpElEQVR4nO3dfZxdVX3v8c8XQogBlacxQQKSIqhYdYCA+MKHQGwFtAUr0hC1gNh4LQ/lYqvQ6ouX9d4WrSJ4r9BG0ERLIoggXIUqRVIuWpAERjCggFAwkECUZ/EByq9/rDVwGM5kztl7n5kza77v1+u85sze5/zOmv3wm33W/u21FRGYmVlZNpnoBpiZWfOc3M3MCuTkbmZWICd3M7MCObmbmRVo2kQ3AGC77baLnXfeeaKbYWY2qaxevfoXETHQbl5fJPedd96ZVatWTXQzzMwmFUl3jzbP3TJmZgVycjczK5CTu5lZgcZM7pK+JOkBST9umbaNpCsk3Z5/bp2nS9LnJd0h6SZJe/ay8WZm1l4nR+5LgQNHTDsZuDIidgWuzL8DHATsmh+LgbObaaaZmXVjzOQeEVcDD46YfAiwLD9fBhzaMv0rkVwLbCVp+6Yaa2Zmnana5z4rItbl5+uBWfn5DsDPW163Nk97HkmLJa2StGrDhg0Vm2FmZu3UPqEaaczgrscNjoglETEvIuYNDLStwTczs4qqXsR0v6TtI2Jd7nZ5IE+/F9ix5XVz8jQbJ0uWwPLlE90Ks/YWLYLFiye6FVND1SP3S4Ej8/MjgUtapv9ZrprZF3ikpfvGxsHy5TA0NNGtMHu+oSEfeIynMY/cJa0A5gPbSVoLnAqcBlwg6RjgbuDw/PLLgIOBO4AngKN70GYbw+AgrFw50a0we6758ye6BVPLmMk9Io4YZdaCNq8N4Ni6jTIzs3p8haqZWYGc3M3MCuTkbmZWICd3M7MCObmbmRXIyd3MrEBO7mZmBXJyNzMrkJO7mVmBnNzNzArk5G5mViAndzOzAjm5m5kVyMndzKxATu5mZgVycjczK5CTu5lZgZzczcwK5ORuZlYgJ3czswI5uZuZFcjJ3cysQE7uZmYFcnI3MyuQk7uZWYGc3M3MCuTkbmZWICd3M7MCObmbmRXIyd3MrEBO7mZmBXJyNzMrkJO7mVmBaiV3Sf9T0hpJP5a0QtIMSXMlXSfpDknnS5reVGPNzKwzlZO7pB2AE4B5EfH7wKbAQuBTwOci4uXAQ8AxTTTUzMw6V7dbZhrwAknTgJnAOuAA4MI8fxlwaM3PMDOzLlVO7hFxL/AZ4B5SUn8EWA08HBFP5ZetBXZo935JiyWtkrRqw4YNVZthZmZt1OmW2Ro4BJgLvBTYAjiw0/dHxJKImBcR8wYGBqo2w8zM2qjTLfNW4K6I2BARTwIXAfsBW+VuGoA5wL0122hmZl2qk9zvAfaVNFOSgAXALcBVwGH5NUcCl9RropmZdatOn/t1pBOnNwA351hLgI8CJ0m6A9gWOLeBdpqZWRemjf2S0UXEqcCpIybfCexTJ66ZmdXjK1TNzArk5G5mViAndzOzAjm5m5kVyMndzKxATu5mZgVycjczK5CTu5lZgZzczcwK5ORuZlYgJ3czswI5uZuZFcjJ3cysQE7uZmYFcnI3MyuQk7uZWYGc3M3MCuTkbmZWICd3M7MCObmbmRWo1g2yzawPLFkCy5dPdCvGNnRG+jn/xIltRycWLYLFiye6FbU4uZtNdsuXw9AQDA5OdEs2auXgJEjqkJYlOLmbWR8YHISVKye6FWWYP3+iW9AI97mbmRXIyd3MrEBO7mZmBXJyNzMrkJO7mVmBnNzNzArk5G5mViAndzOzAjm5m5kVyMndzKxAtZK7pK0kXSjpJ5JulfQGSdtIukLS7fnn1k011szMOlP3yP1M4F8j4pXA64BbgZOBKyNiV+DK/LuZmY2jysld0ouB
NwPnAkTE7yLiYeAQYFl+2TLg0LqNNDOz7tQ5cp8LbAC+LOlGSedI2gKYFRHr8mvWA7PavVnSYkmrJK3asGFDjWaYmdlIdZL7NGBP4OyI2AP4FSO6YCIigGj35ohYEhHzImLewMBAjWaYmdlIdZL7WmBtRFyXf7+QlOzvl7Q9QP75QL0mmplZtyrfrCMi1kv6uaRXRMRPgQXALflxJHBa/nlJIy2dYEtWL2H5zf1/K7Oh9elWZvOX9v9dbxa9ZhGL95rcd7sx61d178R0PHCepOnAncDRpG8DF0g6BrgbOLzmZ/SF5TcvZ2j9EIOz+/tWZoMn939SBxhan25l5uRu1hu1kntEDAHz2sxaUCduvxqcPcjKo1ZOdDOKMH/p/IluglnRfIWqmVmBnNzNzArk5G5mViAndzOzAjm5m5kVyMndzKxATu5mZgVycjczK5CTu5lZgZzczcwK5ORuZlYgJ3czswI5uZuZFajukL82BfRiLPvhIX+bHh3SY8SbJT5ytzENj2XfpMHZg42PjT+0fmhS3FDFbDz4yN06MhnGsvcY8WbP8pG7mVmBnNzNzArk5G5mViAndzOzAjm5m5kVyMndzKxATu5mZgVycjczK5CTu5lZgZzczcwK5ORuZlYgJ3czswI5uZuZFcjJ3cysQE7uZmYFcnI3MyuQk7uZWYFqJ3dJm0q6UdK38u9zJV0n6Q5J50uaXr+ZZmbWjSZus/eXwK3Ai/LvnwI+FxFfk/RPwDHA2Q18jk1B3dycu5ubbvtG2la6WkfukuYAbwfOyb8LOAC4ML9kGXBonc+wqa2bm3N3etNt30jbpoK6R+5nAB8BXph/3xZ4OCKeyr+vBXZo90ZJi4HFADvttFPNZljJmr45t2+kbVNB5SN3Se8AHoiI1VXeHxFLImJeRMwbGBio2gwzM2ujzpH7fsAfSzoYmEHqcz8T2ErStHz0Pge4t34zzcysG5WP3CPilIiYExE7AwuB70XEe4CrgMPyy44ELqndSjMz60ov6tw/Cpwk6Q5SH/y5PfgMMzPbiCZKIYmIlcDK/PxOYJ8m4pqZWTW+QtXMrEBO7mZmBXJyNzMrkJO7mVmBnNzNzArk5G5mVqBGSiHNzCbEkiWwvOFB4IbyQHXz5zcbd9EiWDx+I5H6yN3MJq/ly59Nxk0ZHEyPJg0NNf9PaAw+cjezyW1wEFaunOhWbFzT3wI64CN3M7MCObmbmRXIyd3MrEBO7mZmBXJyNzMrkJO7mVmBnNzNzArk5G5mViBfxGQ2nny5vI0TH7mbjSdfLm/jxEfuZuPNl8vbOPCRu5lZgZzczcwK5ORuZlYgJ3czswL5hKrZVNJpKWa35ZUum+w7PnI3m0o6LcXsprzSZZN9yUfuZlNN06WYLpvsSz5yNzMrkJO7mVmBnNzNzApUZJ/7ktVLWH5zsyd4htank1Dzl85vNO6i1yxi8V6uMjCbdLoZBK6b6qOGKo+KPHJffvPyZ5JxUwZnDzI4u9nBmYbWDzX+T8jMxkk3g8B1Wn3UYOVRkUfukJLxyqNWTnQzNqrpbwFmNs76uPKo8pG7pB0lXSXpFklrJP1lnr6NpCsk3Z5/bt1Ya83MrCN1umWeAj4cEbsD+wLHStodOBm4MiJ2Ba7Mv5uZ2TiqnNwjYl1E3JCfPwbcCuwAHAIsyy9bBhxat5FmZtadRk6oStoZ2AO4DpgVEevyrPXArFHes1jSKkmrNmzY0EQzzMwsq53cJW0JfAM4MSIebZ0XEQFEu/dFxJKImBcR8wYGBuo2w8zMWtRK7pI2IyX28yLiojz5fknb5/nbAw/Ua6KZmXWrTrWMgHOBWyPi9JZZlwJH5udHApdUb56ZmVVRp859P+B9wM2Shiv5/wY4DbhA0jHA3cDh9ZpoZmbdqpzcI+IaQKPMXlA1rpmZ1Vfk8ANmZlNdscMPTJRuBi3rZjAyDzBmU4pvB1ibj9wb1s2gZZ0ORuYBxmzK8e0Aa/ORew80PWiZBxizKamP
B+WaDHzkbmZWICd3M7MCObmbmRXIyd3MrEBO7mZmBXK1jFkTXJdtfcZH7mZNcF229RkfuZs1xXXZ1kcmVXLv9NJ+X9ZvZlPdpOqW6fTSfl/Wb2ZT3aQ6codmL+33Zf1mVqpJdeRuZmadcXI3MyuQk7uZWYGc3M3MCuTkbmZWICd3M7MCObmbmRXIyd3MrEBO7mZmBXJyNzMrkJO7mVmBnNzNzArk5G5mViAndzOzAjm5m5kVyMndzKxAk+5mHdaMTm9ZCL5todlk5CP3KarTWxaCb1toNhn15Mhd0oHAmcCmwDkRcVovPsfqafKWheDbFpr1k8aP3CVtCnwBOAjYHThC0u5Nf46ZmY2uF90y+wB3RMSdEfE74GvAIT34HDMzG4UiotmA0mHAgRHxgfz7+4DXR8RxI163GBg+8/YK4KeNNsTMrHwvi4iBdjMmrFomIpYASybq883MStaLbpl7gR1bfp+Tp5mZ2TjpRXK/HthV0lxJ04GFwKU9+BwzMxtF490yEfGUpOOA75BKIb8UEWua/hwzMxtd4ydUzcxs4vkKVTOzAjm5m5kVyMndzKxATu5mZgXq+yF/Jc0A3gG8CXgp8Gvgx8C361ThSJrXJuYVEfFQA23eAvhNRPxXzThvAN6b27l9Szu/DfxLRDxSt61NkrQ1zy7P/4yIpxuI2ciybInXaBslvQTYj+duR6vqxG1625Q0h1SS/Lx9CLi8gWXQ1Pbe+L7ei31o0sTs52oZSZ8greyVwGrgAWAGsBuwf37+4Yi4qYuYRwPHA3e1ibkfaYF+PCLu6SLmJqSd5z3A3sBvgc2BX5BWzj9HxB2dxssxLwfuAy4BVvH8v/2PgNMjoqNrCCSdAFwcET/vph0dxH0xcCxwBDAd2JDbOQu4FjgrIq7qIl4vlmWjbcwx9wdOBrYBbuS562cX4ELgsxHxaBcxe7FtfhnYAfgW7bejvYCTI+LqLmL2Yh31Yl9vdB+aTDEBiIi+fQBvH2P+S4B5XcY8FnjBRuYPAgu6jPnvwMeB1wKbtEzfBngX8A3gvV3G3K6J17S89pG8Af1/4C+AgYbW0RXA+4Ct2szbCzgDOGaCl2Wjbczv+0dgp1HmTQMOBd7VB9vm748xfzrw8i5j9mId9WJfb3QfmkwxI6K/j9wnC0mbRcSTdV8zxvtfBOwK3BkVvp5LupGUyN4K/Cnwx6QjpBXARRHxWNW2NWk8lqXVM5nWkaRpEfFUfr4l8ErSPvRgg5+xTZPxmtLXJ1QlXSTpvXmlNBXzdElvbCpetsVYL+h2Q5f0L5K2y8/fRvpK/ilgSNK7K7QxIuLpiPhuRBxD6tM8CzgQuLNCvNa2zpY0Oz8fkPQnkl5dJdbGltPwdlA1aUjarM207arEyu99m6SzJV2aH2fnG9U0RtJtNd//SkmXS/q2pF0kLZX0sKQfSnpVlZjtlr+kbcZ6zRjt3FTSByV9UtJ+I+Z9rEo7JR0F3C/pNkkHATeR9qEfSTqiYsz9JN0qaY2k10u6Arhe0s9zv3mjJN1c+c3dHuqP54M04NiFwIPABcA7gek1Y24g9WvdDXwa2KOBdj4F/BtwDG2++leMeXPL8x8AOw9/PQN+VCHejRuZN7NGOz9I6iP+T+BDwHXAuaQhnLvq6ujgs+6p+L79gbWkPuHvDi/LPO+GijHPAC4j9T2/MT8W5mlnVoz5GPBofjyWH/81PL1izKtJfbZH5G1+IaA87cqKMT/W8nx34LaWbeD1FWOeAywHTiR9ozy9gXV0c95f5uZlukuePgu4qWLMHwKvAd6Qt6c35ul7At+vGPNPRnm8C9hQJWZE9H1yvzH/fBGpz/SynJy/DPxhzZi7kfoN1wA/AU4FdquxEb0DOA/4JenEyEI20n/aQcw1wIvy82t4bt/mmgrxKv1tHf7tM4FtgceB2Xn61sBQhXgnjfL4MPBgxTZeD7w6Pz8MuB3Yt3V7qBDztlGmC7i9YszPA18BZrVMu6vm+rmx5fkdI+ZV
TZo3tDz/NnBQfr4P8IOKMW9qeT6NNBz4RaQTtVXX0VDL8/tG+7way/PWhpbnk8DSnNdGPh6ruu77ulsGCICIeDQivhoRB5P6zK4jVSrUiXlbRHwyIl4NHE46O31ZxZhPRsS3IuI9pCGOz8sx10qqesfoTwBXSXo/8H3g65KOlLQU+Ndug0XE877eS/qLim1r9WREPBERvwR+FhHr8+c9RF7WXfp70j+GF454bEn1bsTpkUvpIuJC0snOZZIOrdhGgN9I2rvN9L2B31QJGBEnkO49vELSCbkqpe5JsU1bnp8+Yt70mrEBXhoRlwNExA+BF1SM80xbIuKpiFgMDAHfI637Ku6R9A+S/i/wE0mfzd0qpwLrKsZs3QZPGTGv6vK8CfhMRBw98gE8XDFm39e5Pz5yQk4i/5QfVahNzJtIC3jkyuo6ZkT8mtSFdEEuwTu0SsCIuCCfBP0A6VvGNGBfYEVEfKfrBkontWnzKbm2mIgYueN33NSWk2dvb/m8GVRLxjcA34yI1SNnSPpAxTY+KWl2yz+eNZIWkMoDd6kY8yjgbEkvJHX5QLqPwSN5XiURsVrSW4HjSFUpM6rGyr4gacuIeDwizhqeKOnlpK7EKn5P0qWkbWiOpJkR8USe97zzGh1aJenAiHjmwCUi/k7SfcDZFWO+l1SB9AjpYPBA0j5+D9XX0ceH/96I+ObwREm7kL51VXEiqduonXdWjDn1qmWGN/SGY/5VRHymyZhNk/QY6ZvJGp79Z3Qiqe+YiPhExbg7AusiVyS0TN8BeFVE/JskRYcbmqRXAL+MiF+0mTcrIu6v0Ma3kvoufzRi+lbAsRHxv7uN2RJjNqmOHODe4X8gTZC0PemcUNVvlD0h6S0jJq2OiMclzQIOi4gvTES7bISq/TkT/QD+oMZ7Z/Ns3/AA6eTFqyf6bxrRxpnAR4C/Jh29HUm66cmngS0rxNsJ+DqpWmBmnnZnA+1cSbrwZqcR06cDC4BlwFETvTzbtHvPBmK8GXhFfr4f8FfAwQ22cW7eNl9ZI8bvAV8C/hepe+OLpMqrr9NyYnmiH6TS3BkNx9wEOJr0De1HpG+FXwPeUiPmceSac+DlpBPWD5O6ijd6TcFGYo7c14+qs68PP/q9z31jzq3yJkkfBP4DuFbSh0gr/u3ARZKOabB9w59X9T6xS0ln9eeSTlrtTbpwRlT4mhoR90TEu0mVN1co3ci8CQeSKjpWSLpP0i2S7iSdtFwInBERS5v4oKrLUtKeIx57AZdK2kPSnhVjngGcBnxV0idJ6+YFwEmS/rFizNav+YeQ+pv/CLgkl/VVsZR0Qvlx0tW4PwEOIp23+VLFdr625flmkj6WS0H/XtLMiu08n3SO6quSDpa06ZjvGNu5wMtI6+kq0r5+Lqlr5fiKMT8Uz36rPBP4XERsBXwU+OeKMZfy3H19HjX29WdM9H/sMf6jXTrK4/8Bv6oYs9HqjvzebUZ5bAusrRhzKP8UsJ5nu9BExTP9LbG3yBvP1Q2vr81I42JULgft0bJ8mvRP7aqWx6/zz+9VjDncvTUTeIhnvw1tBvy4YszWSowfAHPz80rlr21i3jPavC5jtlbLfJaUnN4CfA74StV25n3wz4ErgftJ59XeUmNbumnE79fmn5szotKli5g/bXl+/cY+r4uYPdnX+/2E6ptIJ0VG9pGLVHZVxZORTv48Iek51R2Sqp6A2ECqIW49WRv595dUjEluV0i6LPLazr/XOlESEb8ifQVsVKSTqlWrEIb1Ylm+GzgB+HTkyg5Jd0XE/jXaGXldDA+6NbxOnqZ6VU/rep0WEXflD/pFy+d062lJuwEvBmZKmhcRq/IJ1apHx63rZgGwd0Q8KelqUvdHFRGpwuqLwBfzuYzDgdMkzYmIHSvEfFLSLhHxs/wN7Xf5g35bYx+6MFes/R1wsaQTgYuBA0gnaitrel/v9+R+LfBERPz7yBmSfloxZtPVHZCu8FwQbQZ0klR1oK5VLVUO72+J
twvpopauSLqBVDe8IiJ+VrFN46HxZRkR35D0HeCTubT0w9QvMbxM0jWko8BzSNVR15KOYDsehGuE10l6lJQ8N5e0fUSsU7rRfNVE/BHSN92nSZVbp0h6HenakT+vGPPFkt5J2l82z/tS3WT0nCq2fND1eeDzkl5WMeZfk8qJf0vKdQshXUVN6qLpWkT8be4iW0GqtNocWAx8kzSQWhWN7uvPvD//k+hLnVRZdFOJkV/faHVHfu+xwDUxohojzzs+Iv5Pp7E6/Lyu2pffcxdpQKfDSV/9VgDnR8R9Tbatrl4vy3wE91nSya+BGnFEKk2NiLg274jvJB29XRgRT1fYjjaJNsPv5qqeV0XEf1RZ923ibQc8FBWH6FUaabLVyRFxfz7aPi8iFlSIuX+MMTJnxe1ewLbRpvpqMqi1vqv254zHg41XYhxAhUqMMWL2VXUH6ehqlzbTX1shVms/6ZtI48qsJ/U7L57ov3Wcl6vIV//WiDHe22almGN8XuWKsx6sk5787U3uQ71cnr1o54Sv1DH+4Bmk4Wm/Txqu9hbS1/a7SX1zXY8L04uYvVjhpCPs+0hX6a0h9WsOz+v6Mud27yF91T8Q+PJEr+sR7Wp0Qyd9Jf8gqUJk+IK1y4H/AWw2hbfNSmP19Ggd9WJ5NroP9Wp59qqdfd0t00ppRL/tgF9HROVLcnsds81n3BMRO1V43xBpzI51kvYhXf12SkRcLOnGiNijy3hfi4iF3bZjvEk6nHRh1QOkypOjIuL6PO+GiOi6dFHSClIt8jKevZp0DunagW0i4k9rtrlvt02lK0nbzgIOiIgxRzRtE7PxdTQiflN/e6P7UI7Zi+XZeDuh/0+oPiOaqcToScwxVvi2FcNuGhHrII3ZoXTnn2/lcwZd/0feWGKXdHREjOxHnSh/A+zVsqF/VdIpEXExbYaO6NBeEbHbiGlrSdc61BpSF/p726Q3FWe9WEfPaPBvb3QfynqxPHvRzsmT3PtcL1b4Y8NlXAB5R5pPOitfaaz0jfgEaQS6ftCLDf1BpTHwvxH5hKXSoFzvJtWol6wXFWc9SUY90It9qBfLsyf7upN7M3qxwo/j+eVhjyndDOLwHLubMVtGu/ekSFfH9YtebOgLScMunCVpOJlvRTqZ3PddVTUdPNo2EhFvhkoVGeN54FFHo/tQ1ovl2Yt2Tp4+937Wo5LNlaTSxUuipeY71zy/Cfgz4Kro8NJ+SfcDb+P5R6oijcH90k7b1ku5VPHRGHGD5dwPe3hEnFenPEzStvDM6KLFG2M7eiPpvEPH21F+b0/XUVOa3oc6iFl1eTbeTnByb0SPVvgM4P2kCyPmkk4IziBVuHwXOCsibuwi3rmkqphr2sxbHhGLOo3VS73a0DfyeX8QEVc0EasfNb0d5ZgrGcd1VFWP/vZJEROc3BvRq5XTEr/nVT39otfLss3nVapmmowarEIZ13XUhH6uaOpVTCf3hk2lRNxr/VwOaIm39/7l5G7FyydRR6tmOj8i+umEslkjXC1jU0EvqpnM+pqP3K14vahmMut3k/lOTGadukrS8ZKec+JU0nRJB0haRqpoMiuGj9yteJOxusOsLid3m1Jc3WFThZO7mVmB3OduZlYgJ3czswI5uZuZFcjJ3cysQP8NZQDtLIW37uoAAAAASUVORK5CYII=\n"
604 | },
605 | "metadata": {
606 | "needs_background": "light"
607 | }
608 | }
609 | ]
610 | }
611 | ]
612 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 PyRat
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://pypi.org/project/pyratlib/)
2 | [](https://pypi.org/project/pyratlib/)
3 | [](https://github.com/pyratlib/pyrat/commits/main)
4 | [](https://github.com/pyrat/pyratlib)
5 | [](https://doi.org/10.5281/zenodo.5883277)
6 | [](https://github.com/pyratlib/pyrat/stargazers)
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | # PyRat - Python in Rodent Analysis and Tracking
16 | ------------
17 | PyRat is a user-friendly Python library for analyzing data from DeepLabCut. It was developed to help researchers unfamiliar with programming perform animal behavior analysis more simply.
18 |
19 | # Installation
20 | ------------
21 |
22 | The latest stable release is available on PyPi, and you can install it by saying
23 | ```
24 | pip install pyratlib
25 | ```
26 | Anaconda users can install using ``conda-forge``:
27 | ```
28 | conda install -c conda-forge pyratlib
29 | ```
30 |
31 | To build PyRat from source, say `python setup.py build`.
32 | Then, to install PyRat, say `python setup.py install`.
33 | If all went well, you should be able to execute the demo scripts under `examples/`
34 | (OS X users should follow the installation guide given below).
35 |
36 | Alternatively, you can download or clone the repository and use `pip` to handle dependencies:
37 |
38 | ```
39 | unzip pyratlib.zip
40 | pip install -e pyratlib
41 | ```
42 | or
43 | ```
44 | git clone https://github.com/pyratlib/pyrat.git
45 | pip install -e pyratlib
46 | ```
47 |
48 | By calling `pip list` you should see `pyrat` now as an installed package:
49 | ```
50 | pyrat (0.x.x, /path/to/pyratlib)
51 | ```
52 | # Data
53 | ------
54 |
55 | The data is available on [Zenodo](https://doi.org/10.5281/zenodo.5865893)
56 |
57 | # Examples
58 | -----------
59 |
61 | - Basic Usage [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Basic_Plots.ipynb)
62 | - Behavior Classification [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Behavior_Classification.ipynb)
63 | - Behavior Classification of multiple videos [](https://github.com/pyratlib/pyrat/blob/main/Classify_Multiple_Videos.ipynb)
64 | - Metrics in mice [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Mice.ipynb)
65 | - Neural Data example [](https://github.com/pyratlib/pyrat/blob/main/PyRAT_Neural_Data.ipynb)
66 |
67 | # References:
68 | -----------
69 |
70 | If you use our code, we kindly ask that you please cite [De Almeida et al, 2022](https://www.frontiersin.org/articles/10.3389/fnins.2022.779106/full) and, if you use the dataset, please also cite [De Almeida et al, 2021](https://zenodo.org/record/5865893).
71 |
72 | - De Almeida et al, 2022: [https://doi.org/10.3389/fnins.2022.779106](https://doi.org/10.3389/fnins.2022.779106)
73 | - De Almeida et al, 2021: [10.5281/zenodo.5883277](https://zenodo.org/record/5883277)
74 |
75 |
76 | Please check out the following references for more details:
77 |
78 | @article{deAlmeida2022,
79 | title = {PyRAT: An open source-python library for fast and robust animal behavior analysis and neural data synchronization},
80 | author = {De Almeida, Tulio Fernandes and
81 | Spinelli, Bruno Guedes and
82 | Hypolito Lima, Ram{\'o}n and
83 | Gonzalez, Maria Carolina and
84 | Rodrigues, Abner Cardoso},
85 | journal = {Frontiers in Neuroscience},
86 | pages = {505},
87 | publisher = {Frontiers}
88 | }
89 |
90 | @dataset{deAlmeida2021,
91 | title = {PyRAT-data-example},
92 | author = {Almeida, Túlio and
93 | Spinelli, Bruno and
94 | Gonzalez, Maria Carolina and
95 | Lima, Ramón and
96 | Rodrigues, Abner},
97 | month = sep,
98 | year = 2021,
99 | publisher = {Zenodo},
100 | version = {1.0.0},
101 | doi = {10.5281/zenodo.5883277},
102 | url = {https://doi.org/10.5281/zenodo.5883277}
103 | }
104 |
105 | # Development Team:
106 | ------------
107 |
108 | - Tulio Almeida - [GitHub](https://github.com/tuliofalmeida) - [Google Scholar](https://scholar.google.com/citations?user=kkOy-JkAAAAJ&hl=en) - [Site](https://tuliofalmeida.com/)
109 | - Bruno Spinelli - [GitHub](https://github.com/brunospinelli) - [Google Scholar](https://scholar.google.com/)
110 | - Ramon Hypolito - [GitHub](https://github.com/ramonhypolito) - [Google Scholar](https://scholar.google.com/citations?user=5lKx5GcAAAAJ&hl=pt-BR&oi=ao)
111 | - Maria Carolina Gonzalez - [GitHub](https://github.com/pyratlib) - [Google Scholar](https://scholar.google.com/citations?user=7OXkSPcAAAAJ&hl=pt-BR&oi=ao)
112 | - Abner Rodrigues - [GitHub](https://github.com/abnr) - [Google Scholar](https://scholar.google.com.br/citations?user=0dTid9EAAAAJ&hl=en)
113 |
114 |
115 |
--------------------------------------------------------------------------------
/build/lib/pyratlib/__init__.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import pandas as pd
4 | import csv
5 | from pyratlib.processing import *
6 | from matplotlib import cm
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.2-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.2-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.2.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.2.tar.gz
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.3-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.3-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.3.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.3.tar.gz
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.4-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.4-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.4.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.4.tar.gz
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.5-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.5-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.5.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.5.tar.gz
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.6-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.6-py3-none-any.whl
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.6.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.6.tar.gz
--------------------------------------------------------------------------------
/dist/pyratlib-0.7.7.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/dist/pyratlib-0.7.7.tar.gz
--------------------------------------------------------------------------------
/docs/LOGO PYRAT.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyratlib/pyrat/7508d465f3f3c1ee7fe7854fe6c624ce2cdb65a9/docs/LOGO PYRAT.png
--------------------------------------------------------------------------------
/pyratlib.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: pyratlib
3 | Version: 0.7.7
4 | Summary: PyRat is a user-friendly Python library for analyzing data from DeepLabCut. It was developed to help researchers unfamiliar with programming perform animal behavior analysis more simply.
5 | Home-page: https://github.com/pyratlib/pyrat
6 | Download-URL: https://github.com/pyratlib/pyrat
7 | License: MIT
8 | Keywords: Data analysis,Animal Behavior,Electrophysiology,Tracking,DeepLabCut
9 | Platform: UNKNOWN
10 | Classifier: Development Status :: 4 - Beta
11 | Classifier: Intended Audience :: Developers
12 | Classifier: Topic :: Software Development :: Build Tools
13 | Classifier: License :: OSI Approved :: MIT License
14 | Classifier: Programming Language :: Python :: 3
15 | Classifier: Programming Language :: Python :: 3.4
16 | Classifier: Programming Language :: Python :: 3.5
17 | Classifier: Programming Language :: Python :: 3.6
18 | Description-Content-Type: text/markdown
19 | License-File: LICENSE
20 |
21 | [](https://pypi.org/project/pyratlib/)
22 | [](https://pypi.org/project/pyratlib/)
23 | [](https://github.com/pyratlib/pyrat/commits/main)
24 | [](https://github.com/pyrat/pyratlib)
25 | [](https://doi.org/10.5281/zenodo.5883277)
26 | [](https://github.com/pyratlib/pyrat/stargazers)
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 | # PyRat - Python in Rodent Analysis and Tracking
36 | ------------
37 | PyRat is a user-friendly Python library for analyzing data from DeepLabCut. It was developed to help researchers unfamiliar with programming perform animal behavior analysis more simply.
38 |
39 | # Installation
40 | ------------
41 |
42 | The latest stable release is available on PyPi, and you can install it by saying
43 | ```
44 | pip install pyratlib
45 | ```
46 | Anaconda users can install using ``conda-forge``:
47 | ```
48 | conda install -c conda-forge pyratlib
49 | ```
50 |
51 | To build PyRat from source, say `python setup.py build`.
52 | Then, to install PyRat, say `python setup.py install`.
53 | If all went well, you should be able to execute the demo scripts under `examples/`
54 | (OS X users should follow the installation guide given below).
55 |
56 | Alternatively, you can download or clone the repository and use `pip` to handle dependencies:
57 |
58 | ```
59 | unzip pyratlib.zip
60 | pip install -e pyratlib
61 | ```
62 | or
63 | ```
64 | git clone https://github.com/pyratlib/pyrat.git
65 | pip install -e pyratlib
66 | ```
67 |
68 | By calling `pip list` you should see `pyrat` now as an installed package:
69 | ```
70 | pyrat (0.x.x, /path/to/pyratlib)
71 | ```
72 | # Data
73 | ------
74 |
75 | The data is available on [Zenodo](https://doi.org/10.5281/zenodo.5865893)
76 |
77 | # Examples
78 | -----------
79 |
81 | - Basic Usage [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Basic_Plots.ipynb)
82 | - Behavior Classification [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Behavior_Classification.ipynb)
83 | - Behavior Classification of multiple videos [](https://github.com/pyratlib/pyrat/blob/main/Classify_Multiple_Videos.ipynb)
84 | - Metrics in mice [](https://colab.research.google.com/github/pyratlib/pyrat/blob/main/PyRAT_Mice.ipynb)
85 | - Neural Data example [](https://github.com/pyratlib/pyrat/blob/main/PyRAT_Neural_Data.ipynb)
86 |
87 | # References:
88 | -----------
89 |
90 | If you use our code, we kindly ask that you please cite [De Almeida et al, 2022](https://www.frontiersin.org/articles/10.3389/fnins.2022.779106/full) and, if you use the dataset, please also cite [De Almeida et al, 2021](https://zenodo.org/record/5865893).
91 |
92 | - De Almeida et al, 2022: [https://doi.org/10.3389/fnins.2022.779106](https://doi.org/10.3389/fnins.2022.779106)
93 | - De Almeida et al, 2021: [10.5281/zenodo.5883277](https://zenodo.org/record/5883277)
94 |
95 |
96 | Please check out the following references for more details:
97 |
98 | @article{deAlmeida2022,
99 | title = {PyRAT: An open source-python library for fast and robust animal behavior analysis and neural data synchronization},
100 | author = {De Almeida, Tulio Fernandes and
101 | Spinelli, Bruno Guedes and
102 | Hypolito Lima, Ram{\'o}n and
103 | Gonzalez, Maria Carolina and
104 | Rodrigues, Abner Cardoso},
105 | journal = {Frontiers in Neuroscience},
106 | pages = {505},
107 | publisher = {Frontiers}
108 | }
109 |
110 | @dataset{deAlmeida2021,
111 | title = {PyRAT-data-example},
112 | author = {Almeida, Túlio and
113 | Spinelli, Bruno and
114 | Gonzalez, Maria Carolina and
115 | Lima, Ramón and
116 | Rodrigues, Abner},
117 | month = sep,
118 | year = 2021,
119 | publisher = {Zenodo},
120 | version = {1.0.0},
121 | doi = {10.5281/zenodo.5883277},
122 | url = {https://doi.org/10.5281/zenodo.5883277}
123 | }
124 |
125 | # Development Team:
126 | ------------
127 |
128 | - Tulio Almeida - [GitHub](https://github.com/tuliofalmeida) - [Google Scholar](https://scholar.google.com/citations?user=kkOy-JkAAAAJ&hl=en) - [Site](https://tuliofalmeida.com/)
129 | - Bruno Spinelli - [GitHub](https://github.com/brunospinelli) - [Google Scholar](https://scholar.google.com/)
130 | - Ramon Hypolito - [GitHub](https://github.com/ramonhypolito) - [Google Scholar](https://scholar.google.com/citations?user=5lKx5GcAAAAJ&hl=pt-BR&oi=ao)
131 | - Maria Carolina Gonzalez - [GitHub](https://github.com/pyratlib) - [Google Scholar](https://scholar.google.com/citations?user=7OXkSPcAAAAJ&hl=pt-BR&oi=ao)
132 | - Abner Rodrigues - [GitHub](https://github.com/abnr) - [Google Scholar](https://scholar.google.com.br/citations?user=0dTid9EAAAAJ&hl=en)
133 |
134 |
135 |
136 |
137 |
--------------------------------------------------------------------------------
/pyratlib.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | LICENSE
2 | README.md
3 | setup.cfg
4 | setup.py
5 | pyratlib/__init__.py
6 | pyratlib/processing.py
7 | pyratlib.egg-info/PKG-INFO
8 | pyratlib.egg-info/SOURCES.txt
9 | pyratlib.egg-info/dependency_links.txt
10 | pyratlib.egg-info/requires.txt
11 | pyratlib.egg-info/top_level.txt
--------------------------------------------------------------------------------
/pyratlib.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/pyratlib.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | pandas
3 | neo
4 | scikit-learn
5 | wheel
6 |
--------------------------------------------------------------------------------
/pyratlib.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | pyratlib
2 |
--------------------------------------------------------------------------------
/pyratlib/__init__.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import pandas as pd
4 | import csv
5 | from pyratlib.processing import *
6 | from matplotlib import cm
--------------------------------------------------------------------------------
/pyratlib/processing.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import pandas as pd
4 | from matplotlib import cm
5 |
6 | def Trajectory(data,bodyPart,bodyPartBox = None, **kwargs):
7 | """
8 | Plots the trajectory of the determined body part.
9 | Parameters
10 | ----------
11 | data : pandas DataFrame
12 | The input tracking data.
13 | bodyPart : str
14 | Body part you want to plot the tracking.
15 | bodyPartBox : str
16 | The body part you want to use to estimate the limits of the environment,
17 | usually the base of the tail is the most suitable for this determination.
18 | start : int, optional
19 | Moment of the video you want tracking to start, in seconds. If the variable
20 | is empty (None), the entire video will be processed.
21 | end : int, optional
22 | Moment of the video you want tracking to end, in seconds. If the variable is
23 | empty (None), the entire video will be processed.
24 | fps : int
25 | The recording frames per second.
26 | cmapType : str, optional
27 | matplotlib colormap.
28 | figureTitle : str, optional
29 | Figure title.
30 | hSize : int, optional
31 | Determine the figure height size (x).
32 | wSize : int, optional
33 | Determine the figure width size (y).
34 | fontsize : int, optional
35 | Determine of all font sizes.
36 | invertY : bool, optional
37 | Determine if de Y axis will be inverted (used for DLC output).
38 | limit_boundaries : bool, optional.
39 | Limits the points to the box boundary.
40 | xLimMin : int, optional
41 | Determines the minimum size on the axis X.
42 | xLimMax : int, optional
43 | Determines the maximum size on the axis X.
44 | yLimMin : int, optional
45 | Determines the minimum size on the axis Y.
46 | yLimMax : int, optional
47 | Determines the maximum size on the axis Y.
48 | saveName : str, optional
49 | Determine the save name of the plot.
50 | figformat : str, optional
51 | Determines the type of file that will be saved. Used as base the ".eps",
52 | which may be another supported by matplotlib.
53 | res : int, optional
54 | Determine the resolutions (dpi), default = 80.
55 | ax : fig, optional
56 | Creates an 'axs' to be added to a figure created outside the role by the user.
57 | fig : fig, optional
58 | Creates an 'fig()' to be added to a figure created outside the role by the user.
59 | Returns
60 | -------
61 | out : plot
62 | The output of the function is the figure with the tracking plot of the
63 | selected body part.
64 | See Also
65 | --------
66 | For more information and usage examples: https://github.com/pyratlib/pyrat
67 | Notes
68 | -----
69 | This function was developed based on DLC outputs and is able to support
70 | matplotlib configurations."""
71 |
72 | import numpy as np
73 | import matplotlib.pyplot as plt
74 | import pandas as pd
75 | from matplotlib import cm
76 | from mpl_toolkits.axes_grid1 import make_axes_locatable
77 |
78 | saveName= kwargs.get('saveName')
79 | start= kwargs.get('start')
80 | end= kwargs.get('end')
81 | figureTitle = kwargs.get('figureTitle')
82 | fps = kwargs.get('fps')
83 | ax = kwargs.get('ax')
84 | limit_boundaries = kwargs.get('limit_boundaries')
85 | xLimMin = kwargs.get('xLimMin')
86 | xLimMax = kwargs.get('xLimMax')
87 | yLimMin = kwargs.get('yLimMin')
88 | yLimMax = kwargs.get('yLimMax')
89 |
90 | if type(limit_boundaries) == type(None):
91 | limit_boundaries = False
92 | fig = kwargs.get('fig')
93 | if type(fps) == type(None):
94 | fps = 30
95 | cmapType = kwargs.get('cmapType')
96 | if type(cmapType) == type(None):
97 | cmapType = 'viridis'
98 | hSize = kwargs.get('hSize')
99 | if type(hSize) == type(None):
100 | hSize = 6
101 | wSize = kwargs.get('wSize')
102 | if type(wSize) == type(None):
103 | wSize = 8
104 | bins = kwargs.get('bins')
105 | if type(bins) == type(None):
106 | bins = 30
107 | fontsize = kwargs.get('fontsize')
108 | if type(fontsize) == type(None):
109 | fontsize = 15
110 | invertY = kwargs.get('invertY')
111 | if type(invertY) == type(None):
112 | invertY = True
113 | figformat = kwargs.get('figformat')
114 | if type(figformat) == type(None):
115 | figformat = '.eps'
116 | res = kwargs.get('res')
117 | if type(res) == type(None):
118 | res = 80
119 |
120 | values = (data.iloc[2:,1:].values).astype(float)
121 | lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
122 |
123 | if type(start) == type(None):
124 | x = values[:,lista1.index(bodyPart+" - x")]
125 | y = values[:,lista1.index(bodyPart+" - y")]
126 | else:
127 | init = int(start*fps)
128 | finish = int(end*fps)
129 | x = values[:,lista1.index(bodyPart+" - x")][init:finish]
130 | y = values[:,lista1.index(bodyPart+" - y")][init:finish]
131 |
132 |
133 | cmap = plt.get_cmap(cmapType)
134 |
135 |
136 | if type(bodyPartBox) == type(None):
137 | c = np.linspace(0, x.size/fps, x.size)
138 | esquerda = xLimMin
139 | direita = xLimMax
140 | baixo = yLimMin
141 | cima = yLimMax
142 | else:
143 | c = np.linspace(0, x.size/fps, x.size)
144 | esquerda = values[:,lista1.index(bodyPartBox+" - x")].min()
145 | direita = values[:,lista1.index(bodyPartBox+" - x")].max()
146 | baixo = values[:,lista1.index(bodyPartBox+" - y")].min()
147 | cima = values[:,lista1.index(bodyPartBox+" - y")].max()
148 |
149 | if limit_boundaries:
150 | testeX = []
151 | for i in range(len(x)):
152 | if x[i] >= direita:
153 | testeX.append(direita)
154 | elif x[i] <= esquerda:
155 | testeX.append(esquerda)
156 | else:
157 | testeX.append(x[i])
158 |
159 | testeY = []
160 | for i in range(len(x)):
161 | if y[i] >= cima:
162 | testeY.append(cima)
163 | elif y[i] <= baixo:
164 | testeY.append(baixo)
165 | else:
166 | testeY.append(y[i])
167 | else:
168 | testeX = x
169 | testeY = y
170 |
171 | if type(ax) == type(None):
172 | plt.figure(figsize=(wSize, hSize), dpi=res)
173 | plt.title(figureTitle, fontsize=fontsize)
174 | plt.scatter(testeX, testeY, c=c, cmap=cmap, s=3)
175 | plt.plot([esquerda,esquerda] , [baixo,cima],"k")
176 | plt.plot([esquerda,direita] , [cima,cima],"k")
177 | plt.plot([direita,direita] , [cima,baixo],"k")
178 | plt.plot([direita,esquerda] , [baixo,baixo],"k")
179 | cb = plt.colorbar()
180 |
181 | if invertY == True:
182 | plt.gca().invert_yaxis()
183 | cb.set_label('Time (s)',fontsize=fontsize)
184 | cb.ax.tick_params(labelsize=fontsize*0.8)
185 | plt.xlabel("X (px)",fontsize=fontsize)
186 | plt.ylabel("Y (px)",fontsize=fontsize)
187 | plt.xticks(fontsize = fontsize*0.8)
188 | plt.yticks(fontsize = fontsize*0.8)
189 |
190 | if type(saveName) != type(None):
191 | plt.savefig(saveName+figformat)
192 |
193 | plt.show()
194 |
195 | else:
196 | ax.set_aspect('equal')
197 | plot = ax.scatter(testeX, testeY, c=c, cmap=cmap, s=3)
198 | ax.plot([esquerda,esquerda] , [baixo,cima],"k")
199 | ax.plot([esquerda,direita] , [cima,cima],"k")
200 | ax.plot([direita,direita] , [cima,baixo],"k")
201 | ax.plot([direita,esquerda] , [baixo,baixo],"k")
202 | ax.tick_params(axis='both', which='major', labelsize=fontsize*0.8)
203 | ax.set_title(figureTitle, fontsize=fontsize)
204 | ax.set_xlabel("X (px)", fontsize = fontsize)
205 | ax.set_ylabel("Y (px)", fontsize = fontsize)
206 |
207 | divider = make_axes_locatable(ax)
208 | cax = divider.append_axes('right',size='5%', pad=0.05)
209 | cb = fig.colorbar(plot,cax=cax)
210 | cb.ax.tick_params(labelsize=fontsize*0.8)
211 | cb.set_label(label='Time (s)', fontsize=fontsize)
212 |
213 | if invertY == True:
214 | ax.invert_yaxis()
215 |
def Heatmap(data, bodyPart, **kwargs):
    """
    Plots the trajectory heatmap of the determined body part.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data (DLC-style: row 0 = body part names, row 1 =
        coordinate labels, rows 2+ = numeric values).
    bodyPart : str
        Body part you want to plot the heatmap.
    bodyPartBox : str, optional
        The body part you want to use to estimate the limits of the environment,
        usually the base of the tail is the most suitable for this determination.
        Defaults to `bodyPart`.
    start : int, optional
        Moment of the video you want tracking to start, in seconds. If the variable
        is empty (None), the entire video will be processed.
    end : int, optional
        Moment of the video you want tracking to end, in seconds. If the variable is
        empty (None), the entire video will be processed.
    fps : int
        The recording frames per second (default 30).
    cmapType : str, optional
        matplotlib colormap (default 'viridis').
    limit_boundaries : bool, optional
        Limits the points to the box boundary.
    figureTitle : str, optional
        Figure title.
    hSize : int, optional
        Determine the figure height size (default 6).
    wSize : int, optional
        Determine the figure width size (default 8).
    bins : int, optional
        Determine the heatmap resolution; the higher the value, the higher the
        resolution (default 30).
    vmax : int, optional
        Determine the heatmap scale (default 1000).
    fontsize : int, optional
        Determine all font sizes (default 15).
    invertY : bool, optional
        Determine if the Y axis will be inverted (used for DLC output).
    saveName : str, optional
        Determine the save name of the plot.
    figformat : str, optional
        Determines the type of file that will be saved (default ".eps").
    res : int, optional
        Determine the resolution (dpi), default = 80.
    ax : fig axs, optional
        Allows the creation of an out-of-function figure to use this plot.
    fig : fig, optional
        Figure created outside the function by the user (required when `ax` is
        passed, for the colorbar).

    Returns
    -------
    out : plot
        The output of the function is the figure with the heatmap of tracking.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    from matplotlib import cm
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    saveName = kwargs.get('saveName')
    start = kwargs.get('start')
    end = kwargs.get('end')
    figureTitle = kwargs.get('figureTitle')
    fps = kwargs.get('fps')
    ax = kwargs.get('ax')
    fig = kwargs.get('fig')
    bodyPartBox = kwargs.get('bodyPartBox')
    limit_boundaries = kwargs.get('limit_boundaries')
    xLimMin = kwargs.get('xLimMin')
    xLimMax = kwargs.get('xLimMax')
    yLimMin = kwargs.get('yLimMin')
    yLimMax = kwargs.get('yLimMax')

    # Fill in defaults for every optional kwarg.
    if limit_boundaries is None:
        limit_boundaries = False
    if fps is None:
        fps = 30
    if bodyPartBox is None:
        bodyPartBox = bodyPart
    cmapType = kwargs.get('cmapType')
    if cmapType is None:
        cmapType = 'viridis'
    hSize = kwargs.get('hSize')
    if hSize is None:
        hSize = 6
    wSize = kwargs.get('wSize')
    if wSize is None:
        wSize = 8
    bins = kwargs.get('bins')
    if bins is None:
        bins = 30
    fontsize = kwargs.get('fontsize')
    if fontsize is None:
        fontsize = 15
    invertY = kwargs.get('invertY')
    if invertY is None:
        invertY = True
    figformat = kwargs.get('figformat')
    if figformat is None:
        figformat = '.eps'
    vmax = kwargs.get('vmax')
    if vmax is None:
        vmax = 1000
    res = kwargs.get('res')
    if res is None:
        res = 80

    # Rows 2+ hold the numeric tracking values; rows 0-1 hold the column labels.
    values = (data.iloc[2:,1:].values).astype(float)
    lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()

    # Environment limits estimated from the reference body part's extremes.
    esquerda = values[:,lista1.index(bodyPartBox+" - x")].min()
    direita = values[:,lista1.index(bodyPartBox+" - x")].max()
    baixo = values[:,lista1.index(bodyPartBox+" - y")].min()
    cima = values[:,lista1.index(bodyPartBox+" - y")].max()

    if start is None:
        x = values[:,lista1.index(bodyPart+" - x")]
        y = values[:,lista1.index(bodyPart+" - y")]
    else:
        # Convert seconds to frame indices.
        init = int(start*fps)
        finish = int(end*fps)
        x = values[:,lista1.index(bodyPart+" - x")][init:finish]
        y = values[:,lista1.index(bodyPart+" - y")][init:finish]

    if limit_boundaries:
        # Clamp every point into the estimated box.
        xx = np.clip(x, esquerda, direita)
        yy = np.clip(y, baixo, cima)
    else:
        xx = x
        yy = y

    if ax is None:
        plt.figure(figsize=(wSize, hSize), dpi=res)

        if xLimMin is not None:
            plt.hist2d(xx, yy, bins=bins, vmax=vmax, cmap=plt.get_cmap(cmapType),
                       range=[[xLimMin, xLimMax], [yLimMin, yLimMax]])
        else:
            plt.hist2d(xx, yy, bins=bins, vmax=vmax, cmap=plt.get_cmap(cmapType))

        cb = plt.colorbar()

        plt.title(figureTitle, fontsize=fontsize)
        cb.ax.tick_params(labelsize=fontsize*0.8)
        plt.xlabel("X (px)", fontsize=fontsize)
        plt.ylabel("Y (px)", fontsize=fontsize)
        plt.xticks(fontsize=fontsize*0.8)
        plt.yticks(fontsize=fontsize*0.8)
        if invertY:
            plt.gca().invert_yaxis()

        if saveName is not None:
            plt.savefig(saveName + figformat)

        plt.show()
    else:
        # Keep the QuadMesh returned by hist2d so the colorbar reflects the
        # histogram counts.
        if xLimMin is not None:
            _, _, _, im = ax.hist2d(xx, yy, bins=bins, vmax=vmax,
                                    cmap=plt.get_cmap(cmapType),
                                    range=[[xLimMin, xLimMax], [yLimMin, yLimMax]])
        else:
            _, _, _, im = ax.hist2d(xx, yy, bins=bins, vmax=vmax,
                                    cmap=plt.get_cmap(cmapType))
        ax.tick_params(axis='both', which='major', labelsize=fontsize*0.8)
        ax.set_title(figureTitle, fontsize=fontsize)
        ax.set_xlabel("X (px)", fontsize=fontsize)
        ax.set_ylabel("Y (px)", fontsize=fontsize)
        if invertY:
            ax.invert_yaxis()

        divider = make_axes_locatable(ax)
        cax = divider.append_axes('right', size='5%', pad=0.05)

        # BUG FIX: the previous implementation called ax.imshow([xx, yy]) just to
        # obtain a mappable for the colorbar, which drew the raw coordinate
        # arrays as an image on top of the heatmap and produced a colorbar with
        # a meaningless scale. Use the hist2d mappable instead.
        cb = fig.colorbar(im, cax=cax, orientation='vertical')
        cb.ax.tick_params(labelsize=fontsize*0.8)
417 |
def pixel2centimeters(data, pixel_max, pixel_min, max_real, min_real=0):
    """
    Linearly maps pixel coordinates onto a real-world scale (meter,
    centimeter, millimeter...).

    Parameters
    ----------
    data : pandas DataFrame or array-like
        The input tracking data (pixel values).
    pixel_max : int
        Pixel maximum value.
    pixel_min : int
        Pixel minimum value.
    max_real : int
        Box maximum value (e.g., box wall).
    min_real : int
        Box minimum value, usually zero.

    Returns
    -------
    out : int
        The input rescaled to the real-world range.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    # Normalize into [0, 1] over the pixel range, then stretch/shift into
    # the real-world interval [min_real, max_real].
    fraction = (data - pixel_min) / (pixel_max - pixel_min)
    return min_real + fraction * (max_real - min_real)
451 |
def MotionMetrics (data,bodyPart,filter=1,fps=30,max_real=60,min_real=0):
    """
    Performs motion-related metrics such as velocity, acceleration, and distance.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data (DLC-style: row 0 = body part names, row 1 =
        coordinate labels, rows 2+ = numeric values).
    bodyPart : str
        Body part you want use as reference.
    filter : float
        Threshold to remove motion artifacts. Adjust according to the tracking
        quality and speed of what is moving.
    fps : int
        The recording frames per second.
    max_real : int
        Box maximum value (eg., box wall).
    min_real : int
        Box minimum value, usually zero.

    Returns
    -------
    out : pandas DataFrame
        All metrics in the df: Time (minutes), Distance, Speed, Acceleration.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""
    import numpy as np
    import pandas as pd

    values = (data.iloc[2:,1:].values).astype(float)
    lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()

    dataX = values[:,lista1.index(bodyPart+" - x")]
    dataY = values[:,lista1.index(bodyPart+" - y")]

    # Convert both axes from pixels to the real-world scale.
    # BUG FIX: the Y axis was previously converted with `min_real` passed into
    # the `max_real` slot of pixel2centimeters, which collapses every Y value
    # to `min_real` (zero by default) and silently drops the Y component of
    # the distance. Both axes now use the documented (max_real, min_real) pair.
    dataX = pixel2centimeters(dataX, dataX.max(), dataX.min(), max_real, min_real)
    dataY = pixel2centimeters(dataY, dataY.max(), dataY.min(), max_real, min_real)

    time = np.arange(0,((1/fps)*len(dataX)), (1/fps))
    df = pd.DataFrame(time/60, columns = ["Time"])
    # Per-frame Euclidean displacement; `prepend` keeps the array length equal
    # to the number of frames.
    dist = np.hypot(np.diff(dataX, prepend=dataX[0]), np.diff(dataY, prepend=dataY[0]))
    dist[dist>=filter] = 0      # discard jumps larger than the artifact threshold
    dist[0] = np.nan            # first frame has no previous position
    df["Distance"] = dist
    df['Speed'] = df['Distance']/(1/fps)
    df['Acceleration'] = df['Speed'].diff().abs()/(1/fps)

    return df
507 |
def FieldDetermination(Fields=1,plot=False,**kwargs):
    """
    Creates a data frame with the desired dimensions to extract information about
    an area. Therefore, you must determine the area in which you want to extract
    information. Works perfectly with objects. We suggest using ImageJ, DLC GUI
    or other image software that is capable of informing the coordinates of a frame.
    If you have difficulty in positioning the areas, this parameter will plot the
    graph where the areas were positioned. It needs to receive the DataFrame of the
    data and the part of the body that will be used to determine the limits of the
    environment (usually the tail).
    ***ATTENTION***
    If plot = True, the user must pass the variable 'data' and 'bodyPartBox' as
    input to the function.

    Parameters
    ----------
    Fields : int
        Determines the number of fields or objects you want to create.
    plot : bool, optional
        Plot of objects created for ease of use.
    data : pandas DataFrame, optional
        The input tracking data (required when plot=True).
    bodyPartBox : str, optional
        The body part you want to use to estimate the limits of the environment,
        usually the base of the tail is the most suitable for this determination
        (required when plot=True).
    posit : dict, optional
        A dictionary to pass objects with directions and not need to use input.
        Each value holds 8 entries ('obj_type','center_x','center_y','radius',
        'a_x','a_y','height','width'); 'obj_type' must be 0 or 1 (0 = circle and
        1 = rectangle). An example of this dictionary is in Section examples.
    obj_color : str, optional
        Allows you to determine the color of the objects created in the plot.
    invertY : bool, optional
        Determine if the Y axis will be inverted (used for DLC output).
    ret : bool, optional
        If True (default), return the fields DataFrame.

    Returns
    -------
    out : pandas DataFrame
        The coordinates of the created fields.
    plot
        Plot of objects created for ease of use.

    Example
    --------
    Dictionary :
    >>>> posições = {'circ': [0,200,200,50,0 ,0 ,0 ,0 ],
    >>>>             'rect': [1,0 ,0 ,0 ,400,200,75,75],}

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches

    ax = kwargs.get('ax')
    ret = kwargs.get('ret')
    posit = kwargs.get('posit')
    data = kwargs.get('data')
    bodyPartBox = kwargs.get('bodyPartBox')
    invertY = kwargs.get('invertY')
    if invertY is None:
        invertY = True
    obj_color = kwargs.get('obj_color')
    if obj_color is None:
        obj_color = 'r'
    if ret is None:
        ret = True

    # Placeholder for the unused half of each row (circle vs. rectangle fields).
    null = 0
    fields = pd.DataFrame(columns=['fields','center_x','center_y', 'radius', 'a_x', 'a_y' , 'height', 'width'])
    circle = []
    rect = []
    if plot:
        # Estimate the environment box from the reference body part extremes.
        values = (data.iloc[2:,1:].values).astype(float)
        lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
        ax = plt.gca()
        esquerda = values[:,lista1.index(bodyPartBox+" - x")].min()
        direita = values[:,lista1.index(bodyPartBox+" - x")].max()
        baixo = values[:,lista1.index(bodyPartBox+" - y")].min()
        cima = values[:,lista1.index(bodyPartBox+" - y")].max()

    if posit is None:
        # Interactive mode: prompt the user for each field's geometry.
        for i in range(Fields):
            print('Enter the object type '+ str(i+1) + " (0 - circular, 1 - rectangular):")
            objectType = int(input())
            if objectType == 0:
                print('Enter the X value of the center of the field ' + str(i+1) + ':')
                centerX = int(input())
                print('Enter the Y value of the center of the field ' + str(i+1) + ':')
                centerY = int(input())
                print('Enter the radius value of the field ' + str(i+1) + ':')
                radius = int(input())
                circle.append(plt.Circle((centerX, centerY), radius, color=obj_color,fill = False))
                df2 = pd.DataFrame([[objectType, centerX, centerY,radius,null,null,null,null]], columns=['fields','center_x','center_y', 'radius', 'a_x', 'a_y' , 'height', 'width'])
            else:
                print('Enter the X value of the field\'s lower left vertex ' + str(i+1) + ':')
                aX = int(input())
                print('Enter the Y value of the field\'s lower left vertex ' + str(i+1) + ':')
                aY = int(input())
                print('Enter the field height value ' + str(i+1) + ':')
                height = int(input())
                print('Enter the field\'s width value ' + str(i+1) + ':')
                width = int(input())
                rect.append(patches.Rectangle((aX, aY), height, width, linewidth=1, edgecolor=obj_color, facecolor='none'))
                df2 = pd.DataFrame([[objectType, null,null, null ,aX,aY,height,width]], columns=['fields','center_x','center_y', 'radius', 'a_x', 'a_y' , 'height', 'width'])
            # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
            fields = pd.concat([fields, df2], ignore_index=True)
    else:
        # Dictionary mode: each entry already carries the 8 geometry values.
        for i,v in enumerate(posit):
            df2 = pd.DataFrame([[posit[v][0], posit[v][1], posit[v][2],posit[v][3],posit[v][4],posit[v][5],posit[v][6],posit[v][7]]],
                               columns=['fields','center_x','center_y', 'radius', 'a_x', 'a_y','height', 'width'])
            if posit[v][0] == 1:
                rect.append(patches.Rectangle((float(posit[v][4]),float(posit[v][5])), float(posit[v][6]), float(posit[v][7]), linewidth=1, edgecolor=obj_color, facecolor='none'))
            if posit[v][0] == 0:
                circle.append(plt.Circle((float(posit[v][1]),float(posit[v][2])), float(posit[v][3]), color=obj_color,fill = False))
            fields = pd.concat([fields, df2], ignore_index=True)

    if plot:
        # Draw the environment box and every created patch.
        ax.plot([esquerda,esquerda] , [baixo,cima],"k")
        ax.plot([esquerda,direita] , [cima,cima],"k")
        ax.plot([direita,direita] , [cima,baixo],"k")
        ax.plot([direita,esquerda] , [baixo,baixo],"k")
        if invertY:
            ax.invert_yaxis()
        for patch in circle:
            ax.add_patch(patch)
        for patch in rect:
            ax.add_patch(patch)

    if ret:
        return fields
649 |
def Interaction(data,bodyPart,fields,fps=30):
    """
    Performs the metrification of the interaction of the point of the determined
    body part and the marked area.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data (DLC-style: row 0 = body part names, row 1 =
        coordinate labels, rows 2+ = numeric values).
    bodyPart : str
        Body part you want use as reference.
    fields : pandas DataFrame
        The DataFrame with the coordinates of the created fields (output of FieldDetermination()).
    fps : int, optional
        The recording frames per second.

    Returns
    -------
    interactsDf: DataFrame
        DataFrame with all interactions. 0 = no interaction; 1 = first object; 2 = second object...
    interacts: list
        List with the interactions without processing.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import pandas as pd

    values = (data.iloc[2:,1:].values).astype(float)
    lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()

    dataX = values[:,lista1.index(bodyPart+" - x")]
    dataY = values[:,lista1.index(bodyPart+" - y")]

    numObjects = len(fields.index)
    interact = np.zeros(len(dataX))

    # Label every frame with the (1-based) index of the field it falls inside.
    for i in range(len(interact)):
        for j in range(numObjects):
            # BUG FIX: the object type was read from fields['fields'][0] for
            # every j, so all fields were classified by the FIRST row's type.
            if fields['fields'][j] == 0:
                # Circular field: inside when within the radius of the center.
                if ((dataX[i] - fields['center_x'][j])**2 + (dataY[i] - fields['center_y'][j])**2 <= fields['radius'][j]**2):
                    interact[i] = j +1
            else:
                # Rectangular field: inside the axis-aligned box.
                if fields['a_x'][j] <= dataX[i] <= (fields['a_x'][j] + fields['height'][j]) and fields['a_y'][j] <= dataY[i] <= (fields['a_y'][j] + fields['width'][j]):
                    interact[i] = j +1

    # Compress the per-frame labels into (start, end, obj) episodes in seconds.
    rows = []
    obj = 0
    start = 0
    end = 0

    for i in range(len(interact)):
        if obj != interact[i]:
            end = ((i-1)/fps)
            rows.append([start, end, obj])
            obj = interact[i]
            start = end

    start = end
    end = (len(interact)-1)/fps
    obj = interact[-1]
    rows.append([start, end, obj])
    # DataFrame.append was removed in pandas 2.0; build once from the row list.
    interactsDf = pd.DataFrame(rows, columns=['start','end','obj'])

    return interactsDf, interact
725 |
def Reports(df_list,list_name,bodypart,fields=None,filter=0.3,fps=30):
    """
    Produces a report of all data passed along the way with movement and interaction metrics in a
    given box space.

    Parameters
    ----------
    df_list : list
        List with all DataFrames.
    list_name : list
        List with names of each data.
    bodypart : str
        Body part you want use as reference.
    fields : pandas DataFrame
        The DataFrame with the coordinates of the created fields (output of FieldDetermination()).
    filter : float
        Threshold to remove motion artifacts. Adjust according to the tracking
        quality and speed of what is moving.
    fps : int
        The recording frames per second.

    Returns
    -------
    out : pandas DataFrame
        DataFrame with report of each data in one line.

    Examples
    --------
    DataFrame :
    >>>> columns = 'file','video time (min)', 'dist (cm)','speed (cm/s)','field','time_field'
    >>>> file = file name
    >>>> video time (min) = video duration
    >>>> dist (cm) = ditance traveled in centimeters
    >>>> speed (cm/s) = animal velocity in centimeters per second
    >>>> field = how many times did you interact with the field, organized by order (if fields != None)
    >>>> time_field = time spent in each field

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import pandas as pd
    import pyratlib as rat

    relatorio = pd.DataFrame(columns=['file','video time (min)','dist (cm)', 'speed (cm/s)'])

    # Add one count column and one time column per field.
    if fields is not None:
        for j in range(len(fields)):
            relatorio["field_{0}".format(j+1)] = []
            relatorio["time_field_{0}".format(j+1)] = []

    for i, _ in enumerate(df_list):
        lista = [list_name[i]]

        DF = rat.MotionMetrics(df_list[i], bodypart, filter=filter, fps=fps)

        time = DF.Time.iloc[-1]
        dist = DF.Distance.sum()
        vMedia = DF.Speed.mean()

        lista.append(time)
        lista.append(dist)
        lista.append(vMedia)
        if fields is not None:
            interacts,_ = rat.Interaction(df_list[i], bodypart, fields, fps = fps)
            # NOTE: use a distinct loop variable; the original shadowed the
            # outer `i` here.
            for j in range(len(fields)):
                lista.append(interacts["obj"][interacts["obj"] == j+1].count())
                lista.append((interacts["end"][interacts["obj"] == j+1]-interacts["start"][interacts["obj"] == j+1]).sum())
        relatorio_temp = pd.DataFrame([lista], columns=relatorio.columns)
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
        relatorio = pd.concat([relatorio, relatorio_temp], ignore_index=True)

    return relatorio
804 |
def DrawLine(x, y, angle, **kwargs):
    """
    Draws an arrow at (x, y) pointing along `angle`, used to indicate the
    orientation of the animal's head in a superior view. Used in the
    HeadOrientation() function.

    Parameters
    ----------
    x : float
        X axis coordinates.
    y : float
        Y axis coordinates.
    angle : float
        Angle in radians, output of the arctan2 function.
    ax : fig, optional
        Creates an 'axs' to be added to a figure created outside the role by the user.
    arrow_width : int, optional
        Determines the width of the arrow's body (default 2).
    head_width : int, optional
        Determines the width of the arrow head (default 7).
    arrow_color : str, optional
        Determines the arrow color.
    arrow_size : int, optional
        Determines the arrow size (default 10).

    Returns
    -------
    out : plot
        Arrow based on head coordinates.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import matplotlib.pyplot as plt

    axis = kwargs.get('ax')
    color = kwargs.get('arrow_color')
    body_width = kwargs.get('arrow_width')
    head = kwargs.get('head_width')
    length = kwargs.get('arrow_size')
    if body_width is None:
        body_width = 2
    if head is None:
        head = 7
    if length is None:
        length = 10

    # Decompose the arrow length along the given angle.
    dx = length * np.cos(angle)
    dy = length * np.sin(angle)

    if axis is None:
        return plt.arrow(x, y, dx, dy, width=body_width, head_width=head, fc=color)
    return axis.arrow(x, y, dx, dy, width=body_width, head_width=head, fc=color)
861 |
862 | def HeadOrientation(data, step, head = None, tail = None, **kwargs):
863 | """
864 | Plots the trajectory of the determined body part.
865 |
866 | Parameters
867 | ----------
868 | data : pandas DataFrame
869 | The input tracking data.
870 | step : int
871 | Step used in the data, will use a data point for each 'x' steps. The
872 | smaller the step, the greater the amount of arrows and the more difficult
873 | the interpretation.
874 | head : str
875 | Head coordinates to create the arrow. You can use data referring to another
876 | part of the body that you want to have as a reference for the line that will
877 | create the arrow. The angulation will be based on the arrow.
878 | tail : str
879 | Tail coordinates to create the arrow. You can use data referring to another
880 | part of the body that you want to have as a reference for the line that will
881 | create the arrow. The angulation will be based on the arrow.
882 | bodyPartBox : str, optional
883 | The body part you want to use to estimate the limits of the environment,
884 | usually the base of the tail is the most suitable for this determination.
885 | start : int, optional
886 | Moment of the video you want tracking to start, in seconds. If the variable
887 | is empty (None), the entire video will be processed.
888 | end : int, optional
889 | Moment of the video you want tracking to end, in seconds. If the variable is
890 | empty (None), the entire video will be processed.
891 | fps : int
892 | The recording frames per second.
893 | limit_boundaries : bool, optional.
894 | Limits the points to the box boundary.
895 | xLimMin : int, optional
896 | Determines the minimum size on the axis X.
897 | xLimMax : int, optional
898 | Determines the maximum size on the axis X.
899 | yLimMin : int, optional
900 | Determines the minimum size on the axis Y.
901 | yLimMax : int, optional
902 | Determines the maximum size on the axis Y.
903 | figureTitle : str, optional
904 | Figure title.
905 | hSize : int, optional
906 | Determine the figure height size (x).
907 | wSize : int, optional
908 | Determine the figure width size (y).
909 | fontsize : int, optional
910 | Determine of all font sizes.
911 | invertY : bool, optional
912 | Determine if de Y axis will be inverted (used for DLC output).
913 | saveName : str, optional
914 | Determine the save name of the plot.
915 | figformat : str, optional
916 | Determines the type of file that will be saved. Used as base the ".eps",
917 | which may be another supported by matplotlib.
918 | res : int, optional
919 | Determine the resolutions (dpi), default = 80.
920 | ax : fig, optional
921 | Creates an 'axs' to be added to a figure created outside the role by the user.
922 | arrow_width : int, optional
923 | Determines the width of the arrow's body.
924 | head_width : int, optional
925 | Determines the width of the arrow head.
926 | arrow_color : str, optional
927 | Determines the arrow color.
928 | arrow_size : int, optional
929 | Determines the arrow size.
930 |
931 | Returns
932 | -------
933 | out : plot
934 | The output of the function is the figure with the tracking plot of the
935 | selected body part.
936 |
937 | See Also
938 | --------
939 | For more information and usage examples: https://github.com/pyratlib/pyrat
940 |
941 | Notes
942 | -----
943 | This function was developed based on DLC outputs and is able to support
944 | matplotlib configurations."""
945 |
946 | import numpy as np
947 | import matplotlib.pyplot as plt
948 | import pyratlib as rat
949 |
950 | ax = kwargs.get('ax')
951 | start= kwargs.get('start')
952 | end= kwargs.get('end')
953 | figureTitle = kwargs.get('figureTitle')
954 | saveName = kwargs.get('saveName')
955 | hSize = kwargs.get('hSize')
956 | bodyPartBox = kwargs.get('bodyPartBox')
957 | arrow_color = kwargs.get('arrow_color')
958 | limit_boundaries = kwargs.get('limit_boundaries')
959 | xLimMin = kwargs.get('xLimMin')
960 | xLimMax = kwargs.get('xLimMax')
961 | yLimMin = kwargs.get('yLimMin')
962 | yLimMax = kwargs.get('yLimMax')
963 | if type(limit_boundaries) == type(None):
964 | limit_boundaries = False
965 | if type(bodyPartBox) == type(None):
966 | bodyPartBox = tail
967 | fps = kwargs.get('fps')
968 | if type(fps) == type(None):
969 | fps = 30
970 | res = kwargs.get('res')
971 | if type(res) == type(None):
972 | res = 80
973 | if type(hSize) == type(None):
974 | hSize = 6
975 | wSize = kwargs.get('wSize')
976 | if type(wSize) == type(None):
977 | wSize = 8
978 | fontsize = kwargs.get('fontsize')
979 | if type(fontsize) == type(None):
980 | fontsize = 15
981 | invertY = kwargs.get('invertY')
982 | if type(invertY) == type(None):
983 | invertY = True
984 | figformat = kwargs.get('figformat')
985 | if type(figformat) == type(None):
986 | figformat = '.eps'
987 | arrow_width = kwargs.get('arrow_width')
988 | if type(arrow_width) == type(None):
989 | arrow_width = 2
990 | head_width = kwargs.get('head_width')
991 | if type(head_width) == type(None):
992 | head_width = 7
993 | arrow_size = kwargs.get('arrow_size')
994 | if type(arrow_size) == type(None):
995 | arrow_size = 10
996 |
997 | values = (data.iloc[2:,1:].values).astype(float)
998 | lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
999 |
1000 | if type(start) == type(None):
1001 | tailX = values[:,lista1.index(tail+" - x")]
1002 | tailY = values[:,lista1.index(tail+" - y")]
1003 |
1004 | cervicalX = values[:,lista1.index(head+" - x")]
1005 | cervicalY = values[:,lista1.index(head+" - y")]
1006 | else:
1007 | init = int(start*fps)
1008 | finish = int(end*fps)
1009 |
1010 | tailX = values[:,lista1.index(tail+" - x")][init:finish]
1011 | tailY = values[:,lista1.index(tail+" - y")][init:finish]
1012 |
1013 | cervicalX = values[:,lista1.index(head+" - x")][init:finish]
1014 | cervicalY = values[:,lista1.index(head+" - y")][init:finish]
1015 |
1016 | boxX = values[:,lista1.index(bodyPartBox+" - x")]
1017 | boxY = values[:,lista1.index(bodyPartBox+" - y")]
1018 |
1019 | if type(bodyPartBox) == type(None):
1020 |
1021 | esquerda = xLimMin
1022 | direita = xLimMax
1023 | baixo = yLimMin
1024 | cima = yLimMax
1025 | else:
1026 |
1027 | esquerda = values[:,lista1.index(bodyPartBox+" - x")].min()
1028 | direita = values[:,lista1.index(bodyPartBox+" - x")].max()
1029 | baixo = values[:,lista1.index(bodyPartBox+" - y")].min()
1030 | cima = values[:,lista1.index(bodyPartBox+" - y")].max()
1031 |
1032 | if limit_boundaries:
1033 | testeX = []
1034 | for i in range(len(tailX)):
1035 | if tailX[i] >= direita:
1036 | testeX.append(direita)
1037 | elif tailX[i] <= esquerda:
1038 | testeX.append(esquerda)
1039 | else:
1040 | testeX.append(tailX[i])
1041 |
1042 | testeY = []
1043 | for i in range(len(tailY)):
1044 | if tailY[i] >= cima:
1045 | testeY.append(cima)
1046 | elif tailY[i] <= baixo:
1047 | testeY.append(baixo)
1048 | else:
1049 | testeY.append(tailY[i])
1050 | else:
1051 | testeX = tailX
1052 | testeY = tailY
1053 |
1054 | if limit_boundaries:
1055 | tX = []
1056 | for i in range(len(cervicalX)):
1057 | if cervicalX[i] >= direita:
1058 | tX.append(direita)
1059 | elif cervicalX[i] <= esquerda:
1060 | tX.append(esquerda)
1061 | else:
1062 | tX.append(cervicalX[i])
1063 |
1064 | tY = []
1065 | for i in range(len(cervicalY)):
1066 | if cervicalY[i] >= cima:
1067 | tY.append(cima)
1068 | elif cervicalY[i] <= baixo:
1069 | tY.append(baixo)
1070 | else:
1071 | tY.append(cervicalY[i])
1072 | else:
1073 | tX = cervicalX
1074 | tY = cervicalY
1075 |
1076 | rad = np.arctan2((np.asarray(tY) - np.asarray(testeY)),(np.asarray(tX) - np.asarray(testeX)))
1077 |
1078 | if type(ax) == type(None):
1079 | plt.figure(figsize=(wSize, hSize), dpi=res)
1080 | plt.title(figureTitle, fontsize=fontsize)
1081 | plt.gca().set_aspect('equal')
1082 |
1083 | if invertY == True:
1084 | plt.gca().invert_yaxis()
1085 |
1086 | plt.xlabel("X (px)",fontsize=fontsize)
1087 | plt.ylabel("Y (px)",fontsize=fontsize)
1088 | plt.xticks(fontsize = fontsize*0.8)
1089 | plt.yticks(fontsize = fontsize*0.8)
1090 |
1091 | for i in range(0,len(tailY),step):
1092 | rat.DrawLine(tX[i], tY[i], (rad[i]), ax = ax,arrow_color = arrow_color, arrow_size = arrow_size)
1093 |
1094 | plt.plot([esquerda,esquerda] , [baixo,cima],"k")
1095 | plt.plot([esquerda,direita] , [cima,cima],"k")
1096 | plt.plot([direita,direita] , [cima,baixo],"k")
1097 | plt.plot([direita,esquerda] , [baixo,baixo],"k")
1098 |
1099 | if type(saveName) != type(None):
1100 | plt.savefig(saveName+figformat)
1101 |
1102 | plt.show()
1103 |
1104 | else:
1105 | ax.set_aspect('equal')
1106 | for i in range(0,len(tailY),step):
1107 | rat.DrawLine(tX[i], tY[i], (rad[i]), ax =ax,arrow_color = arrow_color,arrow_size = arrow_size)
1108 | ax.plot([esquerda,esquerda] , [baixo,cima],"k")
1109 | ax.plot([esquerda,direita] , [cima,cima],"k")
1110 | ax.plot([direita,direita] , [cima,baixo],"k")
1111 | ax.plot([direita,esquerda] , [baixo,baixo],"k")
1112 | ax.set_title(figureTitle, fontsize=fontsize)
1113 | ax.tick_params(axis='both', which='major', labelsize=fontsize*0.8)
1114 | ax.set_xlabel("X (px)", fontsize = fontsize)
1115 | ax.set_ylabel("Y (px)", fontsize = fontsize)
1116 | if invertY == True:
1117 | ax.invert_yaxis()
1118 |
def SignalSubset(sig_data,freq,fields, **kwargs):
    """
    Extracts subsets (cuts) from electrophysiology data organized as a
    DataFrame with one channel per column.

    When `fields` is None, the cuts are taken from the user-supplied
    `start_time` / `end_time` lists; otherwise the time markers are read from
    the `fields` DataFrame (expected to follow the Interaction() output layout
    with 'obj', 'start' and 'end' columns).

    Parameters
    ----------
    sig_data : pandas DataFrame
        Electrophysiology data with the channels in columns (see LFP()).
    freq : int
        Sampling frequency of the electrophysiology data.
    fields : pandas DataFrame or None
        Event time markers (Interaction() output format), or None to use the
        start_time/end_time keyword lists instead.
    start_time : list, optional
        Start of each subset, in seconds. If None, each cut starts at the
        beginning of the recording.
    end_time : list, optional
        End of each subset, in seconds. If None, each cut runs to the end of
        the recording.

    Returns
    -------
    out : dict
        One entry per cut (or per field), each holding an inner dict that maps
        the channel position (0, 1, ...) to the sliced data of that channel.
        When `fields` is given and a field has several intervals, only the
        last interval of each channel is kept (matching the original
        behavior).

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    Developed for plexon electrophysiology recordings formatted with LFP().
    """
    t_start = kwargs.get('start_time')
    t_end = kwargs.get('end_time')

    subsets = {}

    if fields is None:
        # Number of cuts comes from whichever stamp list was provided.
        n_cuts = len(t_end) if t_start is None else len(t_start)
        for j in range(n_cuts):
            channels = {}
            for idx, col in enumerate(sig_data.columns):
                if t_start is None:
                    channels[idx] = sig_data[col][None:t_end[j]*freq]
                elif t_end is None:
                    channels[idx] = sig_data[col][t_start[j]*freq:None]
                else:
                    channels[idx] = sig_data[col][t_start[j]*freq:t_end[j]*freq]
                subsets[j] = channels
    else:
        # Collect the start/end stamps of every non-zero field marker.
        starts = []
        ends = []
        for marker in fields['obj'].unique():
            if marker != 0:
                starts.append(fields.start.loc[(fields.obj == marker)].values)
                ends.append(fields.end.loc[(fields.obj == marker)].values)

        n_fields = len(list(fields['obj'].unique())[1:])
        for j in range(n_fields):
            channels = {}
            for idx, col in enumerate(sig_data.columns):
                for k in range(len(starts[j])):
                    channels[idx] = sig_data[col][int(starts[j][k]*freq):int(ends[j][k]*freq)]
                subsets[j] = channels

    return subsets
1207 |
def LFP(data):
    """
    Extracts LFP data from a MATLAB (.mat) structure converted from a plexon
    file (.plx) and returns all channels arranged as columns of a DataFrame.

    Parameters
    ----------
    data : dict
        Contents of the .mat file (e.g. loaded with scipy.io.loadmat). Must
        contain the keys 'allad' (A/D channel data), 'adfreq' (sampling
        frequency) and 'adnames' (channel names).

    Returns
    -------
    out : pandas DataFrame
        A DataFrame with a 'Time' column followed by one column per LFP
        channel, named after 'adnames'.

    Raises
    ------
    ValueError
        If the number of A/D records is neither 192 nor 96 (the two plexon
        layouts this function knows about).

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was based on data from plexon electrophysiology recordings.
    """
    import numpy as np
    import pandas as pd  # previously relied on a module-level import; made explicit

    n_records = len(data['allad'][0])
    # The LFP channels sit in the upper block of the A/D records:
    # 192 records -> 64 channels starting at index 128,
    #  96 records -> 32 channels starting at index 64.
    if n_records == 192:
        first, n_channels = 128, 64
    elif n_records == 96:
        first, n_channels = 64, 32
    else:
        raise ValueError("Unsupported number of A/D records: %d" % n_records)

    freq = data['adfreq'][0][0]
    n_samples = len(data['allad'][0][first])
    time = np.arange(0, n_samples / freq, 1 / freq)

    values = np.zeros((n_samples, n_channels))
    column_name = []
    for j in range(n_channels):
        channel = first + j
        for r in range(n_samples):
            values[r][j] = data['allad'][0][channel][r]
        column_name.append(data['adnames'][channel])

    df = pd.DataFrame(values, columns=column_name, index=None)
    df.insert(0, "Time", time, True)

    return df
1257 |
def PlotInteraction(interactions, **kwargs):
    """
    Plots a horizontal bar with the interaction times of the determined body
    part with each field.

    Parameters
    ----------
    interactions : pandas DataFrame
        The DataFrame with the interactions of the fields (output of
        Interaction()), with columns 'obj', 'start' and 'end' (in seconds).
    barH : float, optional
        Bar height.
    start : int, optional
        Moment of the video you want the plot to start, in seconds. If the
        variable is empty (None), the entire video will be processed.
    end : int, optional
        Moment of the video you want the plot to end, in seconds. If the
        variable is empty (None), the entire video will be processed.
    fps : int, optional
        The recording frames per second. NOTE(review): accepted for API
        symmetry with the other plot functions, but currently unused here —
        interaction stamps are already in seconds.
    figureTitle : str, optional
        Figure title.
    hSize : int, optional
        Determines the figure height size.
    wSize : int, optional
        Determines the figure width size.
    fontsize : int, optional
        Determines all font sizes.
    saveName : str, optional
        Determines the save name of the plot.
    figformat : str, optional
        Determines the type of file that will be saved. Default is ".eps";
        any other format supported by matplotlib may be used.
    res : int, optional
        Determines the resolution (dpi), default = 80.
    ax : matplotlib Axes, optional
        An existing axes, created outside the function by the user, to draw on.
    aspect : str or float, optional
        Aspect ratio applied with Axes.set_aspect() when 'ax' is given
        (default 'equal').

    Returns
    -------
    out : plot
        The figure with the interaction times with fields.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations.
    """
    import matplotlib.pyplot as plt

    # Optional parameters and their defaults.
    saveName= kwargs.get('saveName')
    start= kwargs.get('start')
    end= kwargs.get('end')
    figureTitle = kwargs.get('figureTitle')
    fps = kwargs.get('fps')
    ax = kwargs.get('ax')
    aspect = kwargs.get('aspect')
    if type(aspect) == type(None):
        aspect = 'equal'
    if type(fps) == type(None):
        fps = 30
    hSize = kwargs.get('hSize')
    if type(hSize) == type(None):
        hSize = 2
    wSize = kwargs.get('wSize')
    if type(wSize) == type(None):
        wSize = 8
    fontsize = kwargs.get('fontsize')
    if type(fontsize) == type(None):
        fontsize = 15
    figformat = kwargs.get('figformat')
    if type(figformat) == type(None):
        figformat = '.eps'
    res = kwargs.get('res')
    if type(res) == type(None):
        res = 80
    barH = kwargs.get('barH')
    if type(barH) == type(None):
        barH = .5

    # Window of interest, in seconds.
    if type(start) == type(None):
        init = 0
        finish = interactions.end.iloc[-1]
    else:
        init = int(start)
        finish = int(end)

    # Per-field durations and start stamps restricted to [init, finish].
    times = []
    starts = []
    for i in range (int(interactions.obj.max())+1):
        times.append((interactions.end.loc[(interactions.obj == i) & (interactions.start >= init) & (interactions.start <= finish)])-(interactions.start.loc[(interactions.obj == i) & (interactions.start >= init) & (interactions.start <= finish)]).values)
        starts.append((interactions.start.loc[(interactions.obj == i) & (interactions.start >= init) & (interactions.start <= finish)]).values)

    barHeight = barH

    if type(ax) == type(None):
        # Standalone figure. Bug fix: `res` was parsed but never used; the
        # dpi is now honored like in the other plot functions.
        plt.figure(figsize=(wSize,hSize), dpi=res)
        # Field 0 means "no interaction" and is intentionally skipped.
        for i in range (1,int(interactions.obj.max())+1):
            plt.barh(0,times[i], left=starts[i], height = barHeight, label = "Field "+str(i))

        plt.title(figureTitle,fontsize=fontsize)
        plt.legend(ncol=int(interactions.obj.max()))
        plt.xlim(init, finish)
        plt.yticks([])
        plt.xticks(fontsize = fontsize*0.8)
        plt.xlabel("Time (s)",fontsize=fontsize)
        plt.ylim([-barHeight,barHeight])

        if type(saveName) != type(None):
            plt.savefig(saveName+figformat)

        plt.show()

    else:
        # Draw on the user-supplied axes.
        for i in range (1,int(interactions.obj.max())+1):
            ax.barh(0,times[i], left=starts[i], height = barHeight, label = "Field "+str(i))
        ax.set_title(figureTitle, fontsize=fontsize)
        # Bug fix: the original guard `if aspect == type(None):` compared a
        # value to the NoneType class and was always False, so set_aspect()
        # was dead code. `aspect` already defaults to 'equal' above.
        ax.set_aspect(aspect)
        ax.set_xlim([init, finish])
        ax.set_yticklabels([])
        ax.get_yaxis().set_visible(False)
        ax.tick_params(axis='x', labelsize=fontsize*.8)
        ax.tick_params(axis='y', labelsize=fontsize*.8)
        ax.legend(ncol=int(interactions.obj.max()),fontsize=fontsize*.8)
        ax.set_xlabel('Time (s)',fontsize=fontsize)

        ax.set_ylim([-barHeight,barHeight])
1389 |
def Blackrock(data_path, freq):
    """
    Loads a Blackrock recording (.ns2) and returns every LFP channel as a
    column of a pandas DataFrame, preceded by a 'Time' column.

    Parameters
    ----------
    data_path : path
        Str with the data path.
    freq : int
        Acquisition frequency, used to build the time axis.

    Returns
    -------
    out : pandas DataFrame
        One row per sample; columns are 'Time', 'Channel 1', ..., 'Channel N'.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat
    """
    from neo.io import BlackrockIO
    import numpy as np
    import pandas as pd

    segment = BlackrockIO(data_path).read_segment()
    signal = segment.analogsignals[0]

    n_samples = len(signal)
    n_channels = len(signal[0])

    # Time axis derived from the acquisition frequency.
    time = np.arange(0, n_samples / freq, 1 / freq)

    # Copy the signal sample-by-sample into a plain float matrix.
    values = np.zeros((n_samples, n_channels))
    for col in range(n_channels):
        for row in range(n_samples):
            values[row][col] = float(signal[row][col])

    channels = ['Channel ' + str(col + 1) for col in range(n_channels)]

    df = pd.DataFrame(values, columns=channels, index=None)
    df.insert(0, "Time", time, True)

    return df
1433 |
def SpacialNeuralActivity(neural_data, unit, bins=100, min_x=100):
    """
    Builds a 2D spatial map (heatmap) of summed spike counts over the pixel
    space visited by the animal.

    Parameters
    ----------
    neural_data : pandas DataFrame
        A dataframe with the positions of the rat in columns 'x', 'y' and the
        number of spikes for each unit in the remaining columns.
    unit : int or str
        The unit (column) to accumulate.
    bins : int, optional
        Number of grid steps per axis (default 100, the original hard-coded
        value).
    min_x : float, optional
        Positions with 'x' <= min_x are discarded before binning (default
        100, the original hard-coded tracking-artifact threshold).

    Returns
    -------
    out : heatmap (ndarray)
        A (bins, bins) matrix with the spike-triggered sums for the selected
        unit; heatmap[x, y] is indexed x-first, as in the original.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    Each bin is half-open ([low, high)), so points lying exactly on the
    maximum x or y are not captured and the last row/column stays zero.
    This quirk is kept for backward compatibility.
    This function was developed based on Fujisawa et al., 2008 data.
    """
    import numpy as np
    import pandas as pd

    # Drop positions at or left of the threshold (tracking artifacts).
    neural_data = neural_data.loc[ neural_data['x'] > min_x, : ]

    xmin, xmax = neural_data['x'].min(), neural_data['x'].max()
    ymin, ymax = neural_data['y'].min(), neural_data['y'].max()

    xsteps = np.linspace(xmin, xmax, num=bins)
    ysteps = np.linspace(ymin, ymax, num=bins)

    heatmap = np.zeros( (xsteps.shape[0], ysteps.shape[0]) )

    for x in range(xsteps.shape[0]-1):
        for y in range(ysteps.shape[0]-1):
            df_tmp = neural_data.loc[ (neural_data['x'] >= xsteps[x]) & (neural_data['x'] < xsteps[x+1]) &
                                      (neural_data['y'] >= ysteps[y]) & (neural_data['y'] < ysteps[y+1]), : ]
            heatmap[x, y] = df_tmp[unit].sum()

    return heatmap
1480 |
def IntervalBehaviors(cluster_labels, fps=30 , filter = 10, correction = 0):
    """
    Extracts the start/end time stamps (in seconds) of the frame intervals
    in which each behavioral cluster from ClassifyBehavior() is active. The
    resulting stamps can be fed to SignalSubset().

    Parameters
    ----------
    cluster_labels : ndarray
        Cluster label of each frame (output from ClassifyBehavior).
    fps : int, optional
        Video frames per second, used to convert frame indices to seconds.
    filter : int, optional
        The maximum gap (in frames) allowed inside a single interval; larger
        gaps split the cluster into separate intervals. (The name shadows the
        builtin `filter`; kept for backward compatibility.)
    correction : int, optional
        Frame offset added to every boundary, used to re-synchronize frames
        with the neural data when only part of the video was classified (same
        value as the 'startIndex' parameter of ClassifyBehavior()).

    Returns
    -------
    out : dict
        Maps each cluster label to a tuple (start_times, end_times), both
        lists of integer seconds.

    Notes
    -----
    The trailing run of frames of each cluster is never closed by a gap and
    is therefore not reported; kept for backward compatibility.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat
    """
    import numpy as np  # previously relied on a module-level import; made explicit

    intervals = {}
    for label in set(cluster_labels):

        frames = np.where(cluster_labels == label)[0]

        interval_ends = {}
        interval_starts = {}
        count = 0
        init = []
        end = []

        # Walk consecutive frame indices; a jump larger than `filter`
        # closes the current interval.
        for i in range(len(frames)-1):
            if frames[i+1] - frames[i] <= filter:
                count += 1
            if frames[i+1] - frames[i] > filter:
                interval_ends[i] = frames[i] + correction
                interval_starts[i] = frames[i] - count + correction
                count = 0

        # Perf fix: the original rebuilt list(zip(...)) on every loop
        # iteration (and several times per iteration), which was
        # accidentally quadratic. Build the pair list once.
        pairs = list(zip(interval_starts.values(), interval_ends.values()))
        for first, last in pairs:
            if first != last:  # discard zero-length intervals
                init.append(int(first/fps))
                end.append(int(last/fps))

        intervals[label] = (init, end)

    return intervals
1575 |
def TrajectoryMA(data,bodyPart,bodyPartBox = None, **kwargs):
    """
    Plots the trajectory of the determined body part.
    The input file MUST BE the .h5 format!

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data in h5/hdf format.
    bodyPart : str
        Body part you want to plot the tracking.
    bodyPartBox : str
        The body part you want to use to estimate the limits of the environment,
        usually the base of the tail is the most suitable for this determination.
    animals: list
        If you have multi-animal data and want to plot only one, just pass in this
        variable a list with the name of the designated animal (e.g. animals ['rat1']).
        This will allow you to plot using 'fig,axs' and plot the animals separately.
    start : int, optional
        Moment of the video you want tracking to start, in seconds. If the variable
        is empty (None), the entire video will be processed.
    end : int, optional
        Moment of the video you want tracking to end, in seconds. If the variable is
        empty (None), the entire video will be processed.
    fps : int
        The recording frames per second.
    cmapType : str, optional
        matplotlib colormap.
    figureTitle : str, optional
        Figure title.
    hSize : int, optional
        Determine the figure height size (x).
    wSize : int, optional
        Determine the figure width size (y).
    fontsize : int, optional
        Determine of all font sizes.
    invertY : bool, optional
        Determines if the Y axis will be inverted (used for DLC output).
    limit_boundaries : bool, optional.
        Limits the points to the box boundary.
    xLimMin : int, optional
        Determines the minimum size on the axis X.
    xLimMax : int, optional
        Determines the maximum size on the axis X.
    yLimMin : int, optional
        Determines the minimum size on the axis Y.
    yLimMax : int, optional
        Determines the maximum size on the axis Y.
    saveName : str, optional
        Determine the save name of the plot.
    figformat : str, optional
        Determines the type of file that will be saved. Used as base the ".eps",
        which may be another supported by matplotlib.
    res : int, optional
        Determine the resolutions (dpi), default = 80.
    ax : fig, optional
        Creates an 'axs' to be added to a figure created outside the role by the user.
    fig : fig, optional
        Creates an 'fig()' to be added to a figure created outside the role by the user.
    joint_plot : bool, optional
        If true it will plot all trajectories in a single plot, ideal for multi-animal
        tracking of several setups (eg. openfield). If false, will plot each animal
        separately.

    Returns
    -------
    out : plot
        The output of the function is the figure with the tracking plot of the
        selected body part.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    from matplotlib import cm
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    # --- Optional parameters and their defaults -------------------------
    ax = None
    saveName= kwargs.get('saveName')
    start= kwargs.get('start')
    end= kwargs.get('end')
    figureTitle = kwargs.get('figureTitle')
    fps = kwargs.get('fps')
    ax = kwargs.get('ax')
    limit_boundaries = kwargs.get('limit_boundaries')
    xLimMin = kwargs.get('xLimMin')
    xLimMax = kwargs.get('xLimMax')
    yLimMin = kwargs.get('yLimMin')
    yLimMax = kwargs.get('yLimMax')
    joint_plot = kwargs.get('joint_plot')
    animals = kwargs.get('animals')

    if type(limit_boundaries) == type(None):
        limit_boundaries = False
    fig = kwargs.get('fig')
    if type(fps) == type(None):
        fps = 30
    cmapType = kwargs.get('cmapType')
    if type(cmapType) == type(None):
        cmapType = 'viridis'
    hSize = kwargs.get('hSize')
    if type(hSize) == type(None):
        hSize = 6
    wSize = kwargs.get('wSize')
    if type(wSize) == type(None):
        wSize = 8
    bins = kwargs.get('bins')
    if type(bins) == type(None):
        bins = 30
    fontsize = kwargs.get('fontsize')
    if type(fontsize) == type(None):
        fontsize = 15
    invertY = kwargs.get('invertY')
    if type(invertY) == type(None):
        invertY = True
    figformat = kwargs.get('figformat')
    if type(figformat) == type(None):
        figformat = '.eps'
    res = kwargs.get('res')
    if type(res) == type(None):
        res = 80
    if type(joint_plot) == type(None):
        joint_plot = False


    # Default animal list: the scorer level is data.columns[0][0]; the next
    # level of the h5 MultiIndex holds the individual animal names.
    if type(animals) == type(None):
        animals = list(set([i[0] for i in list(set(data[data.columns[0][0]].columns))]))

    animals_data = {}

    # Per-animal accumulators. NOTE(review): when bodyPartBox is None the
    # boundary variables below are REBOUND to the scalar xLimMin/xLimMax/
    # yLimMin/yLimMax values, yet the clamping and plotting code indexes
    # them with [i]; that combination (bodyPartBox=None with
    # limit_boundaries=True or multiple animals) looks unsupported — confirm.
    c = []
    esquerda = []   # left boundary per animal
    direita = []    # right boundary per animal
    baixo = []      # bottom boundary per animal
    cima = []       # top boundary per animal
    x = []
    y = []

    cmap = plt.get_cmap(cmapType)

    for i,animal in enumerate(animals):
        # x / y / likelihood columns of the requested body part.
        temp = data[data.columns[0][0]][animal][bodyPart].iloc[:,0:3]
        temp = temp.dropna()

        # Slice by time window (seconds * fps -> frame indices).
        if type(start) == type(None) and type(end) == type(None):
            animals_data[animal] = ((temp['x'].values).astype(float),
                                    (temp['y'].values).astype(float),
                                    (temp['likelihood'].values).astype(float))
        else:
            # NOTE(review): start/end are indexed with [i], i.e. they are
            # expected to be per-animal lists here (unlike other functions
            # in this file where they are scalars) — confirm with callers.
            if type(start) == type(None):
                finish = int(end[i]*fps)
                animals_data[animal] = ((temp['x'][None:finish].values).astype(float),
                                        (temp['y'][None:finish].values).astype(float),
                                        (temp['likelihood'][None:finish].values).astype(float))
            elif type(end) == type(None):
                init = int(start[i]*fps)
                animals_data[animal] = ((temp['x'][init:None].values).astype(float),
                                        (temp['y'][init:None].values).astype(float),
                                        (temp['likelihood'][init:None].values).astype(float))
            else:
                init = int(start[i]*fps)
                finish = int(end[i]*fps)
                animals_data[animal] = ((temp['x'][init:finish].values).astype(float),
                                        (temp['y'][init:finish].values).astype(float),
                                        (temp['likelihood'][init:finish].values).astype(float))

        # Environment boundaries: explicit limits, or the extent of this
        # animal's own bodyPartBox-free data (min/max of the trajectory).
        if type(bodyPartBox) == type(None):
            c.append(np.linspace(0, animals_data[animal][0].size/fps, animals_data[animal][0].size))
            esquerda = xLimMin
            direita = xLimMax
            baixo = yLimMin
            cima = yLimMax
        else:
            c.append(np.linspace(0, animals_data[animal][0].size/fps, animals_data[animal][0].size))
            esquerda.append(animals_data[animal][0].min())
            direita.append(animals_data[animal][0].max())
            baixo.append(animals_data[animal][1].min())
            cima.append(animals_data[animal][1].max())

        x_temp = []
        y_temp = []

        # Clamp points to the box boundaries when requested.
        if limit_boundaries:
            for ç in range(len(animals_data[animal][0])):
                if animals_data[animal][0][ç] >= direita[i]:
                    x_temp.append(direita[i])
                elif animals_data[animal][0][ç] <= esquerda[i]:
                    x_temp.append(esquerda[i])
                else:
                    x_temp.append(animals_data[animal][0][ç])

            for ç in range(len(animals_data[animal][1])):
                if animals_data[animal][1][ç] >= cima[i]:
                    y_temp.append(cima[i])
                elif animals_data[animal][1][ç] <= baixo[i]:
                    y_temp.append(baixo[i])
                else:
                    y_temp.append(animals_data[animal][1][ç])

            x.append(x_temp)
            y.append(y_temp)

        else:
            x.append(animals_data[animal][0])
            y.append(animals_data[animal][1])

    if type(ax) == type(None):
        if joint_plot:
            # One standalone figure with every animal overlaid; the colorbar
            # encodes time (seconds) along each trajectory.
            plt.figure(figsize=(wSize, hSize), dpi=res)
            plt.title(figureTitle, fontsize=fontsize)
            for i,animal in enumerate(animals):
                plt.scatter(x[i], y[i], c=c[i], cmap=cmap, s=3)
                plt.plot([esquerda[i],esquerda[i]] , [baixo[i],cima[i]],"k")
                plt.plot([esquerda[i],direita[i]] , [cima[i],cima[i]],"k")
                plt.plot([direita[i],direita[i]] , [cima[i],baixo[i]],"k")
                plt.plot([direita[i],esquerda[i]] , [baixo[i],baixo[i]],"k")
            cb = plt.colorbar()

            if invertY == True:
                plt.gca().invert_yaxis()
            cb.set_label('Time (s)',fontsize=fontsize)
            cb.ax.tick_params(labelsize=fontsize*0.8)
            plt.xlabel("X (px)",fontsize=fontsize)
            plt.ylabel("Y (px)",fontsize=fontsize)
            plt.xticks(fontsize = fontsize*0.8)
            plt.yticks(fontsize = fontsize*0.8)

            if type(saveName) != type(None):
                plt.savefig(saveName+figformat)

            plt.show()

        else:

            # One standalone figure per animal, cropped to its own box.
            for i,animal in enumerate(animals):
                plt.figure(figsize=(wSize, hSize), dpi=res)
                plt.title(str(animal), fontsize=fontsize)
                plt.scatter(x[i], y[i], c=c[i], cmap=cmap, s=3)
                plt.plot([esquerda[i],esquerda[i]] , [baixo[i],cima[i]],"k")
                plt.plot([esquerda[i],direita[i]] , [cima[i],cima[i]],"k")
                plt.plot([direita[i],direita[i]] , [cima[i],baixo[i]],"k")
                plt.plot([direita[i],esquerda[i]] , [baixo[i],baixo[i]],"k")
                cb = plt.colorbar()

                if invertY == True:
                    plt.gca().invert_yaxis()
                cb.set_label('Time (s)',fontsize=fontsize)
                cb.ax.tick_params(labelsize=fontsize*0.8)
                plt.xlabel("X (px)",fontsize=fontsize)
                plt.ylabel("Y (px)",fontsize=fontsize)
                plt.xticks(fontsize = fontsize*0.8)
                plt.yticks(fontsize = fontsize*0.8)
                plt.xlim((esquerda[i],direita[i]))
                plt.ylim((baixo[i],cima[i]))

                if type(saveName) != type(None):
                    plt.savefig(saveName+figformat)

                plt.show()

    else:
        if joint_plot:

            # Draw every animal on the user-supplied axes; the colorbar is
            # attached to the last scatter and requires `fig` to be passed.
            ax.set_title(figureTitle, fontsize=fontsize)
            for i,animal in enumerate(animals):
                plot = ax.scatter(x[i], y[i], c=c[i], cmap=cmap, s=3)
                ax.plot([esquerda[i],esquerda[i]] , [baixo[i],cima[i]],"k")
                ax.plot([esquerda[i],direita[i]] , [cima[i],cima[i]],"k")
                ax.plot([direita[i],direita[i]] , [cima[i],baixo[i]],"k")
                ax.plot([direita[i],esquerda[i]] , [baixo[i],baixo[i]],"k")

            if invertY == True:
                ax.invert_yaxis()
            divider = make_axes_locatable(ax)
            cax = divider.append_axes('right',size='5%', pad=0.05)
            cb = fig.colorbar(plot,cax=cax)
            cb.set_label(label='Time (s)', fontsize=fontsize)
            cb.ax.tick_params(labelsize=fontsize*0.8)
            ax.set_xlabel("X (px)", fontsize = fontsize)
            ax.set_ylabel("Y (px)", fontsize = fontsize)
            cb.ax.tick_params(labelsize=fontsize*0.8)

            if type(saveName) != type(None):
                plt.savefig(saveName+figformat)

            plt.show()

        else:

            # NOTE(review): this branch passes the whole per-animal lists
            # x, y, c (lists of arrays) and the boundary LISTS (no [i]
            # indexing) straight to matplotlib; it appears to work only for
            # a single animal / when 'animals' has one entry — confirm.
            ax.set_aspect('equal')
            plot = ax.scatter(x, y, c=c, cmap=cmap, s=3)
            ax.plot([esquerda,esquerda] , [baixo,cima],"k")
            ax.plot([esquerda,direita] , [cima,cima],"k")
            ax.plot([direita,direita] , [cima,baixo],"k")
            ax.plot([direita,esquerda] , [baixo,baixo],"k")
            ax.tick_params(axis='both', which='major', labelsize=fontsize*0.8)
            ax.set_title(figureTitle, fontsize=fontsize)
            ax.set_xlabel("X (px)", fontsize = fontsize)
            ax.set_ylabel("Y (px)", fontsize = fontsize)

            divider = make_axes_locatable(ax)
            cax = divider.append_axes('right',size='5%', pad=0.05)
            cb = fig.colorbar(plot,cax=cax)
            cb.ax.tick_params(labelsize=fontsize*0.8)
            cb.set_label(label='Time (s)', fontsize=fontsize)

            if invertY == True:
                ax.invert_yaxis()
1891 |
def splitMultiAnimal(data,data_type = '.h5',**kwargs):
    """
    This function is not intended for the end user. _splitMultiAnimal performs
    the extraction of information from the hdf of multi-animals from the DLC to
    separate them into different DataFrames. Its output is a dictionary with
    this data.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data in h5/hdf/csv format (multi animal data).
    data_type : str
        Determine if the data format from DLC is in '.h5' or '.csv' format.
    bodyParts : list, optional
        Body parts to extract. Defaults to every body part found in the data.
    animals : list, optional
        If you have multi-animal data and want to extract only specific animals,
        just pass in this variable a list with the name of the designated animal
        (e.g. animals ['rat1']).
    start : list of int, optional
        Moment of the video you want tracking to start, in seconds, one entry
        per animal (the code indexes start[i] for animal i). If the variable
        is empty (None), the entire video will be processed.
    end : list of int, optional
        Moment of the video you want tracking to end, in seconds, one entry per
        animal. If the variable is empty (None), the entire video will be
        processed.
    fps : int
        The recording frames per second (defaults to 30).

    Returns
    -------
    out : dict
        The output of this function is a dictionary with the data of each
        animal present in the HDF. Each value is itself a dict mapping a body
        part name to a (x, y, likelihood) tuple of float arrays.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC multianimal output."""

    import numpy as np
    import pandas as pd

    animals = kwargs.get('animals')
    bodyParts = kwargs.get('bodyParts')
    start= kwargs.get('start')
    end= kwargs.get('end')
    fps= kwargs.get('fps')

    if type(fps) == type(None):
        fps = 30

    animals_data = {}

    if data_type == '.h5':
        # data.columns[0][0] is the DLC "scorer" level; below it the columns
        # form an (animal, bodypart, coord) hierarchy.
        if type(animals) == type(None):
            animals = list(set([i[0] for i in list(set(data[data.columns[0][0]].columns))]))

        if type(bodyParts) == type(None):
            bodyParts = list(set([i[1] for i in list(set(data[data.columns[0][0]].columns))]))

        for i,animal in enumerate(animals):
            parts = {}
            for ç,bodyPart in enumerate(bodyParts):
                # First three columns of each body part are x, y, likelihood.
                temp = data[data.columns[0][0]][animal][bodyPart].iloc[:,0:3]

                if type(start) == type(None) and type(end) == type(None):
                    parts[bodyPart] = ((temp['x'].values).astype(float),
                                       (temp['y'].values).astype(float),
                                       (temp['likelihood'].values).astype(float))
                else:
                    # start/end are given in seconds; convert to frame indices
                    # with the recording fps before slicing.
                    if type(start) == type(None):
                        finish = int(end[i]*fps)
                        parts[bodyPart] = ((temp['x'][None:finish].values).astype(float),
                                           (temp['y'][None:finish].values).astype(float),
                                           (temp['likelihood'][None:finish].values).astype(float))
                    elif type(end) == type(None):
                        init = int(start[i]*fps)
                        parts[bodyPart] = ((temp['x'][init:None].values).astype(float),
                                           (temp['y'][init:None].values).astype(float),
                                           (temp['likelihood'][init:None].values).astype(float))
                    else:
                        init = int(start[i]*fps)
                        finish = int(end[i]*fps)
                        parts[bodyPart] = ((temp['x'][init:finish].values).astype(float),
                                           (temp['y'][init:finish].values).astype(float),
                                           (temp['likelihood'][init:finish].values).astype(float))

            animals_data[animal] = parts

    if data_type == '.csv':
        # Flatten the two header rows into single "animal bodypart" labels.
        header = [(data[i][0]+' '+data[i][1]) for i in data.columns]
        data.columns = header
        if type(animals) == type(None):
            animals = list(set(data.iloc[0][1:]))
        if type(bodyParts) == type(None):
            bodyParts = list(set(data.iloc[1][1:]))

        for i,animal in enumerate(animals):
            parts = {}
            for ç,bodyPart in enumerate(bodyParts):
                # Rows 0-2 are header rows; numeric data starts at row 3.
                # Columns 0/1/2 of each "animal bodypart" group are x/y/likelihood.
                if type(start) == type(None) and type(end) == type(None):
                    parts[bodyPart] = ((data[animal+' '+bodyPart].iloc[3:,0]).values.astype(float),
                                       (data[animal+' '+bodyPart].iloc[3:,1]).values.astype(float),
                                       (data[animal+' '+bodyPart].iloc[3:,2]).values.astype(float))
                else:
                    if type(start) == type(None):
                        finish = int(end[i]*fps)
                        parts[bodyPart] = ((data[animal+' '+bodyPart].iloc[3:,0][None:finish]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,1][None:finish]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,2][None:finish]).values.astype(float))
                    elif type(end) == type(None):
                        init = int(start[i]*fps)
                        parts[bodyPart] = ((data[animal+' '+bodyPart].iloc[3:,0][init:None]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,1][init:None]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,2][init:None]).values.astype(float))
                    else:
                        init = int(start[i]*fps)
                        finish = int(end[i]*fps)
                        parts[bodyPart] = ((data[animal+' '+bodyPart].iloc[3:,0][init:finish]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,1][init:finish]).values.astype(float),
                                           (data[animal+' '+bodyPart].iloc[3:,2][init:finish]).values.astype(float))

            animals_data[animal] = parts

    return animals_data
2020 |
def multi2single(data,animal,data_type = '.h5',**kwargs):
    """
    This function is used to remove information from a single animal from the
    h5/hdf file of a DLC multi-animal analysis. The main purpose of this
    function is to facilitate data analysis, by returning a DataFrame that can
    be used as input in all PyRAT functions, without the need for any adaptation.
    WARNING: If you run this function and found 'KeyError: 0', just read the data
    again (pd.read_csv(data)).

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data in h5/hdf format (multi animal data).
    data_type : str
        Determine if the data format from DLC is in h5 or csv format.
    animal : str
        The key of the animal you want to extract from the hdf file.
        The same name used to label the DLC.
    drop : bool, optional
        If true, will drop the NaN values in the DataFrame.
    bodyParts : list, optional
        Body parts to keep; forwarded to splitMultiAnimal.
    start : list of int, optional
        Moment of the video you want tracking to start, in seconds, one entry
        per animal. If the variable is empty (None), the entire video will be
        processed.
    end : list of int, optional
        Moment of the video you want tracking to end, in seconds, one entry per
        animal. If the variable is empty (None), the entire video will be
        processed.
    fps : int
        The recording frames per second.

    Returns
    -------
    out : DataFrame
        The output of this function is a DataFrame with the data of the animal
        passed in the input, laid out like a single-animal DLC export (two
        header rows followed by the numeric data).

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC multianimal output."""

    import numpy as np
    import pandas as pd
    import pyratlib as rat

    animals = kwargs.get('animals')
    bodyParts = kwargs.get('bodyParts')
    start= kwargs.get('start')
    end= kwargs.get('end')
    fps= kwargs.get('fps')
    drop= kwargs.get('drop')

    if type(drop) == type(None):
        drop = False

    # Split the multi-animal file into per-animal dicts, then keep only `animal`.
    data = rat.splitMultiAnimal(data,
                                data_type=data_type,
                                animals=animals,
                                bodyParts=bodyParts,
                                start=start,
                                end=end,
                                fps=fps)

    # Three columns (x, y, likelihood) per body part; column labels are
    # synthetic "<animal>.<index>" names to keep them unique.
    parts = list(np.repeat(list(data[animal].keys()),3))
    coord = ['x','y','likelihood']*len(list(data[animal].keys()))
    header = ["{}.{}".format(animal, ç) for ç in range(len(parts))]

    # Two DLC-style header rows: body-part names and coordinate names.
    df_header = pd.DataFrame([list([-1]+parts),
                              list([0]+coord)],
                             columns=['coords']+header)

    count = 0
    temp_dict = {}

    # For part ç, count == 2*ç, so the three columns land at indices
    # 3ç, 3ç+1, 3ç+2 — i.e. x, y, likelihood in order.
    for ç,part in enumerate(list(data[animal].keys())):
        temp = {header[ç+count]: data[animal][part][0],
                header[ç+1+count]: data[animal][part][1],
                header[ç+2+count]: data[animal][part][2],}
        count +=2
        temp_dict.update(temp)

    df_temp = pd.DataFrame(temp_dict)

    # Stack header rows on top of the numeric data and renumber 'coords'.
    df = pd.concat([df_header, df_temp], ignore_index=True)
    df['coords'] = np.arange(0,len(df['coords']),1)

    if drop:
        df = df.dropna()

    return df
2115 |
def distance_metrics(data, bodyparts_list,distance=28):
    """
    Returns the frame-by-frame pairwise distances between the body parts,
    each pair normalized by its maximum over all frames.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data (DLC layout: row 0 holds body-part names,
        row 1 holds the coordinate names, numeric values start at row 2).
    bodyparts_list : list
        List with name of body parts.
    distance : int
        Unused by this function; kept for interface compatibility with the
        clustering helpers that share this signature.

    Returns
    -------
    dist : list of numpy arrays
        One entry per frame, holding the upper-triangle (unique-pair)
        normalized distances between body parts.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np

    # Numeric tracking values start at row 2, column 1.
    values = (data.iloc[2:,1:].values).astype(float)
    # Column labels such as "nose - x", built from the two header rows.
    lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
    bodyparts = []
    for i in range(len(bodyparts_list)):
        bodyparts.append(np.concatenate(((values[:,lista1.index(bodyparts_list[i]+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bodyparts_list[i]+" - y")]).reshape(1,-1).T), axis=1))
    # Full pairwise distance matrix per frame.
    distances = []
    for k in range(len(bodyparts[0])):
        frame_distances = []
        for i in range(len(bodyparts)):
            distance_row = []
            for j in range(len(bodyparts)):
                distance_row.append(np.linalg.norm(bodyparts[i][k] - bodyparts[j][k]))
            frame_distances.append(distance_row)
        distances.append(frame_distances)
    distances2 = np.asarray(distances)
    # Normalize every body-part pair by its maximum across frames.
    # BUG FIX: the original inner loop iterated over `k` but indexed with a
    # stale `j` left over from the loop above, so only one column was ever
    # normalized; it also divided the all-zero diagonal by zero.
    for i in range(len(bodyparts)):
        for j in range(len(bodyparts)):
            pair_max = np.max(distances2[:, i, j])
            if pair_max > 0:  # the diagonal (self-distance) is all zeros
                distances2[:, i, j] = distances2[:, i, j]/pair_max
    # Keep only the upper triangle (unique pairs) for each frame.
    dist = []
    for i in range(distances2.shape[0]):
        dist.append(distances2[i, np.triu_indices(len(bodyparts), k = 1)[0], np.triu_indices(len(bodyparts), k = 1)[1]])

    return dist
2168 |
def model_distance(dimensions = 2,distance=28,n_jobs=None,verbose=None, perplexity=None,learning_rate=None):
    """
    Builds the pair of (unfitted) estimators used by the behaviour
    classification helpers: an AgglomerativeClustering model cut at a fixed
    linkage distance, and a t-SNE embedder for projecting the same data into
    a low-dimensional space.

    Parameters
    ----------
    dimensions : int
        Dimension of the embedded space.
    distance : int
        The linkage distance threshold above which, clusters will not be merged.
    n_jobs : int, optional
        The number of parallel jobs to run for neighbors search.
    verbose : int, optional
        Verbosity level.
    perplexity : float, optional
        Related to the number of nearest neighbors used by the manifold
        learner; larger datasets usually require a larger perplexity.
    learning_rate : float, optional
        t-SNE learning rate.

    Returns
    -------
    model : Obj
        AgglomerativeClustering model.
    embedding : Obj
        TSNE embedding.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    from sklearn.cluster import AgglomerativeClustering
    from sklearn.manifold import TSNE

    # Cluster count is driven by the distance threshold, not fixed up front.
    clusterer = AgglomerativeClustering(n_clusters=None,
                                        distance_threshold=distance)

    # Fixed random_state / n_iter / init keep the projection reproducible.
    tsne_settings = {
        'n_components': dimensions,
        'n_jobs': n_jobs,
        'verbose': verbose,
        'perplexity': perplexity,
        'random_state': 42,
        'n_iter': 5000,
        'learning_rate': learning_rate,
        'init': 'pca',
        'early_exaggeration': 12,
    }
    projector = TSNE(**tsne_settings)

    return clusterer, projector
2221 |
def ClassifyBehaviorMultiVideos(data, bodyparts_list, dimensions = 2,distance=28, **kwargs):
    """
    Returns an array with the cluster by frame, an array with the embedding data in low-dimensional
    space and the clusterization model.

    Parameters
    ----------
    data : dict with DataFrames
        The input tracking data concatenated, one DataFrame per video.
    bodyparts_list : list
        List with name of body parts.
    dimensions : int
        Dimension of the embedded space.
    distance : int
        The linkage distance threshold above which, clusters will not be merged.
    n_jobs : int, optional
        The number of parallel jobs to run for neighbors search.
    verbose : int, optional
        Verbosity level.
    perplexity : float, optional
        The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
    learning_rate : float, optional
        t-SNE learning rate.

    Returns
    -------
    cluster_df : df
        Array with the cluster by frame/video.
    cluster_coord : DataFrame
        Embedding of the training data in low-dimensional space.
    fitted_model : Obj
        AgglomerativeClustering model.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""

    import numpy as np
    import pandas as pd
    import pyratlib as rat
    from sklearn.preprocessing import StandardScaler

    n_jobs = kwargs.get('n_jobs')
    verbose = kwargs.get('verbose')
    perplexity = kwargs.get("perplexity")
    learning_rate = kwargs.get("learning_rate")

    # Defaults are derived from the size of the first video's DataFrame.
    if type(n_jobs) == type(None):
        n_jobs=-1
    if type(verbose) == type(None):
        verbose=0
    if type(perplexity) == type(None):
        perplexity = data[next(iter(data))].shape[0]//100
    if type(learning_rate) == type(None):
        learning_rate = (data[next(iter(data))].shape[0]//12)/4

    distancias = {}
    dist_scaled = {}
    cluster_labels = {}
    distance_df = {}

    model,embedding = rat.model_distance(dimensions = dimensions,
                                         distance=distance,
                                         n_jobs=n_jobs,
                                         verbose=verbose,
                                         perplexity=perplexity,
                                         learning_rate=learning_rate)


    # Per-video pairwise distance features, standardized independently.
    for i,video in enumerate(data):
        dist_temp = np.asarray(rat.distance_metrics(data[video],
                                                    bodyparts_list=bodyparts_list,
                                                    distance=distance))
        distancias[video] = dist_temp
        dist_scaled[video] = StandardScaler().fit_transform(distancias[video])


    # Fit the embedding and the clustering on all videos stacked together.
    dist_scaled_all = np.concatenate([dist_scaled[x] for x in dist_scaled], 0)
    X_transformed = embedding.fit_transform(dist_scaled_all)
    fitted_model = model.fit(dist_scaled_all)
    cluster_labels_all = model.labels_


    # Slice the concatenated label array back into per-video label arrays,
    # relying on dict iteration order matching the concatenation order above.
    for i,video in enumerate(data):
        if i == 0:
            cluster_labels[video] = cluster_labels_all[0:dist_scaled[video].shape[0]]
            index0 = dist_scaled[video].shape[0]
        else:
            cluster_labels[video] = cluster_labels_all[index0:(index0+dist_scaled[video].shape[0])]
            index0 = index0+dist_scaled[video].shape[0]


    # Embedding coordinates plus one column per scaled distance feature.
    cluster_coord = pd.DataFrame.from_dict({ 'x_n_samples':X_transformed[:,0],'y_n_components':X_transformed[:,1] })
    distance_df = pd.DataFrame.from_dict({'distance '+str(dist): dist_scaled_all[:,dist] for dist in range(dist_scaled_all.shape[1])})
    cluster_coord[distance_df.columns] = distance_df
    cluster_df = pd.DataFrame.from_dict(cluster_labels)


    return cluster_df, cluster_coord ,fitted_model
2326 |
def dendrogram(model, **kwargs):
    """
    Plots a dendrogram for a fitted AgglomerativeClustering model.

    Parameters
    ----------
    model : Obj
        Fitted sklearn AgglomerativeClustering model; must expose
        `children_`, `labels_` and `distances_`.
    **kwargs
        Forwarded to scipy.cluster.hierarchy.dendrogram, overriding the
        defaults used here (truncate_mode="level", p=3, leaf_rotation=90,
        leaf_font_size=10). Previously these keyword arguments were
        silently ignored.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat
    """
    # BUG FIX: `np` was used below without a local import; every other
    # function in this file imports its dependencies locally.
    import numpy as np
    # Alias so the scipy helper does not shadow this function's own name.
    from scipy.cluster.hierarchy import dendrogram as scipy_dendrogram

    # Count the number of original samples under each merge node.
    counts = np.zeros(model.children_.shape[0])
    n_samples = len(model.labels_)

    for i, merge in enumerate(model.children_):
        current_count = 0
        for child_idx in merge:
            if child_idx < n_samples:
                current_count += 1  # leaf node
            else:
                current_count += counts[child_idx - n_samples]
        counts[i] = current_count

    # scipy linkage row format: [child_a, child_b, merge_distance, n_samples].
    linkage_matrix = np.column_stack([model.children_, model.distances_, counts]).astype(float)

    options = dict(truncate_mode="level", p=3, leaf_rotation=90, leaf_font_size=10)
    options.update(kwargs)
    scipy_dendrogram(linkage_matrix, **options)
2345 |
def ClassifyBehavior(data,video, bodyparts_list, dimensions = 2,distance=28,**kwargs):
    """
    Returns an array with the cluster by frame, an array with the embedding data in low-dimensional
    space and the clusterization model.

    Parameters
    ----------
    data : pandas DataFrame
        The input tracking data.
    video : str
        Video directory
    bodyparts_list : list
        List with name of body parts.
    dimensions : int
        Dimension of the embedded space.
    distance : int
        The linkage distance threshold above which, clusters will not be merged.
    startIndex : int, optional
        Initial index.
    endIndex : int, optional
        Last index.
    n_jobs : int, optional
        The number of parallel jobs to run for neighbors search.
    verbose : int, optional
        Verbosity level.
    perplexity : float, optional
        The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
    learning_rate : float, optional
        t-SNE learning rate.
    directory : str, optional
        Path where frame images will be saved.
    return_metrics : bool, optional
        Where True, returns t-SNE metrics, otherwise does not return t-SNE metrics.
    knn_n_neighbors : int, optional
        Number of neighbors to use by default for kneighbors queries in KNN metric.
    knc_n_neighbors : int, optional
        Number of neighbors to use by default for kneighbors queries in KNC metric.
    n : int, optional
        Number of N randomly chosen points in CPD metric.
    Returns
    -------
    cluster_labels : array
        Array with the cluster by frame.
    X_transformed : array
        Embedding of the training data in low-dimensional space.
    model : Obj
        AgglomerativeClustering model.
    d : array
        High dimension data.
    knn : int, optional
        The fraction of k-nearest neighbours in the original highdimensional data that are preserved as k-nearest neighbours in the embedding.
    knc : int, optional
        The fraction of k-nearest class means in the original data that are preserved as k-nearest class means in the embedding. This is computed for class means only and averaged across all classes.
    cpd : Obj, optional
        Spearman correlation between pairwise distances in the high-dimensional space and in the embedding.

    See Also
    --------
    For more information and usage examples: https://github.com/pyratlib/pyrat

    Notes
    -----
    This function was developed based on DLC outputs and is able to support
    matplotlib configurations."""
    from sklearn.manifold import TSNE
    from sklearn.cluster import AgglomerativeClustering
    from sklearn.preprocessing import StandardScaler
    import os
    import cv2
    import matplotlib.pyplot as plt
    from scipy.cluster.hierarchy import dendrogram
    import numpy as np
    from sklearn.neighbors import NearestNeighbors
    from scipy import stats
    startIndex = kwargs.get('startIndex')
    endIndex = kwargs.get('endIndex')
    n_jobs = kwargs.get('n_jobs')
    verbose = kwargs.get('verbose')
    perplexity = kwargs.get("perplexity")
    learning_rate = kwargs.get("learning_rate")
    directory = kwargs.get("directory")
    return_metrics = kwargs.get("return_metrics")
    knn_n_neighbors = kwargs.get("knn_n_neighbors")
    knc_n_neighbors = kwargs.get("knc_n_neighbors")
    n = kwargs.get("n")
    if type(startIndex) == type(None):
        startIndex = 0
    if type(endIndex) == type(None):
        endIndex = data.shape[0]-3
    if type(n_jobs) == type(None):
        n_jobs=-1
    if type(verbose) == type(None):
        verbose=0
    if type(perplexity) == type(None):
        perplexity = data[startIndex:endIndex].shape[0]//100
    if type(learning_rate) == type(None):
        learning_rate = (data[startIndex:endIndex].shape[0]//12)/4
    if type(directory) == type(None):
        directory = os.getcwd()
    if type(return_metrics) == type(None):
        # BUG FIX: original had `return_metrics == 0`, a no-op comparison
        # instead of an assignment (behavior is unchanged because None also
        # fails the `== 1` test below, but the intent is now explicit).
        return_metrics = 0

    directory=directory+os.sep+"images"
    try:
        os.makedirs(directory)
    except FileExistsError:
        pass

    # Numeric tracking values start at row 2, column 1; the two header rows
    # hold body-part names and coordinate names.
    values = (data.iloc[2:,1:].values).astype(float)
    lista1 = (data.iloc[0][1:].values +" - " + data.iloc[1][1:].values).tolist()
    bodyparts = []
    for i in range(len(bodyparts_list)):
        bodyparts.append(np.concatenate(((values[:,lista1.index(bodyparts_list[i]+" - x")]).reshape(1,-1).T,(values[:,lista1.index(bodyparts_list[i]+" - y")]).reshape(1,-1).T), axis=1))
    # Full pairwise distance matrix per frame.
    distances = []
    for k in range(len(bodyparts[0])):
        frame_distances = []
        for i in range(len(bodyparts)):
            distance_row = []
            for j in range( len(bodyparts) ):
                distance_row.append(np.linalg.norm(bodyparts[i][k] - bodyparts[j][k]))
            frame_distances.append(distance_row)
        distances.append(frame_distances)
    distances2 = np.asarray(distances)
    # Normalize every body-part pair by its maximum across frames.
    # BUG FIX: the original inner loop iterated over `k` but indexed with a
    # stale `j`, so only one column was ever normalized; it also divided the
    # all-zero diagonal by zero.
    for i in range(len(bodyparts)):
        for j in range(len(bodyparts)):
            pair_max = np.max(distances2[:, i, j])
            if pair_max > 0:  # the diagonal (self-distance) is all zeros
                distances2[:, i, j] = distances2[:, i, j]/pair_max
    # Keep only the upper triangle (unique pairs) for each frame.
    d = []
    for i in range(distances2.shape[0]):
        d.append(distances2[i, np.triu_indices(len(bodyparts), k = 1)[0], np.triu_indices(len(bodyparts), k = 1)[1]])

    d = StandardScaler().fit_transform(d)
    embedding = TSNE(n_components=dimensions, n_jobs=n_jobs, verbose=verbose, perplexity=perplexity, random_state = 42, n_iter = 5000, learning_rate=learning_rate, init = "pca", early_exaggeration = 12)
    X_transformed = embedding.fit_transform(d[startIndex:endIndex])
    model = AgglomerativeClustering(n_clusters=None,distance_threshold=distance)
    model = model.fit(d[startIndex:endIndex])
    cluster_labels = model.labels_
    frames = data.scorer[2:].values.astype(int)
    for i in np.unique(cluster_labels):
        os.makedirs(directory+os.sep+"cluster"+ str(i))
    # Walk the video and save each analyzed frame into its cluster's folder.
    vidcap = cv2.VideoCapture(video)
    success,image = vidcap.read()
    count = 0
    ind = 0
    while success:
        if (np.isin(count, frames[startIndex:endIndex])):
            cv2.imwrite(directory+os.sep+"cluster"+str(model.labels_[ind])+os.sep+"frame%d.jpg" % count, image)
            ind = ind +1
        success,image = vidcap.read()
        count += 1
    vidcap.release()  # BUG FIX: the capture handle was never released
    # Cluster population bar chart.
    for i in np.unique(cluster_labels):
        plt.bar(i, cluster_labels[cluster_labels==i].shape, color = "C0")
    plt.xticks(np.arange(model.n_clusters_))
    plt.xlabel("Clusters")
    plt.ylabel("Frames")
    plt.show()
    # Build the scipy linkage matrix (count samples under each merge node).
    counts = np.zeros(model.children_.shape[0])
    n_samples = len(model.labels_)
    for i, merge in enumerate(model.children_):
        current_count = 0
        for child_idx in merge:
            if child_idx < n_samples:
                current_count += 1  # leaf node
            else:
                current_count += counts[child_idx - n_samples]
        counts[i] = current_count
    linkage_matrix = np.column_stack(
        [model.children_, model.distances_, counts]
    ).astype(float)
    plt.figure(figsize=(10,5))
    plt.title("Hierarchical Clustering Dendrogram")
    dendrogram(linkage_matrix, truncate_mode="level", p=3)
    plt.xlabel("Number of points in node (or index of point if no parenthesis).")
    plt.show()
    # t-SNE embedding scatter, one color per cluster.
    fig, ax = plt.subplots(figsize=(10,10), dpi =80)
    i = 0
    color = plt.cm.get_cmap("rainbow", model.n_clusters_)
    for x in range(model.n_clusters_):
        sel = cluster_labels == x

        ax.scatter(X_transformed[sel,0], X_transformed[sel,1], label=str(x), s=1, color = color(i))
        i = i+1
    plt.legend()
    plt.title('Clusters')
    plt.show()
    if return_metrics == 1:
        if type(knn_n_neighbors) == type(None):
            knn_n_neighbors = model.n_clusters_//2
        if type(knc_n_neighbors) == type(None):
            knc_n_neighbors = model.n_clusters_//2
        if type(n) == type(None):
            n = 1000
        data_HDim = d[startIndex:endIndex]
        data_emb = X_transformed
        # KNN: fraction of k-nearest neighbours preserved by the embedding.
        neigh = NearestNeighbors(n_neighbors=knn_n_neighbors)
        neigh.fit(data_HDim)
        neigh2 = NearestNeighbors(n_neighbors=knn_n_neighbors)
        neigh2.fit(data_emb)
        intersections = 0.0
        for i in range(len(data_HDim)):
            intersections += len(set(neigh.kneighbors(data_HDim, return_distance = False)[i]) & set(neigh2.kneighbors(data_emb, return_distance = False)[i]))
        knn = intersections / len(data_HDim) / knn_n_neighbors
        # KNC: fraction of k-nearest class means preserved by the embedding.
        clusters = len(np.unique(cluster_labels))
        clusters_HDim = np.zeros((clusters,data_HDim.shape[1]))
        clusters_tsne = np.zeros((clusters,data_emb.shape[1]))
        for i in np.unique(cluster_labels):
            clusters_HDim[i,:] = np.mean(data_HDim[np.unique(cluster_labels, return_inverse=True)[1] == np.unique(cluster_labels, return_inverse=True)[0][i], :], axis = 0)
            clusters_tsne[i,:] = np.mean(data_emb[np.unique(cluster_labels, return_inverse=True)[1] == np.unique(cluster_labels, return_inverse=True)[0][i], :], axis = 0)
        neigh = NearestNeighbors(n_neighbors=knc_n_neighbors)
        neigh.fit(clusters_HDim)
        neigh2 = NearestNeighbors(n_neighbors=knc_n_neighbors)
        neigh2.fit(clusters_tsne)
        intersections = 0.0
        for i in range(clusters):
            intersections += len(set(neigh.kneighbors(clusters_HDim, return_distance = False)[i]) & set(neigh2.kneighbors(clusters_tsne, return_distance = False)[i]))
        knc = intersections / clusters / knc_n_neighbors
        # CPD: Spearman correlation of pairwise distances over n random pairs.
        dist_alto = np.zeros(n)
        dist_tsne = np.zeros(n)
        for i in range(n):
            a = np.random.randint(0,len(data_HDim), size = 1)
            b = np.random.randint(0,len(data_HDim), size = 1)
            dist_alto[i] = np.linalg.norm(data_HDim[a] - data_HDim[b])
            dist_tsne[i] = np.linalg.norm(data_emb[a] - data_emb[b])
        cpd = stats.spearmanr(dist_alto, dist_tsne)
        return cluster_labels, data_emb, model, data_HDim, knn, knc, cpd
    else:
        return cluster_labels, X_transformed, model, d[startIndex:endIndex]
2574 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# Importing wheel up front makes a missing wheel dependency fail fast at build time.
import wheel
import setuptools

# The README becomes the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name = 'pyratlib',
    packages = ['pyratlib'],
    version = '0.7.7',
    license='MIT',
    description = 'PyRat is a user friendly library in python to analyze data from the DeepLabCut. Developed to help researchers unfamiliar with programming can perform animal behavior analysis more simpler.', # Give a short description about your library
    long_description=long_description,
    long_description_content_type="text/markdown",
    url = 'https://github.com/pyratlib/pyrat',
    download_url = 'https://github.com/pyratlib/pyrat',
    keywords = ['Data analysis', 'Animal Behavior', 'Electrophysiology', 'Tracking', 'DeepLabCut'],
    # Runtime dependencies; scipy/matplotlib/cv2 used by some functions are
    # not listed here — NOTE(review): confirm whether they should be added.
    install_requires=[
            'numpy',
            'pandas',
            'neo',
            'scikit-learn',
            'wheel'
        ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
--------------------------------------------------------------------------------