├── .all-contributorsrc ├── .gitattributes ├── .gitignore ├── LICENSE ├── Magnum.ipynb ├── Magnum.py ├── Magnum_Lite_Official.ipynb ├── README.md ├── Training_Example ├── readme.md ├── testing_text │ ├── 1.txt │ └── 2..txt ├── text2latex │ ├── ReadMe.md │ ├── latex │ │ └── latex.txt │ └── sources │ │ └── source.txt └── text2py │ ├── ReadMe.md │ ├── python │ └── targets.txt │ └── sources │ └── source.txt ├── api ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-38.pyc │ ├── demo_web_app.cpython-38.pyc │ ├── gpt.cpython-38.pyc │ └── ui_config.cpython-38.pyc └── gpt.py ├── api_keys ├── appid ├── openai └── readme.md ├── app ├── apiInterface.py ├── apiWrapper.py ├── docMaker.py ├── latex2Manim.py ├── steps.txt └── textToManim.py ├── assets └── water_mark.png └── requirements.txt /.all-contributorsrc: -------------------------------------------------------------------------------- 1 | { 2 | "files": [ 3 | "README.md" 4 | ], 5 | "imageSize": 100, 6 | "commit": false, 7 | "contributors": [ 8 | { 9 | "login": "Shreenabh664", 10 | "name": "Shreenabh Agrawal", 11 | "avatar_url": "https://avatars3.githubusercontent.com/u/62369422?v=4", 12 | "profile": "http://shreenabh.com", 13 | "contributions": [ 14 | "bug", 15 | "code", 16 | "content", 17 | "doc", 18 | "design", 19 | "projectManagement", 20 | "test" 21 | ] 22 | }, 23 | { 24 | "login": "mrselukar", 25 | "name": "Mayur Selukar", 26 | "avatar_url": "https://avatars1.githubusercontent.com/u/35305019?v=4", 27 | "profile": "https://mrselukar.github.io", 28 | "contributions": [ 29 | "code", 30 | "bug", 31 | "doc" 32 | ] 33 | }, 34 | { 35 | "login": "Nanu00", 36 | "name": "Shantanu Deshmukh", 37 | "avatar_url": "https://avatars3.githubusercontent.com/u/66585423?v=4", 38 | "profile": "https://github.com/Nanu00", 39 | "contributions": [ 40 | "code", 41 | "bug", 42 | "test" 43 | ] 44 | }, 45 | { 46 | "login": "kingekartik", 47 | "name": "Kartik Kinge", 48 | "avatar_url": "https://avatars3.githubusercontent.com/u/26987169?v=4", 49 | "profile": "http://www.arvidhya.com", 50 | "contributions": [ 51 | "ideas" 52 | ] 53 | } 54 | ], 55 | "contributorsPerLine": 7, 56 | "projectName": "Magnum", 57 | "projectOwner": "Magnum-Math", 58 | "repoType": "github", 59 | "repoHost": "https://github.com", 60 | "skipCi": true 61 | } 62 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | *.cfg 3 | *venv* 4 | 5 | *pycache* 6 | *.ipynb_checkpoints* 7 | 8 | # dependencies 9 | /node_modules 10 | /.pnp 11 | .pnp.js 12 | 13 | # testing 14 | /coverage 15 | 16 | # production 17 | /build 18 | /media 19 | /Animations 20 | # misc 21 | .DS_Store 22 | .env.local 23 | .env.development.local 24 | .env.test.local 25 | .env.production.local 26 | .env 27 | 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Shreya Shankar 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Magnum.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Convert your Math Question to Animated solution\n", 8 | "This is the main notebook for the Magnum Project \n", 9 | "The installation instructions for all the dependencies are in the readme at our [github repository](https://github.com/GPT-3-Manim/AI-Math-Animator-GPT3) \n", 10 | "\n", 11 | "### Using Custom Priming Data\n", 12 | "We have provided you with the basic priming data for the text to manim GPT model. \n", 13 | "The Latex conversion is slightly non standard as the text is interperetd in tex so to introduct spacing we have to inserte a \" / \". \n", 14 | "\n", 15 | "If you wish to provide your own examples for priming you can edit the files in the Training_Examples directoriy. 
\n", 16 | "\n", 17 | "### A note if you are using non standard latex packages \n", 18 | "We use Manim to animate the solution from wolfram follow the instructions at [manim github page](https://github.com/3b1b/manim) to get manim up and running \n", 19 | "\n", 20 | "If your latex code uses non-standard or additional packages you will need the manim source code and not the pip version \n", 21 | "\n", 22 | "Again the instructions to install the required version are given on [manim github page](https://github.com/3b1b/manim) or you can follow [the manim docs here](https://readthedocs.org/projects/manim/downloads/pdf/latest/)\n", 23 | "\n", 24 | "For non standard latex packages follow [this amazing video](https://www.youtube.com/watch?v=VPYmZWTjHoU)\n", 25 | "\n", 26 | "### Rendering options \n", 27 | "Manim provides you with a full array of rendering options from setting aspect ratios to resoultion and framerate. \n", 28 | "\n", 29 | "Follow the [video here to get insight on all the options](https://www.youtube.com/watch?v=d_2V5mC2hx0)" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 1, 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "/home/mrselukar/manim/gpt-sandbox-cloned\n" 42 | ] 43 | } 44 | ], 45 | "source": [ 46 | "import openai\n", 47 | "import os \n", 48 | "from pathlib import Path\n", 49 | "data_folder = Path(os.getcwd())\n", 50 | "openai.api_key = open(data_folder / 'api_keys/openai').readline().rstrip('\\n')\n", 51 | "print(data_folder)" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 2, 57 | "metadata": {}, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "Enter Input Question\n", 64 | "x^2-2x-6=0\n", 65 | "Would you like to print intermediate code results? yes/no\n", 66 | "yes\n", 67 | "Query Received is x^2 - 2 x - 6 = 0\n", 68 | "Solution Generated\n" 69 | ] 70 | } 71 | ], 72 | "source": [ 73 | "from app import apiWrapper\n", 74 | "print(\"Enter Input Question\")\n", 75 | "qry = input()\n", 76 | "qry += \" \"\n", 77 | "while qry.isspace():\n", 78 | " qry = input(\"Enter Input Question\")\n", 79 | "\n", 80 | "print(\"Would you like to print intermediate code results? yes/no\")\n", 81 | "selection = input()\n", 82 | "while selection not in [\"yes\", \"no\"]:\n", 83 | " selection = input()\n", 84 | " print(\"Would you like to print intermediate code results? 
yes/no\")\n", 85 | "\n", 86 | "apiWrapper.getUsrQues(qry)\n", 87 | "RAW_TEXT, Query = apiWrapper.callApi()\n", 88 | "Query = Query.replace(\"|\",\"\")\n", 89 | "print(\"Query Received is \", Query)\n", 90 | "print(\"Solution Generated\")" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 3, 96 | "metadata": {}, 97 | "outputs": [ 98 | { 99 | "name": "stdout", 100 | "output_type": "stream", 101 | "text": [ 102 | "Solve for x over the real numbers:\n", 103 | "x^2 - 2 x - 6 = 0\n", 104 | "Add 6 to both sides:\n", 105 | "x^2 - 2 x = 6\n", 106 | "Add 1 to both sides:\n", 107 | "x^2 - 2 x + 1 = 7\n", 108 | "Write the left hand side as a square:\n", 109 | "(x - 1)^2 = 7\n", 110 | "Take the square root of both sides:\n", 111 | "x - 1 = sqrt(7) or x - 1 = -sqrt(7)\n", 112 | "Add 1 to both sides:\n", 113 | "x = 1 + sqrt(7) or x - 1 = -sqrt(7)\n", 114 | "Add 1 to both sides:\n", 115 | "Answer: x = 1 + sqrt(7) or x = 1 - sqrt(7)\n", 116 | "\n" 117 | ] 118 | } 119 | ], 120 | "source": [ 121 | "if selection == \"yes\":\n", 122 | " for line in RAW_TEXT:\n", 123 | " print(line)" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 4, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "import os\n", 133 | "import sys\n", 134 | "sys.path.append(os.getcwd())\n", 135 | "\n", 136 | "from api import GPT, Example\n", 137 | "from glob import glob\n", 138 | "def read_file(path_to_file):\n", 139 | " retval = \"\"\n", 140 | " file = open(path_to_file)\n", 141 | " retval = file.readlines()\n", 142 | " file.close()\n", 143 | " #Make sure the new line character is not read it throws the model off \n", 144 | " retval = [x.split(\"/n\")[0][:-1] for x in retval]\n", 145 | " return retval\n", 146 | "\n", 147 | "\n", 148 | "# Construct GPT object and show some examples\n", 149 | "gpt = GPT(engine=\"davinci\",\n", 150 | " temperature=0.01,\n", 151 | " max_tokens=150)\n", 152 | "\n", 153 | "\n", 154 | "# reade file and convert it to source string and target string tuples\n", 155 | "source_names = [item for item in sorted(glob(str(data_folder / \"Training_Example/text2latex/sources/*\")))]\n", 156 | "target_names = [item for item in sorted(glob( str(data_folder / \"Training_Example/text2latex/latex/*\")))]\n", 157 | "\n", 158 | "\n", 159 | "# open each file in the Training_Example directory\n", 160 | "for src_path, target_path in zip(source_names,target_names):\n", 161 | " \n", 162 | " # For each files read the RAW and corrosponding Latex Code\n", 163 | " src_RAW = read_file(src_path)\n", 164 | " target_RAW = read_file(target_path)\n", 165 | " \n", 166 | " # for each pair of RAW and latex prime the GPT model\n", 167 | " if len(src_RAW) != len(target_RAW):\n", 168 | " raise Exception(\"Source and Latex have mismached number of line {} {} in file {} and {}\".format(str(len(src_RAW)), str(len(target_RAW)),src_path,target_path))\n", 169 | "\n", 170 | " for s_RAW, t_RAW in zip(src_RAW,target_RAW):\n", 171 | " gpt.add_example(Example(s_RAW,t_RAW))\n", 172 | " # Uncomment the following if you would like to see the priming examples\n", 173 | " #print(\"Source: \", s_RAW)\n", 174 | " #print(\"Output: \", t_RAW)\n", 175 | " #print(\"----\")\n", 176 | " \n" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 5, 182 | "metadata": {}, 183 | "outputs": [ 184 | { 185 | "name": "stdout", 186 | "output_type": "stream", 187 | "text": [ 188 | "\n" 189 | ] 190 | } 191 | ], 192 | "source": [ 193 | "# Construct GPT object and show some examples\n", 194 | "gpt_py = 
GPT(engine=\"davinci\",\n", 195 | " temperature=0.01,\n", 196 | " max_tokens=100)\n", 197 | "\n", 198 | "\n", 199 | "# reade file and convert it to source string and target string tuples\n", 200 | "source_names = [item for item in sorted(glob( str(data_folder / \"Training_Example/text2py/sources/*\")))]\n", 201 | "target_names = [item for item in sorted(glob( str(data_folder / \"Training_Example/text2py/python/*\")))]\n", 202 | "\n", 203 | "\n", 204 | "# open each file in the Training_Example directory\n", 205 | "for src_path, target_path in zip(source_names,target_names):\n", 206 | " \n", 207 | " # For each files read the RAW and corrosponding Latex Code\n", 208 | " src_RAW = read_file(src_path)\n", 209 | " target_RAW = read_file(target_path)\n", 210 | " \n", 211 | " # for each pair of RAW and latex prime the GPT model\n", 212 | " if len(src_RAW) != len(target_RAW):\n", 213 | " raise Exception(\"Source and Latex have mismached number of line {} {} in file {} and {}\".format(str(len(src_RAW)), str(len(target_RAW)),src_path,target_path))\n", 214 | "\n", 215 | " for s_RAW, t_RAW in zip(src_RAW,target_RAW):\n", 216 | " gpt_py.add_example(Example(s_RAW,t_RAW))\n", 217 | " # Uncomment the following if you would like to see the priming examples\n", 218 | " #print(\"Source: \", s_RAW)\n", 219 | " #print(\"Output: \", t_RAW)\n", 220 | " #print(\"----\")\n", 221 | " \n", 222 | "print(\"\")" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 6, 228 | "metadata": {}, 229 | "outputs": [ 230 | { 231 | "name": "stdout", 232 | "output_type": "stream", 233 | "text": [ 234 | "Attempting to convert input query to graphable python function\n", 235 | "Interpereted python function is x**2 - 2*x - 6\n", 236 | "\n", 237 | "\n" 238 | ] 239 | } 240 | ], 241 | "source": [ 242 | "# Converting RAW_TEXT Query to Python Function:\n", 243 | "print(\"Attempting to convert input query to graphable python function\")\n", 244 | "python_func = gpt_py.get_top_reply(Query)\n", 245 | "python_func = python_func[7:]\n", 246 | "python_func = python_func.split(\"/n\")[0]\n", 247 | "print(\"Interpereted python function is\", python_func)" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 7, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "name": "stdout", 257 | "output_type": "stream", 258 | "text": [ 259 | "Fetching the intermediate LateX code from OpenAI GPT3 API\n" 260 | ] 261 | }, 262 | { 263 | "data": { 264 | "application/vnd.jupyter.widget-view+json": { 265 | "model_id": "e57ec4be438b45b9aebc54d90bda4e9b", 266 | "version_major": 2, 267 | "version_minor": 0 268 | }, 269 | "text/plain": [ 270 | "HBox(children=(FloatProgress(value=0.0, max=15.0), HTML(value='')))" 271 | ] 272 | }, 273 | "metadata": {}, 274 | "output_type": "display_data" 275 | }, 276 | { 277 | "name": "stdout", 278 | "output_type": "stream", 279 | "text": [ 280 | "\n", 281 | "Intermediate LateX generated\n" 282 | ] 283 | } 284 | ], 285 | "source": [ 286 | "# Converting RAW_TEXT to Latex:\n", 287 | "from tqdm.auto import tqdm\n", 288 | "print(\"Fetching the intermediate LateX code from OpenAI GPT3 API\")\n", 289 | "response = []\n", 290 | "for i in tqdm(range(len(RAW_TEXT))) :\n", 291 | " line = RAW_TEXT[i]\n", 292 | " t = gpt.get_top_reply(line)\n", 293 | " response.append(t)\n", 294 | "print(\"Intermediate LateX generated\")" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 8, 300 | "metadata": {}, 301 | "outputs": [], 302 | "source": [ 303 | "latex_code = []\n", 304 | "for 
line in response:\n", 305 | " text = line.split(\"\\n\")[0][7:]\n", 306 | " if text.isspace() or text == \"\":\n", 307 | " continue\n", 308 | " else:\n", 309 | " latex_code.append(text +\"\\n\")" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": 9, 315 | "metadata": {}, 316 | "outputs": [], 317 | "source": [ 318 | "f = open('./latex.txt','w')\n", 319 | "for i in range(len(latex_code)):\n", 320 | " f.write(latex_code[i])\n", 321 | "f.close()" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": 10, 327 | "metadata": {}, 328 | "outputs": [ 329 | { 330 | "name": "stdout", 331 | "output_type": "stream", 332 | "text": [ 333 | " Solve \\ for \\ x \\ over \\ the \\ real \\ numbers:\n", 334 | " x^{2} - 2 x - 6 = 0\n", 335 | " Add 6 \\ to \\ both \\ sides:\n", 336 | " x^{2} - 2 x = 6\n", 337 | " Add 1 \\ to \\ both \\ sides:\n", 338 | " x^{2} - 2 x + 1 = 7\n", 339 | " Write \\ the \\ left \\ hand \\ side \\ as \\ a \\ square:\n", 340 | " (x - 1)^{2} = 7\n", 341 | " Take \\ the \\ square \\ root \\ of \\ both \\ sides:\n", 342 | " x - 1 = \\sqrt{7} \\ or \\ x - 1 = -\\sqrt{7}\n", 343 | " Add 1 \\ to \\ both \\ sides:\n", 344 | " x = 1 + \\sqrt{7} \\ or \\ x - 1 = -\\sqrt{7}\n", 345 | " Add 1 \\ to \\ both \\ sides:\n", 346 | " Answer: x = 1 + \\sqrt{7} \\ or \\ x = 1 - \\sqrt{7}\n" 347 | ] 348 | } 349 | ], 350 | "source": [ 351 | "if selection == \"yes\":\n", 352 | " for line in latex_code:\n", 353 | " print(line, end=\"\")\n", 354 | " " 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": 11, 360 | "metadata": {}, 361 | "outputs": [], 362 | "source": [ 363 | "python_func = python_func.split(\"\\n\")[0]" 364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": 12, 369 | "metadata": { 370 | "scrolled": false 371 | }, 372 | "outputs": [ 373 | { 374 | "name": "stdout", 375 | "output_type": "stream", 376 | "text": [ 377 | "Converting Latex to Maxnim Code\n", 378 | "Do you want to generate a graph for x**2 - 2*x - 6? 
yes/no\n", 379 | "yes\n", 380 | "from manimlib.imports import *\n", 381 | "from math import *\n", 382 | "class Solution(GraphScene):\n", 383 | "\tCONFIG = {\n", 384 | "\t\t'graph_origin': ORIGIN,\n", 385 | "\t\t'function_color': WHITE,\n", 386 | "\t\t'axes_color': BLUE,\n", 387 | "\t\t'x_min':\t-10,\n", 388 | "\t\t'x_max':\t10,\n", 389 | "\t\t'x_labeled_nums' :range(-10,10, 2),\n", 390 | "\t\t'y_min':\t-114,\n", 391 | "\t\t'y_max':\t114,\n", 392 | "\t\t'y_labeled_nums' :range(-114,114,23)}\n", 393 | "\n", 394 | "\tdef construct(self):\n", 395 | "\t\twatermark = ImageMobject(\"./assets/water_mark.png\",opacity=0.7)\n", 396 | "\t\twatermark.scale(1.5)\n", 397 | "\t\twatermark.to_corner(DOWN+RIGHT, buff=0)\n", 398 | "\t\tself.play(FadeIn(watermark))\n", 399 | "\t\tSolve = TexMobject(r\" Solve \\ for \\ x \\ over \\ the \\ real \\ numbers:\" )\n", 400 | "\t\tSolve.to_edge(UP)\n", 401 | "\t\tself.play(Write(Solve))\n", 402 | "\t\talign_mark = TexMobject( r'abs', fill_opacity=0.00,height=0.5)\n", 403 | "\t\talign_mark.next_to(Solve,DOWN)\n", 404 | "\t\tself.wait(1)\n", 405 | "\t\tR0 = TexMobject(r\" x^{2} - 2 x - 6 = 0\" )\n", 406 | "\t\tif R0.get_height() > 1:\n", 407 | "\t\t\tR0.set_height(height=1,stretch=False)\n", 408 | "\t\tif R0.get_width() > 12:\n", 409 | "\t\t\tR0.set_width(width=12,stretch=False)\n", 410 | "\t\tR1 = TexMobject(r\" Add 6 \\ to \\ both \\ sides:\" )\n", 411 | "\t\tif R1.get_height() > 1:\n", 412 | "\t\t\tR1.set_height(height=1,stretch=False)\n", 413 | "\t\tif R1.get_width() > 12:\n", 414 | "\t\t\tR1.set_width(width=12,stretch=False)\n", 415 | "\t\tR2 = TexMobject(r\" x^{2} - 2 x = 6\" )\n", 416 | "\t\tif R2.get_height() > 1:\n", 417 | "\t\t\tR2.set_height(height=1,stretch=False)\n", 418 | "\t\tif R2.get_width() > 12:\n", 419 | "\t\t\tR2.set_width(width=12,stretch=False)\n", 420 | "\t\tR3 = TexMobject(r\" Add 1 \\ to \\ both \\ sides:\" )\n", 421 | "\t\tif R3.get_height() > 1:\n", 422 | "\t\t\tR3.set_height(height=1,stretch=False)\n", 423 | "\t\tif R3.get_width() > 12:\n", 424 | "\t\t\tR3.set_width(width=12,stretch=False)\n", 425 | "\t\tR4 = TexMobject(r\" x^{2} - 2 x + 1 = 7\" )\n", 426 | "\t\tif R4.get_height() > 1:\n", 427 | "\t\t\tR4.set_height(height=1,stretch=False)\n", 428 | "\t\tif R4.get_width() > 12:\n", 429 | "\t\t\tR4.set_width(width=12,stretch=False)\n", 430 | "\t\tR5 = TexMobject(r\" Write \\ the \\ left \\ hand \\ side \\ as \\ a \\ square:\" )\n", 431 | "\t\tif R5.get_height() > 1:\n", 432 | "\t\t\tR5.set_height(height=1,stretch=False)\n", 433 | "\t\tif R5.get_width() > 12:\n", 434 | "\t\t\tR5.set_width(width=12,stretch=False)\n", 435 | "\t\tR6 = TexMobject(r\" (x - 1)^{2} = 7\" )\n", 436 | "\t\tif R6.get_height() > 1:\n", 437 | "\t\t\tR6.set_height(height=1,stretch=False)\n", 438 | "\t\tif R6.get_width() > 12:\n", 439 | "\t\t\tR6.set_width(width=12,stretch=False)\n", 440 | "\t\tR7 = TexMobject(r\" Take \\ the \\ square \\ root \\ of \\ both \\ sides:\" )\n", 441 | "\t\tif R7.get_height() > 1:\n", 442 | "\t\t\tR7.set_height(height=1,stretch=False)\n", 443 | "\t\tif R7.get_width() > 12:\n", 444 | "\t\t\tR7.set_width(width=12,stretch=False)\n", 445 | "\t\tR8 = TexMobject(r\" x - 1 = \\sqrt{7} \\ or \\ x - 1 = -\\sqrt{7}\" )\n", 446 | "\t\tif R8.get_height() > 1:\n", 447 | "\t\t\tR8.set_height(height=1,stretch=False)\n", 448 | "\t\tif R8.get_width() > 12:\n", 449 | "\t\t\tR8.set_width(width=12,stretch=False)\n", 450 | "\t\tR9 = TexMobject(r\" Add 1 \\ to \\ both \\ sides:\" )\n", 451 | "\t\tif R9.get_height() > 1:\n", 452 | 
"\t\t\tR9.set_height(height=1,stretch=False)\n", 453 | "\t\tif R9.get_width() > 12:\n", 454 | "\t\t\tR9.set_width(width=12,stretch=False)\n", 455 | "\t\tR10 = TexMobject(r\" x = 1 + \\sqrt{7} \\ or \\ x - 1 = -\\sqrt{7}\" )\n", 456 | "\t\tif R10.get_height() > 1:\n", 457 | "\t\t\tR10.set_height(height=1,stretch=False)\n", 458 | "\t\tif R10.get_width() > 12:\n", 459 | "\t\t\tR10.set_width(width=12,stretch=False)\n", 460 | "\t\tR11 = TexMobject(r\" Add 1 \\ to \\ both \\ sides:\" )\n", 461 | "\t\tif R11.get_height() > 1:\n", 462 | "\t\t\tR11.set_height(height=1,stretch=False)\n", 463 | "\t\tif R11.get_width() > 12:\n", 464 | "\t\t\tR11.set_width(width=12,stretch=False)\n", 465 | "\t\tR12 = TexMobject(r\" Answer: x = 1 + \\sqrt{7} \\ or \\ x = 1 - \\sqrt{7}\" )\n", 466 | "\t\tif R12.get_height() > 1:\n", 467 | "\t\t\tR12.set_height(height=1,stretch=False)\n", 468 | "\t\tif R12.get_width() > 12:\n", 469 | "\t\t\tR12.set_width(width=12,stretch=False)\n", 470 | "\t\tR0.next_to(align_mark,DOWN)\n", 471 | "\t\tself.play(Write(R0))\n", 472 | "\t\tself.wait(1)\n", 473 | "\t\tR1.next_to(R0, DOWN)\n", 474 | "\t\tself.play(Write(R1))\n", 475 | "\t\tself.wait(1)\n", 476 | "\t\tR2.next_to(R1, DOWN)\n", 477 | "\t\tself.play(Write(R2))\n", 478 | "\t\tself.wait(1)\n", 479 | "\t\tself.play(FadeOut(R0))\n", 480 | "\t\tself.play(ApplyMethod(R1.next_to,align_mark,DOWN))\n", 481 | "\t\tself.play(ApplyMethod(R2.next_to,R1, DOWN))\n", 482 | "\t\tR3.next_to(R2, DOWN)\n", 483 | "\t\tself.play(Write(R3))\n", 484 | "\t\tself.play(FadeOut(R1))\n", 485 | "\t\tself.play(ApplyMethod(R2.next_to,align_mark,DOWN))\n", 486 | "\t\tself.play(ApplyMethod(R3.next_to,R2, DOWN))\n", 487 | "\t\tR4.next_to(R3, DOWN)\n", 488 | "\t\tself.play(Write(R4))\n", 489 | "\t\tself.play(FadeOut(R2))\n", 490 | "\t\tself.play(ApplyMethod(R3.next_to,align_mark,DOWN))\n", 491 | "\t\tself.play(ApplyMethod(R4.next_to,R3, DOWN))\n", 492 | "\t\tR5.next_to(R4, DOWN)\n", 493 | "\t\tself.play(Write(R5))\n", 494 | "\t\tself.play(FadeOut(R3))\n", 495 | "\t\tself.play(ApplyMethod(R4.next_to,align_mark,DOWN))\n", 496 | "\t\tself.play(ApplyMethod(R5.next_to,R4, DOWN))\n", 497 | "\t\tR6.next_to(R5, DOWN)\n", 498 | "\t\tself.play(Write(R6))\n", 499 | "\t\tself.play(FadeOut(R4))\n", 500 | "\t\tself.play(ApplyMethod(R5.next_to,align_mark,DOWN))\n", 501 | "\t\tself.play(ApplyMethod(R6.next_to,R5, DOWN))\n", 502 | "\t\tR7.next_to(R6, DOWN)\n", 503 | "\t\tself.play(Write(R7))\n", 504 | "\t\tself.play(FadeOut(R5))\n", 505 | "\t\tself.play(ApplyMethod(R6.next_to,align_mark,DOWN))\n", 506 | "\t\tself.play(ApplyMethod(R7.next_to,R6, DOWN))\n", 507 | "\t\tR8.next_to(R7, DOWN)\n", 508 | "\t\tself.play(Write(R8))\n", 509 | "\t\tself.play(FadeOut(R6))\n", 510 | "\t\tself.play(ApplyMethod(R7.next_to,align_mark,DOWN))\n", 511 | "\t\tself.play(ApplyMethod(R8.next_to,R7, DOWN))\n", 512 | "\t\tR9.next_to(R8, DOWN)\n", 513 | "\t\tself.play(Write(R9))\n", 514 | "\t\tself.play(FadeOut(R7))\n", 515 | "\t\tself.play(ApplyMethod(R8.next_to,align_mark,DOWN))\n", 516 | "\t\tself.play(ApplyMethod(R9.next_to,R8, DOWN))\n", 517 | "\t\tR10.next_to(R9, DOWN)\n", 518 | "\t\tself.play(Write(R10))\n", 519 | "\t\tself.play(FadeOut(R8))\n", 520 | "\t\tself.play(ApplyMethod(R9.next_to,align_mark,DOWN))\n", 521 | "\t\tself.play(ApplyMethod(R10.next_to,R9, DOWN))\n", 522 | "\t\tR11.next_to(R10, DOWN)\n", 523 | "\t\tself.play(Write(R11))\n", 524 | "\t\tself.play(FadeOut(R9))\n", 525 | "\t\tself.play(ApplyMethod(R10.next_to,align_mark,DOWN))\n", 526 | "\t\tself.play(ApplyMethod(R11.next_to,R10, 
DOWN))\n", 527 | "\t\tR12.next_to(R11, DOWN)\n", 528 | "\t\tself.play(Write(R12))\n", 529 | "\t\tself.play(FadeOut(R12))\n", 530 | "\t\tself.play(FadeOut(R11))\n", 531 | "\t\tself.play(FadeOut(R10))\n", 532 | "\t\tself.play(FadeOut(Solve))\n", 533 | "\t\tself.setup_axes(animate=True)\n", 534 | "\t\tfunc_graph = self.get_graph(self.func, self.function_color)\n", 535 | "\t\tself.play(ShowCreation(func_graph))\n", 536 | "\t\tself.wait(3)\n", 537 | "\t\tself.play(FadeOut(func_graph))\n", 538 | "\t\tself.play(FadeOut(self.axes))\n", 539 | "\t\tself.play(ApplyMethod(watermark.next_to,align_mark,DOWN))\n", 540 | "\t\tself.play(FadeOut(watermark))\n", 541 | "\n", 542 | "\tdef func(self, x):\n", 543 | "\t\tf = x**2 - 2*x - 6\n", 544 | "\t\treturn f\n", 545 | "Manim Code Generated\n" 546 | ] 547 | } 548 | ], 549 | "source": [ 550 | "from app import latex2Manim\n", 551 | "import importlib\n", 552 | "importlib.reload(latex2Manim)\n", 553 | "print(\"Converting Latex to Maxnim Code\")\n", 554 | "manim_code = latex2Manim.latex2Manim(latex_code, python_func ,python_func)\n", 555 | "if selection == \"yes\":\n", 556 | " print(manim_code)\n", 557 | "print(\"Manim Code Generated\")" 558 | ] 559 | }, 560 | { 561 | "cell_type": "code", 562 | "execution_count": 13, 563 | "metadata": {}, 564 | "outputs": [ 565 | { 566 | "name": "stdout", 567 | "output_type": "stream", 568 | "text": [ 569 | "Manim Code saved at /home/mrselukar/manim/gpt-sandbox-cloned/solution.py\n" 570 | ] 571 | } 572 | ], 573 | "source": [ 574 | "fptr = open(data_folder / \"solution.py\", \"w\") \n", 575 | "fptr.write(manim_code)\n", 576 | "fptr.close()\n", 577 | "print(\"Manim Code saved at {}/solution.py\".format(data_folder))" 578 | ] 579 | }, 580 | { 581 | "cell_type": "code", 582 | "execution_count": 14, 583 | "metadata": {}, 584 | "outputs": [ 585 | { 586 | "name": "stdout", 587 | "output_type": "stream", 588 | "text": [ 589 | "Media will be written to ./Animations/. You can change this behavior with the --media_dir flag.\n", 590 | " \n", 591 | "File ready at /home/mrselukar/manim/gpt-sandbox-cloned/Animations/videos/solution/480p15/Solution.mp4\n", 592 | "\n", 593 | "Played 60 animations\n" 594 | ] 595 | } 596 | ], 597 | "source": [ 598 | "# if you are rendering inside the notebook use the cell below\n", 599 | "!manim solution.py Solution -pl --media_dir \"./Animations\"" 600 | ] 601 | }, 602 | { 603 | "cell_type": "code", 604 | "execution_count": 15, 605 | "metadata": {}, 606 | "outputs": [ 607 | { 608 | "data": { 609 | "text/plain": [ 610 | "'\\nimport os\\nprint(\"Starting to Animate. Arguments for manim if any?\")\\nargs = input()\\nretval = os.system(\\'manim \\' + str(data_folder) + \\'/solution.py Solution \\' + args +\\' --media_dir \\' + str(data_folder) +\\'\"/Animations\"\\')\\nif retval == 0:\\n print(\"Animation Completed check ./Animations/video for output\")\\nelse:\\n print(\"Animation Error Check Manim Logs!!\")\\n'" 611 | ] 612 | }, 613 | "execution_count": 15, 614 | "metadata": {}, 615 | "output_type": "execute_result" 616 | } 617 | ], 618 | "source": [ 619 | "# code to make the magnum.py file\n", 620 | "\"\"\"\n", 621 | "import os\n", 622 | "print(\"Starting to Animate. 
Arguments for manim if any?\")\n", 623 | "args = input()\n", 624 | "retval = os.system('manim ' + str(data_folder) + '/solution.py Solution ' + args +' --media_dir ' + str(data_folder) +'\"/Animations\"')\n", 625 | "if retval == 0:\n", 626 | " print(\"Animation Completed check ./Animations/video for output\")\n", 627 | "else:\n", 628 | " print(\"Animation Error Check Manim Logs!!\")\n", 629 | "\"\"\"" 630 | ] 631 | } 632 | ], 633 | "metadata": { 634 | "kernelspec": { 635 | "display_name": "Python 3", 636 | "language": "python", 637 | "name": "python3" 638 | }, 639 | "language_info": { 640 | "codemirror_mode": { 641 | "name": "ipython", 642 | "version": 3 643 | }, 644 | "file_extension": ".py", 645 | "mimetype": "text/x-python", 646 | "name": "python", 647 | "nbconvert_exporter": "python", 648 | "pygments_lexer": "ipython3", 649 | "version": "3.8.3" 650 | }, 651 | "titlepage": { 652 | "author": "Mayur Selukar", 653 | "email": "mayur.selukar1@gmail.com", 654 | "linkedin": "https://www.linkedin.com/in/mayur-selukar/", 655 | "tagline": "Convert Latex code to beautiful animations", 656 | "title": "Latex2manim", 657 | "website": "https://mrselukar.github.io/" 658 | } 659 | }, 660 | "nbformat": 4, 661 | "nbformat_minor": 4 662 | } 663 | -------------------------------------------------------------------------------- /Magnum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # ## Convert your Math Question to Animated solution 5 | # This is the main notebook for the Magnum Project 6 | # The installation instructions for all the dependencies are in the readme at our [github repository](https://github.com/GPT-3-Manim/AI-Math-Animator-GPT3) 7 | # 8 | # ### Using Custom Priming Data 9 | # We have provided you with the basic priming data for the text to manim GPT model. 10 | # The Latex conversion is slightly non standard as the text is interperetd in tex so to introduct spacing we have to inserte a " / ". 11 | # 12 | # If you wish to provide your own examples for priming you can edit the files in the Training_Examples directoriy. 13 | # 14 | # ### A note if you are using non standard latex packages 15 | # We use Manim to animate the solution from wolfram follow the instructions at [manim github page](https://github.com/3b1b/manim) to get manim up and running 16 | # 17 | # If your latex code uses non-standard or additional packages you will need the manim source code and not the pip version 18 | # 19 | # Again the instructions to install the required version are given on [manim github page](https://github.com/3b1b/manim) or you can follow [the manim docs here](https://readthedocs.org/projects/manim/downloads/pdf/latest/) 20 | # 21 | # For non standard latex packages follow [this amazing video](https://www.youtube.com/watch?v=VPYmZWTjHoU) 22 | # 23 | # ### Rendering options 24 | # Manim provides you with a full array of rendering options from setting aspect ratios to resoultion and framerate. 
25 | # 26 | # Follow the [video here to get insight on all the options](https://www.youtube.com/watch?v=d_2V5mC2hx0) 27 | 28 | # In[1]: 29 | 30 | 31 | import openai 32 | import os 33 | from pathlib import Path 34 | data_folder = Path(os.getcwd()) 35 | openai.api_key = open(data_folder / 'api_keys/openai').readline().rstrip('\n') 36 | print(data_folder) 37 | 38 | 39 | # In[2]: 40 | 41 | 42 | from app import apiWrapper 43 | print("Enter Input Question") 44 | qry = input() 45 | qry += " " 46 | while qry.isspace(): 47 | qry = input("Enter Input Question") 48 | 49 | print("Would you like to print intermediate code results? yes/no") 50 | selection = input() 51 | while selection not in ["yes", "no"]: 52 | selection = input() 53 | print("Would you like to print intermediate code results? yes/no") 54 | 55 | apiWrapper.getUsrQues(qry) 56 | RAW_TEXT, Query = apiWrapper.callApi() 57 | Query = Query.replace("|","") 58 | print("Query Received is ", Query) 59 | print("Solution Generated") 60 | 61 | 62 | # In[3]: 63 | 64 | 65 | if selection == "yes": 66 | for line in RAW_TEXT: 67 | print(line) 68 | 69 | 70 | # In[4]: 71 | 72 | 73 | import os 74 | import sys 75 | sys.path.append(os.getcwd()) 76 | 77 | from api import GPT, Example 78 | from glob import glob 79 | def read_file(path_to_file): 80 | retval = "" 81 | file = open(path_to_file) 82 | retval = file.readlines() 83 | file.close() 84 | #Make sure the new line character is not read it throws the model off 85 | retval = [x.split("/n")[0][:-1] for x in retval] 86 | return retval 87 | 88 | 89 | # Construct GPT object and show some examples 90 | gpt = GPT(engine="davinci", 91 | temperature=0.01, 92 | max_tokens=150) 93 | 94 | 95 | # reade file and convert it to source string and target string tuples 96 | source_names = [item for item in sorted(glob(str(data_folder / "Training_Example/text2latex/sources/*")))] 97 | target_names = [item for item in sorted(glob( str(data_folder / "Training_Example/text2latex/latex/*")))] 98 | 99 | 100 | # open each file in the Training_Example directory 101 | for src_path, target_path in zip(source_names,target_names): 102 | 103 | # For each files read the RAW and corrosponding Latex Code 104 | src_RAW = read_file(src_path) 105 | target_RAW = read_file(target_path) 106 | 107 | # for each pair of RAW and latex prime the GPT model 108 | if len(src_RAW) != len(target_RAW): 109 | raise Exception("Source and Latex have mismached number of line {} {} in file {} and {}".format(str(len(src_RAW)), str(len(target_RAW)),src_path,target_path)) 110 | 111 | for s_RAW, t_RAW in zip(src_RAW,target_RAW): 112 | gpt.add_example(Example(s_RAW,t_RAW)) 113 | # Uncomment the following if you would like to see the priming examples 114 | #print("Source: ", s_RAW) 115 | #print("Output: ", t_RAW) 116 | #print("----") 117 | 118 | 119 | 120 | # In[5]: 121 | 122 | 123 | # Construct GPT object and show some examples 124 | gpt_py = GPT(engine="davinci", 125 | temperature=0.01, 126 | max_tokens=100) 127 | 128 | 129 | # reade file and convert it to source string and target string tuples 130 | source_names = [item for item in sorted(glob( str(data_folder / "Training_Example/text2py/sources/*")))] 131 | target_names = [item for item in sorted(glob( str(data_folder / "Training_Example/text2py/python/*")))] 132 | 133 | 134 | # open each file in the Training_Example directory 135 | for src_path, target_path in zip(source_names,target_names): 136 | 137 | # For each files read the RAW and corrosponding Latex Code 138 | src_RAW = read_file(src_path) 139 | target_RAW = 
read_file(target_path) 140 | 141 | # for each pair of RAW and latex prime the GPT model 142 | if len(src_RAW) != len(target_RAW): 143 | raise Exception("Source and Latex have mismached number of line {} {} in file {} and {}".format(str(len(src_RAW)), str(len(target_RAW)),src_path,target_path)) 144 | 145 | for s_RAW, t_RAW in zip(src_RAW,target_RAW): 146 | gpt_py.add_example(Example(s_RAW,t_RAW)) 147 | # Uncomment the following if you would like to see the priming examples 148 | #print("Source: ", s_RAW) 149 | #print("Output: ", t_RAW) 150 | #print("----") 151 | 152 | print("") 153 | 154 | 155 | # In[6]: 156 | 157 | 158 | # Converting RAW_TEXT Query to Python Function: 159 | print("Attempting to convert input query to graphable python function") 160 | python_func = gpt_py.get_top_reply(Query) 161 | python_func = python_func[7:] 162 | python_func = python_func.split("/n")[0] 163 | print("Interpereted python function is", python_func) 164 | 165 | 166 | # In[7]: 167 | 168 | 169 | # Converting RAW_TEXT to Latex: 170 | from tqdm.auto import tqdm 171 | print("Fetching the intermediate LateX code from OpenAI GPT3 API") 172 | response = [] 173 | for i in tqdm(range(len(RAW_TEXT))) : 174 | line = RAW_TEXT[i] 175 | t = gpt.get_top_reply(line) 176 | response.append(t) 177 | print("Intermediate LateX generated") 178 | 179 | 180 | # In[8]: 181 | 182 | 183 | latex_code = [] 184 | for line in response: 185 | text = line.split("\n")[0][7:] 186 | if text.isspace() or text == "": 187 | continue 188 | else: 189 | latex_code.append(text +"\n") 190 | 191 | 192 | # In[9]: 193 | 194 | 195 | f = open('./latex.txt','w') 196 | for i in range(len(latex_code)): 197 | f.write(latex_code[i]) 198 | f.close() 199 | 200 | 201 | # In[10]: 202 | 203 | 204 | if selection == "yes": 205 | for line in latex_code: 206 | print(line, end="") 207 | 208 | 209 | 210 | # In[11]: 211 | 212 | 213 | python_func = python_func.split("\n")[0] 214 | 215 | 216 | # In[12]: 217 | 218 | 219 | from app import latex2Manim 220 | import importlib 221 | importlib.reload(latex2Manim) 222 | print("Converting Latex to Maxnim Code") 223 | manim_code = latex2Manim.latex2Manim(latex_code, python_func ,python_func) 224 | if selection == "yes": 225 | print(manim_code) 226 | print("Manim Code Generated") 227 | 228 | 229 | # In[13]: 230 | 231 | 232 | fptr = open(data_folder / "solution.py", "w") 233 | fptr.write(manim_code) 234 | fptr.close() 235 | print("Manim Code saved at {}/solution.py".format(data_folder)) 236 | 237 | 238 | # In[16]: 239 | 240 | 241 | # if you are rendering inside the notebook use the cell below 242 | #get_ipython().system('manim solution.py Solution -pl --media_dir "./Animations"') 243 | 244 | 245 | # In[15]: 246 | 247 | 248 | # code to make the magnum.py file 249 | 250 | import os 251 | print("Starting to Animate. 
Arguments for manim if any?") 252 | args = input() 253 | retval = os.system('manim ' + str(data_folder) + '/solution.py Solution ' + args +' --media_dir ' + str(data_folder) +'"/Animations"') 254 | if retval == 0: 255 | print("Animation Completed check ./Animations/video for output") 256 | else: 257 | print("Animation Error Check Manim Logs!!") 258 | 259 | 260 | -------------------------------------------------------------------------------- /Magnum_Lite_Official.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Magnum Lite Official.ipynb", 7 | "private_outputs": true, 8 | "provenance": [], 9 | "collapsed_sections": [], 10 | "toc_visible": true 11 | }, 12 | "kernelspec": { 13 | "name": "python3", 14 | "display_name": "Python 3" 15 | }, 16 | "accelerator": "GPU" 17 | }, 18 | "cells": [ 19 | { 20 | "cell_type": "markdown", 21 | "metadata": { 22 | "id": "VrR2S2F2PO7B" 23 | }, 24 | "source": [ 25 | "# Magnum Lite Official Playground\n", 26 | "\n", 27 | "\n", 28 | "\n", 29 | "\n", 30 | "\n", 31 | "\n", 32 | "\n", 33 | "\n", 34 | "---\n", 35 | "\n" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": { 41 | "id": "EdAKBO2xPOkt" 42 | }, 43 | "source": [ 44 | "Welcome to the guided tutorial for Magnum Lite! This is the official playground where you can connect to a cloud instance and run Magnum right from your Browser!\n", 45 | "\n", 46 | "Before we begin to install Magnum here, please have a look at the [Magnum website](https://magnum.shreenabh.com) where you can find all related documentation and lot of information about Magnum. There is also a FULL version of Magnum (all information and comparison with Magnum Lite on the [Magnum website](https://magnum.shreenabh.com)'s \"[Getting Started](https://magnum.shreenabh.com/benefits/getting-started)\" page ).\n", 47 | "\n", 48 | "If you are here, we assume you have read the differences between Magnum and Magnum Lite and have chosen this to be the better option for you.\n", 49 | "\n", 50 | "---\n", 51 | "\n" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": { 57 | "id": "NYxGIst4Sjm2" 58 | }, 59 | "source": [ 60 | "## Setting up and installation\n", 61 | "\n", 62 | "The aim of this section is to connect to a cloud instance, install all dependencies correctly and set up Magnum here.\n", 63 | " So let's get started!\n", 64 | "\n", 65 | "---\n", 66 | "\n", 67 | "#####**Heads up!**: If you are willing to generate a high quality render of your animation (1440p, 60 fps to be precise- defaults are 480p and 15 fps), you should switch to a GPU hardware accelerator right away before you execute any cell. You can do this on the top bar from Runtime > Change Runtime Type > Hardware Accelerator > GPU.\n", 68 | "\n", 69 | "---\n" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": { 75 | "id": "wT-Lt952S_73" 76 | }, 77 | "source": [ 78 | "First of all, let's install the libraries. Just click on the square brackets (which turns to a play button) on the left side of each box to execute it.\n", 79 | "\n", 80 | "Please note that it will take sometime to start up because a cloud instance needs to be allocated. Also, this is a BIG install and your first code block, so just sit back and relax." 
81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "metadata": { 86 | "id": "Dl7IPV3ZS_B6" 87 | }, 88 | "source": [ 89 | "!sudo apt-get update\n", 90 | "!sudo apt-get install texlive-full\n", 91 | "!apt-get install pkg-config libcairo2-dev\n", 92 | "!pip3 install jupyter-manim" 93 | ], 94 | "execution_count": null, 95 | "outputs": [] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": { 100 | "id": "Le5__ZJNVNp9" 101 | }, 102 | "source": [ 103 | "Now, if the status button has stopped spinning and you see a [1] on the left side of the above cell, we are good to go.\n", 104 | "\n", 105 | "The next cell will install \"Magnum\" from [our GitHub repository](https://github.com/Magnum-Math/Magnum). Just execute it.\n" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "metadata": { 111 | "id": "Du2DBLVKS6AE" 112 | }, 113 | "source": [ 114 | "!git clone https://github.com/Magnum-Math/Magnum.git\n", 115 | "!mv /content/Magnum/* /content/" 116 | ], 117 | "execution_count": null, 118 | "outputs": [] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": { 123 | "id": "Zgpe6hp3XXpP" 124 | }, 125 | "source": [ 126 | "## The Animator\n", 127 | "\n", 128 | "Welcome to the next part of your Magnum experience. Now, Magnum and all related libraries are installed on your system. It is time to start animating your files now!" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": { 134 | "id": "KRk3VLeEXq2K" 135 | }, 136 | "source": [ 137 | "\n", 138 | "\n", 139 | "---\n", 140 | "IMPORTANT: Please make sure you have your \"latex.txt\" file ready as per the specifications mentioned on the [Magnum Website's](https://magnum.shreenabh.com) \"[Getting Started](https://magnum.shreenabh.com/benefits/getting-started)\" page. If not, please format your files before hand if you don't want to play with some weird errors.\n", 141 | "\n", 142 | "---\n", 143 | "\n", 144 | "Click on the folder icon on the left panel. You will find a lot of files and folders already present here. Click on the upload files button on the top of the expanded panel to upload your \"latex.txt\" file.\n", 145 | "\n", 146 | "Once your file is uploaded, execute the below cell:\n", 147 | "\n", 148 | "---\n", 149 | "\n", 150 | "NOTE: You'll be asked some questions during execution. Answer them to keep the execution running." 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "metadata": { 156 | "id": "SFZsB10yZFTM" 157 | }, 158 | "source": [ 159 | "import os \n", 160 | "from pathlib import Path\n", 161 | "data_folder = Path(os.getcwd())\n", 162 | "print(data_folder)\n", 163 | "def read_file(path_to_file):\n", 164 | " retval = \"\"\n", 165 | " file = open(path_to_file)\n", 166 | " retval = file.readlines()\n", 167 | " file.close()\n", 168 | " #Make sure the new line character is not read it throws the model off \n", 169 | " retval = [x.split(\"/n\")[0][:-1] for x in retval]\n", 170 | " return retval\n", 171 | "\n", 172 | "\n", 173 | "#Change the ./latex_file.txt to your files location\n", 174 | "latex_code = read_file('/content/latex.txt')\n", 175 | "for i in range(len(latex_code)):\n", 176 | " latex_code[i] += \"\\n\"\n", 177 | "selection = input(\"Do you want to print intermediate code: (yes/no)\")" 178 | ], 179 | "execution_count": null, 180 | "outputs": [] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "metadata": { 185 | "id": "AOF_Y7FMZ_hY" 186 | }, 187 | "source": [ 188 | "Confirm if you see a [3] before proceeding. 
If not, answer the question asked.\n", 189 | "\n", 190 | "Once this is done, it is time to identify if there are any graphable functions in your text. Magnum may/may not identify a function (if it exists). If you want to graph a custom function, please input the *Correct Python equivalent* of it. If you are not interested in it, just enter \"no\"" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "metadata": { 196 | "id": "duQg6NhGanVa" 197 | }, 198 | "source": [ 199 | "from app import latex2Manim\n", 200 | "print(\"Converting Latex to Maxnim Code\")\n", 201 | "manim_code = latex2Manim.latex2Manim(latex_code)\n", 202 | "if selection == \"yes\":\n", 203 | " print(manim_code)\n", 204 | "print(\"Manim Code Generated\")" 205 | ], 206 | "execution_count": null, 207 | "outputs": [] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "metadata": { 212 | "id": "7WjUuUFYav03" 213 | }, 214 | "source": [ 215 | "Yay! You've generated the Manim code! It is time to move it in the right place now. Keep executing:" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "metadata": { 221 | "id": "hEQ3H6d1avDs" 222 | }, 223 | "source": [ 224 | "fptr = open(\"./solution.py\", \"w\") \n", 225 | "fptr.write(manim_code)\n", 226 | "fptr.close()\n", 227 | "print(\"Manim Code saved at ./solution.py\")" 228 | ], 229 | "execution_count": null, 230 | "outputs": [] 231 | }, 232 | { 233 | "cell_type": "markdown", 234 | "metadata": { 235 | "id": "IigprGe6bKLY" 236 | }, 237 | "source": [ 238 | "Now is the time to render your animation! Everything is set from our side. If you face any errors while executing the next and final cell, it will be because you haven't formatted your LaTeX document \"Magnum style\". Have a look at the \"Getting Started\" docs on our [website](https://magnum.shreenabh.com/) again!\n", 239 | "\n", 240 | "---\n", 241 | "If everything goes fine, your file will be saved in a 480p resolution. Instructions to download/modify it later:\n" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "metadata": { 247 | "id": "27IRc3KicPEQ" 248 | }, 249 | "source": [ 250 | "!manim solution.py Solution -pl" 251 | ], 252 | "execution_count": null, 253 | "outputs": [] 254 | }, 255 | { 256 | "cell_type": "markdown", 257 | "metadata": { 258 | "id": "SyvbflaacYRq" 259 | }, 260 | "source": [ 261 | "### Fixing bugs and injecting custom code (Optional)\n", 262 | "\n", 263 | "We won't blame you for the errors if any. Before you start searching for your video, here's how you can easily identify and fix bugs. You might see a message like: \n", 264 | "\n", 265 | "```\n", 266 | "Exception: Latex error converting to dvi. See log output above or the log file:\n", 267 | "```\n", 268 | "This is pretty normal and can be fixed easily. Just navigate to a file called \"solution.py\" from the files menu in the left panel. (The file will open to your right).\n", 269 | "\n", 270 | "\n", 271 | "[To inject custom code, the floor is yours. Feel free to add/modify anything in this file if you're confident and just run the above cell again to render it]\n", 272 | "\n", 273 | "Analyze the error message carefully and identify the line where the exact error is. Some common errors might be:\n", 274 | "1. Using \"\\\\:\" for spacing -> Use \"\\\\\" instead.\n", 275 | "2. Leaving a space in the end of a line. -> Just delete it.\n", 276 | "3. Using custom library notation. -> Please declare any extra libraries that you are using beforehand in the Manim configuration. 
(We've added a [special section](https://github.com/Magnum-Math/Magnum#a-note-if-you-are-using-non-standard-latex-packages) about this on our GitHub readme too)\n", 277 | "\n", 278 | "---" 279 | ] 280 | }, 281 | { 282 | "cell_type": "markdown", 283 | "metadata": { 284 | "id": "GKvuqSTGem7I" 285 | }, 286 | "source": [ 287 | "If there are no errors/ the errors are fixed, voila!\n", 288 | "\n", 289 | "Navigate to: media > videos > solution > 480p15 > Solution.mp4\n", 290 | "(from the left panel- in the files section) to grab your file!\n", 291 | "\n", 292 | "If you click on it, it will be downloaded to your local system.\n", 293 | "\n", 294 | "---\n", 295 | "### Exporting in High-Resolution (Optional)\n", 296 | "\n", 297 | "Run the code cell below to get a 1440p (60 fps) render of your file. (This might take sometime to process, please be patient)\n", 298 | "\n", 299 | "[The previous render was a low quality one 480p - 15 fps]" 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "metadata": { 305 | "id": "y4tUbW09du13" 306 | }, 307 | "source": [ 308 | "!manim solution.py Solution " 309 | ], 310 | "execution_count": null, 311 | "outputs": [] 312 | }, 313 | { 314 | "cell_type": "markdown", 315 | "metadata": { 316 | "id": "gI7PvZ59i8a-" 317 | }, 318 | "source": [ 319 | "For most cases, the normal quality will suffice but if you specially need the high quality file, it can be found here after the above cell has finished execution:\n", 320 | "\n", 321 | "media > videos > solution > 1440p60 > Solution.mp4\n", 322 | "\n", 323 | "---\n", 324 | "\n" 325 | ] 326 | }, 327 | { 328 | "cell_type": "markdown", 329 | "metadata": { 330 | "id": "eH2RoqW9jU70" 331 | }, 332 | "source": [ 333 | "# We are done!\n", 334 | "\n", 335 | "Hope you liked the overall Magnum experience. Feel free to reach out to us for custom inquiries via: magnum@shreenabh.com\n", 336 | "\n", 337 | "(If you had any rough experience/ had some errors, please raise a community issue on GitHub [here](https://github.com/Magnum-Math/Magnum))" 338 | ] 339 | } 340 | ] 341 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MAGNUM (AI Math Animator powered by GPT-3 and Wolfram Alpha) 2 | 3 | [](#contributors-) 4 | 5 | ## Visualize Beautiful Math solutions 6 |  7 | 8 | Initial release date: 15 August 2020 9 | 10 | Fork Credits: The [gpt3-sandbox project](https://github.com/shreyashankar/gpt3-sandbox) was taken as the starting point for this project. We would like to thank the authors of GPT-3-Sandbox for all their help. :) 11 | ## Description 12 | 13 | The goal of this project is to create an Open Source toolkit that makes Math animations effortless. Either from a plain English query or from formatted LaTeX! NO PYTHON CODING NEEDED. 14 | 15 | This project addresses the following issues: 16 | 17 | 1. Create an end to end package using Wolfram Alpha and GPT-3 to visualize Step-by-step Math soultions to user questions. 18 | 2. Create a standalone package which enables you to convert LaTeX into beautiful animations without any knowledge of Manim. 19 | 20 | For full docs and detailed information, have a look at our website: https://magnum.shreenabh.com/ . 
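To tie the two goals together, here is a condensed sketch of the flow that `Magnum.py` and `Magnum.ipynb` walk through, reusing the repository's own `apiWrapper`, `GPT`/`Example`, and `latex2Manim` helpers. It is only a sketch: the priming loop, progress bars, and interactive prompts of the real notebook are omitted, and the question and graphable function shown here are simply the demo quadratic used in the notebook.

```python
# Condensed sketch of the Magnum pipeline, assuming the OpenAI key and
# Wolfram Alpha AppID in api_keys/ have already been filled in.
from app import apiWrapper, latex2Manim
from api import GPT, Example

# 1. Ask Wolfram Alpha for a step-by-step solution (the demo quadratic).
apiWrapper.getUsrQues("x^2 - 2x - 6 = 0 ")
RAW_TEXT, Query = apiWrapper.callApi()      # solution steps + the parsed query

# 2. Prime a GPT-3 model and translate each step to LaTeX.
gpt = GPT(engine="davinci", temperature=0.01, max_tokens=150)
# ... add Example(source, latex) pairs from Training_Example/text2latex here ...
latex_code = []
for line in RAW_TEXT:
    reply = gpt.get_top_reply(line).split("\n")[0][7:]   # drop the "output:" label
    if reply.strip():                                     # skip empty replies
        latex_code.append(reply + "\n")

# 3. Turn the LaTeX into a Manim scene; the last two arguments are the graphable function.
manim_code = latex2Manim.latex2Manim(latex_code, "x**2 - 2*x - 6", "x**2 - 2*x - 6")
with open("solution.py", "w") as fptr:
    fptr.write(manim_code)

# 4. Render it:  manim solution.py Solution -pl --media_dir "./Animations"
```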
21 | 22 | ## Magnum Lite 23 | 24 | Before we get to Setting up, we would like to point out that we have made a special NO-CODE + BROWSER-BASED (Cloud hosted) [Playground for Magnum Lite](https://colab.research.google.com/drive/1Vhyx39pztGeVthKrBZZRGVroEYMwh4T5?usp=sharing) (!). If you are not much into software and code, this might be a nice option for you. Have a look at the detailed comparison [here](https://magnum.shreenabh.com/benefits/getting-started). 25 | 26 | ## Setup 27 | 28 | First, clone or fork this repository. Then to set up your virtual environment, do the following: 29 | 30 | 1. Create a virtual environment in the root directory: `python -m venv $ENV_NAME` 31 | 2. Activate the virtual environment: ` source $ENV_NAME/bin/activate` (for MacOS, Unix, or Linux users) or ` .\ENV_NAME\Scripts\activate` (for Windows users) 32 | 3. Install requirements: `pip install -r requirements.txt` 33 | 4. To add your OpenAI Secret Key: open the file called `openai` in the `api_keys` folder and add your Secret Key there `$YOUR_SECRET_KEY`, where `$YOUR_SECRET_KEY` looks something like `'sk-somerandomcharacters'` (excluding quotes). If you are unsure what your secret key is, navigate to the [API docs](https://beta.openai.com/developer-quickstart) and copy the token displayed next to the "secret" key type. 34 | 5. To add your Wolfram Alpha AppID: open the file called `appid` in the `api_keys` folder and add your AppID there `$YOUR_APP_ID`, where `$YOUR_APP_ID` looks something like `'ZHR$%D-GET$%ASBF$'` (excluding quotes). 35 | 36 | (For detailed Setup information for the no code playground, check our website) 37 | 38 | 39 | ## Interactive Priming 40 | 41 | The real power of GPT-3 is in its ability to learn to specialize to tasks given a few examples. However, priming can at times be more of an art than a science. Using the GPT and Example classes, you can easily experiment with different priming examples and immediately see their effect on GPT-3's performance. Below is an example showing it improve incrementally at translating English to LaTeX as we feed it more examples in the Python interpreter: 42 | 43 | ``` 44 | >>> from api import GPT, Example, set_openai_key 45 | >>> gpt = GPT() 46 | >>> set_openai_key(key) 47 | >>> prompt = "integral from a to b of f of x" 48 | >>> print(gpt.get_top_reply(prompt)) 49 | output: integral from at to be of f of x 50 | 51 | >>> gpt.add_example(Example("Two plus two equals four", "2 + 2 = 4")) 52 | >>> print(gpt.get_top_reply(prompt)) 53 | output: 54 | 55 | >>> gpt.add_example(Example('The integral from zero to infinity', '\\int_0^{\\infty}')) 56 | >>> print(gpt.get_top_reply(prompt)) 57 | output: \int_a^b f(x) dx 58 | 59 | ``` 60 | ### Using Custom Priming Data 61 | We have provided you with the basic priming data for the text-to-Manim GPT-3 model. 62 | The LaTeX conversion is slightly non-standard: because the text is interpreted as TeX, spacing has to be introduced explicitly by inserting a " \ ". Another way of doing this is by explicitly declaring text as \text{Your Text Here}. 63 | 64 | If you wish to provide your own examples for priming, you can edit the files in the "Training_Example" directory. 65 |
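As an illustration of the priming format, here is a minimal sketch of hand-written priming pairs. The two pairs below are made up for illustration (they mirror the generated output shown in `Magnum.ipynb`); in the actual pipeline the pairs are read line by line from `Training_Example/text2latex/sources` and `Training_Example/text2latex/latex`.

```python
from api import GPT, Example

# Same settings as the text-to-LaTeX model used in Magnum.ipynb
gpt = GPT(engine="davinci", temperature=0.01, max_tokens=150)

# Illustrative pairs: plain words carry the explicit TeX spacing "\ ",
# while the math itself stays ordinary LaTeX.
gpt.add_example(Example("Add 6 to both sides:", r" Add 6 \ to \ both \ sides:"))
gpt.add_example(Example("x^2 - 2 x = 6", r" x^{2} - 2 x = 6"))
```

When you add your own files, keep each `sources` file line-aligned with its `latex` counterpart; the notebook raises an exception if the two sides have a different number of lines.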
66 | ### A note if you are using Non-Standard LaTeX packages 67 | We use Manim to animate the solution from Wolfram. Follow the instructions at the [manim github page](https://github.com/3b1b/manim) to get Manim up and running. 68 | 69 | If your LaTeX code uses non-standard or additional packages, you will need the manim source code and NOT the "pip" version. 70 | 71 | Again, the instructions to install the required version are given on the [Manim github page](https://github.com/3b1b/manim), or you can follow [the Manim docs here](https://readthedocs.org/projects/manim/downloads/pdf/latest/). 72 | 73 | For non-standard LaTeX packages, follow [this amazing video](https://www.youtube.com/watch?v=VPYmZWTjHoU). 74 | 75 | ### Rendering options for Manim 76 | Manim provides you with a full array of rendering options, from setting aspect ratios to resolution and framerate. 77 | 78 | Follow the [video here to get insight on all the options](https://www.youtube.com/watch?v=d_2V5mC2hx0). 79 | 80 | ## Contributions 81 | 82 | We actively encourage people to contribute by adding their own examples or even adding functionality to the modules. Please make a pull request if you would like to add something, or create an issue if you have a question. We will update the contributors list on a regular basis. 83 | 84 | Please *do not* leave your secret key and/or AppID in plaintext in your pull request! 85 | 86 | ## Contributors ✨ 87 | 88 | Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): 89 | 90 | 91 | 92 | 93 |
- [Shreenabh Agrawal](http://shreenabh.com) 🐛 💻 🖋 📖 🎨 📆 ⚠️
96 | - [Mayur Selukar](https://mrselukar.github.io) 💻 🐛 📖 🎨 ⚠️
97 | - [Nanu (Shantanu Deshmukh)](https://github.com/Nanu00) 💻 🐛 ⚠️
98 | - [Kartik Kinge](http://www.arvidhya.com) 🤔
99 |