├── README.md ├── app.py └── Autograder.ipynb /README.md: -------------------------------------------------------------------------------- 1 | # Student-s-Autograder 2 | 3 | Requires any Python notebook environment with Python 3.8+. 4 | 5 | Required libraries: 6 | - pandas 7 | - numpy 8 | - re (part of the Python standard library — no installation needed) 9 | - PyPDF2 10 | 11 | Steps to run: 12 | 1. Download your quiz or end-term response PDF (May 2022 term or later). 13 | 2. Download the corresponding question paper from the link provided in the IITM portal. 14 | 3. All three files (Python notebook, response PDF, Q&A PDF) must be in the same directory. 15 | 4. Write the response PDF and question-and-answer PDF filenames (with the .pdf extension) inside the quotes "" at the indicated places. 16 | 5. Now execute all cells to generate the summarized report. 17 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import pandas as pd 3 | import numpy as np 4 | 5 | st.title('Uber pickups in NYC') 6 | 7 | DATE_COLUMN = 'date/time' 8 | DATA_URL = ('https://s3-us-west-2.amazonaws.com/' 9 | 'streamlit-demo-data/uber-raw-data-sep14.csv.gz') 10 | 11 | @st.cache 12 | def load_data(nrows): 13 | data = pd.read_csv(DATA_URL, nrows=nrows) 14 | lowercase = lambda x: str(x).lower() 15 | data.rename(lowercase, axis='columns', inplace=True) 16 | data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN]) 17 | return data 18 | 19 | data_load_state = st.text('Loading data...') 20 | data = load_data(10000) 21 | data_load_state.text("Done! 
(using st.cache)") 22 | 23 | if st.checkbox('Show raw data'):  # optional: render the raw dataframe on demand 24 | st.subheader('Raw data') 25 | st.write(data) 26 | 27 | st.subheader('Number of pickups by hour') 28 | hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0,24))[0]  # pickup counts per hour of day (bins 0-23) 29 | st.bar_chart(hist_values) 30 | 31 | # Some number in the range 0-23 32 | hour_to_filter = st.slider('hour', 0, 23, 17) 33 | filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]  # keep only rows whose pickup hour matches the slider 34 | 35 | st.subheader('Map of all pickups at %s:00' % hour_to_filter) 36 | st.map(filtered_data) 37 | -------------------------------------------------------------------------------- /Autograder.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "aCDpAf4EWQhO" 7 | }, 8 | "source": [ 9 | "## **Student's Autograder**\n", 10 | "This is a project to calculate quiz and term scores from q&a pdf and response pdf." 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": { 16 | "id": "bETNiv5nVhkv" 17 | }, 18 | "source": [ 19 | "Instructions:\n", 20 | "\n", 21 | "\n", 22 | "1. on\n", 23 | "2. 
List item\n", 24 | "\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": { 30 | "id": "6V2_XMBIDFin" 31 | }, 32 | "source": [ 33 | "### Importing the required libraries " 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "source": [ 39 | "# 're' is part of the Python standard library and cannot be pip-installed.\n", 40 | "!pip install pandas\n", 41 | "!pip install \"PyPDF2<3\"  # this notebook uses the legacy PdfFileReader API, removed in PyPDF2 3.0" 42 | ], 43 | "metadata": { 44 | "id": "hR256CwHVo3n" 45 | }, 46 | "execution_count": null, 47 | "outputs": [] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": { 53 | "id": "AJWhrjFBs-wV", 54 | "tags": [] 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "import os\n", 59 | "import pandas as pd\n", 60 | "import numpy as np\n", 61 | "import re\n", 62 | "from PyPDF2 import PdfFileReader, PdfFileWriter\n", 63 | "from PyPDF2.generic import DecodedStreamObject, EncodedStreamObject,ArrayObject" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": { 70 | "id": "utdx74G3Vhk4" 71 | }, 72 | "outputs": [], 73 | "source": [ 74 | "#current directory for reading files later.\n", 75 | "import os\n", 76 | "os.getcwd()" 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "metadata": { 82 | "id": "wE0xdNo6DSQE" 83 | }, 84 | "source": [ 85 | "### Reading pdfs" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": { 92 | "id": "tp-hWtBMVhk7" 93 | }, 94 | "outputs": [], 95 | "source": [ 96 | "# Enter response pdf name\n", 97 | "resp_pdf = \"POD22S1C21140309.pdf\"\n", 98 | "\n", 99 | "#POD22S1C11500062.pdf\n", 100 | "\n", 101 | "#Enter corresponding question and answer key pdf name\n", 102 | "qa_pdf = \"IIT M DIPLOMA FN2 EXAM ETD2.pdf\"\n", 103 | "\n", 104 | "#IIT M DAD DIPLOMA AN2 EXAM QPD1.pdf\n" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "metadata": { 111 | "id": "dPLWR7pj20Be" 112 | }, 113 | "outputs": [], 114 | "source": [ 115 | "filename=qa_pdf" 116 | ] 117 | }, 118 | { 119 | 
"cell_type": "markdown", 120 | "metadata": { 121 | "id": "4E6nPj3HWpPu" 122 | }, 123 | "source": [ 124 | "Students response for exam dataframe" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": { 131 | "id": "NnC5kExD9z2P" 132 | }, 133 | "outputs": [], 134 | "source": [ 135 | "rpdf_df=PdfFileReader(resp_pdf)" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": null, 141 | "metadata": { 142 | "id": "O9aE3j3OVhlC" 143 | }, 144 | "outputs": [], 145 | "source": [ 146 | "#Updated response pdf reading method from tabula to raw \n", 147 | "myans=[]\n", 148 | "for i in range(rpdf_df.getNumPages()):\n", 149 | " myans+=(rpdf_df.pages[i].extractText().split(\"\\n\"))" 150 | ] 151 | }, 152 | { 153 | "cell_type": "markdown", 154 | "metadata": { 155 | "id": "uS-kGTuOVhlF" 156 | }, 157 | "source": [ 158 | "#### Variables initialization" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": null, 164 | "metadata": { 165 | "id": "zvZtEDRUVhlG" 166 | }, 167 | "outputs": [], 168 | "source": [ 169 | "resp_qid,my_answer=list(filter((lambda x: re.match(r'[Qq]uestion.*', x)),myans))[0],\\\n", 170 | " list(filter((lambda x: re.match(r'[Oo]ption.*', x)),myans))[0]\n", 171 | "unanswered=\"Unanswered\"\n", 172 | "file_qid=\"Question Id\"" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "metadata": { 179 | "id": "RY8ltFXXVhlI" 180 | }, 181 | "outputs": [], 182 | "source": [ 183 | "myans=myans[myans.index(resp_qid):]\n", 184 | "myans=pd.DataFrame(np.vstack((myans[2::2],myans[3::2])).T,columns=[myans[0],myans[1]])\n", 185 | "myans[resp_qid]=myans[resp_qid].apply(int)\n", 186 | "myans" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "metadata": { 193 | "id": "K50kMzV9VhlJ" 194 | }, 195 | "outputs": [], 196 | "source": [ 197 | "# with pd.option_context(\"display.max_rows\",None):\n", 198 | "# print(df3)\n", 199 | "\n", 
200 | "resp_qid,my_answer" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "metadata": { 206 | "id": "WPzDcsX-Wwpf", 207 | "outputId": "3785d44b-c29c-4252-9688-1d6082f57551" 208 | }, 209 | "source": [ 210 | "### Useful functions" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "metadata": { 217 | "id": "EX4ZXoGmVhlL" 218 | }, 219 | "outputs": [], 220 | "source": [ 221 | "def gentextarr(content):\n", 222 | " \"\"\"\n", 223 | " Returns an array of text block properties of pdf\n", 224 | " Parameter:\n", 225 | " content: string object of page content obtained by page.getContents().getData().decode() \n", 226 | " \"\"\"\n", 227 | " lines = content.splitlines()\n", 228 | " result = []\n", 229 | " in_text = False\n", 230 | " for line in lines:\n", 231 | " if line == \"BT\":\n", 232 | " in_text = True\n", 233 | " temp=[]\n", 234 | "\n", 235 | " elif line == \"ET\":\n", 236 | " in_text = False\n", 237 | " result.append(temp)\n", 238 | " temp=[]\n", 239 | "\n", 240 | " elif in_text:\n", 241 | " if line[-2:].lower()==\"tj\" or line[-2:].lower()==\"tf\" or line[-2:].lower()==\"rg\":\n", 242 | " temp.append(line[:])\n", 243 | " return result\n", 244 | "\n", 245 | "def gen_cmap(font,pg):\n", 246 | " \"\"\"\n", 247 | " Returns a character map dictionary of a pdf 'filename' page 'pg' and font \n", 248 | " Parameter:\n", 249 | " font: font key name in meta properties of pdf.\n", 250 | " pg: int page number of pdf\n", 251 | " \"\"\"\n", 252 | " # pdf = PdfFileReader(filename)\n", 253 | " # page = pdf.getPage(pg)\n", 254 | " page=pg\n", 255 | " temp=False\n", 256 | " cmap={}\n", 257 | " for i in (page.getObject()['/Resources']['/Font'][font]['/ToUnicode'].getObject().getData().decode().split('\\n')):\n", 258 | " if \"endbfrange\" in i:\n", 259 | " temp=False\n", 260 | " if temp:\n", 261 | " cmap[i[1:5]]=i[-5:-1]\n", 262 | " if \"beginbfrange\" in i:\n", 263 | " temp=True\n", 264 | " return cmap\n", 265 | "\n", 266 | "def 
font_decoder(text,cmap):\n", 267 | " \"\"\"\n", 268 | " Returns decoded word from pdf text hex code. \n", 269 | " \"\"\"\n", 270 | "\n", 271 | " count=0\n", 272 | " word=\"\"\n", 273 | " temp=\"\"\n", 274 | " for i in text:\n", 275 | " count+=1\n", 276 | " if count==4:\n", 277 | " temp+=i\n", 278 | " word+=chr(int(cmap.get(temp.lower(),temp.lower()),16))\n", 279 | " temp=\"\"\n", 280 | " count=0\n", 281 | " else:\n", 282 | " temp+=i\n", 283 | " return word\n" 284 | ] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "metadata": { 289 | "id": "2zu-gheuVhlO" 290 | }, 291 | "source": [ 292 | "## PDF Content" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "metadata": { 299 | "id": "iQtIaay4VhlP" 300 | }, 301 | "outputs": [], 302 | "source": [ 303 | "\"\"\"\n", 304 | "This piece of code extracts question answer pdf text content as well as correct answer key.\n", 305 | "\"\"\"\n", 306 | "pdf = PdfFileReader(filename)\n", 307 | "sum=0\n", 308 | "df=[[file_qid,\"Correct Marks\",\"Answers\"]]\n", 309 | "m_bool=False\n", 310 | "marks=\"\"\n", 311 | "ans_bool=False\n", 312 | "ans=\"\"\n", 313 | "row=[]\n", 314 | "temp=[]\n", 315 | "s_head=[]\n", 316 | "temp_head=\"\"\n", 317 | "ishead=False\n", 318 | "for p in range(0,pdf.getNumPages()):\n", 319 | " content=pdf.pages[p].getContents()\n", 320 | " #a different type content\n", 321 | " if type(content)==ArrayObject:\n", 322 | " m=\"\"\n", 323 | " for n in content:\n", 324 | " m+=n.getObject().getData().decode()\n", 325 | " content=m\n", 326 | " elif type(content)==EncodedStreamObject or type(content)==DecodedStreamObject:\n", 327 | " content=content.getData().decode()\n", 328 | " for i in gentextarr(content):\n", 329 | " text=\"\"\n", 330 | " ans_bool=False\n", 331 | " for j in i:\n", 332 | " # getting character map for a font type\n", 333 | " if j[-2:].lower()==\"tf\":\n", 334 | " cmap=gen_cmap(j.split()[0],pdf.pages[p])\n", 335 | " #checking for subject headings with font size 
=18\n", 336 | " if j.split()[1]==\"18\":\n", 337 | " ishead=True\n", 338 | " # checking green color as ind of correctness \n", 339 | " if j==\"0 0.50196 0 rg\" and len(row)>1:\n", 340 | " ans_bool=True\n", 341 | " \n", 342 | " elif j[-2:].lower()==\"tj\":\n", 343 | " text=font_decoder(j[1:-3],cmap)\n", 344 | " if ishead:\n", 345 | " ishead=False\n", 346 | " temp_head=text\n", 347 | " \n", 348 | " if (not m_bool) and (not ans_bool):\n", 349 | " # checking for question id\n", 350 | " if str(file_qid) in text:\n", 351 | " if len(temp)>0:\n", 352 | " row.append(temp)\n", 353 | " df.append(row)\n", 354 | " row=[]\n", 355 | " temp=[]\n", 356 | " if text.split()[7].isnumeric():\n", 357 | " s_head.append(temp_head)\n", 358 | " row=[text.split()[7]]\n", 359 | " m_bool=True\n", 360 | " \n", 361 | " # extracting marks\n", 362 | " elif m_bool:\n", 363 | " if \"Correct Marks\" in text:\n", 364 | " # appending to heading list if marks is 0\n", 365 | " # if text.split()[-1]==\"0\":\n", 366 | " # s_head.append(temp_head)\n", 367 | " if len(row)==1:\n", 368 | " row.append(text.split()[3])\n", 369 | " m_bool=False\n", 370 | " \n", 371 | " #extracting answer key\n", 372 | " elif ans_bool:\n", 373 | " if text!=\" \":\n", 374 | " if len(text)>=13 and text[:13].isnumeric():\n", 375 | " temp.append(text[:13])\n", 376 | " elif len(text.split())==3 and \"to\" in text.split()[1]:\n", 377 | " t=text.split()\n", 378 | " if not(t[0].isalpha() or t[-1].isalpha()):\n", 379 | " temp=[float(t[0]),float(t[-1])]\n", 380 | " elif len(temp)==0:\n", 381 | " temp.append(text)\n", 382 | "if len(temp)>=1:\n", 383 | " # s_head.append(temp_head)\n", 384 | " row.append(temp)\n", 385 | " df.append(row)" 386 | ] 387 | }, 388 | { 389 | "cell_type": "code", 390 | "execution_count": null, 391 | "metadata": { 392 | "id": "MCWLsyg6VhlQ" 393 | }, 394 | "outputs": [], 395 | "source": [] 396 | }, 397 | { 398 | "cell_type": "code", 399 | "execution_count": null, 400 | "metadata": { 401 | "id": "LOwqd38RVhlQ" 402 | }, 
403 | "outputs": [], 404 | "source": [ 405 | "df2=pd.DataFrame(df[1:],columns=df[0])\n", 406 | "df2[file_qid]=df2[file_qid].apply(int)\n", 407 | "df2[\"Subjects\"]=np.array(s_head[-df2.shape[0]:])\n", 408 | "df2=df2.sort_values([file_qid])\n", 409 | "# df2[df2[\"Subjects\"]==\"MLT\"][\"Correct Marks\"].apply(float).sum()" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": null, 415 | "metadata": { 416 | "tags": [], 417 | "id": "tuispdiSVhlS" 418 | }, 419 | "outputs": [], 420 | "source": [ 421 | "def evaluate(df2):\n", 422 | " \n", 423 | " #Multi option\n", 424 | " df2['Score'] = df2.apply(lambda x: (len(set(x[\"My answer\"].split(\",\"))\\\n", 425 | " .intersection(set(x[\"Answers\"])))/len(x[\"Answers\"]))*float(x['Correct Marks'])\\\n", 426 | " if type(x[\"Answers\"][0])==str and len(x[\"Answers\"][0])==13 and\\\n", 427 | " len(set(x[\"My answer\"].split(\",\")))<=len(set(x[\"Answers\"])) and\\\n", 428 | " set(x[\"My answer\"].split(\",\")).issubset(set(x[\"Answers\"]))\\\n", 429 | " else x[\"Score\"], axis=1)\n", 430 | " #integer ranged\n", 431 | " df2['Score'] = df2.apply(lambda x: float(x['Correct Marks'])\\\n", 432 | " if len(x[\"My answer\"])<13 and len(x[\"My answer\"])>0 and\\\n", 433 | " len(x['Answers'])>=1 and\\\n", 434 | " float(x['My answer']) >= float(x[\"Answers\"][0]) and\\\n", 435 | " float(x['My answer']) <= float(x[\"Answers\"][-1])\\\n", 436 | " else x[\"Score\"], axis=1)\n", 437 | " return df2" 438 | ] 439 | }, 440 | { 441 | "cell_type": "markdown", 442 | "metadata": { 443 | "id": "2FeMfB8dVhlT" 444 | }, 445 | "source": [ 446 | "## Final score" 447 | ] 448 | }, 449 | { 450 | "cell_type": "markdown", 451 | "metadata": { 452 | "id": "Ymh6m_OSVhlT" 453 | }, 454 | "source": [ 455 | "### Method 1: using index of 0 correct marks" 456 | ] 457 | }, 458 | { 459 | "cell_type": "code", 460 | "execution_count": null, 461 | "metadata": { 462 | "id": "ms9GcsQaVhlU" 463 | }, 464 | "outputs": [], 465 | "source": [ 466 | "# 
s_index=df3[df3[\"Correct Marks\"]==\"0\"].index\n", 467 | "# s_index" 468 | ] 469 | }, 470 | { 471 | "cell_type": "code", 472 | "execution_count": null, 473 | "metadata": { 474 | "id": "7xro0Qv2VhlU" 475 | }, 476 | "outputs": [], 477 | "source": [ 478 | "# subjects={}\n", 479 | "# for i in range(len(s_index)):\n", 480 | "# if i==len(s_index)-1:\n", 481 | "# subjects[df3[\"Subjects\"][s_index[i]]]=df3.iloc[s_index[i]:]\n", 482 | "# else:\n", 483 | "# subjects[df3[\"Subjects\"][s_index[i]]]=df3.iloc[s_index[i]:s_index[i+1]]\n", 484 | "# subjects.keys()" 485 | ] 486 | }, 487 | { 488 | "cell_type": "code", 489 | "execution_count": null, 490 | "metadata": { 491 | "id": "1UtEIC-4VhlV" 492 | }, 493 | "outputs": [], 494 | "source": [ 495 | "# reports=[]\n", 496 | "# for i in subjects.values():\n", 497 | "# print(i[\"Correct Marks\"].apply(float).values)\n", 498 | "# reports.append([i[\"Subjects\"].unique()[0],i[\"Correct Marks\"].apply(float).values.sum(),i[\"Score\"].sum()])\n", 499 | "# for i in reports:\n", 500 | "# print(i[0],\":\",i[2]/i[1]*100,\"%\")" 501 | ] 502 | }, 503 | { 504 | "cell_type": "code", 505 | "execution_count": null, 506 | "metadata": { 507 | "id": "xm5Uq5alVhlW" 508 | }, 509 | "outputs": [], 510 | "source": [ 511 | "# with pd.option_context(\"display.max_rows\",None):\n", 512 | "# display(df)" 513 | ] 514 | }, 515 | { 516 | "cell_type": "markdown", 517 | "metadata": { 518 | "id": "rn81hhWdVhlX" 519 | }, 520 | "source": [ 521 | "### Method 2: Using groupby function" 522 | ] 523 | }, 524 | { 525 | "cell_type": "code", 526 | "execution_count": null, 527 | "metadata": { 528 | "id": "KCJRwsenVhlX" 529 | }, 530 | "outputs": [], 531 | "source": [ 532 | "df3=df2.set_index(file_qid).combine_first(myans.set_index(resp_qid)).reset_index().replace([unanswered,np.NAN],\"\")" 533 | ] 534 | }, 535 | { 536 | "cell_type": "code", 537 | "execution_count": null, 538 | "metadata": { 539 | "id": "FipQercmVhlY" 540 | }, 541 | "outputs": [], 542 | "source": [ 543 | 
"df3.columns=[\"Question Id\",\"Answers\",\"Correct Marks\",\"My answer\",\"Subjects\" ]\n", 544 | "df3[\"Score\"]=np.zeros(df3.shape[0])\n", 545 | "df3=evaluate(df3)" 546 | ] 547 | }, 548 | { 549 | "cell_type": "code", 550 | "execution_count": null, 551 | "metadata": { 552 | "id": "lW02xMG5VhlY" 553 | }, 554 | "outputs": [], 555 | "source": [ 556 | "df3[\"Correct Marks\"]=df3[\"Correct Marks\"].apply(float)" 557 | ] 558 | }, 559 | { 560 | "cell_type": "code", 561 | "execution_count": null, 562 | "metadata": { 563 | "id": "Nu5GhBUuVhlZ" 564 | }, 565 | "outputs": [], 566 | "source": [ 567 | "with pd.option_context(\"display.max_rows\",None):\n", 568 | " display(df3[df3[\"Subjects\"].isin(df3[df3[\"My answer\"]!=\"\"][\"Subjects\"].unique())])\n" 569 | ] 570 | }, 571 | { 572 | "cell_type": "code", 573 | "execution_count": null, 574 | "metadata": { 575 | "id": "eNGIpJ_CVhlZ" 576 | }, 577 | "outputs": [], 578 | "source": [ 579 | "report2=pd.DataFrame(df3.groupby(\"Subjects\")[[\"Correct Marks\",\"Score\"]].sum())" 580 | ] 581 | }, 582 | { 583 | "cell_type": "code", 584 | "execution_count": null, 585 | "metadata": { 586 | "id": "CpOg5Ii0Vhla" 587 | }, 588 | "outputs": [], 589 | "source": [ 590 | "report2[\"Percentage\"]=(report2[\"Score\"]*100/report2[\"Correct Marks\"]).round(2)" 591 | ] 592 | }, 593 | { 594 | "cell_type": "code", 595 | "execution_count": null, 596 | "metadata": { 597 | "id": "KJSZfD9NVhlb" 598 | }, 599 | "outputs": [], 600 | "source": [ 601 | "report2.columns=[\"Total Marks\",\"Scored\",\"Percentage\"]\n", 602 | "headers = {\n", 603 | " 'selector': 'thead',\n", 604 | " 'props': 'background-color: #0dcaf0; color: white;'\n", 605 | "}\n", 606 | "report2.style.applymap(lambda v: 'color:red;' if (v == 0) else 'color:green',subset=[\"Percentage\",\"Scored\"])\\\n", 607 | " .set_table_styles([headers])\\\n", 608 | " .format({\"Total Marks\":'{:.2f}',\"Scored\":'{:.2f}',\"Percentage\":'{0:,.2f}%'})" 609 | ] 610 | }, 611 | { 612 | "cell_type": "code", 613 | 
"execution_count": null, 614 | "metadata": { 615 | "id": "VQ53_D4PVhlc" 616 | }, 617 | "outputs": [], 618 | "source": [ 619 | "report2[report2[\"Percentage\"]>0]" 620 | ] 621 | }, 622 | { 623 | "cell_type": "markdown", 624 | "metadata": { 625 | "id": "UUPNkMU-Vhlc" 626 | }, 627 | "source": [ 628 | "### Exporting dataframe as csv" 629 | ] 630 | }, 631 | { 632 | "cell_type": "code", 633 | "execution_count": null, 634 | "metadata": { 635 | "id": "wL1EyL1XVhld" 636 | }, 637 | "outputs": [], 638 | "source": [ 639 | "df3[df3[\"Subjects\"].isin(report2[report2[\"Percentage\"]>0].index)].to_csv(\"summary.csv\",index=False)\n", 640 | "report2[report2[\"Percentage\"]>0].to_csv(\"report_card.csv\")" 641 | ] 642 | }, 643 | { 644 | "cell_type": "code", 645 | "execution_count": null, 646 | "metadata": { 647 | "id": "-rYkqfx_Vhld" 648 | }, 649 | "outputs": [], 650 | "source": [] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": null, 655 | "metadata": { 656 | "id": "e1sC8isgVhle" 657 | }, 658 | "outputs": [], 659 | "source": [] 660 | } 661 | ], 662 | "metadata": { 663 | "colab": { 664 | "name": "Students_Autograder.ipynb", 665 | "provenance": [] 666 | }, 667 | "kernelspec": { 668 | "display_name": "Python 3 (ipykernel)", 669 | "language": "python", 670 | "name": "python3" 671 | }, 672 | "language_info": { 673 | "codemirror_mode": { 674 | "name": "ipython", 675 | "version": 3 676 | }, 677 | "file_extension": ".py", 678 | "mimetype": "text/x-python", 679 | "name": "python", 680 | "nbconvert_exporter": "python", 681 | "pygments_lexer": "ipython3", 682 | "version": "3.9.13" 683 | } 684 | }, 685 | "nbformat": 4, 686 | "nbformat_minor": 0 687 | } 688 | --------------------------------------------------------------------------------