├── .gitignore ├── LICENSE ├── README.md ├── Report ├── Final-Report.pdf ├── UG-midterm.pptm └── UG_final.pptm ├── celery.sh ├── models ├── ARIMA.ipynb ├── EDA.ipynb ├── FFNN.ipynb ├── GRU.ipynb ├── HW.ipynb ├── LSTM-eda.ipynb ├── LSTM-v2.ipynb ├── LSTM-v3.ipynb ├── LSTM.ipynb ├── RNN.ipynb ├── SES.ipynb ├── SMA.ipynb ├── WMA.ipynb ├── aws.py ├── aws_arima.py ├── aws_rnn.py ├── aws_smoothing.py ├── delhi.ipynb ├── load_scrap.py ├── log ├── pdq_search.py ├── readme.md ├── requirements.txt ├── test.ipynb ├── test.py ├── utils.py └── whether_scrap.py ├── screenshots └── website.png └── server ├── README.md ├── celerybeat.pid ├── db.cnf ├── manage.py ├── nano.save ├── requirements.txt ├── static ├── c3-0.4.18 │ ├── c3.css │ ├── c3.min.js │ └── d3.v3.min.js ├── jquery-3.3.1.js ├── main.js ├── main1.js └── scrap.py ├── swag ├── __init__.py ├── admin.py ├── forms.py ├── load_data.py ├── migrations │ ├── 0001_initial.py │ └── __init__.py ├── models.py ├── tasks.py ├── tests.py └── views.py ├── tamplates ├── Home_page.html └── form.html ├── users ├── __init__.py ├── admin.py ├── apps.py ├── migrations │ ├── 0001_initial.py │ └── __init__.py ├── models.py ├── tests.py └── views.py └── website ├── __init__.py ├── for_celery.py ├── settings.py ├── urls.py └── wsgi.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | celerybeat* 6 | others 7 | staticfiles/* 8 | server/redis-4.0.8/ 9 | server/*.gz 10 | server/db.sqlite3 11 | SLDC_Data 12 | Whether_Data 13 | # C extensions 14 | *.so 15 | *.csv 16 | *.txt 17 | *.npy 18 | *.pkl 19 | .ipynb_checkpoints 20 | # Distribution / packaging 21 | .Python 22 | env/ 23 | build/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib/ 30 | lib64/ 31 | parts/ 32 | sdist/ 33 | var/ 34 | wheels/ 35 | *.egg-info/ 36 | .installed.cfg 37 | *.egg 38 | 39 | # PyInstaller 40 | # Usually these 
files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | .hypothesis/ 59 | 60 | # Translations 61 | *.mo 62 | *.pot 63 | 64 | # Django stuff: 65 | *.log 66 | local_settings.py 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # celery beat schedule file 88 | celerybeat-schedule 89 | 90 | # SageMath parsed files 91 | *.sage.py 92 | 93 | # dotenv 94 | .env 95 | 96 | # virtualenv 97 | .venv 98 | venv/ 99 | ENV/ 100 | 101 | # Spyder project settings 102 | .spyderproject 103 | .spyproject 104 | 105 | # Rope project settings 106 | .ropeproject 107 | 108 | # mkdocs documentation 109 | /site 110 | 111 | # mypy 112 | .mypy_cache/ 113 | server/celerybeat.pid 114 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Rishabh Agrahari 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this 
permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Electric Load Forecasting 2 | 3 | Under graduate project on short term electric load forecasting. Data was taken from [State Load Despatch Center, Delhi](www.delhisldc.org/) website and multiple time series algorithms were implemented during the course of the project. 4 | 5 | ### Models implemented: 6 | 7 | `models` folder contains all the algorithms/models implemented during the course of the project: 8 | 9 | * Feed forward Neural Network [FFNN.ipynb](models/FFNN.ipynb) 10 | * Simple Moving Average [SMA.ipynb](models/SMA.ipynb) 11 | * Weighted Moving Average [WMA.ipynb](models/WMA.ipynb) 12 | * Simple Exponential Smoothing [SES.ipynb](models/SES.ipynb) 13 | * Holts Winters [HW.ipynb](models/HW.ipynb) 14 | * Autoregressive Integrated Moving Average [ARIMA.ipynb](models/ARIMA.ipynb) 15 | * Recurrent Neural Networks [RNN.ipynb](models/RNN.ipynb) 16 | * Long Short Term Memory cells [LSTM.ipynb](models/LSTM.ipynb) 17 | * Gated Recurrent Unit cells [GRU.ipynb](models/GRU.ipynb) 18 | 19 | scripts: 20 | 21 | * `aws_arima.py` fits ARIMA model on last one month's data and forecasts load for each day. 22 | * `aws_rnn.py` fits RNN, LSTM, GRU on last 2 month's data and forecasts load for each day. 
23 | * `aws_smoothing.py` fits SES, SMA, WMA on last one month's data and forecasts load for each day. 24 | * `aws.py` a scheduler to run all above three scripts every day at 00:30 IST. 25 | * `pdq_search.py` for grid search of hyperparameters of ARIMA model on last one month's data. 26 | * `load_scrap.py` scrapes day wise load data of Delhi from [SLDC](https://www.delhisldc.org/Loaddata.aspx?mode=17/01/2018) site and stores it in csv format. 27 | * `whether_scrap.py` scrapes day wise weather data of Delhi from [wunderground](https://www.wunderground.com/history/airport/VIDP/2017/8/1/DailyHistory.html) site and stores it in csv format. 28 | 29 | `server` folder contains django webserver code, developed to show the implemented algorithms and compare their performance. All the implemented algorithms are being used to forecast today's Delhi electricity load [here](http://forecast.energyandsystems.com) [now deprecated]. Project report can be found in [Report](Report) folder. 30 | 31 | ![A screenshot of the website](screenshots/website.png "A screenshot of the website") 32 | 33 | 34 | ### Team Members: 35 | 36 | * Ayush Kumar Goyal 37 | * Boragapu Sunil Kumar 38 | * Srimukha Paturi 39 | * Rishabh Agrahari 40 | 41 | ## Star History 42 | 43 | [![Star History Chart](https://api.star-history.com/svg?repos=pyaf/load_forecasting&type=Date)](https://star-history.com/#pyaf/load_forecasting&Date) -------------------------------------------------------------------------------- /Report/Final-Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/Report/Final-Report.pdf -------------------------------------------------------------------------------- /Report/UG-midterm.pptm: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/Report/UG-midterm.pptm -------------------------------------------------------------------------------- /Report/UG_final.pptm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/Report/UG_final.pptm -------------------------------------------------------------------------------- /celery.sh: -------------------------------------------------------------------------------- 1 | celery -A website worker -c1 -l info 2 | celery -A website beat -l info -------------------------------------------------------------------------------- /models/SES.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from matplotlib import pyplot as plt\n", 12 | "\n", 13 | "\n", 14 | "%matplotlib inline" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 2, 20 | "metadata": {}, 21 | "outputs": [ 22 | { 23 | "name": "stdout", 24 | "output_type": "stream", 25 | "text": [ 26 | "['value', 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.6000000000000001, 0.7000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 
0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.5, 0.6000000000000001, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.6000000000000001, 0.5, 0.5, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.5, 0.6000000000000001, 0.5, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 0.7000000000000001, 0.7000000000000001, 0.6000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.7000000000000001, 0.8, 0.7000000000000001, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.7000000000000001, 0.8, 0.8, 0.8, 0.7000000000000001, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 
0.8, 0.8, 0.9, 0.9, 0.8, 0.8, 0.8, 0.8, 0.9, 0.9, 0.9, 0.8, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]\n" 27 | ] 28 | } 29 | ], 30 | "source": [ 31 | "import csv\n", 32 | "dict={}\n", 33 | "dates = ['01-10-2017.csv','02-10-2017.csv','03-10-2017.csv','04-10-2017.csv','05-10-2017.csv','06-10-2017.csv','07-10-2017.csv','08-10-2017.csv','09-10-2017.csv','10-10-2017.csv','11-10-2017.csv','12-10-2017.csv','13-10-2017.csv','14-10-2017.csv','15-10-2017.csv','16-10-2017.csv','17-10-2017.csv','18-10-2017.csv','19-10-2017.csv','20-10-2017.csv','21-10-2017.csv','22-10-2017.csv','23-10-2017.csv','24-10-2017.csv','25-10-2017.csv','26-10-2017.csv','27-10-2017.csv','28-10-2017.csv','29-10-2017.csv','30-10-2017.csv']\n", 34 | "for i in range(len(dates)):\n", 35 | " dict[dates[i]] = []\n", 36 | " time = []\n", 37 | " for d in csv.DictReader(open(dates[i]), delimiter=','):\n", 38 | " time.append(d['time'])\n", 39 | " dict[dates[i]].append(float(d['value']))\n", 40 | "n = len(dict[dates[len(dates)-1]])\n", 41 | "m = len(dates)\n", 42 | "alpha = [0]*n\n", 43 | "alphamin = ['value']+[0.1]*n\n", 44 | "forecast =[[0]*n for i in range(m)]\n", 45 | "forecast[1] = [dict[dates[0]][j] for j in range(n)]\n", 46 | "for j in range(n):\n", 47 | " mse = [0]*9\n", 48 | " for k in range(1,10):\n", 49 | " alpha[j] = k*0.1\n", 50 | " for i in range(2,m):\n", 51 | " forecast[i][j] = (alpha[j]*dict[dates[i-1]][j]) + ((1 - alpha[j])*forecast[i-1][j])\n", 52 | " for i in range(1,m):\n", 53 | " mse[k-1] += (forecast[i][j] - dict[dates[i]][j])**2\n", 54 | " min = mse[0]\n", 55 | " for i in range(1,9):\n", 56 | " if mse[i]" 400 | ] 401 | }, 402 | "metadata": {}, 403 | "output_type": "display_data" 404 | } 405 | ], 406 | "source": [ 407 | "plt.plot(dict[data[len(data)-1]])\n", 408 | 
"plt.plot(forecast[data[len(data)-1]])\n", 409 | "plt.legend(['load', 'forecast'])\n", 410 | "plt.show()" 411 | ] 412 | }, 413 | { 414 | "cell_type": "code", 415 | "execution_count": null, 416 | "metadata": { 417 | "collapsed": true 418 | }, 419 | "outputs": [], 420 | "source": [] 421 | }, 422 | { 423 | "cell_type": "code", 424 | "execution_count": null, 425 | "metadata": { 426 | "collapsed": true 427 | }, 428 | "outputs": [], 429 | "source": [] 430 | }, 431 | { 432 | "cell_type": "code", 433 | "execution_count": null, 434 | "metadata": { 435 | "collapsed": true 436 | }, 437 | "outputs": [], 438 | "source": [] 439 | }, 440 | { 441 | "cell_type": "code", 442 | "execution_count": null, 443 | "metadata": { 444 | "collapsed": true 445 | }, 446 | "outputs": [], 447 | "source": [] 448 | }, 449 | { 450 | "cell_type": "code", 451 | "execution_count": null, 452 | "metadata": { 453 | "collapsed": true 454 | }, 455 | "outputs": [], 456 | "source": [] 457 | }, 458 | { 459 | "cell_type": "code", 460 | "execution_count": null, 461 | "metadata": { 462 | "collapsed": true 463 | }, 464 | "outputs": [], 465 | "source": [] 466 | } 467 | ], 468 | "metadata": { 469 | "kernelspec": { 470 | "display_name": "Python 3", 471 | "language": "python", 472 | "name": "python3" 473 | }, 474 | "language_info": { 475 | "codemirror_mode": { 476 | "name": "ipython", 477 | "version": 3 478 | }, 479 | "file_extension": ".py", 480 | "mimetype": "text/x-python", 481 | "name": "python", 482 | "nbconvert_exporter": "python", 483 | "pygments_lexer": "ipython3", 484 | "version": "3.6.5" 485 | } 486 | }, 487 | "nbformat": 4, 488 | "nbformat_minor": 2 489 | } 490 | -------------------------------------------------------------------------------- /models/SMA.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | 
"import csv\n", 12 | "\n", 13 | "from matplotlib import pyplot as plt\n", 14 | "\n", 15 | "\n", 16 | "%matplotlib inline" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 4, 22 | "metadata": {}, 23 | "outputs": [ 24 | { 25 | "name": "stdout", 26 | "output_type": "stream", 27 | "text": [ 28 | "00:00 : 1808.85 : 1766.004\n", 29 | "00:05 : 1795.36 : 1741.8380000000002\n", 30 | "00:10 : 1766.32 : 1714.406\n", 31 | "00:15 : 1748.26 : 1697.832\n", 32 | "00:20 : 1749.86 : 1680.766\n", 33 | "00:25 : 1717.41 : 1667.8200000000002\n", 34 | "00:30 : 1705.32 : 1650.04\n", 35 | "00:35 : 1704.37 : 1631.8139999999999\n", 36 | "00:40 : 1678.86 : 1620.328\n", 37 | "00:45 : 1668.22 : 1616.918\n", 38 | "00:50 : 1656.33 : 1602.856\n", 39 | "00:55 : 1651.54 : 1592.8980000000001\n", 40 | "01:00 : 1630.3 : 1585.996\n", 41 | "01:05 : 1610.78 : 1573.24\n", 42 | "01:10 : 1600.4 : 1555.2759999999998\n", 43 | "01:15 : 1588.47 : 1549.5880000000002\n", 44 | "01:20 : 1581.51 : 1534.3219999999997\n", 45 | "01:25 : 1575.98 : 1527.9840000000002\n", 46 | "01:30 : 1575.93 : 1538.614\n", 47 | "01:35 : 1559.56 : 1503.8220000000001\n", 48 | "01:40 : 1552.19 : 1501.152\n", 49 | "01:45 : 1551.38 : 1501.576\n", 50 | "01:50 : 1543.89 : 1503.196\n", 51 | "01:55 : 1542.85 : 1493.9300000000003\n", 52 | "02:00 : 1542.64 : 1488.596\n", 53 | "02:05 : 1534.65 : 1484.752\n", 54 | "02:10 : 1527.04 : 1471.266\n", 55 | "02:15 : 1511.33 : 1474.492\n", 56 | "02:20 : 1519.44 : 1468.5059999999999\n", 57 | "02:25 : 1509.53 : 1467.88\n", 58 | "02:30 : 1518.42 : 1464.37\n", 59 | "02:35 : 1498.1 : 1456.146\n", 60 | "02:40 : 1499.05 : 1447.184\n", 61 | "02:45 : 1500.21 : 1449.4679999999998\n", 62 | "02:50 : 1488.61 : 1447.9140000000002\n", 63 | "02:55 : 1491.55 : 1439.802\n", 64 | "03:00 : 1486.88 : 1440.356\n", 65 | "03:05 : 1477.68 : 1430.882\n", 66 | "03:10 : 1479.34 : 1432.688\n", 67 | "03:15 : 1472.24 : 1428.6399999999999\n", 68 | "03:20 : 1483.99 : 1434.392\n", 69 | "03:25 : 1475.19 : 1430.712\n", 
70 | "03:30 : 1485.59 : 1433.8380000000002\n", 71 | "03:35 : 1480.74 : 1431.19\n", 72 | "03:40 : 1474.51 : 1437.7759999999998\n", 73 | "03:45 : 1483.92 : 1433.724\n", 74 | "03:50 : 1492.99 : 1442.552\n", 75 | "03:55 : 1480.25 : 1439.876\n", 76 | "04:00 : 1492.59 : 1444.212\n", 77 | "04:05 : 1496.89 : 1452.3380000000002\n", 78 | "04:10 : 1509.35 : 1463.156\n", 79 | "04:15 : 1511.96 : 1470.314\n", 80 | "04:20 : 1524.07 : 1478.174\n", 81 | "04:25 : 1532.3 : 1481.06\n", 82 | "04:30 : 1544.58 : 1490.6580000000001\n", 83 | "04:35 : 1555.64 : 1509.1399999999999\n", 84 | "04:40 : 1576.98 : 1528.484\n", 85 | "04:45 : 1598.68 : 1540.594\n", 86 | "04:50 : 1604.99 : 1557.8360000000002\n", 87 | "04:55 : 1629.71 : 1570.95\n", 88 | "05:00 : 1651.03 : 1598.848\n", 89 | "05:05 : 1698.51 : 1643.0439999999999\n", 90 | "05:10 : 1730.26 : 1672.796\n", 91 | "05:15 : 1787.26 : 1707.908\n", 92 | "05:20 : 1823.43 : 1756.0459999999998\n", 93 | "05:25 : 1867.54 : 1788.0500000000002\n", 94 | "05:30 : 1914.2 : 1823.894\n", 95 | "05:35 : 1974.57 : 1881.04\n", 96 | "05:40 : 2042.14 : 1932.21\n", 97 | "05:45 : 2107.9 : 1989.234\n", 98 | "05:50 : 2173.08 : 2043.0859999999998\n", 99 | "05:55 : 2226.36 : 2089.534\n", 100 | "06:00 : 2285.74 : 2135.494\n", 101 | "06:05 : 2382.18 : 2218.402\n", 102 | "06:10 : 2447.89 : 2255.832\n", 103 | "06:15 : 2505.46 : 2326.7699999999995\n", 104 | "06:20 : 2595.27 : 2389.318\n", 105 | "06:25 : 2650.89 : 2461.9579999999996\n", 106 | "06:30 : 2683.31 : 2511.916\n", 107 | "06:35 : 2749.09 : 2569.814\n", 108 | "06:40 : 2786.1 : 2612.006\n", 109 | "06:45 : 2839.64 : 2659.444\n", 110 | "06:50 : 2891.54 : 2708.7160000000003\n", 111 | "06:55 : 2919.91 : 2750.5780000000004\n", 112 | "07:00 : 2940.0 : 2783.6639999999998\n", 113 | "07:05 : 2998.32 : 2814.9219999999996\n", 114 | "07:10 : 3015.39 : 2838.9579999999996\n", 115 | "07:15 : 3033.68 : 2883.59\n", 116 | "07:20 : 3043.32 : 2909.456\n", 117 | "07:25 : 3009.32 : 2926.754\n", 118 | "07:30 : 3071.27 : 
2952.0359999999996\n", 119 | "07:35 : 3083.51 : 2960.852\n", 120 | "07:40 : 3082.28 : 2975.556\n", 121 | "07:45 : 3215.23 : 2973.956\n", 122 | "07:50 : 3199.93 : 2966.928\n", 123 | "07:55 : 3148.06 : 2978.586\n", 124 | "08:00 : 3178.05 : 3009.944\n", 125 | "08:05 : 3182.73 : 3021.932\n", 126 | "08:10 : 3159.38 : 3030.7819999999997\n", 127 | "08:15 : 3196.34 : 3044.0440000000003\n", 128 | "08:20 : 3206.95 : 3048.456\n", 129 | "08:25 : 3227.51 : 3067.788\n", 130 | "08:30 : 3227.41 : 3093.4100000000003\n", 131 | "08:35 : 3232.77 : 3075.5440000000003\n", 132 | "08:40 : 3233.61 : 3100.6319999999996\n", 133 | "08:45 : 3209.52 : 3118.8640000000005\n", 134 | "08:50 : 3208.53 : 3100.958\n", 135 | "08:55 : 3232.71 : 3102.048\n", 136 | "09:00 : 3203.2 : 3115.654\n", 137 | "09:05 : 3230.5 : 3121.46\n", 138 | "09:10 : 3248.35 : 3147.522\n", 139 | "09:15 : 3299.91 : 3167.5660000000003\n", 140 | "09:20 : 3330.86 : 3173.292\n", 141 | "09:25 : 3362.19 : 3207.42\n", 142 | "09:30 : 3373.7 : 3244.8419999999996\n", 143 | "09:35 : 3379.7 : 3245.516\n", 144 | "09:40 : 3414.32 : 3285.0780000000004\n", 145 | "09:45 : 3416.15 : 3278.942\n", 146 | "09:50 : 3422.08 : 3288.7879999999996\n", 147 | "09:55 : 3439.71 : 3327.2039999999997\n", 148 | "10:00 : 3486.23 : 3340.4799999999996\n", 149 | "10:05 : 3464.71 : 3331.4680000000003\n", 150 | "10:10 : 3457.5 : 3334.992\n", 151 | "10:15 : 3464.33 : 3330.224\n", 152 | "10:20 : 3443.77 : 3338.2799999999997\n", 153 | "10:25 : 3484.55 : 3352.6620000000003\n", 154 | "10:30 : 3461.59 : 3361.9979999999996\n", 155 | "10:35 : 3474.82 : 3336.7999999999997\n", 156 | "10:40 : 3427.14 : 3318.4659999999994\n", 157 | "10:45 : 3445.28 : 3313.274\n", 158 | "10:50 : 3467.32 : 3317.21\n", 159 | "10:55 : 3433.9 : 3324.6859999999997\n", 160 | "11:00 : 3402.56 : 3310.312\n", 161 | "11:05 : 3394.07 : 3303.272\n", 162 | "11:10 : 3393.95 : 3291.15\n", 163 | "11:15 : 3406.97 : 3271.5699999999997\n", 164 | "11:20 : 3391.69 : 3273.7\n", 165 | "11:25 : 3385.4 : 3245.072\n", 166 
| "11:30 : 3396.24 : 3254.986\n", 167 | "11:35 : 3363.63 : 3241.154\n", 168 | "11:40 : 3336.78 : 3224.3799999999997\n", 169 | "11:45 : 3357.95 : 3214.804\n", 170 | "11:50 : 3241.56 : 3189.614\n", 171 | "11:55 : 3328.33 : 3195.81\n", 172 | "12:00 : 3315.95 : 3187.9300000000003\n", 173 | "12:05 : 3253.66 : 3163.566\n", 174 | "12:10 : 3253.91 : 3146.078\n", 175 | "12:15 : 3279.05 : 3126.9359999999997\n", 176 | "12:20 : 3249.2 : 3105.582\n", 177 | "12:25 : 3204.56 : 3107.3320000000003\n", 178 | "12:30 : 3228.74 : 3085.962\n", 179 | "12:35 : 3203.81 : 3078.38\n", 180 | "12:40 : 3196.03 : 3056.6860000000006\n", 181 | "12:45 : 3169.14 : 3049.304\n", 182 | "12:50 : 3149.04 : 3026.4379999999996\n", 183 | "12:55 : 3139.79 : 2995.866\n", 184 | "13:00 : 3053.39 : 2947.9320000000002\n", 185 | "13:05 : 2947.18 : 2883.512\n", 186 | "13:10 : 2868.05 : 2839.116\n", 187 | "13:15 : 2866.15 : 2817.0460000000003\n", 188 | "13:20 : 2757.95 : 2794.0699999999997\n", 189 | "13:25 : 2805.64 : 2778.086\n", 190 | "13:30 : 2741.13 : 2767.2819999999997\n", 191 | "13:35 : 2758.78 : 2760.63\n", 192 | "13:40 : 2805.72 : 2754.75\n", 193 | "13:45 : 2800.97 : 2763.9480000000003\n", 194 | "13:50 : 2851.84 : 2779.7799999999997\n", 195 | "13:55 : 2800.64 : 2765.1760000000004\n", 196 | "14:00 : 2877.13 : 2762.696\n", 197 | "14:05 : 2893.24 : 2752.6800000000003\n", 198 | "14:10 : 2906.51 : 2748.344\n", 199 | "14:15 : 2916.48 : 2752.0200000000004\n", 200 | "14:20 : 2913.36 : 2750.502\n", 201 | "14:25 : 2907.14 : 2754.3259999999996\n", 202 | "14:30 : 2903.58 : 2752.592\n", 203 | "14:35 : 2909.1 : 2753.1620000000003\n", 204 | "14:40 : 2888.89 : 2747.4939999999997\n", 205 | "14:45 : 2925.51 : 2742.484\n", 206 | "14:50 : 2899.97 : 2745.776\n", 207 | "14:55 : 2887.11 : 2729.512\n", 208 | "15:00 : 2884.59 : 2728.6639999999998\n", 209 | "15:05 : 2898.23 : 2670.498\n", 210 | "15:10 : 2828.91 : 2706.308\n", 211 | "15:15 : 2847.21 : 2707.9359999999997\n", 212 | "15:20 : 2855.89 : 2697.788\n", 213 | "15:25 : 2855.94 
: 2688.5\n", 214 | "15:30 : 2857.9 : 2697.2200000000003\n", 215 | "15:35 : 2854.82 : 2692.12\n", 216 | "15:40 : 2856.75 : 2652.736\n", 217 | "15:45 : 2811.95 : 2654.748\n", 218 | "15:50 : 2831.98 : 2650.39\n", 219 | "15:55 : 2846.47 : 2641.644\n", 220 | "16:00 : 2845.85 : 2642.42\n", 221 | "16:05 : 2832.46 : 2644.512\n", 222 | "16:10 : 2831.57 : 2648.046\n", 223 | "16:15 : 2829.75 : 2625.902\n", 224 | "16:20 : 2841.21 : 2674.418\n", 225 | "16:25 : 2873.08 : 2679.13\n", 226 | "16:30 : 2881.36 : 2689.114\n", 227 | "16:35 : 2884.01 : 2688.812\n", 228 | "16:40 : 2920.42 : 2686.4840000000004\n", 229 | "16:45 : 2871.89 : 2678.7080000000005\n", 230 | "16:50 : 2866.82 : 2690.2580000000003\n", 231 | "16:55 : 2878.76 : 2700.596\n", 232 | "17:00 : 2921.75 : 2739.24\n", 233 | "17:05 : 2937.99 : 2767.864\n", 234 | "17:10 : 2964.4 : 2773.564\n", 235 | "17:15 : 2981.04 : 2802.672\n", 236 | "17:20 : 2989.76 : 2837.892\n", 237 | "17:25 : 3014.41 : 2851.978\n", 238 | "17:30 : 3026.19 : 2884.4179999999997\n", 239 | "17:35 : 3114.37 : 2940.144\n", 240 | "17:40 : 3145.86 : 2987.1720000000005\n", 241 | "17:45 : 3158.57 : 3024.0640000000003\n", 242 | "17:50 : 3160.41 : 3032.006\n", 243 | "17:55 : 3184.97 : 3048.46\n", 244 | "18:00 : 3176.36 : 3069.846\n", 245 | "18:05 : 3208.53 : 3095.808\n", 246 | "18:10 : 3213.91 : 3082.7079999999996\n", 247 | "18:15 : 3214.25 : 3105.6499999999996\n", 248 | "18:20 : 3214.56 : 3110.4800000000005\n", 249 | "18:25 : 3230.06 : 3110.63\n", 250 | "18:30 : 3213.27 : 3105.05\n", 251 | "18:35 : 3198.3 : 3084.666\n", 252 | "18:40 : 3165.26 : 3067.684\n", 253 | "18:45 : 3185.36 : 3059.512\n", 254 | "18:50 : 3182.07 : 3085.7879999999996\n", 255 | "18:55 : 3125.41 : 3072.2799999999997\n", 256 | "19:00 : 3123.9 : 3070.482\n", 257 | "19:05 : 3066.72 : 3039.574\n", 258 | "19:10 : 3079.7 : 3036.6139999999996\n", 259 | "19:15 : 3077.81 : 3012.38\n", 260 | "19:20 : 3065.05 : 3018.78\n", 261 | "19:25 : 3043.23 : 2994.816\n", 262 | "19:30 : 3038.18 : 2984.768\n", 263 | 
"19:35 : 3025.41 : 2960.858\n", 264 | "19:40 : 3010.04 : 2969.2219999999998\n", 265 | "19:45 : 2971.6 : 2933.004\n", 266 | "19:50 : 2960.5 : 2914.3859999999995\n", 267 | "19:55 : 2964.95 : 2896.8059999999996\n", 268 | "20:00 : 2907.44 : 2869.9580000000005\n", 269 | "20:05 : 2871.93 : 2833.498\n", 270 | "20:10 : 2853.1 : 2822.172\n", 271 | "20:15 : 2817.12 : 2790.1000000000004\n", 272 | "20:20 : 2783.68 : 2775.786\n", 273 | "20:25 : 2781.56 : 2756.9860000000003\n", 274 | "20:30 : 2787.61 : 2697.1980000000003\n", 275 | "20:35 : 2719.76 : 2691.214\n", 276 | "20:40 : 2698.2 : 2682.504\n", 277 | "20:45 : 2644.67 : 2647.334\n", 278 | "20:50 : 2619.65 : 2614.7540000000004\n", 279 | "20:55 : 2614.94 : 2596.4080000000004\n", 280 | "21:00 : 2581.03 : 2562.0699999999997\n", 281 | "21:05 : 2542.24 : 2513.9139999999998\n", 282 | "21:10 : 2486.97 : 2493.142\n", 283 | "21:15 : 2475.59 : 2479.11\n", 284 | "21:20 : 2472.49 : 2474.864\n", 285 | "21:25 : 2462.56 : 2446.546\n", 286 | "21:30 : 2439.05 : 2442.826\n", 287 | "21:35 : 2380.38 : 2427.902\n", 288 | "21:40 : 2385.53 : 2413.764\n", 289 | "21:45 : 2347.06 : 2389.046\n", 290 | "21:50 : 2371.3 : 2376.8419999999996\n", 291 | "21:55 : 2335.07 : 2366.494\n", 292 | "22:00 : 2334.67 : 2339.9139999999998\n", 293 | "22:05 : 2278.79 : 2319.462\n", 294 | "22:10 : 2267.62 : 2286.3160000000003\n", 295 | "22:15 : 2227.62 : 2260.67\n", 296 | "22:20 : 2211.03 : 2241.446\n", 297 | "22:25 : 2190.52 : 2223.752\n", 298 | "22:30 : 2161.86 : 2196.106\n", 299 | "22:35 : 2149.82 : 2175.614\n", 300 | "22:40 : 2148.27 : 2148.674\n", 301 | "22:45 : 2098.53 : 2121.98\n", 302 | "22:50 : 2081.83 : 2108.0260000000003\n", 303 | "22:55 : 2038.77 : 2082.174\n", 304 | "23:00 : 2039.82 : 2068.138\n", 305 | "23:05 : 2012.06 : 2024.132\n", 306 | "23:10 : 1994.64 : 1992.922\n", 307 | "23:15 : 1946.31 : 1981.036\n", 308 | "23:20 : 1943.95 : 1954.3919999999998\n", 309 | "23:25 : 1968.3 : 1938.052\n", 310 | "23:30 : 1952.46 : 1906.3519999999999\n", 311 | "23:35 : 
1918.29 : 1882.592\n", 312 | "23:40 : 1885.93 : 1875.324\n", 313 | "23:45 : 1893.45 : 1856.542\n", 314 | "23:50 : 1858.06 : 1832.7800000000002\n", 315 | "23:55 : 1829.17 : 1773.196\n" 316 | ] 317 | } 318 | ], 319 | "source": [ 320 | "import csv\n", 321 | "\n", 322 | "p = 5\n", 323 | "dict={}\n", 324 | "dates = ['25-11-2017.csv','26-11-2017.csv','27-11-2017.csv','28-11-2017.csv','29-11-2017.csv','30-11-2017.csv']\n", 325 | "for i in range(len(dates)):\n", 326 | " dict[dates[i]] = []\n", 327 | " time = []\n", 328 | " for d in csv.DictReader(open(dates[i]), delimiter=','):\n", 329 | " time.append(d['time'])\n", 330 | " dict[dates[i]].append(float(d['value']))\n", 331 | "n = len(dict[dates[len(dates)-1]])\n", 332 | "#actualload = [load[i] for i in range(len(load))]\n", 333 | "forecast =[0]*n\n", 334 | "for i in range(n):\n", 335 | " for j in range(p):\n", 336 | " forecast[i] += dict[dates[j]][i]/p \n", 337 | "for i in range(n):\n", 338 | " print(time[i],\" : \",dict[dates[len(dates)-1]][i],\" : \",forecast[i])" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": 5, 344 | "metadata": {}, 345 | "outputs": [ 346 | { 347 | "data": { 348 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYUAAAD8CAYAAACYebj1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzs3Xd0VNX68PHvnsmkF9I7Cb2GEHpH\nUQGRInZFUazYvV577z+vr1yv5YoVlSsqNhCVIii9t4SSQBISIL33npn9/nEmkZKQBJKclP1ZK2uS\nfc6ZeQ4k88zuQkqJoiiKogAY9A5AURRFaTtUUlAURVFqqaSgKIqi1FJJQVEURamlkoKiKIpSSyUF\nRVEUpZZKCoqiKEotlRQURVGUWiopKIqiKLVs9A7gXLy8vGRoaKjeYSiKorQre/fuzZZSep/PtW06\nKYSGhrJnzx69w1AURWlXhBAnzvda1XykKIqi1FJJQVEURamlkoKiKIpSq8E+BSGEPbAJsLOe/6OU\n8kUhxJfARKDAeuptUspIIYQA3gWmAaXW8n3W57oVeM56/mtSyq+a82YURWn/qqqqSE5Opry8XO9Q\n2jx7e3uCgoIwmUzN9pyN6WiuACZJKYuFECZgixBilfXY41LKH884/3Kgl/VrJLAQGCmE8ABeBIYB\nEtgrhFghpcxrjhtRFKVjSE5OxsXFhdDQULTPmEpdpJTk5OSQnJxMt27dmu15G2w+kppi648m69e5\nduaZBSy2XrcD6CKE8AemAGullLnWRLAWmHph4SuK0tGUl5fj6empEkIDhBB4eno2e42qUX0KQgij\nECISyER7Y99pPfS6EOKAEOIdIYSdtSwQSDrl8mRrWX3liqIop1EJoXFa4t+pUUlBSmmWUg4GgoAR\nQoiBwNNAX2A44AE8WRNnXU9xjvLTCCHuFkLsEULsycrKakx4inKabfHZHEwuOK2ssLyKw6kF9Vyh\nKEqNJo0+klLmAxuAqVLKNGsTUQXwBTDCeloyEHzKZUFA6jnKz3yNT6SUw6SUw7y9z2tCntKJSSl5\neGkk87/eS0W1ubb8nbWxzPpgK9Gphaw/kqljhEp74Ozs3CzP89JLL/H22283y3O1lgaTghDCWwjR\nxfq9A3ApcMTaT4B1tNGVwCHrJSuAuUIzCiiQUqYBa4DJQgh3IYQ7MNlapijN5nhOKVlFFaTkl/H1\njpPEZxZxMLmAjUezqLZIpr23mXlf7uZYVjEFpVU8u+wg2cUVeoetKG1GY0Yf+QNfCSGMaEnkeynl\nb0KIv4QQ3mjNQpHAfOv5K9GGo8ajDUmdByClzBVCvArstp73ipQyt/luRVFg93HtV6qHtxOv/hbN\nGwaBjUFQUW3B3dFEXmmVdl5iLidyS1my8yQBXRy4/+KeeoattFFSSp544glWrVqFEILnnnuO66+/\nnuLiYmbNmkVeXh5VVVW89tprzJo1C4DXX3+dxYsXExwcjLe3N0OHDtX5LpqmwaQgpTwARNRRPqme\n8yVwfz3HFgGLmhijojTa7sRc3B1N/PbgeD5YH0dWUQU/70sB4KvbR2C2SO74ag9rozPYnpADwK9R\nqSoptFEv/3qY6NTCZn3O/gGuvDhjQKPO/fnnn4mMjCQqKors7GyGDx/OhAkT8Pb2ZtmyZbi6upKd\nnc2oUaOYOXMm+/bt47vvvmP//v1UV1czZMiQjpcUFKW9KK8yszkum2GhHjjYGnl8Sl8AXO1NrD+a\nSVigG0IIhnR1Z11MBkaD4OZRXWubmXr6uOh8B0pbs2XLFm688UaMRiO+vr5MnDiR3bt3c/nll/PM\nM8+wadMmDAYDKSkpZGRksHnzZmbPno2joyMAM2fO1PkOmk4lBaVDkFLy7p9xpBeWs2BM+GnHnr2i\nH89M61c7fG9YqJYUbh8byp3ju7Nk50l+jUrjH5eppNDWNPYTf
UvRGj7OtmTJErKysti7dy8mk4nQ\n0NDa+QLtfTitWvtIadeklCz44yiz/ruVhRuOcfWQIMb29DrtHCEEBsPff6izIwKZNzaURy7tja+r\nPSNCPfjtQGq9bwBK5zVhwgSWLl2K2WwmKyuLTZs2MWLECAoKCvDx8cFkMrF+/XpOnDhRe/6yZcso\nKyujqKiIX3/9Vec7aDpVU1DatcikfN7/K55+/q68PHMAN47o2uA1vq72p30CnREewHPLD3EopZAe\nPk489O1+Rvfw4o5xzbd0gNI+zZ49m+3btxMeHo4Qgrfeegs/Pz/mzJnDjBkzGDZsGIMHD6ZvX62p\ncsiQIVx//fUMHjyYkJAQxo8fr/MdNJ1oy5+Ohg0bJtUmO8q5vLnqCJ9tTmDvc5fh5nh+i4LlllQy\n8a312NoYCOjiwMGUAgwCPr9tOBuPZtHF0cQjl/Zu5siV+sTExNCvXz+9w2g36vr3EkLslVIOO5/n\nU81HSrslpWTN4XRG9/A874QA4OFky7L7x9DTx5mSympenNGfUC8n5n2xmy+3HWfhhmMUV1Q3Y+SK\n0nap5iOlXcosKueBJftJzC7h3ok9Lvj5evq4sPSe0bU/XzM0iE83J1JQWslX20+wLjqDKyPUUl1K\nx6dqCkq7U1Rexc2f7eRQagGvzx7INUODGndhxmHY/Tk0osnUxd7Eo5f15sUZA/BztefXqFSqzRYK\nrJPfFKWjUjUFpd1Zvj+F2Ixivrp9BBN7N3J9rIQN8O1NUFUCXr2hW+M6AA0GwayIAD7bnMjkdzaR\nkF3C8TevOP/gFaWNUzUFpd1ZHplKH1+X+hNC/Do4uePvn/NPwg+3QZeu4OgJOxY26fVuGtEVs0WS\nkF0CQGml6l9QOi5VU1DalaTcUvaeyOOJqX3qPiH+T1hyrfa9R3cwmKAsFyxmuGEJRH0Lm96G9IPg\nF9ao1wzxdGJib282xmZZYyijtLKaiK7uzXFLitKmqJqC0q78EqmtYzQzPODsgxYL/PIAePeDwXO0\nmoGdM7gFw7xV4NkDRt8Pjh7w+z+hLB92fwYL+sLh5ed83bevDefZadqwvwV/HGX2h9tYvP14M9+d\n0la899579OvXjzlz5ugdCpGRkaxcubLVXk/VFJR2Q0rJ8shUhoe6E+TuePYJ6QegKBUueQEG31j3\nkzi4w+TXYfl8+FeIVmbrDL/cDy7+0HVknZd5u9jVzpTec0LbVvylFYeZMsAPX1f7C743pW358MMP\nWbVqVaP2Pq6ursbGpuXeSiMjI9mzZw/Tpk1rsdc4laopKO1GdFoh8ZnFzBpcz9DQuLXaY89Lz/1E\ng2+E29fA2Idhzo9w/04tWSyaAjs+qvcybxdtx9nckkoALBIOpajd3Dqa+fPnk5CQwMyZM1mwYAFX\nXnklgwYNYtSoURw4cADQNs+5++67mTx5MnPnzsVsNvP4448zfPhwBg0axMcff1z7fG+99RZhYWGE\nh4fz1FNPAfDpp58yfPhwwsPDufrqqyktLQXghx9+YODAgYSHhzNhwgQqKyt54YUXWLp0KYMHD2bp\n0qUtfv+qpqC0G5tiswGYOtDv9AOluWDnCkdXQsAQcG7EiKSuo7SvGvdug+X3wuonwTUA+p+9uqWH\nky0GoSWDcT292BKfzZH0Ii7p53sht6Wcy6qntP6f5uQXBpe/We/hjz76iNWrV7N+/XpefvllIiIi\nWL58OX/99Rdz584lMjISgL1797JlyxYcHBz45JNPcHNzY/fu3VRUVDB27FgmT57MkSNHWL58OTt3\n7sTR0ZHcXG2/j6uuuoq77roLgOeee47PP/+cBx98kFdeeYU1a9YQGBhIfn4+tra2vPLKK+zZs4cP\nPvigef8d6qFqCkq7EZmUR4inI17Odn8XlmTDOwNgQR9I3QeDrju/J7d3has/B/9wWPWklmgsltNO\nMRpE7Wv38XMhyN2BI+lFl
FeZeWdtLPmlled7a0obtWXLFm655RYAJk2aRE5ODgUFWu1w5syZODg4\nAPDHH3+wePFiBg8ezMiRI8nJySEuLo5169Yxb9682qW0PTw8ADh06BDjx48nLCyMJUuWcPjwYQDG\njh3LbbfdxqefforZbD4znFahagpKuxGVVMDI7h6nFx77C6pKwTUQRs2HkfPrvrgxTPYw9U344nJ4\nqxsMmQsz3z/tFG8XOzKLKgh2d6CvnytH0gr5YU8S7/4Zh0EIHr601/m/vnK2c3yibw11rQ1XszS2\nk5PTaee9//77TJky5bRzV69eXedS2rfddhvLly8nPDycL7/8kg0bNgBaLWXnzp38/vvvDB48uLZW\n0ppUTUFpF9ILykkvLGdwcJfTD8T/qc09uH8XTHgcLnQt+5AxcPlbEDwSDnwPWbFQmFZ7uKZfIcjd\nkb5+LiRkl/DZlkQAft6frJbf7mAmTJjAkiVLANiwYQNeXl64urqedd6UKVNYuHAhVVXajPfY2FhK\nSkqYPHkyixYtqu0zqGk+Kioqwt/fn6qqqtrnBzh27BgjR47klVdewcvLi6SkJFxcXCgqKmrpW62l\nkoLSLuw/qY34qU0KUmq1hPh10GMSGJrxV3nkPVqNobocPhwJH0+AwlQAvK3NR8EejgwIcMVskZzI\nKeXiPt6cyNHmUCgdx0svvcSePXsYNGgQTz31FF999VWd5915553079+fIUOGMHDgQO655x6qq6uZ\nOnUqM2fOrF1i++233wbg1VdfZeTIkVx22WW1y24DPP7444SFhTFw4EAmTJhAeHg4F198MdHR0a3W\n0ayWzlbajILSKo7nlBDi6UgXR1uqzBYqqy3Ym4zM+u8WMgsr2PzkxdjZGOHEdvhiqnbhtV/CgNnN\nG4yU8PllUJoDxZngOwDmrWLBung+WB/PwZemYG9jYGNsFjZGAxFduxDxylrundiDx6bUM7FOaRS1\ndHbTNPfS2apPQdGVlJJ31sURm17E+qOZVFRbGBrizo/zR/PssoPsSMjlrgndOZRSyAc3RWgJASDm\nVzDawn07tElpzU0ImLtCe41DP8Gyu2HHQm4ZdSdhgW4422l/OqeOPBoY4Mr2hBye/PEAsyICGNPD\nq75nV6z+b2UMheVVvDE7rN1vY9lRqKSgtKjs4gp2JuQyLcyvzj/69Uczee/POALc7JkZHoCrg4nP\ntySyIiqVn/alYLZI3lwZQ1igG1eE+WsXSQlHf4duE1smIdSwtU6QG3QdRC+Htc/jY+vI5GG313n6\nqO6efLwpgb0n8ohJL+SX+8eqN7ozSCnZdzKP49mlJGaX8PGmBAAGBroxZ2SIztEpoJKC0sI+2ZTA\nJ5sSePOqMG44Y6vM0spq/rXqKCGejqx7dCImo4GySjPL9qfw6PdRALjY21BUXs3No7r+/QabGQN5\nx7XJZ61BCLj6M/h+Lvz+GPSaDG5nL9c9srsHH29KwGgQHEguYGdiLqO6e7ZOjO2AlJK7Fu9lXUxG\nbdmo7h7YGAy89lsMY3p40c3LqfZclVAb1hLN/6qjWWlRldXaWP9Xfos+bS+CarOF277YTVxmES9M\n74/JqP0qOtga+eCmCGZHBPLC9P5cPywYDydbZpy61tHR37XH3pe32n1g6wTT3wGkNo/hr9e1tZNO\nMTzUAy9nW16eOQAvZzvuWryHzXFZrRdjG3copZB1MRncPaE7Gx+/iO1PT2LJnaN4+9pwbG0MPPmj\nNlvY3t6enJwcNZKrAVJKcnJysLdv3mVWVE1BaVF51gldpZVm9iflMaKbB/tO5GMQsCsxl1evHHjW\njOAxPbxq2+OrzBYemNQTR9tTflWPrITAoeDq32r3AWgL7PWbqTUlHfkNDiyFezZqS2Sgbcyz+9lL\nEUIwoZc3t32xi1d/i2bNIxM69adei0WyZOcJdiTkYmMQ3HdRD7o42tYe93Oz58FJPXnt9xj
WRmdw\n4GQ5k4KryMpSCbUh9vb2BAU1cpOpRlJJQWlROcWV9PJxJj6rmJ/3pfDI0kjyS6sIcLPH1mhgdgNb\nXJqMhtPeQMhN0GYuT3q+hSOvx9Q3teYjF1/4+mrYswjG/7P2cM2bf1dPR+6Z2J0nfzrI7uNaMuys\nNsZm8fwv2ozdSX19Tv//tLpikD+v/R7DvV/vpdoieR/Y8fQl+LmpxQZbm2o+UlpUTkklIZ6O9PJx\nZkVUKqUVZvr6uZBaUM7I7h61o3ga5fgWWDgWjHbNPwS1sVz9IWKOtuhej0tg58dQXVHnqTPDA3Gx\nt2GRdXJbZ1RWaebjTcfwc7Xn6iFB3H9xzzrP83dzYFiIO9UWWbu96kG12KAuVFJQWlRuSQUeTra1\nk84u7e/DPydr4/gv69+EheQsZlj5BDh5w33bW3bUUWONewSKM2D7f+s87GBr5Pax3Vh9OJ19Jzvf\npLbNcVkMeXUtOxJyuWNcNxZcF87QkPo3Jrp7QnemD/LnxRn9EUKtQKsXlRSUFiOlJLekEg8nOwYH\na28GsyOCuLSfD1/OG84Nw7s28AynOPgjZB6GS19qGwkBoNsE6DsdNv0/yE+q85S7J3TH28WOR5dG\nkmjdzrMzSM4r5b6v9xHi6cinc4dxx7iG9yWYPMCPD24agou9iR7ezhxOVUlBDyopKC2mqKKaKrPE\ny9mW2RGBLLg2nEv6+iCE4KI+PtjaNPLXT0rY8V/w7qtfs1F9pv6fFt+ap+s87GRnw0c3D6GgrIon\nfoxq5eD088OeZIorq/nklmFc1t8Xg6FpHe0DA1w5lFLYQtEp56KSgtJicoq1kUceTrY42Bq5emhQ\n494cKkuhovjvnxPWQ1oUjLjrwhe8a25dusKEx7QZ1sfW13nK0BAPpoX5E5dZXOfxjkZKya9RqYzq\n5klXzzp2yGuEgYFupBeWc+/Xeyksr2r4AqXZqKSgtJjcEq0D1sPp7NEm9TJXw6LJ8OnFUFUOW9+F\n/10Fzn4w6IYWivQCjXlQ2wf6r1ehvECrOZwh2MOR/NIqijrBG9zh1EISskuYNbiOfbRrSAkJG2HX\np5Cw4ax/s2lh/kzq68OqQ+lsP5bTsgErp1FJQWkxNTUFTye7Bs48xa6PtZ22smPhm2th7QvQb4bW\nuWzn3EKRXiAbO5j4BKTshTe7aiOSzhBs3VM6Oa+staNrdTUdxDV7Wp9FSvj2Blg8E1Y+Botnwfe3\nnJYYAro48N+bhmAQWpJRWo+ap6C0mJq9jD2dG1FTWP0M+A2ETW9rS2G7BsCBH6DnZXDVJ2ByaOFo\nL1D4TVCSpTUjbXpLG7Zq51J7OMhdiz8pt5R+/mevx9+RJOeVYTQI/M+cY2CugkVTwdkXYlfDuEe1\nTZF2fQybF0DsGuhjXflWShxsjVqHsxqF1KpUUlBaxLe7TvL0z9reug02H6Xs0zqSa4z/J4SOg5kf\ntL0+hPoYbbS4u18En06CH+bB7I/BSVv7KNhDqykkdYKaQnJeKf5u9tgYz2iIiFkBKdal8J19YeKT\n2m53Fz0Nh5drtcIekyB2lbY385zvGRDgytZjOayISsXJ1sjE3t5nP6/SrNS/rtLsVh1M4+mfD9LT\nx5lrhgZhbzKe+4KdH4PJSZuU5jcIQsZq5e0lIZwqcChMexsSN8GvD9UWuzuacLQ1kpxXqmNwrSM5\nr6y2ZnSanZ+AezcY/YC2jpTJWpMwmmDKG5B9FLb8W6stFqXCkmu5tfxrCoqKeejb/dzx1R7+8X0U\nFotaE6klqZqC0uy+2XWSbl5O/P7QuL/3P6hPzjFtv4Jht0P/WdrktPaYDE414i4ozYUNb0DqfgiI\nQAhBsLsjSbmdoaZQxrheZ/QnHN8KSTu0ZUJG3Xv2RX2mQti1sOH/tJ+H3QFpkUQc/4wZBhss4TcS\n6unEO+tiGdq1C7eNbXjeg3J+VFJQmt3R9CLG9/JuOCG
U5cPKx7WO2vH/1NYT6ihGzYedC+Gb6+Hq\nz6HbeII9HDp8TaGi2kxGUfnpNQUp4a/XtBFkQ2+r/+KZ74OdK5zcAZe9ArZOyPcG87LjYRyvDUcI\n2JmYwwfr47luePDpiyQqzUY1HynNoqC0iju/2sM/lkaSWVRBXz+Xc1+Qfgj+MwiO/QkXP9uxEgKA\nvRvc+qu25Pby+8Bchb+bA2kF5XpH1qLS8suREoLcT5mfEPUdnNwGEx8/94ABkwNM/zfct00baSYE\nIuxanFO3Yjj4PUJK/nFZb7KLK7nnf3uJ7yTzPlpbg0lBCGEvhNglhIgSQhwWQrxsLe8mhNgphIgT\nQiwVQthay+2sP8dbj4ee8lxPW8uPCiGmtNRNKa2r2mxh7qKdrIvJYNn+FAD6+p8jKWTHa0MSbZ3g\nrvUw+r5WirSV+YVpzSUFJ+HA9/i62lFQVkV5lVnvyFpMzZDb2ppC4iZt/4muo2HovKY/4ZC54OKv\nbYe66xOGh3rwzLS+RCXlM+ezHaQVdPzmuNbWmJpCBTBJShkODAamCiFGAf8C3pFS9gLygDus598B\n5EkpewLvWM9DCNEfuAEYAEwFPhRCNNC+oLQHX247TlRyAbeO/ns7xT711RSS91gnppXCTd9B4JBW\nilInvSaDbxjsWIivq9axmlHYMWsLVWYLX2xNRAjo7uUEmUe0OQjO3jD7IzCcx597l67wyCEtqWz/\nADa+xd0DBUvvGU1JhZl7v95HldnS/DfTiTWYFKSmpp5msn5JYBLwo7X8K+BK6/ezrD9jPX6J0BaZ\nnwV8J6WskFImAvHAiGa5C0U3Kfll/HttLJf09eGFGQPwcrbF08kWb+c6JqwVJMOSa8DRA+7eAP7h\nrR1u6xNC+7SbcZDulhMApHfQJqRPNiXw55FMXpk1EB9Xe20IqpRw2+/gHnr+T2wwwJiHoCAJ1r8O\na56ln78rb14dRmRSPgs3HGu2e1Aa2acghDAKISKBTGAtcAzIl1JWW09JBmp2SwkEkgCsxwsAz1PL\n67hGaadeXnEYKeHlWQMwGgQPX9KLeWND695pbP/XWufynB+1T4CdxcCrQBjplqZtI5pRVPf+C+1Z\nRbWZL7cdZ0Jvb24ZZa0xxq7Whui6+F34C/SeqnVER9wCR1dC1lGmDwpgYm9vvt+TpLbubEaNSgpS\nSrOUcjAQhPbpvl9dp1kf6xpPKM9RfhohxN1CiD1CiD1qO762rbSymrUxGcwdE1LbsXjL6FAemNTr\n7JOlhIM/aJPSvOo43pE5eUGPi3E7vhqAjA5WU5BS8tGGBLKKKrhrvHWoaFGGtuxHzQzlC2UwaDWu\nS18CkyN8fysUpjJ5gC/JeWUcy1Kdzs2lSaOPpJT5wAZgFNBFCFEzJiwISLV+nwwEA1iPuwG5p5bX\ncc2pr/GJlHKYlHKYt7d3U8JTWtmR9CKkhKFd69k4pTgT9v0PknbBia2QE6+NRe+MQsdhzEsg0FTc\n4foU3lkXxzvrYpk6wI9xNesdJW7SHnte1rwv5uQFNy2F/JPw+2Nc1McHgA1H1QfI5tLgQF8hhDdQ\nJaXMF0I4AJeidR6vB64BvgNuBX6xXrLC+vN26/G/pJRSCLEC+EYI8W8gAOgF7Grm+1FaUbR1obL+\nAXWs5VNVBouv1DbGAW22sltXGHDl2ed2BsGjALjYMYH0wo5TU4pKyueDv+K4KiKQt68N/7vZ8OR2\nsHXRRmA1t24TYMwDsPFfBNrcy+NdvPlmlxO9fV1wsbchor4PKUqjNKam4A+sF0IcAHYDa6WUvwFP\nAo8KIeLR+gw+t57/OeBpLX8UeApASnkY+B6IBlYD90spO+7YvE7gcGohbg4mArucMfa8LB+W3qIl\nhGu+gEnPg/8gmLtcG7/fGQVEgNGWETZxZBZ2nD6Ff60+gpezHS/NGnD6Xhknd0DwiPMbcdQYI+4G\nG3s4vIy7WEZKXgl
zF+1izmc7Vf/CBWqwpiClPABE1FGeQB2jh6SU5UCdbQRSyteB15septIWRacV\n0t/f9exO5V8f0jbGmf6O1skK2kY0nZnJHvwHE5ZzhPQO0nwUn1nEtmM5PD6lD672pr8PlOVBZnTL\n7pLn5AU3/wTH/sJ28wKWX+nA0zvtiEzKJ7Wg/OwPKkqjqRnNynkpKKviSFrh2U1HeccheoU2hHDY\n7brE1mYFDiWw4hjpBSWYO8Cibp9vScTWaOCG4cGnH0jaDUjoOrJlAwgdpy2uJ4z0K9jK89P7A6il\nti+QSgrKeflwfTyVZgtXDTljVPGOhVqTwYi79AmsLfPph62lHF9LJqn5zTcTN62gjP+35giV1S03\niaukovq0RLb9WA7f7krippFd8TxzTkrqfkBoTWYtzdEDuo2H/V/Tz0OqTXmagUoKSpPlFFfwxbbj\nzI4IZEDAKX0E6Qe17RUH36RtkqOczkf7JNtHJHMip2kL45ktknfXxfHgt/tZdTCttt3cYpEs3n6C\n/64/xm8HzhrM1yzKq8xMeGs9n2xKqC1b8MdRgj0ceGJqn7MvSIsEz56nbTLUoi55AYozcNz6/+ju\n7czhVFVTuBBqmUGlyZbtT6Gy2sK9E3ucfmDVU9ont0tf1iewts5bewPtLZI5nlNy9vLS9ZBS8tgP\nUSzbn4K7o4lfo1Lp6+eCvclIan4ZDrZaZ+4XW7VELYRASln3BMLzsP1YDjkllWyKzeLei3pQXFHN\n/qR85k/sXvdKpWlREDKmWV67UQKHak2VOz9ickgEP6V4NOv9dzaqpqA0iZSS73YnMaRrF3r5nvJJ\nMO0AnNgCYx/REoNyNntXpFsQ/YzJnMgpafRl7/8Vz7L9KTx6WW/2PHcZb10zCHuTkYKyKrKLKziR\nU0pfPxcOphTw495k9p/MI/zlP9gWn90sYa+NyQAgMimfarOF3cdzMVskY3rUkdSKs6AwBfwHN8tr\nN9olz4ODB7cVLCSjsIKjGUWt+/odiEoKSpPEZxYTn1nM1UODTj+w62NtpmnEzfoE1k4In/4MsGl8\n89Ge47n8Z10ssyMCeXBST4wGwXXDgll+/1jWP3YR11j/Hz64KYKxPT156ueDXP/JDgrLq9kUd2FJ\nwWKRLN+fwppD6bjY2VBWZeZIehHbj+VgazQwNKSO+QBpkdpja69r5eAOEx7HJ28fYYYE/jic0bqv\n34GopKA0SWRSPgAju51SG7BY4Mjv2s5pDl10iqyd8BtEiCWJ9OzGvWE/t/wQAV0cePXKgXU2h7w4\nYwDf3jWKnj4uLLx5KPPGhDI8VHuzTrqADX0qqs3M+3I3jyzV3uRfmz0QgJ/3pfBLZApDQ9zr3mY1\nbq02f6A1OpnPNPhGMDnykOuv2lIqAAAgAElEQVQm1karpHC+VFJQmiQqOR9nOxu6ezn/XZgZrY1N\n7zZRv8Dai9CxGLHQJXsfk9/ZSPU5ln0uq9Q+mV8/LBhnu7q7/5zsbBjdwxMAV3sTz03vz5I7RzGp\nrw/HztiEJrekkrJKM1LKBid4rYvOZGNsFs9d0Y89z13KzPAABgW5sWhrIiUVZp69oo7lzyxmiF4O\nvS7TNslpbfZuMPBqLqrcRGxKFlubqfmss1EdzUqTHEguICzQ7fTZq8e3aI+hY/UJqj0JHok02DDX\nP5k7U4rJKq7A3+3siVbf7jqJn3X/hVAvpya/TE8fZ7bEZ3MopYDHfojieE4J5VUWbI0GbG0MjO3p\nyce3DAO0fRBsDOK0msiKqBR8XOyYN7Zbbfn394zm9wNp9PZ1YWBgHTPTj2+G4gwYcFWT4202/a/E\ntP9/zHSL56UVHqx+ZAJGg+pwbgqVFJRGq6g2E5NWyB3jup9+4MQWbSnszrQc9vmydUIEDGFYWTRw\nBekF5WclhaTcUp7++SDBHlp5t/NICj28naistjD9/S34uNhx88gQfF3tySquYPuxH
NYfyaK8ysy7\nf8axcMMxgj0c+ObOUQR7OJJfWsn6o1nMGdn1tDdUe5Px7L6kGuWF8Ps/wckHeuu4qWK38WDrzL1+\nsUw62p99J/MYHqoGPjSFSgpKo8WkFVFllgwOPuVTYnkhJGyE/jP1C6y96T4Rt83/JkhkklHHOkj7\nrf02SbnaBLfzrSnU+GH+aEI8/36OP2MyuOOrPby56ghfbjvO5P6+bE/I4ebPdzKprw+RSflYLLK2\nE7tRtv4HchP+3pdaLzZ20ONiQk9uwNM4hTWH0lVSaCLVp6A0WpT1zWpQ0Cmdyfu+gopCtaRFUwy7\nHQxG7jOuILPo7HWQ9p/Mq/3ey9mu3v6Ec6kZLjxvbOhpCQFgWKgHBqFtozogwJUPbhrCxzcPxcFk\n5PvdScSkFfLO9YNPn5jYkJhftdVLQ8c1OdZmN3I+hrI8ljov4I9DKWqBvCZSNQWl0aKS8vF2scPf\nTWvrxmKGHR9B6HhtApHSOK4ByIhbuGb3lyzKOkF5VXDtSJ6Simr2n8zHIMAioZuX4/m9hL2JQy9P\nwcn27BFCbg4m+ge4ciilkOen98fWxsCYnl6sfmQCUkqqLRKTsQmfF7NiITtWW7m0LQgdB9P/Tc8V\nDxJYuZ/otBFNS3CdnKopKI0WlZxPeFCX09fML0yGobfpGld7ZBj3CAYBXlEfMejlP0jNLyMpt5Sw\nl9YQmZTPlAHaFpahnuffFONsZ1PvrN67xnfn/ot7MKq752nlQoimJQSAI79pj30uP58wW0bYtVhM\nTsw0bmPNoXS9o2lXVFJQGqWwvIpjWSWEB53yievQT9qEtbb0ZtBedOnKX3aXMqN6Lf7mVA4kF7A1\nPhuLhAA3e+6/uCczwgO4PKwZ9jeuw6zBgTw+pe+FP5GUEPUdBI0Atyb0QbQ0kwOGftOZYbOb9YdO\nUl5lZv3RzA6xOm1LU0lBaZT9J7X+hPBga3+CuQqif9ESgp4di+3YOt95lGPi36aFxKflsvt4Hh5O\ntmx9YiID4z/h/Ul2TOrrq3eY55ayF7KPQsQcvSM525BbcZbFzMn9gL7Pr2beF7tZG61qDQ1RSUFp\nlL9iMrA3GRhRM5M5cSOU5ug7Jr2ds/MI5rmq2xlqiGNC5D+JOZ7EsBB3xNb/wPrX4JvroSBZ7zDr\nl7IXfrkfbBza5u9B6Fiqx/6TG2w28FQ3bYXXI+lqTaSGqKSgNEhKyZ9HMhnX0+vvpQ0OLQM7V+h5\nqb7BtWO+rnb8ahnD69zOwJLtLC65l4cqP4MN/wchY6EoDd4ZAD/fA9WVf19YXgg5x6CiCKp02sVN\nSlh+H5QXwLVfgH0d+3S3ATaTngavPswv/4ye7jbEnTHLWzmbGn2kNCgus5jkvDLuv7inVlBdqQ1B\n7Dtd22ZSOS/DQz3o7+9KVbc7mbW9O8+b/seI5G+hxyS49ksoTIPIr2Hb+5C8Cww2UJqrJQOzdX6D\njYPW0T/oOijJ1pK0oRU+66XshawjMOPdtt2nZDTBpS/BdzdyhU88azLVNp0NUUlBadC+E9q4+THW\nNXZIi4SKAugzVceo2r+R3T1Z+fB4ftiTxJeyO4v7LGTEJJO2GY/RRlvLZ/Jr0HW0tqOdMEDXUWDr\nDD79rHshH4Fdn8DOhdqTXv4WjLyn5YPf/z9tkEFbbDY6k3X5lQjbJBamdKfabMGmqSOsOhGVFJQG\nxWYU42AyEuxuHTN/Ypv22LUVN1LpwKaF+ZNZVMG8saFQ16Y1fa/Qvupz0VPa+lOHfoS1L2grlc74\nT8uNBqpZFbfP5W222eg09m7QpSvdLYlUmseTlFd2XkuHdBYqXSoNisssoqeP89+L4J3YBp69wNlb\n38A6CCc7G+6/uGfdu5g1hnuINvrnyo+gzzSIXwtHVzVvkKdKj4KSLOg1ueVeo7n5huFTEgdAtNrD\n+ZxUUlAaFJtRRC9f61o6Fgsk7YCQ0foGpZzNx
ReuWaRtOJN+ADb8C76bo32qb05xa7XHHpc07/O2\nJL8w7AoTCXUVfL4lQS19cQ6q+Ug5p4KyKjIKK+hds/VmZrQ24iRELZPdJgkBfmFw+Bet38feTUsK\nk1/T+iCEAUbde/5bppZkQ9S32iY67amm6DcQIS38X594btzdnS3x2Yzv1Y7ib0UqKSjnFGfd67Z3\nTU3h5HbtsauqKbRZfoMgcRMII9y3A1Y+Dn88az0otJFDt/zc8POk7IVl87UPAZ69tJ3Ntv8XClO1\nUUftSeh48OrN6IPP867tOLbGBKikUA/VfKSc0z7rip19/awdiie2gWug2juhLfMbpD2GjAHXALhu\nMVz6Mly/BMY+pE08rDjHJC5zNax+Br64QpsH0WsylGRqE9VyE+Gm77UVUdsThy5w7za46BlmGbYQ\nHLtY74jaLFVTUM7p9wNphAW6EdDFQZuwdGKbtgplPQutKW1A4BDtse907dFghHGPaN/bu8HWd7Wa\nRH0jmuL+gB3/hQGzYeqb4OKnLWuyZxEEDIHg4S1/Dy3BaIKLniRp9y8MLtqA2SLVrmx1UDUFpV4n\nc0qJSi5g+iB/rSAvEYrTVSdzW+fVC+atguF3nH0seKQ2z2HjW/DX65B3/OxzknaAwaSNZnKxLshn\nNGnzH9prQjhFfsjlDBCJnIiP1juUNknVFJR6rY3JALRx9ACcsPYnqE7mti+knjkkNrbQbwYc/FEb\nobTtPRj/GFiqtf6inHiwsYeAwR12trrLkKsh+m2K9v0EvQfoHU6bo5KCUq+Dyfn4udoT7GGdtHZy\nmzbc0auPvoEpF+bKhTDzA63W9/M92uJ7CPDuq80/MFe27aUrLlDX7v04IrrjmLASeEHvcNoc1Xyk\n1OtgSgEDA0/ZP+HEdm3UUWusraO0HCG0ZTTcguC23+DRI/BsGty/4+8Nk4JH6hpiSzIYBPmhl9Or\nMobY2KN6h9PmqL9upU7FFdUkZJcQVpMUijIg95gaitrRCAGu/mCyLhQ38UkY81CHX/22/yW3ALDs\n6/dYEZWqczRti0oKSp2iUwuREgYG1gxF3aI9qv6Ejs3JCya/Crbntzd0e+Ea1I/SgFHMN/7C4j/3\n6x1Om6KSglKnQykFAH/XFBI2gp2b1gGpKB2A48wFuFDKZblLSMhS+yzUUElBqdOJnBJc7GzwcbWO\nQEncqM1PMBj1DUxRmovfQCq6XcYM43ZWHUzTO5o2QyUFpU7JeWUEulvbmfOOa1/dJ+oZkqI0O4dB\nVxIgcjl+cIveobQZKikodUrJLyOoJimc3KE9qv4EpaPpMxWzMNI960/ySiobPr8TUElBOYuUkuS8\nMoJqNtVJP6hNaPLuq29gitLcHNwp9R3GaMNhNsdn6x1Nm6CSgnKWwrJqiiuq/64ppEX9vUWkonQw\nTj3HMcBwgq0xJ/UOpU1oMCkIIYKFEOuFEDFCiMNCiIet5S8JIVKEEJHWr2mnXPO0ECJeCHFUCDHl\nlPKp1rJ4IcRTLXNLyoVKzi8FILBmEbz0A+A/SOeoFKVlGLqOxISZiuO79Q6lTWjMR79q4J9Syn1C\nCBdgrxDCuvUS70gp3z71ZCFEf+AGYAAQAKwTQvS2Hv4vcBmQDOwWQqyQUqpVqdqY5LwyAK35KP+k\ntp6+n0oKSgcVpC3yF1h0gMLyKlztTToHpK8GawpSyjQp5T7r90VADBB4jktmAd9JKSuklIlAPDDC\n+hUvpUyQUlYC31nPVdqYFGtSCHR30GoJAP7hOkakKC3I0YMS155cZIzkcHKB3tHorkl9CkKIUCAC\n2GktekAIcUAIsUgI4W4tCwSSTrks2VpWX7nSxpzMLcXJ1oi7o0nbfctgAl+1mqTSgQ2bx3BDLHkH\n1+gdie4anRSEEM7AT8AjUspCYCHQAxgMpAELak6t43J5jvIzX+duIcQeIcSerKysxoanNKPo1EL6\n+rsihICk3
dqevzVr4yhKB+Q05i5ShQ8RMf+Cis49u7lRSUEIYUJLCEuklD8DSCkzpJRmKaUF+BSt\neQi0GkDwKZcHAannKD+NlPITKeUwKeUwb2+1h2prs1gkh1MLGBDgqm3LmLoPgkc0fKGitGc2dvwe\n8jQ+FSdJ+2gWxYk7G76mg2rM6CMBfA7ESCn/fUq5/ymnzQYOWb9fAdwghLATQnQDegG7gN1ALyFE\nNyGELVpn9IrmuQ2luZzILaWk0qwlhczDUFVa2xGnKB3ZzTfdyqIuD+KQGwNfzSLqWFLDF3VAjakp\njAVuASadMfz0LSHEQSHEAeBi4B8AUsrDwPdANLAauN9ao6gGHgDWoHVWf289V2lDDqdqHW0DAty0\n/gSAoGE6RqQorcPB1sjtD79CyrSvcKaMn//3AQVlVXqH1eoaHJIqpdxC3f0BK89xzevA63WUrzzX\ndYr+DqcWYmMQ9PJ1hgNHtf18u4ToHZaitAqjQTBgxKWU7+jFTTm/sWbtOK6bOUPvsFqVmtGsnOZo\nehE9fZyxszFq+/V69tA2YlGUzkII7C95hm7GTK7eN5fqhM16R9SqVFJQThObUUQvXxfth+w48Oyl\nb0CKooeBV7F68gaOW3zhx9u1nQc7CZUUlFolFdUk55XR28cZqsq12cyePfUOS1F00btbMPdVPQwV\nhfDTHWAx6x1Sq1BJQakVn6mNz+7l6wK5CYAEL1VTUDqnHt7OJBpC+SP4ETi+GY52ju5QlRSUWrEZ\nRQD09nWGnDitUNUUlE7KZDTQ28+Z76onaoMttn2gd0itQiUFpVZcZjG2NgZCPJ20/gRQSUHp1Pr7\nu7IpPo+fbWdA0g44+KPeIbU4lRSUWglZJXTzdMJoEJAZDW5dwc5Z77AURTfDQjwAePrkMHK8hmFZ\nNh9zzO86R9WyVFJQaqXmn7Ivc0a0WgRP6fSuHhrE+scuwtbekWvzH+JAdVfE97f8vUVtB6SSglIr\nraAMfzd7qK6A7Fjw7a93SIqiK6NB0M3LiQm9vEkotmFO5TOUGpxge8ftX1BJQQGgrNJMXmkVAV0c\ntIQgzdoWnIqicFEfbXFOZ9cufG++CHlkJRSk6BxVy1BJQQEgtUDbWCegi73WdASq+UhRrK6MCOTD\nOUN4eeYAFlVMwiIlmWve0jusFqGSggJAWn45AP5uDlons8GkRh4pipXJaGBamD+T+voyY+JofjRP\nxDPma8g7rndozU4lBQXQOpkBAmuajzx7grFz71WrKGeytTHw5NS+/OZ5G1ICuz7VO6Rmp5KCAmjN\nR0KAr6u9lhTUTGZFqVdAUHe2MQgZ/Qtadug4VFJQAK35yMvZDluqITcRvHrrHZKitFkDg9z4pXIE\noiAJUvbpHU6zUklBASA5v1QbeZSXqI08UklBUeo1MMCVtZahmA0mOLBU73CalUoKClJKjqQV0cfX\n+e/lLbxUJ7Oi1Kefvyu2zu6sZQyWyCVQXqh3SM1GJQWFzKIKckoq6e/vqvUngNpHQVHOwd5k5Kvb\nR/BZ1WQMlcWw/396h9RsVFJQiE7TPuX083eF9APgGgj2rjpHpSht24AAN7r0HMkewyDY+BaU5Ogd\nUrNQSUEhOtWaFHwd4Nhf0ONinSNSlPZhQm9vni67GVlZDH++rHc4zUIlBYXotEKC3B1wzdoH5QXQ\na4reISlKuzChlzdxMojo4Bth32JI2at3SBdMJQWF6NRCrT8hdo02k1nVFBSlUUI8Henv78qDqVOw\nOHnDupf0DumCqaTQyRWUVpGYXUJ4cBdIiwL/QWDnondYitIuCCF48+owTpQY+dNlFiRugpxjeod1\nQVRS6OQOpOQDMCjITVvHxaO7vgEpSjszKKgLt4wK4fmTEUhhhL1f6h3SBVFJoZM7kFwAwCA/JyhI\nBvdQfQNSlHbowUk9KTZ5Eek8HvZ8ASXZeod03lRS6OSikvIJ9XTErSpdm8m
skoKiNJmnsx3XDgvi\n6dwZyKoS2LxA75DOm0oKndzh1ELCgrr8vQSwSgqKcl6uHx7MEbM/8f7TtSaksny9QzovKil0YsUV\n1aTkl2nLW6ikoCgXpK+fK+HBXXgpYxxUlULUd3qHdF5UUujE4jOLAejl66IlBaMtuPjrG5SitGML\nrh1EtOxGpKUHqes+aJfLaquk0InFZhQB0LsmKXTpCgajvkEpSjvW08eFZfeNJdL3GgKqkyiJXa93\nSE2mkkInFpdRhK2Nga4ejpAaCd599Q5JUdq9UC8nek+aS750onTLx3qH02QqKXRicZnF9PB2xliU\nCvknIGSs3iEpSocQ1s2Xb82T8E5aDUdX6x1Ok6ik0InFZRTT29cZTm7XCkJG6xuQonQQLvYmVnrc\nSqKpFyy7u13tt6CSQidVM/Kot68LnNgGts7gG6Z3WIrSYYR18+PJ8tu0RSbb0UgklRQ6qZqRRz19\nrDWF4BFgtNE5KkXpOO4e350D9OCYbV/Y9TGYq/UOqVFUUuikakYe9XWrhsxo6DpG54gUpWMJ9XLi\n3ok9+Vfx5ZATD+te1DukRlFJoZOqGXkUVHRAK1D9CYrS7C7t78MfluEcC70Btn/QLlZQVUmhk6od\neZS0XdtDIXCo3iEpSofTz8+VLo4mfrKZrhUc36xvQI2gkkInddrIo8AhYHLQOyRF6XAMBsGobp58\nn2hHia0XlsQteofUoAaTghAiWAixXggRI4Q4LIR42FruIYRYK4SIsz66W8uFEOI9IUS8EOKAEGLI\nKc91q/X8OCHErS13W8q51Iw86uttD2kHIGi43iEpSoc1daAf2SVV/FnWi6pjm9r80heNqSlUA/+U\nUvYDRgH3CyH6A08Bf0opewF/Wn8GuBzoZf26G1gIWhIBXgRGAiOAF2sSidK6akYehdulgrlCqyko\nitIirowI5MirUzlkGoRdWQZkx+od0jk1mBSklGlSyn3W74uAGCAQmAV8ZT3tK+BK6/ezgMVSswPo\nIoTwB6YAa6WUuVLKPGAtMLVZ70ZplNo1j6rjtYIAlRQUpSXZm4y4DZ6ORQoKdrXtOQtN6lMQQoQC\nEcBOwFdKmQZa4gB8rKcFAkmnXJZsLauvXGllcRlF2NkY8Cg4BA7uarlsRWkFV00czi4GULb/O6TF\nonc49Wp0UhBCOAM/AY9IKc81Z1vUUSbPUX7m69wthNgjhNiTlZXV2PCUJqgZeWRI2w8BESDq+q9R\nFKU5+bs5UNX/GvyqU0nY8FXDF+ikUUlBCGFCSwhLpJQ/W4szrM1CWB8zreXJQPAplwcBqecoP42U\n8hMp5TAp5TBvb++m3IvSSHEZxfTzNkFmjJYUFEVpFWGX38EuSx9CNz8GaVF6h1Onxow+EsDnQIyU\n8t+nHFoB1IwguhX45ZTyudZRSKOAAmvz0hpgshDC3drBPNlaprSiovIqUvLLGOWUDpZq8B+sd0iK\n0ml0cXXlDbcXqcKEZfuHLPjjaG0fX1vRmJrCWOAWYJIQItL6NQ14E7hMCBEHXGb9GWAlkADEA58C\n9wFIKXOBV4Hd1q9XrGVKKzqarv0ChokErSBAJQVFaU19QoL5RU5AHvqZr//ax/yv91JWadY7rFoN\nroAmpdxC3f0BAJfUcb4E7q/nuRYBi5oSoNK8YtK07qDgilhw9AS34AauUBSlOQ0NdefTvZdwrd0f\nPO2wnCey5rJoayL3X9xT79AANaO504lOK8LNwYRj9kGt6Uh1MitKqxrfy4tcx+58WT2F6+RqbvZP\n4deos7pXdaOSQicTk1bIID9bRGaMajpSFB34uzmw+9lLmfbIQqSDO/Ps/uJIehHHsor1Dg1QSaFT\nMVskR9OLmOiWCdKsRh4pik4MBoGflwei30y65WzEngpWHkjTOyxAJYVOJT6zmLIqM0NMJ7QCNfJI\nUfQVdg2GqlLu8TnC7wdVUlBa2a7EHAB
6m49ZO5mDdI5IUTq5kLHgHsocsYoj6YX8b8cJ4nQeoqqS\nQieyMzEXP1d7nHIOqE5mRWkLDEYY/QA+BQcYazjE88sP8dTPB/UNSddXV1qNlJJdibmMC3VCZB5R\n/QmK0lZE3AyuQXxlt4B/+B1g38k8sooqdAtHJYVO4nhOKZlFFVzqmWXtZFb9CYrSJpgc4O712AQO\n5oHShbjLQv6MydAtHJUUOoma/oShNse1AtXJrChth7MPzHwfQ3UJzzkvZ+WhdN1CUUmhk9iZmIuH\nky1eRTGqk1lR2iKfvoiIm5lp/osjcXGkF5TrEoZKCp3ErsRcRoR6INKi1HLZitJWjXkIo6xirnEN\nP+1L1iUElRQ6gZT8MpLzyhgT4qAtl62ajhSlbfLsgeg2gSvsD7F8f4ouIXTIpCCl5LtdJykoq9I7\nlDZhw1Ftq4tJdke1TuaQMTpHpChKvQIGE2JJ4ts79Nkmt0MmhYTsEp7/5RAPfbsfs+Wszd06nXXR\nGXT1cCQw/S+wdYHQ8XqHpChKffwGYbBU4lV2QpeX75BJoYe3M6/MGsjG2Cy+3HZc73B0VVJRzdZj\nOVzWzxsRuxp6XQo2tnqHpShKfXwHao8Zh3R5+Q6ZFABuHNGVYSHufLPzBNoWD53TrsRcKqstXOmZ\nDCWZ0He63iEpinIunj3BaAfp+sxs7phJQUo4voU5g5w5llXC/qR8vSPSzcGUAoSAPtl/gI0D9J6q\nd0iKopyL0QZ8+6uk0KxyE+DLK5hWvhJHWyMfbTimd0S6OZRSQA8Pe2yProDeU8DOWe+QFEVpSMQt\n0OdyXV66YyYFzx7QazJ2ez/l4YlB/BGdwd2L97DmsH6zBPVyOLWQK9yToCQLBszWOxxFURpj+B0w\n6l5dXrpjJgWAMQ9BaTZ3uOxiWIg7G2OzeHPVkU7Vv5BXUklKfhljbOO0gm4T9A1IUZQ2r+MmhdBx\n4BuGzb5F/Dh/NC/NHEBidgmHUwv1jqzVHEotAKBnxWHw6g2OHjpHpChKW9dxk4IQMOw2rbMmZR9T\nB/hhYxD8EqnPLEE9rIvOwM4GPHIiIXiE3uEoitIOdNykABB2Hdg6w4Y3cHc0cXmYP4u2HueTTcdI\nyi3VO7oWVW228PvBNG7qXoEoz4PgUXqHpChKO9Cxk4K9K0x6HuLXQdR3vHlVGOFBbryx8gizP9xG\neZVZ7whbzI6EXLKLK7muS6xWEDpO34AURWkXOnZSABhxt/YpefWTOFVm88P8MSycM4Ts4gp+O9A2\nNspuCVviszEZBb3zNoLPAPDopndIiqK0Ax0/KRgMMOsDqCqHZfMxSjNTB/rR29eZjzYeo6i8Yy6a\nt+d4LmP8wZi8A/peoXc4iqK0Ex0/KQB49YIr3oaE9bD2BYQQPDOtH8ezS5j3xW5KKqr1jrBZlVeZ\nOZBcwPXOUSAt0E8tbaEoSuN0jqQAMGQuDL8TdnwIKXu5qI8P790Ywf6kfG7+fCd7T+TqHWGzOZRS\nQKXZwuiiNeDdF/wG6R2SoijtROdJCgCXvAjOvvDbo2AxMy3Mn/duiOB4dgnXfrSdQykFekfYLNbG\nZNDNkIZ7zn4Iv1HtsqYoSqN1rqRg7wpT34C0SNj9OQBXDPJnw2MX4+pg4l+rj+gc4IXLLq5g8bYT\nPBxgHXUUdq2+ASmK0q50rqQAMOAq6DEJ/ngOYteAlLg5mnjg4p5sjstmc1yW3hFekE83JVBRbWay\nYxx49QG3QL1DUhSlHel8SUEIuPpzbc3yb66DTy+G8gJuGR1CkLsDb646gqWd7tZWXFHNN7tOMn2g\nD47pu9TcBEVRmqzzJQXQ1gC6fTVc/v+0ZTB+vgc7YeHxKX04nFrICysOtbuF80orq3ljZQxF5dU8\n0K8EKotVUlAUpcls9A5AN/auMPJureaw8jH49gZmzvqQmIk9+GjjMQxC8PLMAYh20kn76NIoVh9O\nZ87
IrvQu/F0rDBmrb1CKorQ7nTcp1BhxFxhNsPJxxIcjeXLuCiyyO59sSiAuo5jnp/enf4Cr3lGe\n04HkfFYfTufhS3rxj0t6wrs3QbeJ4OKrd2iKorQznbP56ExDb4P5W8HkiPjmep6e4MUbs8M4nFrA\ntPc20/vZVXyxNVHvKOu14I9YujiauHN8Nzj2FxSc1O5JURSliVRSqOHdG278DkqyEGtf4KaRXdn8\nxCSeu6IfEV278MbKGA4mt715DHtP5LIxNov5E3vgUp0Pv/0DXAPV0haKopwXlRRO5T8IxjwIUd/A\nb4/iZiznzvHdWXjzUDycbLnmo23ct2Qv3+9OahMd0cUV1by44jBezrbMHdUVlt+rbbt5wxKwsdM7\nPEVR2iHVp3CmiU9ARSHsWQTlBXD1Z3hk7+HXu8J5bV0y+07msfJgOm//cZQujiYGBrjx7BX98HRu\n3Tfh8iozd3y5m5i0Ij65ZSiOR36C+LUw9V8QENGqsSiK0nE0mBSEEIuA6UCmlHKgtewl4C6gZqbX\nM1LKldZjTwN3AGbgISnlGmv5VOBdwAh8JqV8s3lvpZmYHOCKBeDsB+tfg6J0OLEFn2F38N6N/0ZK\nyQ97ktl1PJeCsip+O41XMD0AAAn6SURBVJDG5vhs3r42nIm9vVslxLySSh78dj+7jufyn+sHc4lD\nPPz4kLZE+Ii7WiUGRVE6JtFQM4gQYgJQDCw+IykUSynfPuPc/sC3wAggAFgH9LYejgX+f3v3HhxV\ndQdw/PsjL0KC4SUKhEAiaSsFhUxUqkI7IIJMa2rrg6qVqQ+o1VG0/cMOM5XWOk47Ra1FBNsyY52W\ngKBTtcMoRSwdrTzkJcgrIJVACC8hSEhIwq9/nJM1hOyu2STc3O3vM7Ozd8+92f39cjb55Zxzc3c8\nUA6sAX6gqh/Heu3i4mJdu3ZtK1NqJ2ca4J8z3QX0UrtCehY8us1diruJj/dXMX3henYfOsmsWy+n\nsqqG7xXl8l7ZYSYN70daSvvO0B04XsMt896n8ngtT940jFsu6w2zr4C0rnDPMvscZmMMIvKhqhYn\n8rVxRwqqulJEBn/J5ysBSlW1FvhERMpwBQKgTFV3A4hIqT82ZlEIVJcUuP4JN520fSm8eh+Ur4G8\nq846bGj/C3hl2tVM/P1KHi7dAMDv3trB6YYzHKyq5b4xBe0STl3DGZ5bvpNX1+3jWPVpSqeNoiiv\nJ6x4CqrK4UdLrSAYY9qsLWsKD4rIXcBa4Keq+hkwAPigyTHlvg1gb7P2s3+7dlYZ3eErEyAlHRZM\ndldZPVEBFw+Hm+dDdl9yuqUx544iSlfvZXhuDqVrPgVg9ooydh36nBO19dx7bT4j83q26qVVFRHh\nRE0dM1//mCXryhlV0ItnbhvhCsKxT+G9Z2HY92HQ1R2RvTHm/0yiReEF4AlA/f0s4G6gpX//VVo+\ny6nFeSsRmQpMBcjLy0swvHbWNQduXwSbFrlF6Nxi+Gixm7a54h4YeScj8woiv/TvHDWIrRVVTH7x\nA5ZvO0hdwxlWbj/EoD7dGHJhNg+OLWRI32zAjQDcTdlz+CSX5eYA8Kd/f8Lcf+3i3tEFzFlRxona\neh4aV8ij4/1sXF0NvDEdEBj/qyC+K8aYJBR3TQHATx+92bimEG2fX2RGVZ/y+94CZvpDZ6rqBN9+\n1nHRBLqmEE/lFnjnSdixFFIz4c4lkN0Xel9yzqH7j53ikYVuamnL/ioAxn6tL4V9s1m2tZLyz07R\nIzON3YdPMmzABQzuncWbmyrolp5C9ekG8vtkMevWy93ooK4G3v8DbFoIR3bCt5+B4rvPa+rGmM6t\nLWsKCRUFEemnqhV++xHgKlWdLCJfB/7GFwvNy4FC3AhiBzAO2IdbaL5dVbfEet1OXRQafbYH5k90\nU0oA+WPcp50NGQ+F48/5gJuK46d4uHQDnx6p5kBVDaldhP49Mjl68
jTTxhRQumYv+46d4qGxQ7j1\nioHMeXcX93/zEgb26gb718NrP4ZD2yDvavjGA/ZRm8aYc3RoURCRBcC3gD5AJfC4fzwCNwW0B5jW\npEjMwE0l1QPTVXWpb58EPIs7JXW+qj4ZL7hQFAWAA5th6xsgXWDzEqja565Seul3oGSOu/heC97f\ndZguIozM60F1bQM9s9KpqWtg79FqCi/qfvbB2/4Bi6ZAVh+4cTYUXnceEjPGhFGHjxSCEpqi0Fz9\naVg9D5Y9Djm57qygzJ5w5TT46sQv9xyq7r+T07Ng1Tx459fQfwTcsdjOMjLGxNShp6SaBKSmu8tl\n9C+Ct2dAxgVwdDcsuA0ycmDojW5/Zi+oPuymnrIvhs8PwMkjsHsF1FXDltdwM28KQ0vcCCHKqMMY\nY9qDFYWONPgamPqu266vhf8879YD1r/sbtIF9My5X5eaCfWn3MgiLROGjIPBo89ZnzDGmPZmReF8\nSc2A0Y+67ZE/hON73eghJQMuHgbVR6B7P6g75f4vovaETRMZY847KwpByB8d/xgrCMaYANils40x\nxkRYUTDGGBNhRcEYY0yEFQVjjDERVhSMMcZEWFEwxhgTYUXBGGNMhBUFY4wxEZ36gngicgj4bxue\nog9wuJ3C6Swsp3CwnMIhWXPKUtULE/niTl0U2kpE1iZ6pcDOynIKB8spHCync9n0kTHGmAgrCsYY\nYyKSvSi8GHQAHcByCgfLKRwsp2aSek3BGGNM6yT7SMEYY0wrJGVREJGJIrJdRMpE5LGg40mUiOwR\nkY9EZIOIrPVtvURkmYjs9Pc9g44zFhGZLyIHRWRzk7YWcxDnOd9vm0SkKLjIo4uS00wR2ef7aoOI\nTGqy7+c+p+0iMiGYqGMTkYEiskJEtorIFhF52LeHtq9i5BTavhKRriKyWkQ2+px+6dvzRWSV76eF\nIpLu2zP84zK/f3DcF1HVpLoBKcAuoABIBzYCQ4OOK8Fc9gB9mrX9FnjMbz8G/CboOOPkMAYoAjbH\nywGYBCzFfTD1KGBV0PG3IqeZwM9aOHaofw9mAPn+vZkSdA4txNkPKPLb3YEdPvbQ9lWMnELbV/77\nne2304BV/vu/CJjs2+cC9/vtnwBz/fZkYGG810jGkcKVQJmq7lbV00ApUBJwTO2pBHjJb78EfDfA\nWOJS1ZXA0WbN0XIoAf6izgdADxHpd34i/fKi5BRNCVCqqrWq+glQhnuPdiqqWqGq6/z2CWArMIAQ\n91WMnKLp9H3lv9+f+4dp/qbAWGCxb2/eT439txgYJxL7w96TsSgMAPY2eVxO7DdCZ6bA2yLyoYhM\n9W0XqWoFuDc90Dew6BIXLYew992DfiplfpNpvdDl5KcYRuL+Ck2KvmqWE4S4r0QkRUQ2AAeBZbgR\nzTFVrfeHNI07kpPffxzoHev5k7EotFQFw3qK1TWqWgTcADwgImOCDqiDhbnvXgAuAUYAFcAs3x6q\nnEQkG1gCTFfVqliHttDWKfNqIadQ95WqNqjqCCAXN5K5tKXD/H2rc0rGolAODGzyOBfYH1AsbaKq\n+/39QeA13BugsnGY7u8PBhdhwqLlENq+U9VK/8N6BvgjX0w7hCYnEUnD/fL8q6q+6ptD3Vct5ZQM\nfQWgqseAd3FrCj1EJNXvahp3JCe/P4c4U5/JWBTWAIV+NT4dt7jyesAxtZqIZIlI98Zt4HpgMy6X\nKf6wKcDfg4mwTaLl8Dpwlz+zZRRwvHHqorNrNp9+E66vwOU02Z8Fkg8UAqvPd3zx+HnmPwNbVfXp\nJrtC21fRcgpzX4nIhSLSw29nAtfh1kpWADf7w5r3U2P/3Qy8o37VOaqgV9M7aIV+Eu5Mg13AjKDj\nSTCHAtyZEBuBLY154OYDlwM7/X2voGONk8cC3BC9DvdXyz3RcsANdZ/3/fYRUBx0/K3I6WUf8yb/\ng9ivyfEzfE7bgRuCjj9KTtfip
hU2ARv8bVKY+ypGTqHtK+AyYL2PfTPwC99egCtgZcArQIZv7+of\nl/n9BfFew/6j2RhjTEQyTh8ZY4xJkBUFY4wxEVYUjDHGRFhRMMYYE2FFwRhjTIQVBWOMMRFWFIwx\nxkRYUTDGGBPxP+Cv6V9KnmBNAAAAAElFTkSuQmCC\n", 349 | "text/plain": [ 350 | "" 351 | ] 352 | }, 353 | "metadata": {}, 354 | "output_type": "display_data" 355 | } 356 | ], 357 | "source": [ 358 | "plt.plot(dict[dates[len(dates)-1]])\n", 359 | "plt.plot(forecast)\n", 360 | "plt.legend(['load', 'forecast'])\n", 361 | "plt.show()" 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": null, 367 | "metadata": { 368 | "collapsed": true 369 | }, 370 | "outputs": [], 371 | "source": [] 372 | } 373 | ], 374 | "metadata": { 375 | "kernelspec": { 376 | "display_name": "Python 3", 377 | "language": "python", 378 | "name": "python3" 379 | }, 380 | "language_info": { 381 | "codemirror_mode": { 382 | "name": "ipython", 383 | "version": 3 384 | }, 385 | "file_extension": ".py", 386 | "mimetype": "text/x-python", 387 | "name": "python", 388 | "nbconvert_exporter": "python", 389 | "pygments_lexer": "ipython3", 390 | "version": "3.6.5" 391 | } 392 | }, 393 | "nbformat": 4, 394 | "nbformat_minor": 2 395 | } 396 | -------------------------------------------------------------------------------- /models/aws.py: -------------------------------------------------------------------------------- 1 | import os 2 | import schedule 3 | import time 4 | 5 | 6 | def job(): 7 | os.system("./aws_arima.py 1") 8 | print('Done with ARIMA, running smoothing models') 9 | os.system("./aws_smoothing.py 1") 10 | print('Done with smoothing models, running RNN models') 11 | os.system("./aws_rnn.py 1") 12 | print('Done') 13 | return None 14 | 15 | 16 | schedule.every().day.at("00:15").do(job) 17 | 18 | while True: 19 | schedule.run_pending() 20 | time.sleep(60) # wait one minute 21 | -------------------------------------------------------------------------------- /models/aws_arima.py: -------------------------------------------------------------------------------- 
1 | #!/home/eee/ug/15084015/miniconda3/envs/btp/bin/python 2 | """ 3 | The script is to run half an hour after midnight. Scrap last day's data and update monthsdata.csv 4 | """ 5 | import os 6 | import logging 7 | from math import sqrt 8 | from subprocess import call 9 | from datetime import datetime, timedelta 10 | import csv 11 | import requests 12 | import numpy as np 13 | import pandas as pd 14 | import statsmodels.api as sm 15 | from bs4 import BeautifulSoup 16 | from statsmodels.tsa.arima_model import ARIMAResults 17 | 18 | 19 | def get_load_data(date): 20 | url = "http://www.delhisldc.org/Loaddata.aspx?mode=" 21 | logger.info("Scraping " + date) 22 | resp = requests.get(url + date) # send a get request to the url, get response 23 | soup = BeautifulSoup(resp.text, "lxml") # Yummy HTML soup 24 | table = soup.find( 25 | "table", {"id": "ContentPlaceHolder3_DGGridAv"} 26 | ) # get the table from html 27 | trs = table.findAll("tr") # extract all rows of the table 28 | if len(trs[1:]) == 288: # no need to create csv file, if there's no data 29 | with open( 30 | "monthdata.csv", "a" 31 | ) as f: #'a' makes sure the values are appended at the end of the already existing file 32 | writer = csv.writer(f) 33 | for tr in trs[1:]: 34 | time, delhi = tr.findChildren("font")[:2] 35 | writer.writerow([date + " " + time.text, delhi.text]) 36 | if len(trs[1:]) != 288: 37 | logger.info("Some of the load values are missing..") 38 | else: 39 | logger.info("Done") 40 | 41 | 42 | def get_data(): 43 | return pd.read_csv( 44 | "monthdata.csv", 45 | header=None, 46 | index_col=["datetime"], 47 | names=["datetime", "load"], 48 | parse_dates=["datetime"], 49 | infer_datetime_format=True, 50 | ) 51 | 52 | 53 | # to store the log in a file called 'arima_log.txt' 54 | logging.basicConfig( 55 | filename="aws_arima_log.txt", 56 | filemode="a", 57 | level=logging.INFO, 58 | format="%(asctime)s %(message)s", 59 | ) 60 | logger = logging.getLogger() 61 | console = logging.StreamHandler() 62 | 
logger.addHandler(console) 63 | 64 | """Check if monthdata.csv exists, if no then create one, if yes then update it with yesterday's data and clip it so that it contains only last 30 days of data, 65 | as the model is to be trained on last 30 days of data.""" 66 | if os.path.exists("monthdata.csv"): 67 | data = get_data() 68 | # import pdb; pdb.set_trace() 69 | if (datetime.today() - timedelta(1)).date().strftime('%Y-%m-%d') != str(data.index.date[-1]): # yesterdays data not present, scrap it 70 | # only need to scrap for yesterday's data and append it to already existing file 71 | yesterday = datetime.today() - timedelta(1) 72 | yesterday = yesterday.strftime("%d/%m/%Y") 73 | get_load_data(yesterday) 74 | # re read updated monthdata.csv and clip data in monthdata.csv to last 30 days only 75 | data = get_data() 76 | day_to_clip_from = datetime.today() - timedelta(30) 77 | logger.info("Clipping data from " + day_to_clip_from.strftime("%d/%m/%Y")) 78 | data = data[day_to_clip_from.strftime("%d/%m/%Y"):] 79 | data.to_csv( 80 | "monthdata.csv", header=False 81 | ) # IMP: don't add any header to the monthdata.csv 82 | else: 83 | logger.info('Yesterday"s load already scrapped!') 84 | else: # scrap for last 30 days, prepare monthdata.csv 85 | for i in range(31, 0, -1): 86 | yesterday = datetime.today() - timedelta(i) 87 | yesterday = yesterday.strftime("%d/%m/%Y") 88 | get_load_data(yesterday) 89 | data = get_data() 90 | 91 | # exit() 92 | logger.info(data.shape) 93 | data = data.asfreq(freq="30Min", method="bfill") # sample the data in hourly manner 94 | 95 | # initialize the model 96 | model = sm.tsa.statespace.SARIMAX( 97 | data, 98 | order=(3, 1, 1), 99 | seasonal_order=(3, 0, 0, 24), 100 | enforce_stationarity=False, 101 | enforce_invertibility=False, 102 | ) 103 | 104 | # fit the model with the data 105 | logger.info("Starting model fitting...") 106 | model = model.fit() 107 | 108 | logger.info("Model fitting done!!") 109 | logger.info(model.summary().tables[1]) 110 
| logger.info(model.summary()) 111 | 112 | # save the model 113 | model.save("ARIMA_month_model.pkl") 114 | # model = ARIMAResults.load('ARIMA_month_model.pkl') 115 | # import pdb; pdb.set_trace() 116 | # generate the predictions 117 | todays_date = datetime.today().strftime("%d/%m/%Y") 118 | tommorows_date = (datetime.today() + timedelta(1)).strftime("%d/%m/%Y") 119 | # pred = model.get_prediction( 120 | # start=data.shape[0]-9, # rolling mean of window 10 to be applied 121 | # end=data.shape[0]+48-1, # predict next 48 values (half hourly, for 24 hours), last value to be removed 122 | # dynamic=False, 123 | # ) 124 | pred = model.get_prediction( 125 | start=data.shape[0], # rolling mean of window 10 to be applied 126 | end=data.shape[0]+48, # predict next 48 values (half hourly, for 24 hours), last value to be removed 127 | dynamic=False, 128 | ) 129 | # save the pridictions in a csv file 130 | predictions = pred.predicted_mean 131 | predictions = predictions.asfreq(freq="5Min", method="bfill") # set to 5 min freq 132 | date = datetime.today().strftime(format="%d-%m-%Y") 133 | # predictions = predictions.rolling(window=10).mean().dropna() 134 | predictions.to_csv( 135 | "predictions/ARIMA/%s.csv" % date, index_label="datetime", header=["load"] 136 | ) 137 | 138 | # error = sqrt(((predictions - numpy.squeeze(todays_date['%s' % date:]))**2).mean()) 139 | # logger.log(error) 140 | 141 | # now, send the file to the AWS server using scp 142 | cmd = ( 143 | "scp -i /home/eee/ug/15084015/.ssh/btp.pem predictions/ARIMA/%s.csv ubuntu@13.126.97.91:/var/www/html/btech_project/server/predictions/ARIMA/" 144 | % (date) 145 | ) 146 | logger.info(call(cmd.split(" "))) 147 | print("ARIMA prediction done") 148 | -------------------------------------------------------------------------------- /models/aws_rnn.py: -------------------------------------------------------------------------------- 1 | #!/home/eee/ug/15084015/miniconda3/envs/btp/bin/python 2 | """The script is to run half 
an hour after midnight. Scrap last day's data and update lstm_data.csv""" 3 | import os 4 | import sys 5 | from tensorflow import set_random_seed 6 | from numpy.random import seed 7 | from keras.layers import Dense, SimpleRNN, LSTM, GRU 8 | from keras.models import Sequential 9 | from sklearn.preprocessing import MinMaxScaler 10 | import logging 11 | from subprocess import call 12 | from datetime import datetime, timedelta 13 | import csv 14 | import requests 15 | import numpy as np 16 | import pandas as pd 17 | from bs4 import BeautifulSoup 18 | 19 | seed(1) 20 | set_random_seed(2) 21 | # GPU is much slower due to small batch size, so use CPU only 22 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 23 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # no tf shit warnings 24 | 25 | 26 | def get_load_data(date): 27 | url = "http://www.delhisldc.org/Loaddata.aspx?mode=" 28 | logger.info("Scraping " + date) 29 | # send a get request to the url, get response 30 | resp = requests.get(url + date) 31 | soup = BeautifulSoup(resp.text, "lxml") # Yummy HTML soup 32 | # get the table from html 33 | table = soup.find("table", {"id": "ContentPlaceHolder3_DGGridAv"}) 34 | trs = table.findAll("tr") # extract all rows of the table 35 | if len(trs[1:]) == 288: # no need to create csv file, if there's no data 36 | # 'a' makes sure the values are appended at the end of the already existing file 37 | with open("lstm_data.csv", "a") as f: 38 | writer = csv.writer(f) 39 | for tr in trs[1:]: 40 | time, delhi = tr.findChildren("font")[:2] 41 | writer.writerow([date + " " + time.text, delhi.text]) 42 | if len(trs[1:]) != 288: 43 | logger.info("Some of the load values are missing..") 44 | else: 45 | logger.info("Done") 46 | 47 | 48 | def get_data(): 49 | return pd.read_csv( 50 | "lstm_data.csv", 51 | header=None, 52 | index_col=["datetime"], 53 | names=["datetime", "load"], 54 | parse_dates=["datetime"], 55 | infer_datetime_format=True, 56 | ) 57 | 58 | 59 | def prepare_data(data, nlags): 60 | 
"""prepares data for LSTM model, x=last nlags values, y=(nlags+1)'th value"""
    data_x, data_y = [], []
    # slide a window of width nlags across each row; the value right after
    # the window is the regression target
    for i in range(data.shape[0]):
        for j in range(0, data.shape[1] - nlags):
            data_x.append(data[i, j : j + nlags])
            data_y.append(data[i, j + nlags])
    data_x = np.array(data_x)
    data_y = np.array(data_y).reshape(-1, 1)
    return data_x, data_y


def get_model(model_name):
    """Build and compile a stateful Keras model.

    model_name: one of "RNN", "LSTM", "GRU"; any other value falls through
    and returns None (no explicit error is raised).
    NOTE(review): input/output shapes come from the module-level
    train_x / train_y globals, so this must be called after they are built.
    """
    batch_size = 1
    if model_name == "RNN":
        model = Sequential()
        model.add(
            SimpleRNN(
                2,
                batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]),
                stateful=True,
            )
        )
        model.add(Dense(train_y.shape[1]))
        model.compile(loss="mean_squared_error", optimizer="adam")
        return model

    elif model_name == "LSTM":
        model = Sequential()
        model.add(
            LSTM(
                1,
                batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]),
                stateful=True,
            )
        )
        model.add(Dense(train_y.shape[1]))
        model.compile(loss="mean_squared_error", optimizer="adam")
        return model

    elif model_name == "GRU":
        model = Sequential()
        model.add(
            GRU(
                1,
                batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]),
                stateful=True,
                return_sequences=True,  # feed full sequence into the second GRU layer
            )
        )
        model.add(GRU(1, stateful=True))
        model.add(Dense(train_y.shape[1]))
        model.compile(loss="mean_squared_error", optimizer="adam")
        return model


# to store the log in a file called 'aws_rnn_log.txt'
logging.basicConfig(
    filename="aws_rnn_log.txt",
    filemode="a",
    level=logging.INFO,
    format="%(asctime)s %(message)s",
)
logger = logging.getLogger()
console = logging.StreamHandler()
logger.addHandler(console)
sys.stdout = open('aws_rnn_log.txt', 'a')


"""Check if lstm_data.csv exists, if no then create one, if yes then update it
and clip it so that it contains only last 60 days of data, 130 | as the model is to be trained on last 60 days of data.""" 131 | if os.path.exists("lstm_data.csv"): 132 | data = get_data() 133 | # yesterdays data not present, scrap it 134 | if (datetime.today() - timedelta(1)).date().strftime('%Y-%m-%d') == str(data.index.date[-1]): 135 | # only need to scrap for yesterday's data and append it to already existing file 136 | yesterday = datetime.today() - timedelta(1) 137 | yesterday = yesterday.strftime("%d/%m/%Y") 138 | get_load_data(yesterday) 139 | # re read updated lstm_data.csv and clip data in lstm_data.csv to last 60 days only 140 | data = get_data() 141 | day_to_clip_from = datetime.today() - timedelta(61) 142 | logger.info("Clipping data from " + day_to_clip_from.strftime("%d/%m/%Y")) 143 | data = data[day_to_clip_from.strftime("%d/%m/%Y") :] 144 | # IMP: don't add any header to the lstm_data.csv 145 | data.to_csv("lstm_data.csv", header=False) 146 | else: 147 | logger.info('Yesterday"s load already scrapped!') 148 | else: # scrap for last 60 days, prepare lstm_data.csv 149 | print("Creating lstm_data.csv ..") 150 | for i in range(61, 0, -1): 151 | yesterday = datetime.today() - timedelta(i) 152 | yesterday = yesterday.strftime("%d/%m/%Y") 153 | get_load_data(yesterday) 154 | data = get_data() 155 | 156 | 157 | logger.info(data.shape) 158 | 159 | data = pd.read_csv( 160 | "lstm_data.csv", 161 | header=None, 162 | names=["datetime", "load"], 163 | index_col=[0], 164 | parse_dates=[0], 165 | infer_datetime_format=True, 166 | ) 167 | df = pd.DataFrame(columns=["time"] + list(map(str, range(int(data.shape[0] / 288))))) 168 | for idx, time in enumerate(sorted(set(data.index.time))): 169 | df.loc[idx] = [time.strftime(format="%H:%M:%S")] + list( 170 | data.at_time(time)["load"].values 171 | ) 172 | df.index = df["time"] 173 | df = df.drop("time", 1) 174 | dt_df = df.diff(1, axis=1) # detrending 175 | dt_df = dt_df.dropna(axis=1) # droping the na column created 
due to detrending
scaler = MinMaxScaler(feature_range=(-1, 1))  # rescaling data to [-1, 1]
scaler = scaler.fit(dt_df.values.reshape(-1, 1))
dt_df = scaler.transform(dt_df)  # dt_df is now a numpy array
split_idx = int(len(dt_df) * 0.8)
train, val = dt_df[:split_idx, :], dt_df[split_idx:, :]

nlags = 10  # number of previous days to use for prediction
batch_size = 1  # batch size to train the models on
train_x, train_y = prepare_data(train, nlags)
val_x, val_y = prepare_data(val, nlags)
train_x = train_x.reshape(train_x.shape[0], 1, nlags)
val_x = val_x.reshape(val_x.shape[0], 1, nlags)

# data for prediction of today's load
# slice last nlags+1 days from df, will be used in prediction of yesterday's data
df_last_nlags_plus_one = df.loc[:, df.columns[-nlags - 1:]]
# taking last nlags+1 days, differencing and dropping the nan value
dt_df_last_nlags = df_last_nlags_plus_one.diff(1, axis=1).dropna(axis=1)
dt_df_last_nlags = scaler.transform(dt_df_last_nlags)  # df is now a numpy array
X = dt_df_last_nlags.reshape(dt_df_last_nlags.shape[0], 1, nlags)  # one sample per time-of-day row
today = datetime.today().strftime(format="%d-%m-%Y")
models = ["LSTM", "RNN", "GRU"]

for model_name in models:
    logger.info("%s training started" % model_name)
    model = get_model(model_name)
    logger.info(model.summary())
    print(model.summary())
    # 15 manual epochs; state is reset between epochs because the layers are stateful
    for i in range(15):
        history = model.fit(
            train_x,
            train_y,
            epochs=1,
            batch_size=batch_size,
            verbose=2,
            validation_data=(val_x, val_y),
            shuffle=False,
        )
        model.reset_states()

    # calculate val RMSE
    val_y_pred = model.predict(val_x, batch_size=batch_size, verbose=2)
    inverted_val_y = scaler.inverse_transform(val_y)
    inverted_val_y_pred = scaler.inverse_transform(val_y_pred)
    val_RMSE = np.sqrt(
np.sum(np.square(np.array(inverted_val_y_pred) - np.array(inverted_val_y))) 222 | / len(inverted_val_y) 223 | ) 224 | logger.info("model name: %s | val_RMSE: %f" % (model_name, val_RMSE)) 225 | # import pdb; pdb.set_trace() 226 | # Predict today's load 227 | Y = model.predict(X, batch_size=batch_size) # predict for today's values 228 | inv_Y = scaler.inverse_transform(Y) # invert to detrended values' scale 229 | # last day's values added to inv_Y to get it to original scale 230 | rescaled_Y = [x + y for x, y in zip(inv_Y[:, 0], df.iloc[:, -1])] 231 | 232 | # create a csv file to store the predictions 233 | pred_df = pd.DataFrame(columns=["time", "load"]) 234 | pred_df["time"] = list(df.index) 235 | pred_df["load"] = rescaled_Y 236 | pred_df.to_csv("predictions/%s/%s.csv" % (model_name, today), index=False) 237 | # now, send the file to the AWS server using scp 238 | cmd = ( 239 | "scp -i /home/eee/ug/15084015/.ssh/btp.pem predictions/%s/%s.csv ubuntu@13.126.97.91:/var/www/html/btech_project/server/predictions/%s/" 240 | % (model_name, today, model_name) 241 | ) 242 | logger.info(call(cmd.split(" "))) 243 | -------------------------------------------------------------------------------- /models/aws_smoothing.py: -------------------------------------------------------------------------------- 1 | #!/home/eee/ug/15084015/miniconda3/envs/btp/bin/python 2 | from subprocess import call 3 | import pdb 4 | import pandas as pd 5 | from datetime import datetime, timedelta 6 | 7 | 8 | def scp(directory, date): 9 | cmd = ( 10 | "scp -i /home/eee/ug/15084015/.ssh/btp.pem predictions/%s/%s.csv ubuntu@13.126.97.91:/var/www/html/btech_project/server/predictions/%s/" 11 | % (directory, date, directory) 12 | ) 13 | call(cmd.split(" ")) 14 | 15 | 16 | """Simple Moving Average (SMA)""" 17 | # pdb.set_trace() 18 | p = 5 # number of days to take average of 19 | n = 24 * 12 # hours * number of values per hour 20 | time = ['%02d:%02d' % (x, y) for x in range(24) for y in range(0, 60, 5)] 
21 | #time = ['00:00', '00:05'... 22 | 23 | data = pd.read_csv( 24 | "monthdata.csv", 25 | header=None, 26 | index_col=["datetime"], 27 | names=["datetime", "load"], 28 | parse_dates=["datetime"], 29 | infer_datetime_format=True, 30 | ) 31 | # import pdb; pdb.set_trace() 32 | print(data.index[-1]) 33 | date = datetime.today().date().strftime("%d-%m-%Y") 34 | print('date today:', date) 35 | load = data["load"].values 36 | pred = [0] * n 37 | for i in range(n): 38 | forecast = 0 39 | for j in range(1, p + 1): 40 | forecast += load[-(j * n) + i] / p 41 | pred[i] = (time[i], forecast) 42 | 43 | df = pd.DataFrame.from_records(pred, columns=["time", "load"]) 44 | df.to_csv("predictions/SMA/%s.csv" % date, index=False) 45 | scp("SMA", date) 46 | 47 | 48 | """Simple Exponential Smoothing (SES)""" 49 | dict = {} 50 | 51 | m = int(len(data) / n) 52 | alpha = [0] * n 53 | alphamin = [0] * n 54 | forecast = [0] * len(data) 55 | forecast[:n] = [load[j] for j in range(n)] 56 | 57 | for j in range(n): 58 | mse = [0] * 9 59 | for k in range(1, 10): 60 | alpha[j] = k * 0.1 61 | mse[k - 1] += (forecast[j] - load[n + j]) ** 2 62 | for i in range(2, m): 63 | forecast[((i - 1) * n) + j] = (alpha[j] * load[((i - 1) * n) + j]) + ( 64 | (1 - alpha[j]) * forecast[((i - 2) * n) + j] 65 | ) 66 | mse[k - 1] += (forecast[((i - 1) * n) + j] - load[(i * n) + j]) ** 2 67 | min = mse[0] 68 | alphamin[j] = 0.1 69 | for i in range(1, 9): 70 | if mse[i] < min: 71 | min = mse[i] 72 | alphamin[j] = (i + 1) * 0.1 73 | 74 | a = 10 75 | forecast2 = [0] * (a * n) 76 | forecast2[:n] = [load[i - (a * n)] for i in range(n)] 77 | for j in range(1, a): 78 | for i in range(n): 79 | forecast2[i + (j * n)] = (alphamin[i] * load[i - (a * n) + (j * n)]) + ( 80 | (1 - alphamin[i]) * forecast2[i + (j * n) - n] 81 | ) 82 | pred = [0] * n 83 | for i in range(n): 84 | pred[i] = (time[i], forecast2[-n:][i]) 85 | labels = ["time", "load"] 86 | df = pd.DataFrame.from_records(pred, columns=labels) 87 | 
df.to_csv("predictions/SES/%s.csv" % date, index=False) 88 | scp("SES", date) 89 | 90 | 91 | """Weighted Moving Average (WMA)""" 92 | weights = [0.8019, 0.0426, 0.0226, -0.0169, 0.1497] 93 | pred = [0] * n 94 | for i in range(n): 95 | forecast = 0 96 | for j in range(1, len(weights) + 1): 97 | forecast += load[-(j * n) + i] * weights[j - 1] 98 | pred[i] = (time[i], forecast) 99 | 100 | labels = ["time", "load"] 101 | df = pd.DataFrame.from_records(pred, columns=labels) 102 | df.to_csv("predictions/WMA/%s.csv" % date, index=False) 103 | scp("WMA", date) 104 | -------------------------------------------------------------------------------- /models/load_scrap.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import csv 3 | import os 4 | from bs4 import BeautifulSoup 5 | url = 'http://www.delhisldc.org/Loaddata.aspx?mode=' 6 | 7 | day_range = list(range(1, 32)) # days, 1 to 31 8 | 9 | # months, Aug to Dec for 2017, and Jan for 2018 10 | month_range = { 11 | 2017: [], 12 | 2018: [1, 2, 3, 4, 5, 6, 7, 8] 13 | } 14 | 15 | year_range = [2017, 2018] 16 | 17 | if not os.path.exists('SLDC_Data'): 18 | os.makedirs('SLDC_Data') 19 | 20 | for year in year_range: 21 | for month in month_range[year]: 22 | month_dir = 'SLDC_Data/%d/%02d/' %(year, month) 23 | if not os.path.exists(month_dir): os.makedirs(month_dir) 24 | try: 25 | for day in day_range: 26 | date = '%02d/%02d/%d' %(day, month, year) 27 | print('Scraping', date) 28 | resp = requests.get(url+date) # send a get request to the url, get response 29 | soup = BeautifulSoup(resp.text, 'lxml') # Yummy HTML soup 30 | table = soup.find('table', {'id':'ContentPlaceHolder3_DGGridAv'}) # get the table from html 31 | trs = table.findAll('tr') # extract all rows of the table 32 | if len(trs[1:])!=0: # no need to create csv file, if there's no data, for Aug month of 2017 33 | csv_filename = month_dir + '%s.csv' % date.replace('/', '-') 34 | if os.path.exists(csv_filename): 
os.remove(csv_filename) # remove the file it already exists, can result in data duplicacy 35 | with open(csv_filename, 'a') as f: 36 | writer = csv.writer(f) 37 | writer.writerow(['time', 'value']) 38 | for tr in trs[1:]: 39 | time, delhi = tr.findChildren('font')[:2] 40 | writer.writerow([time.text, delhi.text]) 41 | except Exception as e: 42 | print(e) 43 | -------------------------------------------------------------------------------- /models/log: -------------------------------------------------------------------------------- 1 | LSTM 2 | 3 | Architecture => val_RMSE, RMSE on 25nov load 4 | LSTM(1) + LSTM(1) + Dense() => 130 5 | 6 | 7 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 8 | model.add(GRU(1, stateful=True)) 9 | 10 | 172.30510765803461 70.23247637069744 11 | 12 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 13 | model.add(GRU(1, stateful=True, return_sequences=True)) 14 | model.add(GRU(1, stateful=True)) 15 | 16 | 17 | 169.01878138738027 98.78463527925186 18 | 19 | 20 | model = Sequential() 21 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 22 | model.add(GRU(1, stateful=True, return_sequences=True)) 23 | model.add(GRU(1, stateful=True, return_sequences=True)) 24 | model.add(GRU(1, stateful=True)) 25 | 26 | model.add(Dense(train_y.shape[1])) 27 | 28 | 179.42414235781897 137.3227309920991 29 | 30 | 31 | model = Sequential() 32 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 33 | model.add(GRU(1, stateful=True, return_sequences=True)) 34 | model.add(GRU(1, stateful=True, return_sequences=True)) 35 | model.add(GRU(1, stateful=True)) 36 | 37 | model.add(Dense(train_y.shape[1])) 38 | model.add(Dropout(0.2)) 39 | 40 | 
model.add(Dense(train_y.shape[1])) 41 | 42 | 179.68186896684816 115.50372901507328 43 | 44 | 45 | batch_size = 1 46 | model = Sequential() 47 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]))) 48 | model.add(Dense(train_y.shape[1])) 49 | 50 | model.compile(loss='mean_squared_error', optimizer='sgd') 51 | es = EarlyStopping(monitor='val_loss', 52 | min_delta=0, 53 | patience=3, 54 | verbose=0, mode='auto') 55 | 56 | 161.98254979093554 111.68976793136368 57 | 58 | 59 | del model 60 | batch_size = 1 61 | model = Sequential() 62 | model.add(GRU(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True)) 63 | model.add(Dense(train_y.shape[1])) 64 | 65 | model.compile(loss='mean_squared_error', optimizer='sgd') 66 | es = EarlyStopping(monitor='val_loss', 67 | min_delta=0, 68 | patience=3, 69 | verbose=0, mode='auto') 70 | 71 | 170.49926684704036 77.18821593330154 72 | 73 | model.add(LSTM(1, batch_input_shape=(1, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 74 | model.add(LSTM(1, stateful=True, return_sequences=True)) 75 | model.add(LSTM(1, stateful=True)) 76 | model.add(Dense(train_y.shape[1])) 77 | 78 | 179.72401621324363 115.36772988379201 79 | 80 | model = Sequential() 81 | model.add(LSTM(1, batch_input_shape=(1, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 82 | model.add(LSTM(1, stateful=True, return_sequences=True)) 83 | model.add(LSTM(1, stateful=True)) 84 | model.add(Dense(train_y.shape[1])) 85 | model.add(Dense(train_y.shape[1])) 86 | 87 | 179.6798363877226 115.55969177210693 88 | 89 | 90 | 91 | del model 92 | model = Sequential() 93 | model.add(LSTM(1, batch_input_shape=(1, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 94 | model.add(LSTM(1, stateful=True, return_sequences=True)) 95 | model.add(LSTM(1, stateful=True)) 96 | model.add(Dense(train_y.shape[1])) 97 | model.add(Dense(train_y.shape[1])) 98 | 
model.compile(loss='mean_squared_error', optimizer='sgd') 99 | 100 | 179.64784244480092 115.40448187064597 101 | 102 | model = Sequential() 103 | model.add(LSTM(1, batch_input_shape=(1, train_x.shape[1], train_x.shape[2]))) 104 | model.add(Dense(train_y.shape[1])) 105 | model.compile(loss='mean_squared_error', optimizer='sgd') 106 | 179.64784244480092 115.40448187064597 107 | 108 | 109 | model = Sequential() 110 | model.add(LSTM(1, batch_input_shape=(1, train_x.shape[1], train_x.shape[2]), stateful=True)) 111 | model.add(Dense(train_y.shape[1])) 112 | model.compile(loss='mean_squared_error', optimizer='sgd') 113 | 114 | 170.15684534737213 73.97455785821403 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | del model 124 | batch_size = 1 125 | model = Sequential() 126 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 127 | model.add(SimpleRNN(1, stateful=True)) 128 | 129 | model.add(Dense(train_y.shape[1])) 130 | 131 | model.compile(loss='mean_squared_error', optimizer='sgd') 132 | 133 | 179.69537398840617 119.75008940730767 134 | 135 | 136 | model = Sequential() 137 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 138 | model.add(SimpleRNN(1, stateful=True, return_sequences=True)) 139 | model.add(SimpleRNN(1, stateful=True)) 140 | 141 | 179.4834537383419 116.1263437001296 142 | 143 | 144 | model = Sequential() 145 | model.add(SimpleRNN(2, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 146 | model.add(SimpleRNN(2, stateful=True, return_sequences=True)) 147 | model.add(SimpleRNN(2, stateful=True)) 148 | 149 | model.add(Dense(train_y.shape[1])) 150 | 151 | 179.5914806001997 131.20615554368203 152 | 153 | 154 | # del model 155 | batch_size = 1 156 | model = Sequential() 157 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, 
train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 158 | model.add(SimpleRNN(1, stateful=True, return_sequences=True)) 159 | model.add(SimpleRNN(1, stateful=True)) 160 | 161 | model.add(Dense(train_y.shape[1])) 162 | model.add(Dropout(0.2)) 163 | model.add(Dense(train_y.shape[1])) 164 | 165 | model.compile(loss='mean_squared_error', optimizer='sgd') 166 | 167 | 168 | 356.5411939524292 348.51289967287505 169 | 170 | # del model 171 | batch_size = 1 172 | model = Sequential() 173 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 174 | model.add(SimpleRNN(1, stateful=True, return_sequences=True)) 175 | model.add(SimpleRNN(1, stateful=True, return_sequences=True)) 176 | model.add(SimpleRNN(1, stateful=True)) 177 | model.add(Dense(train_y.shape[1])) 178 | 179 | model.compile(loss='mean_squared_error', optimizer='sgd') 180 | 181 | 179.9243505278806 127.67319742877328 182 | 183 | 184 | # del model 185 | batch_size = 1 186 | model = Sequential() 187 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True)) 188 | model.add(Dense(train_y.shape[1])) 189 | 190 | model.compile(loss='mean_squared_error', optimizer='sgd') 191 | 192 | 176.81005209413934 127.72322737276058 193 | 194 | del model 195 | batch_size = 1 196 | model = Sequential() 197 | model.add(SimpleRNN(2, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True)) 198 | model.add(Dense(train_y.shape[1])) 199 | 200 | model.compile(loss='mean_squared_error', optimizer='sgd') 201 | 202 | 169.50042814212946 75.2211069885581 203 | 204 | 205 | del model 206 | batch_size = 1 207 | model = Sequential() 208 | model.add(SimpleRNN(3, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True)) 209 | model.add(Dense(train_y.shape[1])) 210 | 211 | model.compile(loss='mean_squared_error', optimizer='sgd') 212 | 213 | 
175.55278890307227 110.81766216199584 214 | 215 | 216 | del model 217 | batch_size = 1 218 | model = Sequential() 219 | model.add(SimpleRNN(2, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]), stateful=True, return_sequences=True)) 220 | model.add(SimpleRNN(2, stateful=True)) 221 | 222 | model.add(Dense(train_y.shape[1])) 223 | 224 | model.compile(loss='mean_squared_error', optimizer='sgd') 225 | 226 | 176.05695242370095 112.45128976645987 227 | 228 | 229 | nlags=7 230 | SimpleRNN(1) 231 | 232 | 167.12253483672743 164.3591296684758 233 | 234 | 235 | SimpleRNN(2) 236 | 168.12847130886865 187.35787301304964 237 | 238 | nlags=15 239 | 137.52007268769108 151.11466939045795 240 | 241 | nlags=20 242 | 160.61520803024845 227.80558679708057 243 | 244 | 245 | del model 246 | batch_size = 1 247 | model = Sequential() 248 | model.add(SimpleRNN(1, batch_input_shape=(batch_size, train_x.shape[1], train_x.shape[2]))) 249 | model.add(Dense(train_y.shape[1])) 250 | model.compile(loss='mean_squared_error', optimizer='adam') 251 | es = EarlyStopping(monitor='val_loss', 252 | min_delta=0, 253 | patience=3, 254 | verbose=0, mode='auto') 255 | 256 | model fit for 20 epochs, shuffle =True, not stateful -------------------------------------------------------------------------------- /models/pdq_search.py: -------------------------------------------------------------------------------- 1 | 2 | # from pyramid.arima import auto_arima 3 | import pandas as pd 4 | import logging 5 | import itertools 6 | import numpy as np 7 | import statsmodels.api as sm 8 | import warnings 9 | 10 | warnings.filterwarnings("ignore") # specify to ignore warning messages 11 | 12 | # to store the log in a file called 'arima_log.txt' 13 | logging.basicConfig( 14 | filename='pdq_log.txt', 15 | filemode='a', 16 | level=logging.INFO, 17 | format="%(asctime)s %(message)s", 18 | ) 19 | logger = logging.getLogger() 20 | console = logging.StreamHandler() 21 | logger.addHandler(console) 22 | 23 | data = 
pd.read_csv('monthdata.csv', header=None, index_col=['datetime'], names=['datetime', 'load'], parse_dates=['datetime'], infer_datetime_format=True) 24 | data = data.asfreq(freq='H', method='bfill') # sample the data in hourly manner 25 | 26 | 27 | # Define the p, d and q parameters to take any value between 0 and 3 28 | p = d = q = range(0, 3) 29 | 30 | # Generate all different combinations of p, q and q triplets 31 | pdq = list(itertools.product(p, d, q)) 32 | 33 | # Generate all different combinations of seasonal p, q and q triplets 34 | seasonal_pdq = [(x[0], x[1], x[2], 24) for x in list(itertools.product(p, d, q))] 35 | 36 | logger.info('pdq:') 37 | logger.info(pdq) 38 | logger.info('seasonal_pdq') 39 | logger.info(seasonal_pdq) 40 | 41 | bestAIC = np.inf 42 | bestParam = None 43 | bestSParam = None 44 | 45 | logger.info('Running GridSearch') 46 | 47 | #use gridsearch to look for optimial arima parameters 48 | for param in pdq: 49 | for param_seasonal in seasonal_pdq: 50 | try: 51 | mod = sm.tsa.statespace.SARIMAX(data, 52 | order=param, 53 | seasonal_order=param_seasonal, 54 | enforce_stationarity=False, 55 | enforce_invertibility=False) 56 | 57 | results = mod.fit() 58 | logger.info('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic)) 59 | 60 | #if current run of AIC is better than the best one so far, overwrite it 61 | if results.aic < bestAIC: 62 | bestAIC = results.aic 63 | bestParam = param 64 | bestSParam = param_seasonal 65 | 66 | except Exception as e: 67 | print(e) 68 | 69 | 70 | logger.info('the best bestAIC, bestParam, bestSParam:') 71 | logger.info(bestAIC) 72 | logger.info(bestParam) 73 | logger.info(bestSParam) 74 | -------------------------------------------------------------------------------- /models/readme.md: -------------------------------------------------------------------------------- 1 | # Models 2 | 3 | Models implemented during the course of the project: 4 | 5 | * Feed forward Neural Network [FFNN.ipynb](FFNN.ipynb) 6 
* Simple Moving Average [SMA.ipynb](SMA.ipynb)
* Weighted Moving Average [WMA.ipynb](WMA.ipynb)
* Simple Exponential Smoothing [SES.ipynb](SES.ipynb)
* Holt-Winters [HW.ipynb](HW.ipynb)
* Autoregressive Integrated Moving Average [ARIMA.ipynb](ARIMA.ipynb)
* Recurrent Neural Networks [RNN.ipynb](RNN.ipynb)
* Long Short Term Memory cells [LSTM.ipynb](LSTM.ipynb)
* Gated Recurrent Unit cells [GRU.ipynb](GRU.ipynb)


Utility scripts

* `aws_arima.py` fits ARIMA model on last one month's data and forecasts load for each day
* `aws_rnn.py` fits RNN, LSTM, GRU on last 2 months' data and forecasts load for each day
* `aws_smoothing.py` fits SES, SMA, WMA on last one month's data and forecasts load for each day
* `aws.py` a scheduler to run all above three scripts everyday 00:30 IST
* `pdq_search.py` for grid search of hyperparameters of ARIMA model on last one month's data
* `load_scrap.py` to scrape load data from SLDC's website
* `whether_scrap.py` to scrape weather data from the wunderground website

--------------------------------------------------------------------------------
/models/requirements.txt:
--------------------------------------------------------------------------------
absl-py==0.6.1
astor==0.7.1
beautifulsoup4==4.6.3
certifi==2018.11.29
chardet==3.0.4
gast==0.2.0
grpcio==1.17.1
h5py==2.8.0
idna==2.8
Keras==2.2.4
Keras-Applications==1.0.6
Keras-Preprocessing==1.0.5
lxml==4.2.5
Markdown==3.0.1
numpy==1.15.4
pandas==0.23.4
patsy==0.5.1
protobuf==3.6.1
python-dateutil==2.7.5
pytz==2018.7
PyYAML==3.13
requests==2.21.0
schedule==0.5.0
scikit-learn==0.20.1
scipy==1.2.0
six==1.12.0
sklearn==0.0
statsmodels==0.9.0
tensorboard==1.12.1
tensorflow==1.12.0
termcolor==1.1.0
urllib3==1.24.1
Werkzeug==0.14.1
-------------------------------------------------------------------------------- /models/test.py: -------------------------------------------------------------------------------- 1 | #!/home/eee/ug/15084015/miniconda3/envs/TF/bin/python 2 | print('lolwa') -------------------------------------------------------------------------------- /models/utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import matplotlib.pyplot as plt 3 | 4 | def extract_dt(data): 5 | data['time'] += ':00' 6 | data['datetime'] = pd.to_timedelta(data['time']) 7 | data['hour'] = data['datetime'].dt.seconds // 3600 8 | return data 9 | 10 | def plot_curves(day_range, month, year): 11 | data = {} 12 | for day in day_range: 13 | date = '%02d-%02d-%d' % (day, month, year) 14 | data['df%02d' % day] = extract_dt(pd.read_csv('SLDC_Data/%d/%02d/%s.csv' % (year, month, date))) 15 | data['df%02d' % day]['date'] = date 16 | 17 | fig = plt.figure(figsize=(20, 10)) 18 | date = [] 19 | for i in sorted(data): 20 | frame = data[i] 21 | plt.plot(frame['time'], frame['value']); 22 | date.append(frame['date'][0]) 23 | plt.legend(date, loc='best'); 24 | plt.show() 25 | 26 | 27 | if __name__=='main': 28 | pass -------------------------------------------------------------------------------- /models/whether_scrap.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import csv 3 | import os 4 | from bs4 import BeautifulSoup 5 | url = 'https://www.wunderground.com/history/airport/VIDP/%d/%d/%d/DailyHistory.html' 6 | 7 | day_range = list(range(1, 32)) # days, 1 to 31 8 | 9 | # months, Aug to Dec for 2017, and Jan for 2018 10 | month_range = { 11 | 2017: list(range(8, 13)), 12 | 2018: [1] 13 | } 14 | 15 | year_range = [2017, 2018] 16 | 17 | if not os.path.exists('Whether_Data'): 18 | os.makedirs('Whether_Data') 19 | 20 | 21 | 22 | for year in year_range: 23 | for month in month_range[year]: 24 | 
month_dir = 'Whether_Data/%d/%02d/' %(year, month) 25 | if not os.path.exists(month_dir): os.makedirs(month_dir) 26 | for day in day_range: 27 | try: 28 | date = '%02d/%02d/%d' %(day, month, year) 29 | print('Scraping', date) 30 | current_url = url % (year, month, day) 31 | resp = requests.get(current_url) # send a get request to the url, get response 32 | soup = BeautifulSoup(resp.text, 'lxml') # Yummy HTML soup 33 | table = soup.find('table', {'id':'obsTable'}) # get the table from html 34 | trs = table.findAll('tr') # extract all rows of the table 35 | if len(trs[1:])!=0: 36 | csv_filename = month_dir + '%s.csv' % date.replace('/', '-') 37 | if os.path.exists(csv_filename): os.remove(csv_filename) # remove the file it already exists, can result in data duplicacy 38 | with open(csv_filename, 'a') as f: 39 | writer = csv.writer(f) 40 | columns = [th.text for th in trs[0].findChildren('th')] 41 | writer.writerow(columns) 42 | for tr in trs[1:]: 43 | row = [] 44 | tds = tr.findChildren('td') 45 | for td in tds: 46 | span = td.findChildren('span', {'class':'wx-value'}) 47 | if span: 48 | row.append(span[0].text.strip()) 49 | else: 50 | row.append(td.text.strip()) 51 | assert len(row) == len(columns) 52 | writer.writerow(row) 53 | except Exception as e: 54 | print('Exception', e) 55 | print(date) 56 | print(current_url) -------------------------------------------------------------------------------- /screenshots/website.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/screenshots/website.png -------------------------------------------------------------------------------- /server/README.md: -------------------------------------------------------------------------------- 1 | # Django Source code for website 2 | 3 | 4 | ## MySQL setup 5 | 6 | # Install dependencies 7 | 8 | sudo apt-get update 9 | sudo apt-get install mysql-server 
python3-dev libmysqlclient-dev 10 | pip install mysqlclient 11 | 12 | # MySQL database setup 13 | # verify that mysql service is running 14 | 15 | systemctl status mysql.service 16 | 17 | # if mysql not running, run : sudo systemctl start mysql 18 | 19 | mysql -u root -p 20 | CREATE DATABASE db_name CHARACTER SET UTF8; 21 | 22 | # check databases by 'show databases;' 23 | 24 | CREATE USER db_user@localhost IDENTIFIED BY 'userpassword'; 25 | GRANT ALL PRIVILEGES ON db_name.* TO db_user@localhost; 26 | FLUSH PRIVILEGES; 27 | exit 28 | 29 | 30 | # Now create a file, /etc/mysql/db.cnf and add following content 31 | 32 | [client] 33 | database = db_name 34 | user = db_user 35 | password = userpassword 36 | default-character-set = utf8 37 | 38 | #for Completetly removing mysql 39 | sudo service mysql stop #or mysqld 40 | sudo killall -9 mysql 41 | sudo killall -9 mysqld 42 | sudo apt-get remove --purge mysql-server mysql-client mysql-common 43 | sudo apt-get autoremove 44 | sudo apt-get autoclean 45 | sudo deluser mysql 46 | sudo rm -rf /var/lib/mysql 47 | sudo apt-get purge mysql-server-core-5.7 48 | sudo apt-get purge mysql-client-core-5.7 49 | sudo rm -rf /var/log/mysql 50 | sudo rm -rf /etc/mysql 51 | 52 | 53 | -------------------------------------------------------------------------------- /server/celerybeat.pid: -------------------------------------------------------------------------------- 1 | 1363 2 | -------------------------------------------------------------------------------- /server/db.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | database = sldc 3 | user = sldc 4 | password = Love@12107233 5 | default-character-set = utf8 6 | -------------------------------------------------------------------------------- /server/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | 
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings") 7 | 8 | from django.core.management import execute_from_command_line 9 | 10 | execute_from_command_line(sys.argv) 11 | -------------------------------------------------------------------------------- /server/nano.save: -------------------------------------------------------------------------------- 1 | [client] 2 | database = db_name 3 | user = db_user 4 | password = userpassword 5 | efault-character-set = utf8 6 | -------------------------------------------------------------------------------- /server/requirements.txt: -------------------------------------------------------------------------------- 1 | amqp==1.4.9 2 | anyjson==0.3.3 3 | beautifulsoup4==4.6.0 4 | billiard==3.3.0.23 5 | bs4==0.0.1 6 | celery==3.1.18 7 | certifi==2018.1.18 8 | chardet==3.0.4 9 | Django==2.0.2 10 | idna==2.6 11 | kombu==3.0.37 12 | lxml 13 | mysqlclient==1.3.12 14 | pytz==2018.3 15 | redis==2.10.3 16 | requests==2.18.4 17 | urllib3==1.22 18 | -------------------------------------------------------------------------------- /server/static/c3-0.4.18/c3.css: -------------------------------------------------------------------------------- 1 | /*-- Chart --*/ 2 | .c3 svg { 3 | font: 10px sans-serif; 4 | -webkit-tap-highlight-color: transparent; } 5 | 6 | .c3 path, .c3 line { 7 | fill: none; 8 | stroke: #000; } 9 | 10 | .c3 text { 11 | -webkit-user-select: none; 12 | -moz-user-select: none; 13 | user-select: none; } 14 | 15 | .c3-legend-item-tile, 16 | .c3-xgrid-focus, 17 | .c3-ygrid, 18 | .c3-event-rect, 19 | .c3-bars path { 20 | shape-rendering: crispEdges; } 21 | 22 | .c3-chart-arc path { 23 | stroke: #fff; } 24 | 25 | .c3-chart-arc text { 26 | fill: #fff; 27 | font-size: 13px; } 28 | 29 | /*-- Axis --*/ 30 | /*-- Grid --*/ 31 | .c3-grid line { 32 | stroke: #aaa; } 33 | 34 | .c3-grid text { 35 | fill: #aaa; } 36 | 37 | .c3-xgrid, .c3-ygrid { 38 | stroke-dasharray: 3 3; } 39 | 40 | /*-- Text on Chart --*/ 41 | 
.c3-text.c3-empty { 42 | fill: #808080; 43 | font-size: 2em; } 44 | 45 | /*-- Line --*/ 46 | .c3-line { 47 | stroke-width: 1px; } 48 | 49 | /*-- Point --*/ 50 | .c3-circle._expanded_ { 51 | stroke-width: 1px; 52 | stroke: white; } 53 | 54 | .c3-selected-circle { 55 | fill: white; 56 | stroke-width: 2px; } 57 | 58 | /*-- Bar --*/ 59 | .c3-bar { 60 | stroke-width: 0; } 61 | 62 | .c3-bar._expanded_ { 63 | fill-opacity: 1; 64 | fill-opacity: 0.75; } 65 | 66 | /*-- Focus --*/ 67 | .c3-target.c3-focused { 68 | opacity: 1; } 69 | 70 | .c3-target.c3-focused path.c3-line, .c3-target.c3-focused path.c3-step { 71 | stroke-width: 2px; } 72 | 73 | .c3-target.c3-defocused { 74 | opacity: 0.3 !important; } 75 | 76 | /*-- Region --*/ 77 | .c3-region { 78 | fill: steelblue; 79 | fill-opacity: .1; } 80 | 81 | /*-- Brush --*/ 82 | .c3-brush .extent { 83 | fill-opacity: .1; } 84 | 85 | /*-- Select - Drag --*/ 86 | /*-- Legend --*/ 87 | .c3-legend-item { 88 | font-size: 12px; } 89 | 90 | .c3-legend-item-hidden { 91 | opacity: 0.15; } 92 | 93 | .c3-legend-background { 94 | opacity: 0.75; 95 | fill: white; 96 | stroke: lightgray; 97 | stroke-width: 1; } 98 | 99 | /*-- Title --*/ 100 | .c3-title { 101 | font: 14px sans-serif; } 102 | 103 | /*-- Tooltip --*/ 104 | .c3-tooltip-container { 105 | z-index: 10; } 106 | 107 | .c3-tooltip { 108 | border-collapse: collapse; 109 | border-spacing: 0; 110 | background-color: #fff; 111 | empty-cells: show; 112 | -webkit-box-shadow: 7px 7px 12px -9px #777777; 113 | -moz-box-shadow: 7px 7px 12px -9px #777777; 114 | box-shadow: 7px 7px 12px -9px #777777; 115 | opacity: 0.9; } 116 | 117 | .c3-tooltip tr { 118 | border: 1px solid #CCC; } 119 | 120 | .c3-tooltip th { 121 | background-color: #aaa; 122 | font-size: 14px; 123 | padding: 2px 5px; 124 | text-align: left; 125 | color: #FFF; } 126 | 127 | .c3-tooltip td { 128 | font-size: 13px; 129 | padding: 3px 6px; 130 | background-color: #fff; 131 | border-left: 1px dotted #999; } 132 | 133 | .c3-tooltip td > 
span { 134 | display: inline-block; 135 | width: 10px; 136 | height: 10px; 137 | margin-right: 6px; } 138 | 139 | .c3-tooltip td.value { 140 | text-align: right; } 141 | 142 | /*-- Area --*/ 143 | .c3-area { 144 | stroke-width: 0; 145 | opacity: 0.2; } 146 | 147 | /*-- Arc --*/ 148 | .c3-chart-arcs-title { 149 | dominant-baseline: middle; 150 | font-size: 1.3em; } 151 | 152 | .c3-chart-arcs .c3-chart-arcs-background { 153 | fill: #e0e0e0; 154 | stroke: none; } 155 | 156 | .c3-chart-arcs .c3-chart-arcs-gauge-unit { 157 | fill: #000; 158 | font-size: 16px; } 159 | 160 | .c3-chart-arcs .c3-chart-arcs-gauge-max { 161 | fill: #777; } 162 | 163 | .c3-chart-arcs .c3-chart-arcs-gauge-min { 164 | fill: #777; } 165 | 166 | .c3-chart-arc .c3-gauge-value { 167 | fill: #000; 168 | /* font-size: 28px !important;*/ } 169 | 170 | .c3-chart-arc.c3-target g path { 171 | opacity: 1; } 172 | 173 | .c3-chart-arc.c3-target.c3-focused g path { 174 | opacity: 1; } 175 | -------------------------------------------------------------------------------- /server/static/main.js: -------------------------------------------------------------------------------- 1 | $('#ddd').on('click', function() { 2 | var gape = { 3 | "from": $("#from").val(), 4 | "to": $("#to").val(), 5 | }; 6 | console.log("adsfadsfdff"); 7 | // console.log(gape); 8 | // console.log(gape.from); 9 | var csrftoken = getCookie('csrftoken'); 10 | $.ajaxSetup({ 11 | beforeSend: function(xhr, settings) { 12 | xhr.setRequestHeader("X-CSRFToken", csrftoken); 13 | } 14 | }); 15 | $.ajax({ 16 | type: 'POST', 17 | url: '/show_data/', 18 | data: gape, 19 | success: function(x){ 20 | console.log(x); 21 | fun(x); 22 | }, 23 | }); 24 | }); 25 | 26 | const fun = function(da){ 27 | console.log('asdgf',da); 28 | console.log("ooooooooooooooooooooooooooooooo"); 29 | let load = da; 30 | console.log(load.Load); 31 | // let datee = (parseInt(tarikh[8]+tarikh[9]) + i).toString() + '-' + tarikh[5]+tarikh[6] 
+'-'+tarikh[0]+tarikh[1]+tarikh[2]+tarikh[3]; 32 | var chart = c3.generate({ 33 | bindto: '#d', 34 | data: { 35 | x: 'x', 36 | xFormat:'%H:%M', 37 | columns: load.Load, 38 | }, 39 | axis: { 40 | y: { 41 | label:{ 42 | text:'POWER IN MW', 43 | position: 'outer-middle', 44 | }, 45 | }, 46 | x: { 47 | label:{ 48 | text: 'Time', 49 | position:'outer-right', 50 | }, 51 | type: 'timeseries', 52 | tick:{ 53 | format:'%H:%M' 54 | } 55 | } 56 | }, 57 | point: { 58 | show: false 59 | }, 60 | // zoom: { 61 | // enabled:true, 62 | // rescale:true, 63 | // extent: [1, 100], 64 | // }, 65 | grid: { 66 | x: { 67 | show: true, 68 | }, 69 | y: { 70 | show: true, 71 | }, 72 | }, 73 | }); 74 | }; 75 | 76 | function getCookie(name) { 77 | var cookieValue = null; 78 | if (document.cookie && document.cookie !== '') { 79 | var cookies = document.cookie.split(';'); 80 | for (var i = 0; i < cookies.length; i++) { 81 | var cookie = jQuery.trim(cookies[i]); 82 | // Does this cookie string begin with the name we want? 
83 | if (cookie.substring(0, name.length + 1) === (name + '=')) { 84 | cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); 85 | break; 86 | } 87 | } 88 | } 89 | return cookieValue; 90 | } 91 | 92 | // $(document).ready(function(){ 93 | // $('#kp').on('click', function(){ 94 | // console.log("ooooooooooooooooooooooooooooooo"); 95 | // let load = data; 96 | // // let datee = (parseInt(tarikh[8]+tarikh[9]) + i).toString() + '-' + tarikh[5]+tarikh[6] +'-'+tarikh[0]+tarikh[1]+tarikh[2]+tarikh[3]; 97 | // var chart = c3.generate({ 98 | // bindto: '#d', 99 | // data: { 100 | // x: 'x', 101 | // xFormat:'%H:%M', 102 | // columns: load.Load, 103 | // }, 104 | // axis: { 105 | // y: { 106 | // label:{ 107 | // text:'POWER IN MW', 108 | // position: 'outer-middle', 109 | // }, 110 | // }, 111 | // x: { 112 | // label:{ 113 | // text: 'Time', 114 | // position:'outer-center', 115 | // }, 116 | // type: 'timeseries', 117 | // tick:{ 118 | // format:'%H:%M' 119 | // } 120 | // } 121 | // }, 122 | // zoom: { 123 | // enabled:true, 124 | // rescale:true, 125 | // extent: [1, 100], 126 | // }, 127 | // grid: { 128 | // x: { 129 | // show: true, 130 | // }, 131 | // y: { 132 | // show: true, 133 | // }, 134 | // }, 135 | // }); 136 | // }); 137 | // }); -------------------------------------------------------------------------------- /server/static/main1.js: -------------------------------------------------------------------------------- 1 | $('#dddd').on('click', function() { 2 | var gap = { 3 | "fc": $("#fc").val(), 4 | // to: $("#to").val(), 5 | }; 6 | // console.log("adsfadsfdff"); 7 | // console.log(gap); 8 | // console.log(gap.fc); 9 | var csrftoken = getCookie('csrftoken'); 10 | $.ajaxSetup({ 11 | beforeSend: function(xhr, settings) { 12 | xhr.setRequestHeader("X-CSRFToken", csrftoken); 13 | } 14 | }); 15 | $.ajax({ 16 | type: 'POST', 17 | url: '/show_forecasted_smavg_data/', 18 | data: gap, 19 | success: function(x){ 20 | // console.log(x); 21 | 
console.log(",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"); 22 | fun1(x); 23 | }, 24 | }); 25 | }); 26 | 27 | const fun1 = function(data){ 28 | // console.log('asdgf',data); 29 | // console.log("ooooooooooooooooooooooooooooooo"); 30 | let load = data; 31 | console.log("zzzzzzzzzzzzzzz",load.rmseSMA); 32 | console.log(load.forecasted_Load); 33 | var a1 = document.getElementById('sma1'); 34 | a1.innerHTML = load.rmseSMA; 35 | var b1 = document.getElementById('wma1'); 36 | b1.innerHTML = load.rmseWMA; 37 | var c1 = document.getElementById('ses1'); 38 | c1.innerHTML = load.rmseSES; 39 | var d1 = document.getElementById('arima1'); 40 | d1.innerHTML = load.rmseARIMA; 41 | var e1 = document.getElementById('lstm1'); 42 | e1.innerHTML = load.rmseLSTM; 43 | var f1 = document.getElementById('gru1'); 44 | f1.innerHTML = load.rmseGRU; 45 | var g1 = document.getElementById('rnn1'); 46 | g1.innerHTML = load.rmseRNN; 47 | 48 | var a1M = document.getElementById('sma1M'); 49 | a1M.innerHTML = load.mapeSMA; 50 | var b1M = document.getElementById('wma1M'); 51 | b1M.innerHTML = load.mapeWMA; 52 | var c1M = document.getElementById('ses1M'); 53 | c1M.innerHTML = load.mapeSES; 54 | var d1M = document.getElementById('arima1M'); 55 | d1M.innerHTML = load.mapeARIMA; 56 | var e1M = document.getElementById('lstm1M'); 57 | e1M.innerHTML = load.mapeLSTM; 58 | var f1M = document.getElementById('gru1M'); 59 | f1M.innerHTML = load.mapeGRU; 60 | var g1M = document.getElementById('rnn1M'); 61 | g1M.innerHTML = load.mapeRNN; 62 | // let datee = (parseInt(tarikh[8]+tarikh[9]) + i).toString() + '-' + tarikh[5]+tarikh[6] +'-'+tarikh[0]+tarikh[1]+tarikh[2]+tarikh[3]; 63 | var chart = c3.generate({ 64 | bindto: '#Forecasting', 65 | data: { 66 | x: 'x', 67 | xFormat:'%H:%M', 68 | columns: load.forecasted_Load, 69 | }, 70 | axis: { 71 | y: { 72 | label:{ 73 | text:'POWER IN MW', 74 | position: 'outer-middle', 75 | }, 76 | }, 77 | x: { 78 | label:{ 79 | text: 'Time', 80 | position:'outer-right', 81 | }, 82 | type: 
'timeseries', 83 | tick:{ 84 | format:'%H:%M' 85 | } 86 | } 87 | }, 88 | point: { 89 | show: false 90 | }, 91 | // zoom: { 92 | // enabled:true, 93 | // rescale:true, 94 | // extent: [1, 100], 95 | // }, 96 | grid: { 97 | x: { 98 | show: true, 99 | }, 100 | y: { 101 | show: true, 102 | }, 103 | }, 104 | }); 105 | }; 106 | 107 | function getCookie(name) { 108 | var cookieValue = null; 109 | if (document.cookie && document.cookie !== '') { 110 | var cookies = document.cookie.split(';'); 111 | for (var i = 0; i < cookies.length; i++) { 112 | var cookie = jQuery.trim(cookies[i]); 113 | // Does this cookie string begin with the name we want? 114 | if (cookie.substring(0, name.length + 1) === (name + '=')) { 115 | cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); 116 | break; 117 | } 118 | } 119 | } 120 | return cookieValue; 121 | } 122 | 123 | // $(document).ready(function(){ 124 | // $('#kp').on('click', function(){ 125 | // console.log("ooooooooooooooooooooooooooooooo"); 126 | // let load = data; 127 | // // let datee = (parseInt(tarikh[8]+tarikh[9]) + i).toString() + '-' + tarikh[5]+tarikh[6] +'-'+tarikh[0]+tarikh[1]+tarikh[2]+tarikh[3]; 128 | // var chart = c3.generate({ 129 | // bindto: '#d', 130 | // data: { 131 | // x: 'x', 132 | // xFormat:'%H:%M', 133 | // columns: load.Load, 134 | // }, 135 | // axis: { 136 | // y: { 137 | // label:{ 138 | // text:'POWER IN MW', 139 | // position: 'outer-middle', 140 | // }, 141 | // }, 142 | // x: { 143 | // label:{ 144 | // text: 'Time', 145 | // position:'outer-center', 146 | // }, 147 | // type: 'timeseries', 148 | // tick:{ 149 | // format:'%H:%M' 150 | // } 151 | // } 152 | // }, 153 | // zoom: { 154 | // enabled:true, 155 | // rescale:true, 156 | // extent: [1, 100], 157 | // }, 158 | // grid: { 159 | // x: { 160 | // show: true, 161 | // }, 162 | // y: { 163 | // show: true, 164 | // }, 165 | // }, 166 | // }); 167 | // }); 168 | // }); 
-------------------------------------------------------------------------------- /server/static/scrap.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import csv 3 | import os 4 | from bs4 import BeautifulSoup 5 | url = 'https://www.delhisldc.org/Loaddata.aspx?mode=' 6 | 7 | day_range = list(range(1, 32)) # days, 1 to 31 8 | 9 | # months, Aug to Dec for 2017, and Jan for 2018 10 | month_range = { 11 | 2017: [8,9,10,11,12], 12 | 2018: [1,2,3] 13 | } 14 | 15 | year_range = [2017,2018] 16 | 17 | if not os.path.exists('SLDC_Data'): 18 | os.makedirs('SLDC_Data') 19 | 20 | for year in year_range: 21 | for month in month_range[year]: 22 | month_dir = 'SLDC_Data/%d/%02d/' %(year, month) 23 | if not os.path.exists(month_dir): os.makedirs(month_dir) 24 | try: 25 | for day in day_range: 26 | date = '%02d/%02d/%d' %(day, month, year) 27 | print('Scraping', date) 28 | resp = requests.get(url+date) # send a get request to the url, get response 29 | soup = BeautifulSoup(resp.text, 'lxml') # Yummy HTML soup 30 | table = soup.find('table', {'id':'ContentPlaceHolder3_DGGridAv'}) # get the table from html 31 | trs = table.findAll('tr') # extract all rows of the table 32 | if len(trs[1:])!=0: # no need to create csv file, if there's no data, for Aug month of 2017 33 | print("ayush") 34 | csv_filename = month_dir + '%s.csv' % date.replace('/', '-') 35 | if os.path.exists(csv_filename): os.remove(csv_filename) # remove the file it already exists, can result in data duplicacy 36 | with open(csv_filename, 'a') as f: 37 | writer = csv.writer(f) 38 | for tr in trs[1:]: 39 | time, delhi = tr.findChildren('font')[:2] 40 | writer.writerow([time.text, delhi.text]) 41 | except Exception as e: 42 | print(e) 43 | -------------------------------------------------------------------------------- /server/swag/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/server/swag/__init__.py -------------------------------------------------------------------------------- /server/swag/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | 3 | # Register your models here. 4 | from .forms import CSVForm 5 | from .models import CSV 6 | 7 | class CSVAdmin(admin.ModelAdmin): 8 | list_display = ["__str__","timestamp","load_value","date"] 9 | form = CSVForm 10 | # class Meta: 11 | # model = SignUp 12 | 13 | admin.site.register(CSV,CSVAdmin) 14 | -------------------------------------------------------------------------------- /server/swag/forms.py: -------------------------------------------------------------------------------- 1 | from django import forms 2 | from .models import CSV 3 | # 4 | class CSVForm(forms.ModelForm): 5 | class Meta: 6 | model = CSV 7 | fields = ['timestamp','load_value','date'] 8 | -------------------------------------------------------------------------------- /server/swag/load_data.py: -------------------------------------------------------------------------------- 1 | import sys,os 2 | import django 3 | import datetime 4 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings') 5 | django.setup() 6 | from swag.models import CSV 7 | import csv, datetime 8 | from datetime import timedelta 9 | 10 | # day_range = list(range(0, 31)) # days, 1 to 31 11 | 12 | # # months, Aug to Dec for 2017, and Jan for 2018 13 | # month_range = { 14 | # 2017: [8,9,10,11,12], 15 | # 2018: [1,2,3] 16 | # } 17 | 18 | # year_range = [2017,2018] 19 | 20 | # for year in year_range: 21 | # for month in month_range[year]: 22 | # for day in day_range: 23 | # try: 24 | # a = 1+day 25 | # print(a,month,year); 26 | # link = "static/SLDC_Data/"+str(year)+"/"+str(month).zfill(2)+"/"+str(a).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+".csv" 27 | # dataReader = 
csv.reader(open(link), delimiter=',', quotechar='"') 28 | # for row in dataReader: 29 | # data = CSV.objects.create(timestamp = row[0], load_value = row[1], date = datetime.date(year,month,a)) 30 | # except Exception as e: 31 | # print(e) 32 | 33 | 34 | start_date = datetime.date(2017,8,27) 35 | start_time = datetime.time(0,0) 36 | end_date = datetime.date.today() 37 | dt = datetime.datetime.combine(start_date,start_time) 38 | while(dt.date() != end_date): 39 | year = dt.date().year 40 | month = dt.date().month 41 | day = dt.date().day 42 | link = "static/SLDC_Data/"+str(year)+"/"+str(month).zfill(2)+"/"+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+".csv" 43 | try: 44 | print(day,month,year); 45 | dataReader = csv.reader(open(link), delimiter=',', quotechar='"') 46 | ip=0 47 | for row in dataReader: 48 | print("q",ip) 49 | ip += 1 50 | sp = row[0].split(':') 51 | tim = datetime.time(int(sp[0]),int(sp[1])) 52 | while(tim != dt.time()): 53 | print(tim, (dt.time())) 54 | print("w") 55 | results=[] 56 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 57 | if (len(results[0]) == 0): 58 | data = CSV.objects.create(timestamp = str(dt.time()), load_value = None, date = dt.date()) 59 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 60 | 61 | results=[] 62 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 63 | if (len(results[0]) == 0): 64 | data = CSV.objects.create(timestamp = row[0], load_value = row[1], date = dt.date()) 65 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 66 | print(",,,,",dt.time()) 67 | 68 | if(dt.time() != datetime.time(0,0)): 69 | print("in") 70 | results=[] 71 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 72 | if (len(results[0]) == 0): 73 | # print("kaka") 74 | data = CSV.objects.create(timestamp = str(dt.time()), load_value = None, date = dt.date()) 75 | # print("nana") 76 | 
while(str(datetime.time(23,55)) != str(dt.time())): 77 | print("e") 78 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 79 | results=[] 80 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 81 | if (len(results[0]) == 0): 82 | data = CSV.objects.create(timestamp = str(dt.time()), load_value = None, date = dt.date()) 83 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 84 | 85 | except Exception as e: 86 | print("fzdds") 87 | results=[] 88 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 89 | if (len(results[0]) == 0): 90 | data = CSV.objects.create(timestamp = str(dt.time()), load_value = None, date = dt.date()) 91 | while(str(dt.time()) != '23:55:00'): 92 | print("r") 93 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 94 | results=[] 95 | results.append(CSV.objects.filter(date = dt.date(), timestamp = str(dt.time()))) 96 | if (len(results[0]) == 0): 97 | data = CSV.objects.create(timestamp = str(dt.time()), load_value = None, date = dt.date()) 98 | dt = (datetime.datetime.combine(dt.date(),dt.time())+timedelta(minutes=5)) 99 | print(e) 100 | -------------------------------------------------------------------------------- /server/swag/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 2.0.2 on 2018-03-31 13:10 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | initial = True 9 | 10 | dependencies = [ 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='CSV', 16 | fields=[ 17 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('timestamp', models.TimeField()), 19 | ('load_value', models.FloatField(null=True)), 20 | ('date', models.DateField()), 21 | ], 22 | ), 23 | ] 24 | 
-------------------------------------------------------------------------------- /server/swag/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/server/swag/migrations/__init__.py -------------------------------------------------------------------------------- /server/swag/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | # Create your models here. 4 | # import datetime 5 | class CSV(models.Model): 6 | timestamp = models.TimeField() 7 | load_value = models.FloatField(null=True) 8 | date = models.DateField() 9 | # statecode = models.CharField(max_length=2) 10 | # statename = models.CharField(max_length=32) 11 | # date = models.DateField('27-08-2017') 12 | 13 | 14 | def __str__(self): 15 | return "yes" 16 | 17 | 18 | # class SignUp(models.Model): 19 | # email = models.EmailField() 20 | # full_name = models.CharField(max_length = 120,blank=True,null=True) 21 | # timestamp = models.DateTimeField(auto_now_add=True,auto_now=False) 22 | # updated = models.DateTimeField(auto_now_add=False,auto_now=True) 23 | # mob_no = models.CharField(max_length = 12) 24 | # 25 | # 26 | # 27 | # def __str__(self): 28 | # return self.email 29 | -------------------------------------------------------------------------------- /server/swag/tasks.py: -------------------------------------------------------------------------------- 1 | from .models import CSV 2 | from celery.task.schedules import crontab 3 | from celery.decorators import periodic_task 4 | 5 | from random import randint 6 | # Create your views here. 
7 | # from .forms import CSVForm 8 | 9 | import datetime 10 | from datetime import timedelta 11 | import requests 12 | import csv 13 | import os 14 | from bs4 import BeautifulSoup 15 | 16 | import sys 17 | import os 18 | djangoproject_home = "website" 19 | sys.path.append(djangoproject_home) 20 | os.environ['DJANGO_SETTINGS_MODULE'] = 'website.settings' 21 | 22 | 23 | @periodic_task(run_every=(crontab(minute=55, hour='0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23')), name="reload_task", ignore_result=True) 24 | def reload_task(): 25 | # global size 26 | print("date:", datetime.date.today().day) 27 | url = 'http://www.delhisldc.org/Loaddata.aspx?mode=' 28 | day_range = list(range(datetime.date.today().day, 29 | datetime.date.today().day+1)) # days, 1 to 31 30 | # months, Aug to Dec for 2017, and Jan for 2018 31 | 32 | month = datetime.date.today().month 33 | year = datetime.date.today().year 34 | 35 | if not os.path.exists('static/SLDC_Data'): 36 | os.makedirs('static/SLDC_Data') 37 | 38 | month_dir = 'static/SLDC_Data/%d/%02d/' % (year, month) 39 | if not os.path.exists(month_dir): 40 | os.makedirs(month_dir) 41 | try: 42 | for day in day_range: 43 | date = '%02d/%02d/%d' % (day, month, year) 44 | print('Scraping', date) 45 | # send a get request to the url, get response 46 | resp = requests.get(url+date) 47 | soup = BeautifulSoup(resp.text, 'lxml') # Yummy HTML soup 48 | # get the table from html 49 | table = soup.find( 50 | 'table', {'id': 'ContentPlaceHolder3_DGGridAv'}) 51 | trs = table.findAll('tr') # extract all rows of the table 52 | # no need to create csv file, if there's no data, for Aug month of 2017 53 | if len(trs[1:]) != 0: 54 | csv_filename = month_dir + \ 55 | '%s.csv' % date.replace('/', '-') 56 | if os.path.exists(csv_filename): 57 | # remove the file it already exists, can result in data duplicacy 58 | os.remove(csv_filename) 59 | with open(csv_filename, 'a') as f: 60 | writer = csv.writer(f) 61 | for tr in trs[1:]: 62 | time, 
delhi = tr.findChildren('font')[:2] 63 | t = (time.text).split(':') 64 | TimE = datetime.time(int(t[0]), int(t[1])) 65 | writer.writerow([str(TimE), delhi.text]) 66 | print(str(TimE)) 67 | results = [] 68 | results.append(CSV.objects.filter( 69 | date=datetime.date.today(), timestamp=str(TimE))) 70 | if (len(results[0]) == 0): 71 | print(TimE, delhi.text) 72 | data = CSV() 73 | data.timestamp = str(TimE) 74 | data.load_value = delhi.text 75 | data.date = datetime.date.today() 76 | data.save() 77 | 78 | if(datetime.datetime.time(datetime.datetime.now()).hour == 23 and datetime.datetime.time(datetime.datetime.now()).minute >= 55): 79 | print( 80 | '............................................') 81 | start = datetime.time(0, 0) 82 | end = datetime.time(23, 55) 83 | while(start != end): 84 | blanck_val = [] 85 | blanck_val.append(CSV.objects.filter( 86 | date=datetime.date.today(), timestamp=str(start))) 87 | if (len(blanck_val[0]) == 0): 88 | data = CSV() 89 | data.timestamp = str(start) 90 | data.load_value = None 91 | data.date = datetime.date.today() 92 | data.save() 93 | writer.writerow([str(start), None]) 94 | # CSV.objects.filter(date = datetime.date.today(), timestamp = str(start)).update(load_value = None) 95 | start = (datetime.datetime.combine(datetime.date( 96 | 1, 1, 1), start)+timedelta(minutes=5)).time() 97 | 98 | except Exception as e: 99 | print(e) 100 | 101 | 102 | -------------------------------------------------------------------------------- /server/swag/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 
4 | -------------------------------------------------------------------------------- /server/swag/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render, HttpResponse 2 | from django.utils.safestring import mark_safe 3 | import datetime 4 | from datetime import timedelta 5 | import json 6 | import requests 7 | import csv 8 | import os 9 | from bs4 import BeautifulSoup 10 | import numpy as np 11 | import pandas as pd 12 | from .models import CSV 13 | 14 | 15 | def home_page(request): 16 | print("date:", datetime.date.today().day) 17 | day = datetime.date.today().day 18 | month = datetime.date.today().month 19 | year = datetime.date.today().year 20 | return render(request, "Home_page.html", {'Day': str(day).zfill(2), 21 | 'Month': str(month).zfill(2), 22 | 'Year': year}) 23 | 24 | 25 | def graph_plot(request): 26 | if request.method == 'POST': 27 | # print(request.POST) 28 | a = request.POST['from'] 29 | b = request.POST['to'] 30 | ans = a.split('-') 31 | bns = b.split('-') 32 | aa = datetime.date(int(ans[0]),int(ans[1]),int(ans[2])) 33 | bb = datetime.date(int(bns[0]),int(bns[1]),int(bns[2])) 34 | query_results = [] 35 | last = [] 36 | t = ['x', '00:00', '00:05', '00:10', '00:15', '00:20', '00:25', '00:30', '00:35', '00:40', '00:45', '00:50', '00:55', '01:00', '01:05', '01:10', '01:15', '01:20', '01:25', '01:30', '01:35', '01:40', '01:45', '01:50', '01:55', '02:00', '02:05', '02:10', '02:15', '02:20', '02:25', '02:30', '02:35', '02:40', '02:45', '02:50', '02:55', '03:00', '03:05', '03:10', '03:15', '03:20', '03:25', '03:30', '03:35', '03:40', '03:45', '03:50', '03:55', '04:00', '04:05', '04:10', '04:15', '04:20', '04:25', '04:30', '04:35', '04:40', '04:45', '04:50', '04:55', '05:00', '05:05', '05:10', '05:15', '05:20', '05:25', '05:30', '05:35', '05:40', '05:45', '05:50', '05:55', '06:00', '06:05', '06:10', '06:15', '06:20', '06:25', '06:30', '06:35', '06:40', '06:45', '06:50', '06:55', 
'07:00', '07:05', '07:10', '07:15', '07:20', '07:25', '07:30', '07:35', '07:40', '07:45', '07:50', '07:55', '08:00', '08:05', '08:10', '08:15', '08:20', '08:25', '08:30', '08:35', '08:40', '08:45', '08:50', '08:55', '09:00', '09:05', '09:10', '09:15', '09:20', '09:25', '09:30', '09:35', '09:40', '09:45', '09:50', '09:55', '10:00', '10:05', '10:10', '10:15', '10:20', '10:25', '10:30', '10:35', '10:40', '10:45', '10:50', '10:55', '11:00', '11:05', '11:10', '11:15', '11:20', '11:25', '11:30', '11:35', '11:40', '11:45', '11:50', '11:55', '12:00', '12:05', '12:10', '12:15', '12:20', '12:25', '12:30', '12:35', '12:40', '12:45', '12:50', '12:55', '13:00', '13:05', '13:10', '13:15', '13:20', '13:25', '13:30', '13:35', '13:40', '13:45', '13:50', '13:55', '14:00', '14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55', '15:00', '15:05', '15:10', '15:15', '15:20', '15:25', '15:30', '15:35', '15:40', '15:45', '15:50', '15:55', '16:00', '16:05', '16:10', '16:15', '16:20', '16:25', '16:30', '16:35', '16:40', '16:45', '16:50', '16:55', '17:00', '17:05', '17:10', '17:15', '17:20', '17:25', '17:30', '17:35', '17:40', '17:45', '17:50', '17:55', '18:00', '18:05', '18:10', '18:15', '18:20', '18:25', '18:30', '18:35', '18:40', '18:45', '18:50', '18:55', '19:00', '19:05', '19:10', '19:15', '19:20', '19:25', '19:30', '19:35', '19:40', '19:45', '19:50', '19:55', '20:00', '20:05', '20:10', '20:15', '20:20', '20:25', '20:30', '20:35', '20:40', '20:45', '20:50', '20:55', '21:00', '21:05', '21:10', '21:15', '21:20', '21:25', '21:30', '21:35', '21:40', '21:45', '21:50', '21:55', '22:00', '22:05', '22:10', '22:15', '22:20', '22:25', '22:30', '22:35', '22:40', '22:45', '22:50', '22:55', '23:00', '23:05', '23:10', '23:15', '23:20', '23:25', '23:30', '23:35', '23:40', '23:45', '23:50', '23:55'] 37 | for i in range((bb-aa).days+1): 38 | query_results.append(CSV.objects.filter(date = aa+timedelta(days=i)).order_by('timestamp')) 39 | q = 
[str(aa+timedelta(days=i))] 40 | for x in query_results[i]: 41 | q.append(x.load_value) 42 | last.append(q) 43 | 44 | last.insert(0,t) 45 | 46 | else: 47 | last = None 48 | # print ('sadf',last) 49 | cont = { 50 | # "query_results":query_results, 51 | 'Load':last, 52 | # 'Tarikh':a, 53 | # 'T':mark_safe(t), 54 | # 'Load': [x.load_value for x in query_results], 55 | # 'Time': [x.timestamp for x in query_results], 56 | } 57 | 58 | return HttpResponse(json.dumps(cont),content_type='application/json') 59 | 60 | 61 | def forecasted_plot(request): 62 | if request.method == 'POST': 63 | qq = request.POST['fc'] 64 | ans = qq.split('-') 65 | aa = datetime.date(int(ans[0]),int(ans[1]),int(ans[2])) 66 | day = aa.day 67 | month = aa.month 68 | year = aa.year 69 | query_results = [] 70 | l = [] 71 | query_results.append((CSV.objects.filter(date = aa).order_by('timestamp'))) 72 | q = [str(aa)] 73 | for x in query_results[0]: 74 | q.append(x.load_value) 75 | 76 | # weights = [0.8019, 0.0426, 0.0226, -0.0169, 0.1497] 77 | ARIMA_load = ['Forecasted with ARIMA'] 78 | WMA_load = ['Forecasted with WMA'] 79 | SMA_load = ['Forecasted with SMA'] 80 | LSTM_load = ['Forecasted with LSTM'] 81 | SES_load = ['Forecasted with SES'] 82 | GRU_load = ['Forecasted with GRU'] 83 | RNN_load = ['Forecasted with RNN'] 84 | 85 | csv_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 86 | print(str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv') 87 | 88 | arima_csv = pd.read_csv(os.path.join(csv_path, 'predictions/ARIMA/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 89 | ARIMA_load.extend(list(arima_csv)) 90 | wma_csv = pd.read_csv(os.path.join(csv_path, 'predictions/WMA/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 91 | WMA_load.extend(list(wma_csv)) 92 | 93 | sma_csv = pd.read_csv(os.path.join(csv_path, 'predictions/SMA/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 94 
| SMA_load.extend(list(sma_csv)) 95 | 96 | ses_csv = pd.read_csv(os.path.join(csv_path, 'predictions/SES/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 97 | SES_load.extend(list(ses_csv)) 98 | 99 | lstm_csv = pd.read_csv(os.path.join(csv_path, 'predictions/LSTM/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 100 | LSTM_load.extend(list(lstm_csv)) 101 | 102 | gru_csv = pd.read_csv(os.path.join(csv_path, 'predictions/GRU/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 103 | GRU_load.extend(list(gru_csv)) 104 | 105 | rnn_csv = pd.read_csv(os.path.join(csv_path, 'predictions/RNN/'+str(day).zfill(2)+"-"+str(month).zfill(2)+"-"+str(year)+'.csv'))['load'].values 106 | RNN_load.extend(list(rnn_csv)) 107 | 108 | l.append(SMA_load) 109 | l.append(WMA_load) 110 | l.append(SES_load) 111 | l.append(ARIMA_load) 112 | l.append(LSTM_load) 113 | l.append(GRU_load) 114 | l.append(RNN_load) 115 | l.append(q) #q==actual 116 | redu = q.count(None) 117 | length = len(q)-redu 118 | t = ['x', '00:00', '00:05', '00:10', '00:15', '00:20', '00:25', '00:30', '00:35', '00:40', '00:45', '00:50', '00:55', '01:00', '01:05', '01:10', '01:15', '01:20', '01:25', '01:30', '01:35', '01:40', '01:45', '01:50', '01:55', '02:00', '02:05', '02:10', '02:15', '02:20', '02:25', '02:30', '02:35', '02:40', '02:45', '02:50', '02:55', '03:00', '03:05', '03:10', '03:15', '03:20', '03:25', '03:30', '03:35', '03:40', '03:45', '03:50', '03:55', '04:00', '04:05', '04:10', '04:15', '04:20', '04:25', '04:30', '04:35', '04:40', '04:45', '04:50', '04:55', '05:00', '05:05', '05:10', '05:15', '05:20', '05:25', '05:30', '05:35', '05:40', '05:45', '05:50', '05:55', '06:00', '06:05', '06:10', '06:15', '06:20', '06:25', '06:30', '06:35', '06:40', '06:45', '06:50', '06:55', '07:00', '07:05', '07:10', '07:15', '07:20', '07:25', '07:30', '07:35', '07:40', '07:45', '07:50', '07:55', '08:00', '08:05', '08:10', '08:15', '08:20', 
'08:25', '08:30', '08:35', '08:40', '08:45', '08:50', '08:55', '09:00', '09:05', '09:10', '09:15', '09:20', '09:25', '09:30', '09:35', '09:40', '09:45', '09:50', '09:55', '10:00', '10:05', '10:10', '10:15', '10:20', '10:25', '10:30', '10:35', '10:40', '10:45', '10:50', '10:55', '11:00', '11:05', '11:10', '11:15', '11:20', '11:25', '11:30', '11:35', '11:40', '11:45', '11:50', '11:55', '12:00', '12:05', '12:10', '12:15', '12:20', '12:25', '12:30', '12:35', '12:40', '12:45', '12:50', '12:55', '13:00', '13:05', '13:10', '13:15', '13:20', '13:25', '13:30', '13:35', '13:40', '13:45', '13:50', '13:55', '14:00', '14:05', '14:10', '14:15', '14:20', '14:25', '14:30', '14:35', '14:40', '14:45', '14:50', '14:55', '15:00', '15:05', '15:10', '15:15', '15:20', '15:25', '15:30', '15:35', '15:40', '15:45', '15:50', '15:55', '16:00', '16:05', '16:10', '16:15', '16:20', '16:25', '16:30', '16:35', '16:40', '16:45', '16:50', '16:55', '17:00', '17:05', '17:10', '17:15', '17:20', '17:25', '17:30', '17:35', '17:40', '17:45', '17:50', '17:55', '18:00', '18:05', '18:10', '18:15', '18:20', '18:25', '18:30', '18:35', '18:40', '18:45', '18:50', '18:55', '19:00', '19:05', '19:10', '19:15', '19:20', '19:25', '19:30', '19:35', '19:40', '19:45', '19:50', '19:55', '20:00', '20:05', '20:10', '20:15', '20:20', '20:25', '20:30', '20:35', '20:40', '20:45', '20:50', '20:55', '21:00', '21:05', '21:10', '21:15', '21:20', '21:25', '21:30', '21:35', '21:40', '21:45', '21:50', '21:55', '22:00', '22:05', '22:10', '22:15', '22:20', '22:25', '22:30', '22:35', '22:40', '22:45', '22:50', '22:55', '23:00', '23:05', '23:10', '23:15', '23:20', '23:25', '23:30', '23:35', '23:40', '23:45', '23:50', '23:55'] 119 | l.insert(0, t) 120 | else: 121 | l = None 122 | print(l) 123 | 124 | def mean_absolute_percentage_error(y_pred, y_true): 125 | print('mape:', y_pred, y_true) 126 | try: 127 | y_true, y_pred = np.array(y_true), np.array(y_pred) 128 | mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100 129 | except 
Exception as e: 130 | mape = e 131 | return mape 132 | 133 | def root_mean_square_error(y_pred, y_true): 134 | print('rmse:', y_pred, y_true) 135 | try: 136 | y_true, y_pred = np.array(y_true), np.array(y_pred) 137 | rmse = np.sqrt((y_pred - y_true)**2).mean() 138 | except Exception as e: 139 | rmse = e 140 | return rmse 141 | 142 | 143 | cont = { 144 | # "query_results":query_results, 145 | 'forecasted_Load': l, 146 | 'rmseSMA': round(root_mean_square_error(l[1][1:length], l[8][1:length]), 2), 147 | 'rmseWMA': round(root_mean_square_error(l[2][1:length], l[8][1:length]), 2), 148 | 'rmseSES': round(root_mean_square_error(l[3][1:length], l[8][1:length]), 2), 149 | 'rmseARIMA': round(root_mean_square_error(l[4][1:length], l[8][1:length]), 2), 150 | 'rmseLSTM': round(root_mean_square_error(l[5][1:length], l[8][1:length]), 2), 151 | 'rmseGRU': round(root_mean_square_error(l[6][1:length], l[8][1:length]), 2), 152 | 'rmseRNN': round(root_mean_square_error(l[7][1:length], l[8][1:length]), 2), 153 | 154 | 'mapeSMA': round(mean_absolute_percentage_error(l[1][1:length], l[8][1:length]),2), 155 | 'mapeWMA': round(mean_absolute_percentage_error(l[2][1:length], l[8][1:length]),2), 156 | 'mapeSES': round(mean_absolute_percentage_error(l[3][1:length], l[8][1:length]),2), 157 | 'mapeARIMA': round(mean_absolute_percentage_error(l[4][1:length], l[8][1:length]),2), 158 | 'mapeLSTM': round(mean_absolute_percentage_error(l[5][1:length], l[8][1:length]),2), 159 | 'mapeGRU': round(mean_absolute_percentage_error(l[6][1:length], l[8][1:length]),2), 160 | 'mapeRNN': round(mean_absolute_percentage_error(l[7][1:length], l[8][1:length]),2), 161 | 162 | # 'Tarikh':a, 163 | # 'T':mark_safe(t), 164 | # 'Load': [x.load_value for x in query_results], 165 | # 'Time': [x.timestamp for x in query_results], 166 | } 167 | # print(cont) 168 | return HttpResponse(json.dumps(cont),content_type='application/json') 169 | -------------------------------------------------------------------------------- 
/server/tamplates/Home_page.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 48 | Delhi Load Data 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 83 | 84 |
85 |

Delhi Real Time Load and Forecasted plots

86 | 87 |
88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 |
FeaturesSMAWMASESARIMALSTMGRURNN
RMSE (in MW)WaitWaitWaitWaitWaitWaitWait
MAPE (in %)WaitWaitWaitWaitWaitWaitWait
125 | 312 | 313 | 314 | 318 | 319 | 320 | 321 | 326 | 327 | 334 | 335 |
336 | 337 |

Date wise Delhi Load Plots

338 | 339 |

340 | From: 341 | To: 342 | 343 |

344 | 345 |
346 | 347 | 348 |

Comparison of Forecasted Graphs

349 | 350 |

351 | Date: 352 | 353 | 354 |

355 |
356 | 357 | 358 | 359 | 360 | 361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 393 |
FeaturesSMAWMASESARIMALSTMGRURNN
RMSE (in MW)Choose a DateChoose a DateChoose a DateChoose a DateChoose a DateChoose a DateChoose a Date
MAPE (in %)Choose a DateChoose a DateChoose a DateChoose a DateChoose a DateChoose a DateChoose a Date
394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 424 | 425 | 439 | 440 | 5 |
6 | {% for message in messages %} 7 | 15 | {% endfor %} 16 |
17 |
18 |
19 |
20 |
21 |
Sign In
22 |
23 | 24 |
25 | 26 | 27 | 28 |
29 | {% csrf_token %} 30 |
31 | 32 | 33 |
34 | 35 |
36 | 37 | 38 |
39 | 40 | 41 | 42 |
43 | 44 | 45 |
46 | 47 | 48 |
49 |
50 | 51 | 52 |
53 |
54 |
55 | Don't have an account? 56 | 57 | Register Here 58 | 59 |
60 |
61 |
62 |
63 | 64 | 65 | 66 |
67 |
68 |
69 | 138 |
139 | -------------------------------------------------------------------------------- /server/users/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/server/users/__init__.py -------------------------------------------------------------------------------- /server/users/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | from users.models import UserProfile 3 | # Register your models here. 4 | 5 | 6 | admin.site.register(UserProfile) -------------------------------------------------------------------------------- /server/users/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class UsersConfig(AppConfig): 5 | name = 'users' 6 | -------------------------------------------------------------------------------- /server/users/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 2.0.2 on 2018-03-31 13:10 2 | 3 | from django.conf import settings 4 | from django.db import migrations, models 5 | import django.db.models.deletion 6 | 7 | 8 | class Migration(migrations.Migration): 9 | 10 | initial = True 11 | 12 | dependencies = [ 13 | ('auth', '0009_alter_user_last_name_max_length'), 14 | ] 15 | 16 | operations = [ 17 | migrations.CreateModel( 18 | name='UserProfile', 19 | fields=[ 20 | ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), 21 | ('college', models.TextField(blank=True, null=True)), 22 | ], 23 | ), 24 | ] 25 | -------------------------------------------------------------------------------- /server/users/migrations/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyaf/load_forecasting/6525815ff162e4551f59febe8bc9b94c695e9222/server/users/migrations/__init__.py -------------------------------------------------------------------------------- /server/users/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | from django.contrib.auth.models import User 3 | 4 | 5 | class UserProfile(models.Model): 6 | user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE) 7 | college = models.TextField(null = True, blank=True) 8 | 9 | def __str__(self): 10 | return '%s-%s' % (self.user.first_name, self.college) 11 | -------------------------------------------------------------------------------- /server/users/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 
4 | -------------------------------------------------------------------------------- /server/users/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render, render_to_response, HttpResponse, redirect 2 | from django.contrib.auth.models import User 3 | from django.contrib import messages 4 | from django.contrib.auth import authenticate, login, logout 5 | from users.models import * 6 | 7 | 8 | def FormView(request): 9 | template_name = 'form.html' 10 | return render(request, template_name) 11 | 12 | def LoginView(request): 13 | if request.user.is_authenticated: 14 | return redirect('/') 15 | 16 | template_name = 'form.html' 17 | if request.method == "POST": 18 | post = request.POST 19 | email = post.get('email') 20 | password = post.get('password') 21 | print(email, password) 22 | user = authenticate(username=email, email=email, password=password) 23 | print(user) 24 | if user is not None: 25 | login(request, user) 26 | return redirect('/') 27 | else: 28 | messages.error(request, 'Invalid Credentials', fail_silently=True) 29 | return render(request, template_name, {}) 30 | else: 31 | return render(request, template_name, {}) 32 | 33 | 34 | def RegistrationView(request): 35 | if request.user.is_authenticated: 36 | return redirect('/') 37 | template_name = 'form.html' 38 | if request.method == "POST": 39 | post = request.POST 40 | email = post.get('email') 41 | user, created = User.objects.get_or_create(username=email) 42 | if created:#create new User instance. 
43 | user.email = email 44 | user.first_name = post.get('first-name') 45 | user.last_name = post.get('last-name') 46 | password = post.get('password') 47 | user.set_password(password) 48 | user.save() 49 | userprofile = UserProfile.objects.create(user=user) 50 | userprofile.college = post.get('college') 51 | userprofile.save() 52 | user = authenticate(username = email, password = password) 53 | login(request, user) 54 | return redirect('/') 55 | else:# already a user. 56 | messages.warning(request, "email already registered!, please try logging in.", fail_silently=True) 57 | return render(request, template_name) 58 | else: 59 | return render(request, template_name) 60 | 61 | def LogoutView(request): 62 | logout(request) 63 | return redirect('/') -------------------------------------------------------------------------------- /server/website/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | # This will make sure the app is always imported when 4 | # Django starts so that shared_task will use this app. 5 | from .for_celery import app as celery_app 6 | -------------------------------------------------------------------------------- /server/website/for_celery.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | from celery import Celery 4 | from django.conf import settings 5 | 6 | # set the default Django settings module for the 'celery' program. 7 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings') 8 | app = Celery('website') 9 | 10 | # Using a string here means the worker will not have to 11 | # pickle the object when using Windows. 
12 | app.config_from_object('django.conf:settings') 13 | app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) 14 | 15 | 16 | @app.task(bind=True) 17 | def debug_task(self): 18 | print('Request: {0!r}'.format(self.request)) -------------------------------------------------------------------------------- /server/website/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for django18 project. 3 | 4 | Generated by 'django-admin startproject' using Django 1.8. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.8/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/1.8/ref/settings/ 11 | """ 12 | 13 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 14 | import os 15 | # from datetime import datetime 16 | # from datetime import timedelta 17 | import datetime 18 | from datetime import timedelta 19 | 20 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 21 | 22 | 23 | # Quick-start development settings - unsuitable for production 24 | # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ 25 | 26 | # SECURITY WARNING: keep the secret key used in production secret! 27 | SECRET_KEY = '+p$27f(+3^om9=$*&%k-0k+a7a5_(ibh2wa8&c_3_29b=$xadg' 28 | 29 | # SECURITY WARNING: don't run with debug turned on in production! 
30 | DEBUG = True 31 | 32 | ALLOWED_HOSTS = ['*'] 33 | 34 | 35 | # Application definition 36 | 37 | INSTALLED_APPS = ( 38 | 'swag', 39 | 'users', 40 | 'django.contrib.admin', 41 | 'django.contrib.auth', 42 | 'django.contrib.contenttypes', 43 | 'django.contrib.sessions', 44 | 'django.contrib.messages', 45 | 'django.contrib.staticfiles', 46 | ) 47 | 48 | MIDDLEWARE = [ 49 | 'django.middleware.security.SecurityMiddleware', 50 | 'django.contrib.sessions.middleware.SessionMiddleware', 51 | 'django.middleware.common.CommonMiddleware', 52 | 'django.middleware.csrf.CsrfViewMiddleware', 53 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 54 | 'django.contrib.messages.middleware.MessageMiddleware', 55 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 56 | ] 57 | 58 | ROOT_URLCONF = 'website.urls' 59 | 60 | TEMPLATES = [ 61 | { 62 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 63 | 'DIRS': [os.path.join(BASE_DIR, "tamplates")], 64 | 'APP_DIRS': True, 65 | 'OPTIONS': { 66 | 'context_processors': [ 67 | 'django.template.context_processors.debug', 68 | 'django.template.context_processors.request', 69 | 'django.contrib.auth.context_processors.auth', 70 | 'django.contrib.messages.context_processors.messages', 71 | ], 72 | }, 73 | }, 74 | ] 75 | 76 | 77 | WSGI_APPLICATION = 'website.wsgi.application' 78 | 79 | 80 | # Database 81 | # https://docs.djangoproject.com/en/1.8/ref/settings/#databases 82 | 83 | DATABASES = { 84 | 'default': { 85 | 'ENGINE': 'django.db.backends.mysql', 86 | 'OPTIONS': { 87 | 'read_default_file': '/etc/mysql/db.cnf', 88 | } 89 | } 90 | } 91 | 92 | 93 | # Internationalization 94 | # https://docs.djangoproject.com/en/1.8/topics/i18n/ 95 | 96 | LANGUAGE_CODE = 'en-us' 97 | 98 | TIME_ZONE = 'Asia/Karachi' 99 | 100 | USE_I18N = True 101 | 102 | USE_L10N = True 103 | 104 | USE_TZ = True 105 | 106 | # Static files (CSS, JavaScript, Images) 107 | # https://docs.djangoproject.com/en/1.8/howto/static-files/ 108 | 109 | STATIC_URL 
= '/static/' 110 | STATIC_ROOT = os.path.join(BASE_DIR, 'static/staticfiles') 111 | STATICFILES_DIRS = ( 112 | os.path.join(BASE_DIR, 'static'), 113 | ) 114 | 115 | # CELERY STUFF 116 | BROKER_URL = 'redis://localhost:6379' 117 | CELERY_RESULT_BACKEND = 'redis://localhost:6379' 118 | CELERY_ACCEPT_CONTENT = ['application/json'] 119 | CELERY_TASK_SERIALIZER = 'json' 120 | CELERY_RESULT_SERIALIZER = 'json' 121 | CELERY_TIMEZONE = 'Asia/Karachi' 122 | -------------------------------------------------------------------------------- /server/website/urls.py: -------------------------------------------------------------------------------- 1 | from django.conf.urls import include, url 2 | from django.contrib import admin 3 | from swag.views import * 4 | from users.views import * 5 | 6 | 7 | urlpatterns = [ 8 | # Examples: 9 | url(r'^$', home_page, name='home_page'), 10 | url(r'^form/$', FormView, name='form_page'), 11 | url(r'^login/$', LoginView, name='form_page'), 12 | url(r'^register/$', RegistrationView, name='form_page'), 13 | url(r'^logout/$', LogoutView), 14 | url(r'^show_data/$', graph_plot, name = 'home_page'), 15 | url(r'^admin/', admin.site.urls), 16 | url(r'^show_forecasted_smavg_data/$', forecasted_plot, name='home_page') 17 | ] -------------------------------------------------------------------------------- /server/website/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for django18 project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website.settings") 15 | 16 | application = get_wsgi_application() 17 | --------------------------------------------------------------------------------